mm, vmscan: move LRU lists to node
include/linux/swap.h
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache. Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures. And that assumes that the architecture packs
 * the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
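
/*
 * Worked example (illustrative): with CONFIG_MIGRATION (2 entries) and
 * CONFIG_MEMORY_FAILURE (1 entry) both enabled, MAX_SWAPFILES evaluates
 * to (1 << 5) - 2 - 1 = 29, and types 29..31 then encode SWP_HWPOISON,
 * SWP_MIGRATION_READ and SWP_MIGRATION_WRITE respectively.
 */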

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
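
/*
 * Illustrative sketch (not part of this header): how the trailing magic
 * is typically probed. swap_magic_ok() is a hypothetical helper, and it
 * assumes memcmp() from <linux/string.h> (or userspace <string.h>).
 */
#if 0
static int swap_magic_ok(const union swap_header *hdr)
{
	/* magic occupies the last 10 bytes of the first PAGE_SIZE bytes */
	return !memcmp(hdr->magic.magic, "SWAPSPACE2", 10) ||
	       !memcmp(hdr->magic.magic, "SWAP-SPACE", 10);
}
#endif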

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. A list of swap extents maps the entire swapfile. (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format...
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
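
/*
 * Worked example (illustrative, assuming 4 KiB pages and 4-byte int):
 * __swapoffset(info.badpages) = 1024 + 3*4 + 16 + 16 + 117*4 = 1536 and
 * __swapoffset(magic.magic) = 4096 - 10 = 4086, so MAX_SWAP_BADPAGES =
 * (4086 - 1536) / 4 = 637.
 */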

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
					/* add others here before... */
	SWP_SCANNING	= (1 << 10),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/*
 * Ratio between zone->managed_pages and the "gap" above the per-zone
 * "high_wmark". While balancing nodes, we allow kswapd to shrink zones
 * that do not meet the (high_wmark + gap) watermark, even if they already
 * meet the high_wmark, in order to provide better per-zone LRU behavior.
 * We are willing to spend no more than 1% of the memory on this zone
 * balancing "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
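
/*
 * Worked example (illustrative): a first swap_map byte of 0x41 is
 * SWAP_HAS_CACHE | 1, i.e. one map reference plus a swapcache page;
 * 0xbe is COUNT_CONTINUED | SWAP_MAP_MAX, meaning the count overflowed
 * into a continuation page (see add_swap_count_continuation() below).
 */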

/*
 * We use this to track usage of a cluster. A cluster is a block of swap
 * disk space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All
 * free clusters are organized into a list; we fetch an entry from the list
 * to get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
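
/*
 * Illustrative sketch (hypothetical helpers, not part of this header):
 * how the two swap_cluster_info fields are meant to be read, with
 * swap_info_struct.lock held by the caller.
 */
#if 0
static inline bool cluster_is_free(const struct swap_cluster_info *ci)
{
	return ci->flags & CLUSTER_FLAG_FREE;
}

static inline unsigned int cluster_data(const struct swap_cluster_info *ci)
{
	/* next free cluster index if free, usage count otherwise */
	return ci->data;
}
#endif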

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries
 * from its own cluster and swap out sequentially. The purpose is to
 * optimize swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	struct plist_node avail_list;	/* entry in swap_avail_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_info free_cluster_head; /* free cluster list head */
	struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags requires
					 * holding both this lock and swap_lock;
					 * when both must be held, take
					 * swap_lock first.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
	struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
extern struct list_lru workingset_shadow_nodes;

static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
{
	return node->count & RADIX_TREE_COUNT_MASK;
}

static inline void workingset_node_pages_inc(struct radix_tree_node *node)
{
	node->count++;
}

static inline void workingset_node_pages_dec(struct radix_tree_node *node)
{
	node->count--;
}

static inline unsigned int workingset_node_shadows(struct radix_tree_node *node)
{
	return node->count >> RADIX_TREE_COUNT_SHIFT;
}

static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
{
	node->count += 1U << RADIX_TREE_COUNT_SHIFT;
}

static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
{
	node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
}
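
/*
 * Worked example (illustrative): node->count packs the page count in
 * the low RADIX_TREE_COUNT_SHIFT bits and the shadow-entry count above
 * them, so a node holding 3 pages and 2 shadow entries has
 * count == 3 | (2U << RADIX_TREE_COUNT_SHIFT); the helpers above update
 * each half independently.
 */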

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;

/* Swap 50% full? Release swapcache more aggressively... */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
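
/*
 * Worked example (illustrative): with total_swap_pages == 1000,
 * vm_swap_full() becomes true once fewer than 500 entries remain free,
 * e.g. atomic_long_read(&nr_swap_pages) == 450 gives 900 < 1000.
 */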

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

#else /* CONFIG_SWAP */

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* Only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), false)

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page, struct list_head *list)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_mapcount) \
	(page_trans_huge_mapcount(page, total_mapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || !memcg->css.parent)
		return vm_swappiness;

	return memcg->swappiness;
}

#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */