#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)

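/*
 * Illustrative example (not part of this header): a swapon(2) caller that
 * wants priority 10 and per-page discard could combine the flags like so.
 * The variable name and device path are arbitrary.
 *
 *	int swap_flags = SWAP_FLAG_PREFER |
 *			 ((10 << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK) |
 *			 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_PAGES;
 *	swapon("/dev/sdb2", swap_flags);
 */
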
static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 2^27
 * on 32-bit-pgoff_t architectures, and that assumes the architecture packs
 * the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

/*
 * Use some of the swap file numbers for other purposes.  This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM	2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM	0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM	1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM	0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)

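/*
 * Worked example (derived from the defines above, shown only for clarity):
 * MAX_SWAPFILES_SHIFT = 5 gives 1 << 5 = 32 swap type values.  With both
 * CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE enabled, 2 + 1 of them are
 * reserved for migration and hwpoison entries, so MAX_SWAPFILES is
 * 32 - 2 - 1 = 29 real swap areas; with neither option enabled all 32
 * type values are usable.
 */
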
/*
 * Magic header for a swap area.  The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information.  Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
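
/*
 * Illustrative sketch (hypothetical helper, not declared by this header):
 * translating a swapfile page index into its disk block (in PAGE_SIZE
 * units, per the comment above) by walking a list of swap extents.  The
 * kernel proper caches the current extent; this linear walk only shows
 * how the fields relate.
 */
static inline sector_t example_extent_to_block(struct list_head *extents,
					       pgoff_t page)
{
	struct swap_extent *se;

	list_for_each_entry(se, extents, list) {
		if (page >= se->start_page &&
		    page < se->start_page + se->nr_pages)
			return se->start_block + (page - se->start_page);
	}
	return 0;	/* not mapped by any extent */
}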

/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
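
/*
 * Worked example (arithmetic only, assuming 4 KiB pages and 4-byte int):
 *	__swapoffset(magic.magic)   = PAGE_SIZE - 10 = 4086
 *	__swapoffset(info.badpages) = 1024 + 3*4 + 16 + 16 + 117*4 = 1536
 *	MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637 bad-page slots.
 */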

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
					/* add others here before... */
	SWP_SCANNING	= (1 << 10),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/*
 * Ratio between zone->managed_pages and the "gap" above the per-zone
 * "high_wmark".  While balancing nodes, we allow kswapd to shrink zones
 * that do not meet the (high_wmark + gap) watermark, even if they already
 * meet the high_wmark, in order to provide better per-zone lru behavior.
 * We are ok spending no more than 1% of the memory on this zone
 * balancing "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */

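/*
 * Illustrative reading of the encoding above (example values only): a
 * first-swap_map byte of 0x45 is SWAP_HAS_CACHE | 5, i.e. the slot is in
 * the swap cache and has a duplication count of 5; a byte with
 * COUNT_CONTINUED set means the full count continues in a continuation
 * page (see add_swap_count_continuation() below).
 */
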
/*
 * We use this to track usage of a cluster.  A cluster is a block of swap
 * disk space, SWAPFILE_CLUSTER pages long, that is naturally aligned on
 * disk.  All free clusters are organized into a list.  We fetch an entry
 * from the list to get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise.  The flags field determines if a
 * cluster is free.  This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
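
/*
 * Minimal sketch (hypothetical helpers, not declared by this header) of how
 * the bitfields above are meant to be read while holding
 * swap_info_struct.lock:
 */
static inline bool example_cluster_is_free(struct swap_cluster_info *ci)
{
	return ci->flags & CLUSTER_FLAG_FREE;
}

static inline unsigned int example_cluster_data(struct swap_cluster_info *ci)
{
	return ci->data;	/* next free cluster, or usage count */
}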

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries
 * from its own cluster and swap out sequentially.  The purpose is to
 * optimize swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	struct plist_node avail_list;	/* entry in swap_avail_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_info free_cluster_head; /* free cluster list head */
	struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list.  Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock.  Changing flags needs
					 * both this lock and swap_lock; when
					 * both are needed, take swap_lock
					 * first.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
	struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
};

/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
extern struct list_lru workingset_shadow_nodes;

static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
{
	return node->count & RADIX_TREE_COUNT_MASK;
}

static inline void workingset_node_pages_inc(struct radix_tree_node *node)
{
	node->count++;
}

static inline void workingset_node_pages_dec(struct radix_tree_node *node)
{
	node->count--;
}

static inline unsigned int workingset_node_shadows(struct radix_tree_node *node)
{
	return node->count >> RADIX_TREE_COUNT_SHIFT;
}

static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
{
	node->count += 1U << RADIX_TREE_COUNT_SHIFT;
}

static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
{
	node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
}

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long dirty_balance_reserve;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			      struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

extern void lru_cache_add_active_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
}
#endif
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio, int err);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	void (*end_write_func)(struct bio *, int));
extern int swap_set_page_dirty(struct page *page);
extern void end_swap_bio_read(struct bio *bio, int err);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

#else /* CONFIG_SWAP */

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), false);

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page, struct list_head *list)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

#define reuse_swap_page(page)	(page_mapcount(page) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */