/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/xattr.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
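
/*
 * A worked sizing example (illustrative, not from the original source):
 * with a 4K PAGE_CACHE_SIZE, a 4-byte unsigned long and SHMEM_NR_DIRECT
 * of 16, ENTRIES_PER_PAGE is 1024 and ENTRIES_PER_PAGEPAGE is 1024*1024,
 * so SHMEM_MAX_INDEX = 16 + (1024*1024/2) * 1025 = 537395216 pages,
 * putting SHMEM_MAX_BYTES at roughly 2TB.  With an 8-byte unsigned long,
 * the entries per page halve at each level and the limit shrinks
 * accordingly.
 */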

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Keep swapped page count in private field of indirect struct page */
#define nr_swapped	 private

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 */
	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
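
/*
 * Illustration of the two accounting schemes (figures assume 4K pages
 * and are not taken from the original source): a 1MB SysV shm or shared
 * anonymous object is charged VM_ACCT(1MB) = 256 pages up front by
 * shmem_acct_size, and shmem_acct_block is then a no-op for it; a tmpfs
 * file is charged nothing up front, each page being accounted by
 * shmem_acct_block only when it is first allocated, so a huge sparse
 * file costs only the pages it actually instantiates.
 */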

static struct super_operations shmem_ops;
static struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
static struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
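
/*
 * Example with made-up numbers: if info->alloced is 8 while the file
 * now has nrpages == 5 in the page cache and info->swapped == 1, the
 * mm has silently reclaimed 8 - 5 - 1 = 2 clean hole pages, so the
 * recalculation above unaccounts and refunds exactly those 2 blocks.
 */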

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
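
/*
 * Tracing one lookup through the artificial layout pictured above
 * (illustrative only, assuming ENTRIES_PER_PAGE == 4 and
 * SHMEM_NR_DIRECT == 16): for index 26, index - 16 = 10 gives
 * offset = 10 % 4 = 2 and directory slot 10 / 4 = 2; slot 2 lies in
 * the triple-indirect half, so shmem_swp_entry descends via dir2,
 * takes its first subpage (entries 24-27), and returns the entry at
 * offset 2 within it - exactly page 26.
 */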

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT)
		kmap_atomic_to_page(entry)->nr_swapped += incdec;
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @sgp:   check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
		if (page)
			page->nr_swapped = 0;
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:  pointer to the directory
 * @edir: pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir,
		int offset, int limit, struct page ***dir)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	limit = info->next_index;
	info->next_index = idx;
	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
	}
	if (!topdir)
		goto done2;

	BUG_ON(limit <= SHMEM_NR_DIRECT);
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset) {
				*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			*dir = NULL;
			nr_pages_to_free++;
			list_add(&middir->lru, &pages_to_free);
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		subdir = dir[diroff];
		if (subdir && subdir->nr_swapped) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
						offset, size, &dir);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset)
				spin_lock(&info->lock);
			subdir->nr_swapped -= freed;
			if (offset)
				spin_unlock(&info->lock);
			BUG_ON(subdir->nr_swapped > offset);
		}
		if (offset)
			offset = 0;
		else if (subdir) {
			dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 */
		truncate_inode_pages(inode->i_mapping, inode->i_size);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && subdir->nr_swapped) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}

#ifdef CONFIG_NUMA
static struct page *shmem_swapin_async(struct shared_policy *p,
				       swp_entry_t entry, unsigned long idx)
{
	struct page *page;
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_end = PAGE_SIZE;
	pvma.vm_pgoff = idx;
	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
	page = read_swap_cache_async(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
			  unsigned long idx)
{
	struct shared_policy *p = &info->policy;
	int i, num;
	struct page *page;
	unsigned long offset;

	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		page = shmem_swapin_async(p,
				swp_entry(swp_type(entry), offset), idx);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	pvma.vm_pgoff = idx;
	pvma.vm_end = PAGE_SIZE;
	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else
static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
	swapin_readahead(entry, 0, NULL);
	return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(unsigned int __nocast gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	return alloc_page(gfp | __GFP_ZERO);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap cache and page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;
	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_prepare_write passes in a locked filepage,
	 * which may be found not uptodate by other callers too,
	 * and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			/* here we actually do the io */
			if (type && *type == VM_FAULT_MINOR) {
				inc_page_state(pgmajfault);
				*type = VM_FAULT_MAJOR;
			}
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				blk_congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		unlock_page(filepage);
		*pagep = filepage;
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}

struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page = NULL;
	unsigned long idx;
	int error;

	idx = (address - vma->vm_start) >> PAGE_SHIFT;
	idx += vma->vm_pgoff;
	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
	if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return NOPAGE_SIGBUS;

	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
	if (error)
		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;

	mark_page_accessed(page);
	return page;
}

static int shmem_populate(struct vm_area_struct *vma,
	unsigned long addr, unsigned long len,
	pgprot_t prot, unsigned long pgoff, int nonblock)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct mm_struct *mm = vma->vm_mm;
	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
	unsigned long size;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
		return -EINVAL;

	while ((long) len > 0) {
		struct page *page = NULL;
		int err;
		/*
		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
		 */
		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
		if (err)
			return err;
		if (page) {
			mark_page_accessed(page);
			err = install_page(mm, vma, addr, page, prot);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else if (nonblock) {
			err = install_file_pte(mm, vma, addr, pgoff, prot);
			if (err)
				return err;
		}

		len -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	}
	return 0;
}

#ifdef CONFIG_NUMA
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

struct mempolicy *
shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy);
			break;
		case S_IFDIR:
			inode->i_nlink++;
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy);
			break;
		}
	} else if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}

#ifdef CONFIG_TMPFS
static struct inode_operations shmem_symlink_inode_operations;
static struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs makes no use of shmem_prepare_write, but it
 * lets a tmpfs file be used read-write below the loop driver.
 */
static int
shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	struct inode *inode = page->mapping->host;
	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
}

static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	loff_t pos;
	unsigned long written;
	ssize_t err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	down(&inode->i_sem);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		left = bytes;
		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);

			kaddr = kmap_atomic(page, KM_USER0);
			left = __copy_from_user_inatomic(kaddr + offset,
							buf, bytes);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}

		/*
		 * Our dirty pages are not counted in nr_dirty,
		 * and we do not attempt to balance dirty pages.
		 */

		cond_resched();
	} while (count);

	*ppos = pos;
	if (written)
		err = written;
out:
	up(&inode->i_sem);
	return err;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_sem protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else
			page = ZERO_PAGE(0);

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_shmem_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}
1598 | ||
1599 | /* | |
1600 | * File creation. Allocate an inode, and we're done.. | |
1601 | */ | |
1602 | static int | |
1603 | shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) | |
1604 | { | |
1605 | struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev); | |
1606 | int error = -ENOSPC; | |
1607 | ||
1608 | if (inode) { | |
1609 | if (dir->i_mode & S_ISGID) { | |
1610 | inode->i_gid = dir->i_gid; | |
1611 | if (S_ISDIR(mode)) | |
1612 | inode->i_mode |= S_ISGID; | |
1613 | } | |
1614 | dir->i_size += BOGO_DIRENT_SIZE; | |
1615 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; | |
1616 | d_instantiate(dentry, inode); | |
1617 | dget(dentry); /* Extra count - pin the dentry in core */ | |
1618 | error = 0; | |
1619 | } | |
1620 | return error; | |
1621 | } | |
1622 | ||
1623 | static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |
1624 | { | |
1625 | int error; | |
1626 | ||
1627 | if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) | |
1628 | return error; | |
1629 | dir->i_nlink++; | |
1630 | return 0; | |
1631 | } | |
1632 | ||
1633 | static int shmem_create(struct inode *dir, struct dentry *dentry, int mode, | |
1634 | struct nameidata *nd) | |
1635 | { | |
1636 | return shmem_mknod(dir, dentry, mode | S_IFREG, 0); | |
1637 | } | |
1638 | ||
1639 | /* | |
1640 | * Link a file.. | |
1641 | */ | |
1642 | static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) | |
1643 | { | |
1644 | struct inode *inode = old_dentry->d_inode; | |
1645 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); | |
1646 | ||
1647 | /* | |
1648 | * No ordinary (disk based) filesystem counts links as inodes; | |
1649 | * but each new link needs a new dentry, pinning lowmem, and | |
1650 | * tmpfs dentries cannot be pruned until they are unlinked. | |
1651 | */ | |
0edd73b3 | 1652 | if (sbinfo->max_inodes) { |
1da177e4 LT |
1653 | spin_lock(&sbinfo->stat_lock); |
1654 | if (!sbinfo->free_inodes) { | |
1655 | spin_unlock(&sbinfo->stat_lock); | |
1656 | return -ENOSPC; | |
1657 | } | |
1658 | sbinfo->free_inodes--; | |
1659 | spin_unlock(&sbinfo->stat_lock); | |
1660 | } | |
1661 | ||
1662 | dir->i_size += BOGO_DIRENT_SIZE; | |
1663 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; | |
1664 | inode->i_nlink++; | |
1665 | atomic_inc(&inode->i_count); /* New dentry reference */ | |
1666 | dget(dentry); /* Extra pinning count for the created dentry */ | |
1667 | d_instantiate(dentry, inode); | |
1668 | return 0; | |
1669 | } | |
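/*
 * Editor's sketch (not part of this file): the policy in the comment
 * above means each new hard link is charged against the tmpfs inode
 * limit, unlike on disk filesystems.  Assumes a limited instance
 * (mounted with nr_inodes=...) at the hypothetical path /mnt/tmp.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/vfs.h>

static long free_inodes(const char *path)
{
	struct statfs st;

	return statfs(path, &st) == 0 ? (long)st.f_ffree : -1;
}

int main(void)
{
	close(open("/mnt/tmp/a", O_CREAT | O_WRONLY, 0600));
	printf("free inodes before link: %ld\n", free_inodes("/mnt/tmp"));
	if (link("/mnt/tmp/a", "/mnt/tmp/b") != 0)
		perror("link");          /* ENOSPC once free_inodes hits 0 */
	printf("free inodes after link:  %ld\n", free_inodes("/mnt/tmp"));
	unlink("/mnt/tmp/a");
	unlink("/mnt/tmp/b");
	return 0;
}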
1670 | ||
1671 | static int shmem_unlink(struct inode *dir, struct dentry *dentry) | |
1672 | { | |
1673 | struct inode *inode = dentry->d_inode; | |
1674 | ||
1675 | if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) { | |
1676 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); | |
0edd73b3 | 1677 | if (sbinfo->max_inodes) { |
1da177e4 LT |
1678 | spin_lock(&sbinfo->stat_lock); |
1679 | sbinfo->free_inodes++; | |
1680 | spin_unlock(&sbinfo->stat_lock); | |
1681 | } | |
1682 | } | |
1683 | ||
1684 | dir->i_size -= BOGO_DIRENT_SIZE; | |
1685 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; | |
1686 | inode->i_nlink--; | |
1687 | dput(dentry); /* Undo the count from "create" - this does all the work */ | |
1688 | return 0; | |
1689 | } | |
1690 | ||
1691 | static int shmem_rmdir(struct inode *dir, struct dentry *dentry) | |
1692 | { | |
1693 | if (!simple_empty(dentry)) | |
1694 | return -ENOTEMPTY; | |
1695 | ||
1696 | dir->i_nlink--; | |
1697 | return shmem_unlink(dir, dentry); | |
1698 | } | |
1699 | ||
1700 | /* | |
1701 | * The VFS layer already does all the dentry stuff for rename; | |
1702 | * we just have to decrement the usage count for the target if | |
1703 | * it exists, so that the VFS layer correctly frees it when it | |
1704 | * gets overwritten. | |
1705 | */ | |
1706 | static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) | |
1707 | { | |
1708 | struct inode *inode = old_dentry->d_inode; | |
1709 | int they_are_dirs = S_ISDIR(inode->i_mode); | |
1710 | ||
1711 | if (!simple_empty(new_dentry)) | |
1712 | return -ENOTEMPTY; | |
1713 | ||
1714 | if (new_dentry->d_inode) { | |
1715 | (void) shmem_unlink(new_dir, new_dentry); | |
1716 | if (they_are_dirs) | |
1717 | old_dir->i_nlink--; | |
1718 | } else if (they_are_dirs) { | |
1719 | old_dir->i_nlink--; | |
1720 | new_dir->i_nlink++; | |
1721 | } | |
1722 | ||
1723 | old_dir->i_size -= BOGO_DIRENT_SIZE; | |
1724 | new_dir->i_size += BOGO_DIRENT_SIZE; | |
1725 | old_dir->i_ctime = old_dir->i_mtime = | |
1726 | new_dir->i_ctime = new_dir->i_mtime = | |
1727 | inode->i_ctime = CURRENT_TIME; | |
1728 | return 0; | |
1729 | } | |
1730 | ||
1731 | static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | |
1732 | { | |
1733 | int error; | |
1734 | int len; | |
1735 | struct inode *inode; | |
1736 | struct page *page = NULL; | |
1737 | char *kaddr; | |
1738 | struct shmem_inode_info *info; | |
1739 | ||
1740 | len = strlen(symname) + 1; | |
1741 | if (len > PAGE_CACHE_SIZE) | |
1742 | return -ENAMETOOLONG; | |
1743 | ||
1744 | inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0); | |
1745 | if (!inode) | |
1746 | return -ENOSPC; | |
1747 | ||
1748 | info = SHMEM_I(inode); | |
1749 | inode->i_size = len-1; | |
1750 | if (len <= (char *)inode - (char *)info) { | |
1751 | /* do it inline */ | |
1752 | memcpy(info, symname, len); | |
1753 | inode->i_op = &shmem_symlink_inline_operations; | |
1754 | } else { | |
1755 | error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); | |
1756 | if (error) { | |
1757 | iput(inode); | |
1758 | return error; | |
1759 | } | |
1760 | inode->i_op = &shmem_symlink_inode_operations; | |
1761 | kaddr = kmap_atomic(page, KM_USER0); | |
1762 | memcpy(kaddr, symname, len); | |
1763 | kunmap_atomic(kaddr, KM_USER0); | |
1764 | set_page_dirty(page); | |
1765 | page_cache_release(page); | |
1766 | } | |
1767 | if (dir->i_mode & S_ISGID) | |
1768 | inode->i_gid = dir->i_gid; | |
1769 | dir->i_size += BOGO_DIRENT_SIZE; | |
1770 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; | |
1771 | d_instantiate(dentry, inode); | |
1772 | dget(dentry); | |
1773 | return 0; | |
1774 | } | |
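/*
 * Editor's sketch (not part of this file): exercising both symlink
 * paths above, a short target that can live inline in the inode and a
 * long one that forces a page via shmem_getpage().  The inline
 * threshold is an internal layout detail, invisible to user space;
 * /dev/shm is assumed to be a writable tmpfs.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char target[600], buf[1024];
	ssize_t n;

	memset(target, 'x', sizeof(target) - 1);
	target[sizeof(target) - 1] = '\0';

	if (symlink("short", "/dev/shm/lnk_inline") != 0)
		perror("symlink inline");
	if (symlink(target, "/dev/shm/lnk_paged") != 0)
		perror("symlink paged");

	n = readlink("/dev/shm/lnk_paged", buf, sizeof(buf) - 1);
	if (n >= 0)
		printf("paged link target is %zd bytes\n", n);
	unlink("/dev/shm/lnk_inline");
	unlink("/dev/shm/lnk_paged");
	return 0;
}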
1775 | ||
cc314eef | 1776 | static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) |
1da177e4 LT |
1777 | { |
1778 | nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); | |
cc314eef | 1779 | return NULL; |
1da177e4 LT |
1780 | } |
1781 | ||
cc314eef | 1782 | static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) |
1da177e4 LT |
1783 | { |
1784 | struct page *page = NULL; | |
1785 | int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); | |
1786 | nd_set_link(nd, res ? ERR_PTR(res) : kmap(page)); | |
cc314eef | 1787 | return page; |
1da177e4 LT |
1788 | } |
1789 | ||
cc314eef | 1790 | static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) |
1da177e4 LT |
1791 | { |
1792 | if (!IS_ERR(nd_get_link(nd))) { | |
cc314eef | 1793 | struct page *page = cookie; |
1da177e4 LT |
1794 | kunmap(page); |
1795 | mark_page_accessed(page); | |
1796 | page_cache_release(page); | |
1da177e4 LT |
1797 | } |
1798 | } | |
1799 | ||
1800 | static struct inode_operations shmem_symlink_inline_operations = { | |
1801 | .readlink = generic_readlink, | |
1802 | .follow_link = shmem_follow_link_inline, | |
1803 | #ifdef CONFIG_TMPFS_XATTR | |
1804 | .setxattr = generic_setxattr, | |
1805 | .getxattr = generic_getxattr, | |
1806 | .listxattr = generic_listxattr, | |
1807 | .removexattr = generic_removexattr, | |
1808 | #endif | |
1809 | }; | |
1810 | ||
1811 | static struct inode_operations shmem_symlink_inode_operations = { | |
1812 | .truncate = shmem_truncate, | |
1813 | .readlink = generic_readlink, | |
1814 | .follow_link = shmem_follow_link, | |
1815 | .put_link = shmem_put_link, | |
1816 | #ifdef CONFIG_TMPFS_XATTR | |
1817 | .setxattr = generic_setxattr, | |
1818 | .getxattr = generic_getxattr, | |
1819 | .listxattr = generic_listxattr, | |
1820 | .removexattr = generic_removexattr, | |
1821 | #endif | |
1822 | }; | |
1823 | ||
1824 | static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes) | |
1825 | { | |
1826 | char *this_char, *value, *rest; | |
1827 | ||
1828 | while ((this_char = strsep(&options, ",")) != NULL) { | |
1829 | if (!*this_char) | |
1830 | continue; | |
1831 | if ((value = strchr(this_char,'=')) != NULL) { | |
1832 | *value++ = 0; | |
1833 | } else { | |
1834 | printk(KERN_ERR | |
1835 | "tmpfs: No value for mount option '%s'\n", | |
1836 | this_char); | |
1837 | return 1; | |
1838 | } | |
1839 | ||
1840 | if (!strcmp(this_char,"size")) { | |
1841 | unsigned long long size; | |
1842 | size = memparse(value,&rest); | |
1843 | if (*rest == '%') { | |
1844 | size <<= PAGE_SHIFT; | |
1845 | size *= totalram_pages; | |
1846 | do_div(size, 100); | |
1847 | rest++; | |
1848 | } | |
1849 | if (*rest) | |
1850 | goto bad_val; | |
1851 | *blocks = size >> PAGE_CACHE_SHIFT; | |
1852 | } else if (!strcmp(this_char,"nr_blocks")) { | |
1853 | *blocks = memparse(value,&rest); | |
1854 | if (*rest) | |
1855 | goto bad_val; | |
1856 | } else if (!strcmp(this_char,"nr_inodes")) { | |
1857 | *inodes = memparse(value,&rest); | |
1858 | if (*rest) | |
1859 | goto bad_val; | |
1860 | } else if (!strcmp(this_char,"mode")) { | |
1861 | if (!mode) | |
1862 | continue; | |
1863 | *mode = simple_strtoul(value,&rest,8); | |
1864 | if (*rest) | |
1865 | goto bad_val; | |
1866 | } else if (!strcmp(this_char,"uid")) { | |
1867 | if (!uid) | |
1868 | continue; | |
1869 | *uid = simple_strtoul(value,&rest,0); | |
1870 | if (*rest) | |
1871 | goto bad_val; | |
1872 | } else if (!strcmp(this_char,"gid")) { | |
1873 | if (!gid) | |
1874 | continue; | |
1875 | *gid = simple_strtoul(value,&rest,0); | |
1876 | if (*rest) | |
1877 | goto bad_val; | |
1878 | } else { | |
1879 | printk(KERN_ERR "tmpfs: Bad mount option %s\n", | |
1880 | this_char); | |
1881 | return 1; | |
1882 | } | |
1883 | } | |
1884 | return 0; | |
1885 | ||
1886 | bad_val: | |
1887 | printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", | |
1888 | value, this_char); | |
1889 | return 1; | |
1890 | ||
1891 | } | |
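/*
 * Editor's sketch (not part of this file): the option string parsed
 * above, passed as the data argument of mount(2).  memparse() accepts
 * k/m/g suffixes, and "size" additionally accepts a trailing '%' of
 * total RAM.  Needs root; /mnt/tmp is an assumed mount point.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* shell equivalent: mount -t tmpfs -o size=50%,nr_inodes=4k,mode=1777 tmpfs /mnt/tmp */
	if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
		  "size=50%,nr_inodes=4k,mode=1777") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}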
1892 | ||
1893 | static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) | |
1894 | { | |
1895 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | |
0edd73b3 HD |
1896 | unsigned long max_blocks = sbinfo->max_blocks; |
1897 | unsigned long max_inodes = sbinfo->max_inodes; | |
1898 | unsigned long blocks; | |
1899 | unsigned long inodes; | |
1900 | int error = -EINVAL; | |
1901 | ||
1902 | if (shmem_parse_options(data, NULL, NULL, NULL, | |
1903 | &max_blocks, &max_inodes)) | |
1904 | return error; | |
1da177e4 | 1905 | |
0edd73b3 HD |
1906 | spin_lock(&sbinfo->stat_lock); |
1907 | blocks = sbinfo->max_blocks - sbinfo->free_blocks; | |
1908 | inodes = sbinfo->max_inodes - sbinfo->free_inodes; | |
1909 | if (max_blocks < blocks) | |
1910 | goto out; | |
1911 | if (max_inodes < inodes) | |
1912 | goto out; | |
1913 | /* | |
1914 | * Those tests also disallow limited->unlimited while any are in | |
1915 | * use, so i_blocks will always be zero when max_blocks is zero; | |
1916 | * but we must separately disallow unlimited->limited, because | |
1917 | * in that case we have no record of how much is already in use. | |
1918 | */ | |
1919 | if (max_blocks && !sbinfo->max_blocks) | |
1920 | goto out; | |
1921 | if (max_inodes && !sbinfo->max_inodes) | |
1922 | goto out; | |
1923 | ||
1924 | error = 0; | |
1925 | sbinfo->max_blocks = max_blocks; | |
1926 | sbinfo->free_blocks = max_blocks - blocks; | |
1927 | sbinfo->max_inodes = max_inodes; | |
1928 | sbinfo->free_inodes = max_inodes - inodes; | |
1929 | out: | |
1930 | spin_unlock(&sbinfo->stat_lock); | |
1931 | return error; | |
1da177e4 LT |
1932 | } |
1933 | #endif | |
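/*
 * Editor's sketch (not part of this file): the remount path handled by
 * shmem_remount_fs() above.  Shrinking below current usage, or a
 * limited<->unlimited switch while in use, fails with -EINVAL as the
 * in-function comment explains.  Needs root; /mnt/tmp is an assumed
 * existing tmpfs mount.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* shell equivalent: mount -o remount,size=128m,nr_inodes=8k /mnt/tmp */
	if (mount(NULL, "/mnt/tmp", "tmpfs", MS_REMOUNT,
		  "size=128m,nr_inodes=8k") != 0) {
		perror("remount");
		return 1;
	}
	return 0;
}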
1934 | ||
1935 | static void shmem_put_super(struct super_block *sb) | |
1936 | { | |
1937 | kfree(sb->s_fs_info); | |
1938 | sb->s_fs_info = NULL; | |
1939 | } | |
1940 | ||
1941 | #ifdef CONFIG_TMPFS_XATTR | |
1942 | static struct xattr_handler *shmem_xattr_handlers[]; | |
1943 | #else | |
1944 | #define shmem_xattr_handlers NULL | |
1945 | #endif | |
1946 | ||
1947 | static int shmem_fill_super(struct super_block *sb, | |
1948 | void *data, int silent) | |
1949 | { | |
1950 | struct inode *inode; | |
1951 | struct dentry *root; | |
1952 | int mode = S_IRWXUGO | S_ISVTX; | |
1953 | uid_t uid = current->fsuid; | |
1954 | gid_t gid = current->fsgid; | |
1955 | int err = -ENOMEM; | |
0edd73b3 | 1956 | struct shmem_sb_info *sbinfo; |
1da177e4 LT |
1957 | unsigned long blocks = 0; |
1958 | unsigned long inodes = 0; | |
1959 | ||
0edd73b3 | 1960 | #ifdef CONFIG_TMPFS |
1da177e4 LT |
1961 | /* |
1962 | * By default we allow only half of the physical RAM per | |
1963 | * tmpfs instance, limiting inodes to one per page of lowmem; | |
1964 | * but the internal instance is left unlimited. | |
1965 | */ | |
1966 | if (!(sb->s_flags & MS_NOUSER)) { | |
1967 | blocks = totalram_pages / 2; | |
1968 | inodes = totalram_pages - totalhigh_pages; | |
1969 | if (inodes > blocks) | |
1970 | inodes = blocks; | |
0edd73b3 HD |
1971 | if (shmem_parse_options(data, &mode, &uid, &gid, |
1972 | &blocks, &inodes)) | |
1da177e4 LT |
1973 | return -EINVAL; |
1974 | } | |
1da177e4 LT |
1975 | #else |
1976 | sb->s_flags |= MS_NOUSER; | |
1977 | #endif | |
1978 | ||
0edd73b3 HD |
1979 | /* Round up to L1_CACHE_BYTES to resist false sharing */ |
1980 | sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info), | |
1981 | L1_CACHE_BYTES), GFP_KERNEL); | |
1982 | if (!sbinfo) | |
1983 | return -ENOMEM; | |
1984 | ||
1985 | spin_lock_init(&sbinfo->stat_lock); | |
1986 | sbinfo->max_blocks = blocks; | |
1987 | sbinfo->free_blocks = blocks; | |
1988 | sbinfo->max_inodes = inodes; | |
1989 | sbinfo->free_inodes = inodes; | |
1990 | ||
1991 | sb->s_fs_info = sbinfo; | |
1da177e4 LT |
1992 | sb->s_maxbytes = SHMEM_MAX_BYTES; |
1993 | sb->s_blocksize = PAGE_CACHE_SIZE; | |
1994 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | |
1995 | sb->s_magic = TMPFS_MAGIC; | |
1996 | sb->s_op = &shmem_ops; | |
0edd73b3 HD |
1997 | sb->s_xattr = shmem_xattr_handlers; |
1998 | ||
1da177e4 LT |
1999 | inode = shmem_get_inode(sb, S_IFDIR | mode, 0); |
2000 | if (!inode) | |
2001 | goto failed; | |
2002 | inode->i_uid = uid; | |
2003 | inode->i_gid = gid; | |
2004 | root = d_alloc_root(inode); | |
2005 | if (!root) | |
2006 | goto failed_iput; | |
2007 | sb->s_root = root; | |
2008 | return 0; | |
2009 | ||
2010 | failed_iput: | |
2011 | iput(inode); | |
2012 | failed: | |
2013 | shmem_put_super(sb); | |
2014 | return err; | |
2015 | } | |
2016 | ||
2017 | static kmem_cache_t *shmem_inode_cachep; | |
2018 | ||
2019 | static struct inode *shmem_alloc_inode(struct super_block *sb) | |
2020 | { | |
2021 | struct shmem_inode_info *p; | |
2022 | p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL); | |
2023 | if (!p) | |
2024 | return NULL; | |
2025 | return &p->vfs_inode; | |
2026 | } | |
2027 | ||
2028 | static void shmem_destroy_inode(struct inode *inode) | |
2029 | { | |
2030 | if ((inode->i_mode & S_IFMT) == S_IFREG) { | |
2031 | /* only struct inode is valid if it's an inline symlink */ | |
2032 | mpol_free_shared_policy(&SHMEM_I(inode)->policy); | |
2033 | } | |
2034 | kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); | |
2035 | } | |
2036 | ||
2037 | static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) | |
2038 | { | |
2039 | struct shmem_inode_info *p = (struct shmem_inode_info *) foo; | |
2040 | ||
2041 | if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == | |
2042 | SLAB_CTOR_CONSTRUCTOR) { | |
2043 | inode_init_once(&p->vfs_inode); | |
2044 | } | |
2045 | } | |
2046 | ||
2047 | static int init_inodecache(void) | |
2048 | { | |
2049 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", | |
2050 | sizeof(struct shmem_inode_info), | |
2051 | 0, 0, init_once, NULL); | |
2052 | if (shmem_inode_cachep == NULL) | |
2053 | return -ENOMEM; | |
2054 | return 0; | |
2055 | } | |
2056 | ||
2057 | static void destroy_inodecache(void) | |
2058 | { | |
2059 | if (kmem_cache_destroy(shmem_inode_cachep)) | |
2060 | printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n"); | |
2061 | } | |
2062 | ||
2063 | static struct address_space_operations shmem_aops = { | |
2064 | .writepage = shmem_writepage, | |
2065 | .set_page_dirty = __set_page_dirty_nobuffers, | |
2066 | #ifdef CONFIG_TMPFS | |
2067 | .prepare_write = shmem_prepare_write, | |
2068 | .commit_write = simple_commit_write, | |
2069 | #endif | |
2070 | }; | |
2071 | ||
2072 | static struct file_operations shmem_file_operations = { | |
2073 | .mmap = shmem_mmap, | |
2074 | #ifdef CONFIG_TMPFS | |
2075 | .llseek = generic_file_llseek, | |
2076 | .read = shmem_file_read, | |
2077 | .write = shmem_file_write, | |
2078 | .fsync = simple_sync_file, | |
2079 | .sendfile = shmem_file_sendfile, | |
2080 | #endif | |
2081 | }; | |
2082 | ||
2083 | static struct inode_operations shmem_inode_operations = { | |
2084 | .truncate = shmem_truncate, | |
2085 | .setattr = shmem_notify_change, | |
2086 | #ifdef CONFIG_TMPFS_XATTR | |
2087 | .setxattr = generic_setxattr, | |
2088 | .getxattr = generic_getxattr, | |
2089 | .listxattr = generic_listxattr, | |
2090 | .removexattr = generic_removexattr, | |
2091 | #endif | |
2092 | }; | |
2093 | ||
2094 | static struct inode_operations shmem_dir_inode_operations = { | |
2095 | #ifdef CONFIG_TMPFS | |
2096 | .create = shmem_create, | |
2097 | .lookup = simple_lookup, | |
2098 | .link = shmem_link, | |
2099 | .unlink = shmem_unlink, | |
2100 | .symlink = shmem_symlink, | |
2101 | .mkdir = shmem_mkdir, | |
2102 | .rmdir = shmem_rmdir, | |
2103 | .mknod = shmem_mknod, | |
2104 | .rename = shmem_rename, | |
2105 | #ifdef CONFIG_TMPFS_XATTR | |
2106 | .setxattr = generic_setxattr, | |
2107 | .getxattr = generic_getxattr, | |
2108 | .listxattr = generic_listxattr, | |
2109 | .removexattr = generic_removexattr, | |
2110 | #endif | |
2111 | #endif | |
2112 | }; | |
2113 | ||
2114 | static struct inode_operations shmem_special_inode_operations = { | |
2115 | #ifdef CONFIG_TMPFS_XATTR | |
2116 | .setxattr = generic_setxattr, | |
2117 | .getxattr = generic_getxattr, | |
2118 | .listxattr = generic_listxattr, | |
2119 | .removexattr = generic_removexattr, | |
2120 | #endif | |
2121 | }; | |
2122 | ||
2123 | static struct super_operations shmem_ops = { | |
2124 | .alloc_inode = shmem_alloc_inode, | |
2125 | .destroy_inode = shmem_destroy_inode, | |
2126 | #ifdef CONFIG_TMPFS | |
2127 | .statfs = shmem_statfs, | |
2128 | .remount_fs = shmem_remount_fs, | |
2129 | #endif | |
2130 | .delete_inode = shmem_delete_inode, | |
2131 | .drop_inode = generic_delete_inode, | |
2132 | .put_super = shmem_put_super, | |
2133 | }; | |
2134 | ||
2135 | static struct vm_operations_struct shmem_vm_ops = { | |
2136 | .nopage = shmem_nopage, | |
2137 | .populate = shmem_populate, | |
2138 | #ifdef CONFIG_NUMA | |
2139 | .set_policy = shmem_set_policy, | |
2140 | .get_policy = shmem_get_policy, | |
2141 | #endif | |
2142 | }; | |
2143 | ||
2144 | ||
2145 | #ifdef CONFIG_TMPFS_SECURITY | |
2146 | ||
2147 | static size_t shmem_xattr_security_list(struct inode *inode, char *list, size_t list_len, | |
2148 | const char *name, size_t name_len) | |
2149 | { | |
2150 | return security_inode_listsecurity(inode, list, list_len); | |
2151 | } | |
2152 | ||
2153 | static int shmem_xattr_security_get(struct inode *inode, const char *name, void *buffer, size_t size) | |
2154 | { | |
2155 | if (strcmp(name, "") == 0) | |
2156 | return -EINVAL; | |
2157 | return security_inode_getsecurity(inode, name, buffer, size); | |
2158 | } | |
2159 | ||
2160 | static int shmem_xattr_security_set(struct inode *inode, const char *name, const void *value, size_t size, int flags) | |
2161 | { | |
2162 | if (strcmp(name, "") == 0) | |
2163 | return -EINVAL; | |
2164 | return security_inode_setsecurity(inode, name, value, size, flags); | |
2165 | } | |
2166 | ||
2167 | static struct xattr_handler shmem_xattr_security_handler = { | |
2168 | .prefix = XATTR_SECURITY_PREFIX, | |
2169 | .list = shmem_xattr_security_list, | |
2170 | .get = shmem_xattr_security_get, | |
2171 | .set = shmem_xattr_security_set, | |
2172 | }; | |
2173 | ||
2174 | #endif /* CONFIG_TMPFS_SECURITY */ | |
2175 | ||
2176 | #ifdef CONFIG_TMPFS_XATTR | |
2177 | ||
2178 | static struct xattr_handler *shmem_xattr_handlers[] = { | |
2179 | #ifdef CONFIG_TMPFS_SECURITY | |
2180 | &shmem_xattr_security_handler, | |
2181 | #endif | |
2182 | NULL | |
2183 | }; | |
2184 | ||
2185 | #endif /* CONFIG_TMPFS_XATTR */ | |
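/*
 * Editor's sketch (not part of this file): with CONFIG_TMPFS_XATTR
 * plus CONFIG_TMPFS_SECURITY, only the "security." namespace is wired
 * up by the handler table above; whether a given attribute exists
 * depends on the active security module.
 */
#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[256];
	ssize_t n = getxattr("/dev/shm", "security.selinux",
			     buf, sizeof(buf));

	if (n < 0)
		perror("getxattr");   /* ENODATA or EOPNOTSUPP without one */
	else
		printf("security.selinux = %.*s\n", (int)n, buf);
	return 0;
}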
2186 | ||
2187 | static struct super_block *shmem_get_sb(struct file_system_type *fs_type, | |
2188 | int flags, const char *dev_name, void *data) | |
2189 | { | |
2190 | return get_sb_nodev(fs_type, flags, data, shmem_fill_super); | |
2191 | } | |
2192 | ||
2193 | static struct file_system_type tmpfs_fs_type = { | |
2194 | .owner = THIS_MODULE, | |
2195 | .name = "tmpfs", | |
2196 | .get_sb = shmem_get_sb, | |
2197 | .kill_sb = kill_litter_super, | |
2198 | }; | |
2199 | static struct vfsmount *shm_mnt; | |
2200 | ||
2201 | static int __init init_tmpfs(void) | |
2202 | { | |
2203 | int error; | |
2204 | ||
2205 | error = init_inodecache(); | |
2206 | if (error) | |
2207 | goto out3; | |
2208 | ||
2209 | error = register_filesystem(&tmpfs_fs_type); | |
2210 | if (error) { | |
2211 | printk(KERN_ERR "Could not register tmpfs\n"); | |
2212 | goto out2; | |
2213 | } | |
2214 | #ifdef CONFIG_TMPFS | |
2215 | devfs_mk_dir("shm"); | |
2216 | #endif | |
2217 | shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER, | |
2218 | tmpfs_fs_type.name, NULL); | |
2219 | if (IS_ERR(shm_mnt)) { | |
2220 | error = PTR_ERR(shm_mnt); | |
2221 | printk(KERN_ERR "Could not kern_mount tmpfs\n"); | |
2222 | goto out1; | |
2223 | } | |
2224 | return 0; | |
2225 | ||
2226 | out1: | |
2227 | unregister_filesystem(&tmpfs_fs_type); | |
2228 | out2: | |
2229 | destroy_inodecache(); | |
2230 | out3: | |
2231 | shm_mnt = ERR_PTR(error); | |
2232 | return error; | |
2233 | } | |
2234 | module_init(init_tmpfs) | |
2235 | ||
2236 | /* | |
2237 | * shmem_file_setup - get an unlinked file living in tmpfs | |
2238 | * | |
2239 | * @name: name for dentry (to be seen in /proc/<pid>/maps) | |
2240 | * @size: size to be set for the file | |
2241 | * | |
2242 | */ | |
2243 | struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) | |
2244 | { | |
2245 | int error; | |
2246 | struct file *file; | |
2247 | struct inode *inode; | |
2248 | struct dentry *dentry, *root; | |
2249 | struct qstr this; | |
2250 | ||
2251 | if (IS_ERR(shm_mnt)) | |
2252 | return (void *)shm_mnt; | |
2253 | ||
2254 | if (size < 0 || size > SHMEM_MAX_BYTES) | |
2255 | return ERR_PTR(-EINVAL); | |
2256 | ||
2257 | if (shmem_acct_size(flags, size)) | |
2258 | return ERR_PTR(-ENOMEM); | |
2259 | ||
2260 | error = -ENOMEM; | |
2261 | this.name = name; | |
2262 | this.len = strlen(name); | |
2263 | this.hash = 0; /* will go */ | |
2264 | root = shm_mnt->mnt_root; | |
2265 | dentry = d_alloc(root, &this); | |
2266 | if (!dentry) | |
2267 | goto put_memory; | |
2268 | ||
2269 | error = -ENFILE; | |
2270 | file = get_empty_filp(); | |
2271 | if (!file) | |
2272 | goto put_dentry; | |
2273 | ||
2274 | error = -ENOSPC; | |
2275 | inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0); | |
2276 | if (!inode) | |
2277 | goto close_file; | |
2278 | ||
2279 | SHMEM_I(inode)->flags = flags & VM_ACCOUNT; | |
2280 | d_instantiate(dentry, inode); | |
2281 | inode->i_size = size; | |
2282 | inode->i_nlink = 0; /* It is unlinked */ | |
2283 | file->f_vfsmnt = mntget(shm_mnt); | |
2284 | file->f_dentry = dentry; | |
2285 | file->f_mapping = inode->i_mapping; | |
2286 | file->f_op = &shmem_file_operations; | |
2287 | file->f_mode = FMODE_WRITE | FMODE_READ; | |
2288 | return file; | |
2289 | ||
2290 | close_file: | |
2291 | put_filp(file); | |
2292 | put_dentry: | |
2293 | dput(dentry); | |
2294 | put_memory: | |
2295 | shmem_unacct_size(flags, size); | |
2296 | return ERR_PTR(error); | |
2297 | } | |
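/*
 * Editor's sketch (not part of this file): how an in-kernel caller
 * such as the SysV shm code uses shmem_file_setup().  The wrapper name
 * is hypothetical; it assumes the declarations already in this file.
 */
static struct file *example_get_shmem_file(loff_t size)
{
	struct file *file;

	file = shmem_file_setup("example", size, VM_ACCOUNT);
	if (IS_ERR(file))
		return file;    /* -EINVAL, -ENOMEM, -ENFILE or -ENOSPC */
	/* the file is unlinked: it vanishes with its last reference */
	return file;
}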
2298 | ||
2299 | /* | |
2300 | * shmem_zero_setup - setup a shared anonymous mapping | |
2301 | * | |
2302 | * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff | |
2303 | */ | |
2304 | int shmem_zero_setup(struct vm_area_struct *vma) | |
2305 | { | |
2306 | struct file *file; | |
2307 | loff_t size = vma->vm_end - vma->vm_start; | |
2308 | ||
2309 | file = shmem_file_setup("dev/zero", size, vma->vm_flags); | |
2310 | if (IS_ERR(file)) | |
2311 | return PTR_ERR(file); | |
2312 | ||
2313 | if (vma->vm_file) | |
2314 | fput(vma->vm_file); | |
2315 | vma->vm_file = file; | |
2316 | vma->vm_ops = &shmem_vm_ops; | |
2317 | return 0; | |
2318 | } |
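/*
 * Editor's sketch (not part of this file): a shared anonymous mapping
 * is the usual route into shmem_zero_setup() above; do_mmap_pgoff()
 * calls it for MAP_SHARED | MAP_ANONYMOUS, attaching the unlinked
 * "dev/zero" tmpfs file to the vma.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;   /* 1 MiB, charged via shmem_acct_size() */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "backed by shmem");
	printf("%s\n", p);
	munmap(p, len);
	return 0;
}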