/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/shmem_fs.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

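/*
 * Apply F_SETFL to a file.  Only the flags in SETFL_MASK may be
 * changed here; the remaining O_* flags are fixed at open() time
 * and are silently preserved.
 */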
static int setfl(int fd, struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	if (arg & O_DIRECT) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
		    !filp->f_mapping->a_ops->direct_IO)
			return -EINVAL;
	}

	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	spin_unlock(&filp->f_lock);

out:
	return error;
}

static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			filp->f_owner.uid = cred->uid;
			filp->f_owner.euid = cred->euid;
		}
	}
	write_unlock_irq(&filp->f_owner.lock);
}

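/*
 * __f_setown() expects @pid to stay valid for the duration of the
 * call: callers either hold a reference on it or, like f_setown()
 * below, call it inside an RCU read-side critical section.
 */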
void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	security_file_set_fowner(filp);
	f_modown(filp, pid, type, force);
}
EXPORT_SYMBOL(__f_setown);

void f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid;
	int who = arg;

	type = PIDTYPE_PID;
	if (who < 0) {
		/* avoid undefined behaviour when negating INT_MIN below */
		if (who == INT_MIN)
			return;

		type = PIDTYPE_PGID;
		who = -who;
	}
	rcu_read_lock();
	pid = find_vpid(who);
	__f_setown(filp, pid, type, force);
	rcu_read_unlock();
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid;

	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}

static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_MAX;
		break;

	case F_OWNER_PID:
		type = PIDTYPE_PID;
		break;

	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;

	default:
		return -EINVAL;
	}

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		__f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}

static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	int ret = 0;

	read_lock(&filp->f_owner.lock);
	owner.pid = pid_vnr(filp->f_owner.pid);
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_MAX:
		owner.type = F_OWNER_TID;
		break;

	case PIDTYPE_PID:
		owner.type = F_OWNER_PID;
		break;

	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}

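/*
 * F_GETOWNER_UIDS reports the credentials that f_modown() recorded at
 * F_SETOWN time.  It is only needed for checkpoint/restore (e.g. by
 * CRIU), so it is compiled out otherwise.
 */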
#ifdef CONFIG_CHECKPOINT_RESTORE
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	struct user_namespace *user_ns = current_user_ns();
	uid_t __user *dst = (void __user *)arg;
	uid_t src[2];
	int err;

	read_lock(&filp->f_owner.lock);
	src[0] = from_kuid(user_ns, filp->f_owner.uid);
	src[1] = from_kuid(user_ns, filp->f_owner.euid);
	read_unlock(&filp->f_owner.lock);

	err  = put_user(src[0], &dst[0]);
	err |= put_user(src[1], &dst[1]);

	return err;
}
#else
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	return -EINVAL;
}
#endif

static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		err = f_dupfd(arg, filp, 0);
		break;
	case F_DUPFD_CLOEXEC:
		err = f_dupfd(arg, filp, O_CLOEXEC);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_GETLK:
#endif
	case F_GETLK:
		err = fcntl_getlk(filp, cmd, (struct flock __user *) arg);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
#endif
		/* Fallthrough */
	case F_SETLK:
	case F_SETLKW:
		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		f_setown(filp, arg, 1);
		err = 0;
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETOWNER_UIDS:
		err = f_getowner_uids(filp, arg);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, arg);
		break;
	case F_ADD_SEALS:
	case F_GET_SEALS:
		err = shmem_fcntl(filp, cmd, arg);
		break;
	default:
		break;
	}
	return err;
}

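/*
 * O_PATH descriptors (FMODE_PATH) only support the small whitelist of
 * commands below; for anything else the syscalls bail out with -EBADF
 * before calling do_fcntl().
 */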
static int check_fcntl_cmd(unsigned cmd)
{
	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
	case F_GETFD:
	case F_SETFD:
	case F_GETFL:
		return 1;
	}
	return 0;
}

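/*
 * Typical userspace usage, for illustration: a read-modify-write of the
 * status flags ends up in do_fcntl() -> setfl() above.
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	if (flags >= 0)
 *		fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 */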
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (!err)
		err = do_fcntl(fd, cmd, arg, f.file);

out1:
	fdput(f);
out:
	return err;
}

#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out1;

	switch (cmd) {
	case F_GETLK64:
	case F_OFD_GETLK:
		err = fcntl_getlk64(f.file, cmd, (struct flock64 __user *) arg);
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = fcntl_setlk64(fd, f.file, cmd,
				(struct flock64 __user *) arg);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out1:
	fdput(f);
out:
	return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

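/*
 * Check that the owner credentials recorded at F_SETOWN time are
 * allowed to signal task @p, mirroring the usual kill() permission
 * rules, and give the LSM a chance to veto the delivery.
 */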
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
		uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, int group)
{
	/*
	 * F_SETSIG can change ->signum locklessly in parallel, so make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = ACCESS_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
		siginfo_t si;
	default:
		/* Queue a rt signal with the appropriate fd as its
		   value.  We use SI_SIGIO as the source, not
		   SI_KERNEL, since kernel signals always get
		   delivered even if we can't queue.  Failure to
		   queue in this case _should_ be reported; we fall
		   back to SIGIO in that case. --sct */
		si.si_signo = signum;
		si.si_errno = 0;
		si.si_code  = reason;
		/* Make sure we are called with one of the POLL_*
		   reasons, otherwise we could leak kernel stack into
		   userspace.  */
		BUG_ON((reason & __SI_MASK) != __SI_POLL);
		if (reason - POLL_IN >= NSIGPOLL)
			si.si_band = ~0L;
		else
			si.si_band = band_table[reason - POLL_IN];
		si.si_fd = fd;
		if (!do_send_sig_info(signum, &si, p, group))
			break;
	/* fall-through: fall back on the old plain SIGIO signal */
	case 0:
		do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
	}
}

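/*
 * Deliver SIGIO (or the signal chosen via F_SETSIG) to whatever the
 * file owner is: a single task (PIDTYPE_MAX, i.e. F_OWNER_TID), a
 * process, or every member of a process group.
 */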
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
}

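/*
 * SIGURG delivery for out-of-band socket data.  Unlike send_sigio(),
 * a permission failure is not reported to the caller: the return
 * value of send_sigurg() only says whether an owner was registered.
 */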
static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, int group)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;
	int ret = 0;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

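/*
 * fasync infrastructure: each file with O_ASYNC consumers carries a
 * singly linked list of fasync_struct entries, protected for writers
 * by filp->f_lock and the global fasync_lock (taken in that order)
 * and walked by readers under RCU.  Entries that were ever visible on
 * a list must therefore be freed through call_rcu().
 */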
static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

static void fasync_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(fasync_cache,
			container_of(head, struct fasync_struct, fa_rcu));
}

/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		spin_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		call_rcu(&fa->fa_rcu, fasync_free_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}

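/*
 * Preallocate a fasync entry with GFP_KERNEL so that
 * fasync_insert_entry() can then run entirely under spinlocks.
 */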
struct fasync_struct *fasync_alloc(void)
{
	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}

/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
	kmem_cache_free(fasync_cache, new);
}

/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp,
		struct fasync_struct **fapp, struct fasync_struct *new)
{
	struct fasync_struct *fa, **fp;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		spin_unlock_irq(&fa->fa_lock);
		goto out;
	}

	spin_lock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	rcu_assign_pointer(*fapp, new);
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return fa;
}

/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if it did nothing but update an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new;

	new = fasync_alloc();
	if (!new)
		return -ENOMEM;

	/*
	 * fasync_insert_entry() returns the old (updated) entry if
	 * it existed.
	 *
	 * So free the (unused) new entry and return 0 to let the
	 * caller know that we didn't add any new fasync entries.
	 */
	if (fasync_insert_entry(fd, filp, fapp, new)) {
		fasync_free(new);
		return 0;
	}

	return 1;
}

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code.  It returns negative on error, 0 if it made no
 * changes, and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}

EXPORT_SYMBOL(fasync_helper);

/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		unsigned long flags;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in fasync_struct!\n");
			return;
		}
		spin_lock_irqsave(&fa->fa_lock, flags);
		if (fa->fa_file) {
			fown = &fa->fa_file->f_owner;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
		spin_unlock_irqrestore(&fa->fa_lock, flags);
		fa = rcu_dereference(fa->fa_next);
	}
}

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
		O_RDONLY	| O_WRONLY	| O_RDWR	|
		O_CREAT		| O_EXCL	| O_NOCTTY	|
		O_TRUNC		| O_APPEND	| /* O_NONBLOCK	| */
		__O_SYNC	| O_DSYNC	| FASYNC	|
		O_DIRECT	| O_LARGEFILE	| O_DIRECTORY	|
		O_NOFOLLOW	| O_NOATIME	| O_CLOEXEC	|
		__FMODE_EXEC	| O_PATH	| __O_TMPFILE	|
		__FMODE_NONOTIFY
		));

	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
	return 0;
}

module_init(fcntl_init)