aio: kill ki_key
[deliverable/linux.git] / include / linux / aio.h
1 #ifndef __LINUX__AIO_H
2 #define __LINUX__AIO_H
3
4 #include <linux/list.h>
5 #include <linux/workqueue.h>
6 #include <linux/aio_abi.h>
7 #include <linux/uio.h>
8 #include <linux/rcupdate.h>
9
10 #include <linux/atomic.h>
11
struct kioctx;
struct kiocb;

/*
 * Key value reported for every kiocb; fixed at 0 now that requests no
 * longer carry a per-request ki_key field.  NOTE(review): presumably
 * matched against the key passed to io_cancel() — confirm in fs/aio.c.
 */
#define KIOCB_KEY		0

/*
 * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
 * cancelled or completed (this makes a certain amount of sense because
 * successful cancellation - io_cancel() - does deliver the completion to
 * userspace).
 *
 * And since most things don't implement kiocb cancellation and we'd really like
 * kiocb completion to be lockless when possible, we use ki_cancel to
 * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
 * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
 */
#define KIOCB_CANCELLED		((void *) (~0ULL))

/*
 * Cancellation callback installed via kiocb_set_cancel_fn().  Returns an
 * int status; presumably it reports the request's completion through the
 * supplied io_event — TODO(review): confirm against fs/aio.c callers.
 */
typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);
31
32 /* is there a better place to document function pointer methods? */
33 /**
34 * ki_retry - iocb forward progress callback
35 * @kiocb: The kiocb struct to advance by performing an operation.
36 *
37 * This callback is called when the AIO core wants a given AIO operation
38 * to make forward progress. The kiocb argument describes the operation
39 * that is to be performed. As the operation proceeds, perhaps partially,
40 * ki_retry is expected to update the kiocb with progress made. Typically
41 * ki_retry is set in the AIO core and it itself calls file_operations
42 * helpers.
43 *
44 * ki_retry's return value determines when the AIO operation is completed
45 * and an event is generated in the AIO event ring. Except the special
46 * return values described below, the value that is returned from ki_retry
47 * is transferred directly into the completion ring as the operation's
48 * resulting status. Once this has happened ki_retry *MUST NOT* reference
49 * the kiocb pointer again.
50 *
51 * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete()
52 * will be called on the kiocb pointer in the future. The AIO core will
53 * not ask the method again -- ki_retry must ensure forward progress.
54 * aio_complete() must be called once and only once in the future, multiple
55 * calls may result in undefined behaviour.
56 */
/*
 * Per-request AIO state.  A kiocb describes one in-flight I/O operation,
 * either submitted asynchronously (ki_ctx points at the owning kioctx) or
 * issued synchronously on a task's behalf (ki_ctx == NULL, set up by
 * init_sync_kiocb()).
 */
struct kiocb {
	atomic_t		ki_users;	/* reference count; init_sync_kiocb()
						 * starts it at 1.  Presumably dropped
						 * via aio_put_req() — confirm */

	struct file		*ki_filp;	/* file this operation targets */
	struct kioctx		*ki_ctx;	/* NULL for sync ops */
	kiocb_cancel_fn		*ki_cancel;	/* cancellation hook; see the
						 * KIOCB_CANCELLED comment above */
	ssize_t (*ki_retry)(struct kiocb *);	/* forward-progress callback;
						 * documented in detail above */
	void (*ki_dtor)(struct kiocb *);	/* per-request destructor hook */

	union {
		void __user		*user;	/* NOTE(review): presumably the
						 * userspace iocb pointer for
						 * async ops — confirm */
		struct task_struct	*tsk;	/* submitting task for sync ops
						 * (set by init_sync_kiocb()) */
	} ki_obj;

	__u64			ki_user_data;	/* user's data for completion */
	loff_t			ki_pos;		/* file position for this op */

	void			*private;	/* opaque to the aio core; for use
						 * by the I/O method */
	/* State that we remember to be able to restart/retry */
	unsigned short		ki_opcode;	/* copy of iocb opcode — confirm */
	size_t			ki_nbytes;	/* copy of iocb->aio_nbytes */
	char __user		*ki_buf;	/* remaining iocb->aio_buf */
	size_t			ki_left;	/* remaining bytes */
	struct iovec		ki_inline_vec;	/* inline vector */
	struct iovec		*ki_iovec;	/* vector for vectored ops */
	unsigned long		ki_nr_segs;	/* total segments in ki_iovec */
	unsigned long		ki_cur_seg;	/* segment currently being worked on */

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};
94
95 static inline bool is_sync_kiocb(struct kiocb *kiocb)
96 {
97 return kiocb->ki_ctx == NULL;
98 }
99
100 static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
101 {
102 *kiocb = (struct kiocb) {
103 .ki_users = ATOMIC_INIT(1),
104 .ki_ctx = NULL,
105 .ki_filp = filp,
106 .ki_obj.tsk = current,
107 };
108 }
109
/* prototypes */
#ifdef CONFIG_AIO
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
extern void aio_put_req(struct kiocb *iocb);
extern void aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
			 struct iocb __user *__user *iocbpp, bool compat);
/* Install a cancellation callback on @req; see kiocb_cancel_fn above. */
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
/*
 * !CONFIG_AIO: no-op stubs so callers compile and link without the aio
 * core.  All of them succeed trivially (return 0 or do nothing).
 */
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
static inline void aio_put_req(struct kiocb *iocb) { }
static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
				struct iocb __user * __user *iocbpp,
				bool compat) { return 0; }
static inline void kiocb_set_cancel_fn(struct kiocb *req,
				       kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
132
133 static inline struct kiocb *list_kiocb(struct list_head *h)
134 {
135 return list_entry(h, struct kiocb, ki_list);
136 }
137
138 /* for sysctl: */
139 extern unsigned long aio_nr;
140 extern unsigned long aio_max_nr;
141
142 #endif /* __LINUX__AIO_H */
This page took 0.056714 seconds and 5 git commands to generate.