/*
 * fs/eventfd.c
 *
 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

DEFINE_PER_CPU(int, eventfd_wake_count);

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
};

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the
 * ULLONG_MAX value, and we signal this as an overflow condition by returning
 * an EPOLLERR to poll(2).
 *
 * Returns the amount by which the counter was incremented. This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_count() before calling this function. If
	 * it returns true, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	this_cpu_inc(eventfd_wake_count);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	this_cpu_dec(eventfd_wake_count);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);
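
/*
 * Usage sketch (illustrative, not part of this file): a driver holding
 * an eventfd_ctx reference can signal an event from a context that must
 * not sleep, such as an interrupt handler. "struct my_dev" and
 * "my_irq_handler" are hypothetical names used only for illustration;
 * dev->trigger would have been obtained earlier with eventfd_ctx_fdget().
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		eventfd_signal(dev->trigger, 1);
 *		return IRQ_HANDLED;
 *	}
 */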

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock. This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it! add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read is ordered properly
	 * against the writes. The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}

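/**
 * eventfd_ctx_do_read - Reads and consumes the eventfd counter.
 * @ctx: [in] Pointer to eventfd context.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * In semaphore mode (EFD_SEMAPHORE) a nonzero counter yields 1 and is
 * decremented by one; otherwise the full counter value is returned and
 * the counter is reset to zero. The caller must hold ctx->wqh.lock.
 */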
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	lockdep_assert_held(&ctx->wqh.lock);

	*cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
	ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue entry.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue entry to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
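
/*
 * Usage sketch (illustrative, not part of this file): a subsystem that
 * registered its own wait queue entry on the eventfd, e.g. to consume
 * events from a custom wakeup callback, can tear it down atomically.
 * "struct my_consumer" and its fields are hypothetical names.
 *
 *	static void my_consumer_shutdown(struct my_consumer *c)
 *	{
 *		__u64 cnt;
 *
 *		eventfd_ctx_remove_wait_queue(c->eventfd, &c->wait, &cnt);
 *		eventfd_ctx_put(c->eventfd);
 *	}
 */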

static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;

	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ctx->count > 0)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		eventfd_ctx_do_read(ctx, &ucnt);
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
		return -EFAULT;

	return res;
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = eventfd_show_fdinfo,
#endif
	.release = eventfd_release,
	.poll = eventfd_poll,
	.read = eventfd_read,
	.write = eventfd_write,
	.llseek = noop_llseek,
};

/**
 * eventfd_fget - Acquires a reference to an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or one
 * of the following error pointers:
 *
 * -EBADF  : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise one of the
 * following error pointers:
 *
 * -EBADF  : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
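
/*
 * Usage sketch (illustrative, not part of this file): the common pattern
 * for a driver that is handed an eventfd file descriptor by userspace,
 * e.g. through an ioctl. "struct my_dev" and "my_dev_set_trigger" are
 * hypothetical names; the reference stored in dev->trigger would be
 * dropped later with eventfd_ctx_put().
 *
 *	static int my_dev_set_trigger(struct my_dev *dev, int fd)
 *	{
 *		struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);
 *
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *		dev->trigger = ctx;
 *		return 0;
 *	}
 */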

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL : The @file is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	int fd;

	/* Check the EFD_* constants for consistency. */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;

	fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
			      O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
	if (fd < 0)
		eventfd_free_ctx(ctx);

	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
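
/*
 * Userspace usage sketch (illustrative): the counter semantics exposed by
 * the syscalls above, via the eventfd(2) wrapper from <sys/eventfd.h>.
 * Writing adds to the counter; reading returns and resets it (or, with
 * EFD_SEMAPHORE, returns 1 and decrements it by one).
 *
 *	uint64_t v = 1;
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	write(efd, &v, sizeof(v));	adds 1 to the counter
 *	read(efd, &v, sizeof(v));	v is now 1, counter reset to 0
 */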