/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

struct ctl_table inotify_table[] = {
	{
		.procname = "max_user_instances",
		.data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
	},
	{
		.procname = "max_user_watches",
		.data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
	},
	{
		.procname = "max_queued_events",
		.data = &inotify_max_queued_events,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
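
/*
 * Editorial note: these knobs surface as, e.g.,
 * /proc/sys/fs/inotify/max_user_watches.  The defaults (128 instances,
 * 8192 watches per user, 16384 queued events) are assigned in
 * inotify_user_setup() at the bottom of this file.
 */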

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Everything should accept its own IN_IGNORED, care about its
	 * children, and receive events when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask off the flags used to open the fd */
	mask |= (arg & INOTIFY_USER_MASK);

	return mask;
}
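
/*
 * For illustration (an editorial sketch, not compiled code): a userspace
 * caller passing IN_CREATE gets a kernel-side mask of
 *
 *	FS_CREATE | FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT
 *
 * i.e. the requested event plus the bits every watch implicitly carries.
 */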

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}
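
/*
 * Editorial example: with the 16-byte uapi struct inotify_event and a
 * watched entry named "hello" (name_len == 5), the name plus its
 * terminating '\0' needs 6 bytes, which rounds up to 16; the full event
 * then occupies 32 bytes of the userspace buffer.
 */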

/*
 * Get one event from the notification queue if it exists and is small
 * enough to fit in "count".  Return an error pointer if the buffer is
 * not large enough to hold it.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up name length so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(fsn_event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send
	 * the pathname and then pad that pathname out to a multiple of
	 * sizeof(inotify_event) with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
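
/*
 * Editorial sketch of the resulting record layout in the userspace
 * buffer (offsets assume the 16-byte uapi struct inotify_event):
 *
 *	+--------+--------+--------+--------+------------...-------+
 *	|   wd   |  mask  | cookie |  len   | name + '\0' padding  |
 *	+--------+--------+--------+--------+------------...-------+
 *	0        4        8        12       16               16+len
 *
 * "len" holds the padded name length (pad_name_len above) and is 0 for
 * events that carry no name.
 */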

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
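
/*
 * Editorial sketch of the userspace side of the contract implemented by
 * inotify_read() above (illustrative only, not part of this file):
 *
 *	char buf[4096]
 *		__attribute__((aligned(__alignof__(struct inotify_event))));
 *	ssize_t len = read(inotify_fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (p < buf + len) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *		handle(ev->wd, ev->mask, ev->len ? ev->name : NULL);
 *		p += sizeof(*ev) + ev->len;
 *	}
 *
 * A buffer too small for the next event gets -EINVAL (see
 * get_one_event()); an empty queue blocks unless O_NONBLOCK is set.
 * handle() is a hypothetical consumer.
 */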

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.show_fdinfo = inotify_show_fdinfo,
	.poll = inotify_poll,
	.read = inotify_read,
	.fasync = fsnotify_fasync,
	.release = inotify_release,
	.unlocked_ioctl = inotify_ioctl,
	.compat_ioctl = inotify_ioctl,
	.llseek = noop_llseek,
};

/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path,
			      unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
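
/*
 * Editorial note: idr_alloc_cyclic() hands out watch descriptors
 * starting at 1 and advancing a cursor, so a group that repeatedly adds
 * and removes watches sees wd 1, 2, 3, ... rather than immediate reuse.
 * Checkpoint/restore can reposition the cursor with
 * INOTIFY_IOC_SETNEXTWD (see inotify_ioctl() above).
 */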

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							   int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  Something went
	 * seriously wrong somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad ref counting... */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_iter_info iter_info = { };

	fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
					   fsn_mark);

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL,
			     FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);
	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
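
/*
 * Editorial example of the update semantics above: with an existing
 * watch mask of IN_CREATE, calling inotify_add_watch() again with
 * IN_DELETE replaces the mask (delete events only), IN_DELETE |
 * IN_MASK_ADD extends it (create and delete events), and IN_DELETE |
 * IN_MASK_CREATE fails with -EEXIST because a watch already exists.
 */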

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}

static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}
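
/*
 * Editorial note: the overflow event is allocated up front, at group
 * creation, so that IN_Q_OVERFLOW can still be reported when the queue
 * fills under memory pressure and an on-demand allocation might fail.
 */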


/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/*
	 * fsnotify_alloc_group() took a reference to the group; we put it
	 * when we kill the file at the end.
	 */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}
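
/*
 * Editorial sketch (illustrative, not part of this file): both entry
 * points below funnel into do_inotify_init(), so
 *
 *	int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *
 * behaves like inotify_init() followed by setting O_NONBLOCK and
 * FD_CLOEXEC on the new fd; the BUILD_BUG_ON()s above let the IN_*
 * flags pass through to anon_inode_getfd() unchanged.
 */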

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
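
/*
 * Editorial example of the flag handling above: a call such as
 *
 *	inotify_add_watch(fd, "/etc", IN_CREATE | IN_ONLYDIR);
 *
 * resolves "/etc" with LOOKUP_FOLLOW | LOOKUP_DIRECTORY and so fails
 * with -ENOTDIR when the path is not a directory, while adding
 * IN_DONT_FOLLOW drops LOOKUP_FOLLOW so a symlink is watched itself
 * rather than its target.
 */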

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);