/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/dcache.h> /* d_unlinked */
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/mm.h>

#include "inotify.h"

/*
 * Check if 2 events contain the same information.
 */
static bool event_compare(struct fsnotify_event *old_fsn,
			  struct fsnotify_event *new_fsn)
{
	struct inotify_event_info *old, *new;

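	/* Never merge an event with a preceding IN_IGNORED event. */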
	if (old_fsn->mask & FS_IN_IGNORED)
		return false;
	old = INOTIFY_E(old_fsn);
	new = INOTIFY_E(new_fsn);
	if ((old_fsn->mask == new_fsn->mask) &&
	    (old_fsn->inode == new_fsn->inode) &&
	    (old->name_len == new->name_len) &&
	    (!old->name_len || !strcmp(old->name, new->name)))
		return true;
	return false;
}

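/*
 * inotify only tries to merge the event being added with the most recently
 * queued event; if the two carry the same information, fsnotify_add_event()
 * drops the new one.
 */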
static int inotify_merge(struct list_head *list,
			  struct fsnotify_event *event)
{
	struct fsnotify_event *last_event;

	last_event = list_entry(list->prev, struct fsnotify_event, list);
	return event_compare(last_event, event);
}

int inotify_handle_event(struct fsnotify_group *group,
			 struct inode *inode,
			 u32 mask, const void *data, int data_type,
			 const unsigned char *file_name, u32 cookie,
			 struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
	struct inotify_inode_mark *i_mark;
	struct inotify_event_info *event;
	struct fsnotify_event *fsn_event;
	int ret;
	int len = 0;
	int alloc_len = sizeof(struct inotify_event_info);

	if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info)))
		return 0;

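	/*
	 * IN_EXCL_UNLINK: do not report events for children that have
	 * already been unlinked from their parent directory.
	 */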
	if ((inode_mark->mask & FS_EXCL_UNLINK) &&
	    (data_type == FSNOTIFY_EVENT_PATH)) {
		const struct path *path = data;

		if (d_unlinked(path->dentry))
			return 0;
	}
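	/*
	 * The child name, if any, is stored inline right after
	 * struct inotify_event_info, so include it in the allocation.
	 */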
	if (file_name) {
		len = strlen(file_name);
		alloc_len += len + 1;
	}

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	i_mark = container_of(inode_mark, struct inotify_inode_mark,
			      fsn_mark);

	/*
	 * Whoever is interested in the event pays for the allocation. Do not
	 * trigger the OOM killer in the target monitoring memcg, as that may
	 * have security repercussions.
	 */
	memalloc_use_memcg(group->memcg);
	event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	memalloc_unuse_memcg();

	if (unlikely(!event)) {
		/*
		 * Treat a lost event due to ENOMEM the same way as a queue
		 * overflow so userspace knows an event was lost.
		 */
		fsnotify_queue_overflow(group);
		return -ENOMEM;
	}

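	/*
	 * Fill in the inotify-specific fields: the watch descriptor, the
	 * cookie pairing IN_MOVED_FROM/IN_MOVED_TO, and the child name.
	 */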
	fsn_event = &event->fse;
	fsnotify_init_event(fsn_event, inode, mask);
	event->wd = i_mark->wd;
	event->sync_cookie = cookie;
	event->name_len = len;
	if (len)
		strcpy(event->name, file_name);

	ret = fsnotify_add_event(group, fsn_event, inotify_merge);
	if (ret) {
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);
	}

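	/* A one-shot watch delivers a single event and is then removed. */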
	if (inode_mark->mask & IN_ONESHOT)
		fsnotify_destroy_mark(inode_mark, group);

	return 0;
}

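/*
 * Called when the mark is being torn down: queue an IN_IGNORED event for
 * userspace and drop the watch descriptor from the group's idr.
 */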
static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(fsn_mark, group);
}

/*
 * This is NEVER supposed to be called.  Inotify marks should either have been
 * removed from the idr when the watch was removed or in the
 * fsnotify_destroy_mark_by_group() call when the inotify instance was being
 * torn down.  This is only called if the idr is about to be freed but there
 * are still marks in it.
 */
static int idr_callback(int id, void *p, void *data)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	static bool warned = false;

	if (warned)
		return 0;

	warned = true;
	fsn_mark = p;
	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
		"idr.  Probably leaking memory\n", id, p, data);

	/*
	 * Assume the mark in question is a valid address and dereference it.
	 * The extra information might help to figure out how we got here, and
	 * a crash on a bad pointer is no worse than the BUG() this used to be.
	 */
	if (fsn_mark)
		printk(KERN_WARNING "fsn_mark->group=%p wd=%d\n",
			fsn_mark->group, i_mark->wd);
	return 0;
}

static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the WARN in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_destroy(&group->inotify_data.idr);
	if (group->inotify_data.ucounts)
		dec_inotify_instances(group->inotify_data.ucounts);
}

static void inotify_free_event(struct fsnotify_event *fsn_event)
{
	kfree(INOTIFY_E(fsn_event));
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}

const struct fsnotify_ops inotify_fsnotify_ops = {
	.handle_event = inotify_handle_event,
	.free_group_priv = inotify_free_group_priv,
	.free_event = inotify_free_event,
	.freeing_mark = inotify_freeing_mark,
	.free_mark = inotify_free_mark,
};