xref: /linux/fs/notify/inotify/inotify_fsnotify.c (revision c915d8f5)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/notify/inotify/inotify_fsnotify.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/dcache.h> /* d_unlinked */
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/mm.h>

#include "inotify.h"

/*
 * Check if 2 events contain the same information.
 */
static bool event_compare(struct fsnotify_event *old_fsn,
			  struct fsnotify_event *new_fsn)
{
	struct inotify_event_info *old, *new;

	old = INOTIFY_E(old_fsn);
	new = INOTIFY_E(new_fsn);
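	/*
	 * If the last queued event is IN_IGNORED, its watch is going away;
	 * never fold a new event into it.
	 */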
	if (old->mask & FS_IN_IGNORED)
		return false;
	if ((old->mask == new->mask) &&
	    (old->wd == new->wd) &&
	    (old->name_len == new->name_len) &&
	    (!old->name_len || !strcmp(old->name, new->name)))
		return true;
	return false;
}

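/*
 * Merge an incoming event with the event at the tail of the notification
 * list when both carry identical information, so back-to-back duplicates
 * are queued only once.
 */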
static int inotify_merge(struct fsnotify_group *group,
			 struct fsnotify_event *event)
{
	struct list_head *list = &group->notification_list;
	struct fsnotify_event *last_event;

	last_event = list_entry(list->prev, struct fsnotify_event, list);
	return event_compare(last_event, event);
}

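/*
 * Handle one filesystem event for an inotify watch: allocate an
 * inotify_event_info (charged to the group's memcg), copy in the file name
 * if the event carries one, and queue it for the listener to read.
 */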
int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
			       struct inode *inode, struct inode *dir,
			       const struct qstr *name, u32 cookie)
{
	struct inotify_inode_mark *i_mark;
	struct inotify_event_info *event;
	struct fsnotify_event *fsn_event;
	struct fsnotify_group *group = inode_mark->group;
	int ret;
	int len = 0, wd;
	int alloc_len = sizeof(struct inotify_event_info);
	struct mem_cgroup *old_memcg;
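
	/* Reserve room after the event struct for the name and its trailing NUL. */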
	if (name) {
		len = name->len;
		alloc_len += len + 1;
	}

	pr_debug("%s: group=%p mark=%p mask=%x\n", __func__, group, inode_mark,
		 mask);

	i_mark = container_of(inode_mark, struct inotify_inode_mark,
			      fsn_mark);

	/*
	 * We can be racing with the mark being detached. Don't report events
	 * with an invalid wd.
	 */
	wd = READ_ONCE(i_mark->wd);
	if (wd == -1)
		return 0;
	/*
	 * Whoever is interested in the event pays for the allocation. Do not
	 * trigger the OOM killer in the monitoring group's memcg as that may
	 * have security repercussions.
	 */
	old_memcg = set_active_memcg(group->memcg);
	event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	set_active_memcg(old_memcg);

	if (unlikely(!event)) {
		/*
		 * Treat a lost event due to ENOMEM the same way as queue
		 * overflow to let userspace know the event was lost.
		 */
		fsnotify_queue_overflow(group);
		return -ENOMEM;
	}

	/*
	 * We now report the FS_ISDIR flag with MOVE_SELF and DELETE_SELF events
	 * for fanotify. inotify never reported IN_ISDIR with those events.
	 * It looks like an oversight, but to avoid the risk of breaking
	 * existing inotify programs, mask the flag out from those events.
	 */
	if (mask & (IN_MOVE_SELF | IN_DELETE_SELF))
		mask &= ~IN_ISDIR;

	fsn_event = &event->fse;
	fsnotify_init_event(fsn_event);
	event->mask = mask;
	event->wd = wd;
	event->sync_cookie = cookie;
	event->name_len = len;
	if (len)
		strcpy(event->name, name->name);

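	/* inotify_merge() may coalesce this event with the tail of the queue. */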
	ret = fsnotify_add_event(group, fsn_event, inotify_merge);
	if (ret) {
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);
	}

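	/* An IN_ONESHOT watch is torn down after delivering its first event. */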
	if (inode_mark->flags & FSNOTIFY_MARK_FLAG_IN_ONESHOT)
		fsnotify_destroy_mark(inode_mark, group);

	return 0;
}

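/*
 * Called while a mark is being destroyed: queue the final IN_IGNORED event
 * and release the watch descriptor from the group's idr.
 */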
static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(fsn_mark, group);
}

/*
 * This is NEVER supposed to be called.  Inotify marks should either have been
 * removed from the idr when the watch was removed or in the
 * fsnotify_destroy_mark_by_group() call when the inotify instance was being
 * torn down.  This is only called if the idr is about to be freed but there
 * are still marks in it.
 */
static int idr_callback(int id, void *p, void *data)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	static bool warned = false;

	if (warned)
		return 0;

	warned = true;
	fsn_mark = p;
	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
		"idr.  Probably leaking memory\n", id, p, data);

	/*
	 * I'm taking the liberty of assuming that the mark in question is a
	 * valid address and I'm dereferencing it.  This might help to figure
	 * out why we got here and the panic is no worse than the original
	 * BUG() that was here.
	 */
	if (fsn_mark)
		printk(KERN_WARNING "fsn_mark->group=%p wd=%d\n",
			fsn_mark->group, i_mark->wd);
	return 0;
}

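/*
 * Release per-group inotify state: warn about any marks still sitting in the
 * idr, destroy the idr, and drop the per-user instance count.
 */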
static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the warning in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_destroy(&group->inotify_data.idr);
	if (group->inotify_data.ucounts)
		dec_inotify_instances(group->inotify_data.ucounts);
}

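/* Free an event allocated in inotify_handle_inode_event(). */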
static void inotify_free_event(struct fsnotify_group *group,
			       struct fsnotify_event *fsn_event)
{
	kfree(INOTIFY_E(fsn_event));
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}

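/* Callbacks wired into the fsnotify core for every inotify group. */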
const struct fsnotify_ops inotify_fsnotify_ops = {
	.handle_inode_event = inotify_handle_inode_event,
	.free_group_priv = inotify_free_group_priv,
	.free_event = inotify_free_event,
	.freeing_mark = inotify_freeing_mark,
	.free_mark = inotify_free_mark,
};