xref: /linux/io_uring/poll.c (revision 414d0f45)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/mm.h>
7 #include <linux/slab.h>
8 #include <linux/poll.h>
9 #include <linux/hashtable.h>
10 #include <linux/io_uring.h>
11 
12 #include <trace/events/io_uring.h>
13 
14 #include <uapi/linux/io_uring.h>
15 
16 #include "io_uring.h"
17 #include "alloc_cache.h"
18 #include "refs.h"
19 #include "napi.h"
20 #include "opdef.h"
21 #include "kbuf.h"
22 #include "poll.h"
23 #include "cancel.h"
24 
25 struct io_poll_update {
26 	struct file			*file;
27 	u64				old_user_data;
28 	u64				new_user_data;
29 	__poll_t			events;
30 	bool				update_events;
31 	bool				update_user_data;
32 };
33 
34 struct io_poll_table {
35 	struct poll_table_struct pt;
36 	struct io_kiocb *req;
37 	int nr_entries;
38 	int error;
39 	bool owning;
40 	/* output value, set only if arm poll returns >0 */
41 	__poll_t result_mask;
42 };
43 
44 #define IO_POLL_CANCEL_FLAG	BIT(31)
45 #define IO_POLL_RETRY_FLAG	BIT(30)
46 #define IO_POLL_REF_MASK	GENMASK(29, 0)
47 
48 /*
49  * We usually have 1-2 refs taken, 128 is more than enough and we want to
50  * maximise the margin between this amount and the moment when it overflows.
51  */
52 #define IO_POLL_REF_BIAS	128
53 
54 #define IO_WQE_F_DOUBLE		1
55 
56 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
57 			void *key);
58 
59 static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
60 {
61 	unsigned long priv = (unsigned long)wqe->private;
62 
63 	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
64 }
65 
66 static inline bool wqe_is_double(struct wait_queue_entry *wqe)
67 {
68 	unsigned long priv = (unsigned long)wqe->private;
69 
70 	return priv & IO_WQE_F_DOUBLE;
71 }
72 
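Both helpers above decode wqe->private, where the io_kiocb pointer is stored with IO_WQE_F_DOUBLE folded into bit 0 (the struct is at least pointer aligned, so that bit is otherwise unused). A minimal standalone sketch of the same low-bit tagging trick, with illustrative names rather than the kernel types:

#include <stdint.h>
#include <stdio.h>

#define F_DOUBLE 1UL			/* plays the role of IO_WQE_F_DOUBLE */

struct request { int id; };

static void *pack(struct request *req, int is_double)
{
	/* allocated structs are at least pointer aligned, so bit 0 is free */
	return (void *)((uintptr_t)req | (is_double ? F_DOUBLE : 0));
}

static struct request *unpack(void *priv)	/* wqe_to_req() analogue */
{
	return (struct request *)((uintptr_t)priv & ~F_DOUBLE);
}

static int tagged_double(void *priv)		/* wqe_is_double() analogue */
{
	return (uintptr_t)priv & F_DOUBLE;
}

int main(void)
{
	struct request r = { .id = 42 };
	void *priv = pack(&r, 1);

	printf("id=%d double=%d\n", unpack(priv)->id, tagged_double(priv));
	return 0;
}
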
73 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
74 {
75 	int v;
76 
77 	/*
78 	 * poll_refs are already elevated and we don't have much hope for
79 	 * grabbing the ownership. Instead of incrementing set a retry flag
80 	 * to notify the loop that there might have been some change.
81 	 */
82 	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
83 	if (v & IO_POLL_REF_MASK)
84 		return false;
85 	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
86 }
87 
88 /*
89  * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
90  * bump it and acquire ownership. It's disallowed to modify a request while
91  * not owning it; that prevents races when enqueueing task_work and between
92  * arming poll and wakeups.
93  */
94 static inline bool io_poll_get_ownership(struct io_kiocb *req)
95 {
96 	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
97 		return io_poll_get_ownership_slowpath(req);
98 	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
99 }
100 
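Ownership here means "the caller that moved the ref part of poll_refs from zero to non-zero"; later wakeups only add references, and the owner drops them all in task_work, looping if new ones arrived meanwhile. A hedged C11 sketch of that fast path (illustrative names; the bias/retry slowpath above is left out):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REF_MASK ((1u << 30) - 1)	/* mirrors IO_POLL_REF_MASK, bits 29..0 */

struct fake_req { atomic_uint poll_refs; };

/* Owner is whoever bumps the ref part from 0 to 1, as in io_poll_get_ownership(). */
static bool get_ownership(struct fake_req *req)
{
	return !(atomic_fetch_add(&req->poll_refs, 1) & REF_MASK);
}

/*
 * Owner's release, as at the end of io_poll_check_events(): drop the refs we
 * observed; if more arrived in the meantime the remainder is non-zero and we
 * stay the owner and must run another iteration.
 */
static bool drop_refs_still_owner(struct fake_req *req, unsigned seen)
{
	return (atomic_fetch_sub(&req->poll_refs, seen) - seen) & REF_MASK;
}

int main(void)
{
	struct fake_req req = { .poll_refs = 0 };

	printf("first caller owns: %d\n", get_ownership(&req));		/* 1 */
	printf("second caller owns: %d\n", get_ownership(&req));	/* 0, just a ref */
	printf("owner must loop: %d\n", drop_refs_still_owner(&req, 1));	/* 1 */
	return 0;
}
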
101 static void io_poll_mark_cancelled(struct io_kiocb *req)
102 {
103 	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
104 }
105 
106 static struct io_poll *io_poll_get_double(struct io_kiocb *req)
107 {
108 	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
109 	if (req->opcode == IORING_OP_POLL_ADD)
110 		return req->async_data;
111 	return req->apoll->double_poll;
112 }
113 
114 static struct io_poll *io_poll_get_single(struct io_kiocb *req)
115 {
116 	if (req->opcode == IORING_OP_POLL_ADD)
117 		return io_kiocb_to_cmd(req, struct io_poll);
118 	return &req->apoll->poll;
119 }
120 
121 static void io_poll_req_insert(struct io_kiocb *req)
122 {
123 	struct io_hash_table *table = &req->ctx->cancel_table;
124 	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
125 	struct io_hash_bucket *hb = &table->hbs[index];
126 
127 	spin_lock(&hb->lock);
128 	hlist_add_head(&req->hash_node, &hb->list);
129 	spin_unlock(&hb->lock);
130 }
131 
132 static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
133 {
134 	struct io_hash_table *table = &req->ctx->cancel_table;
135 	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
136 	spinlock_t *lock = &table->hbs[index].lock;
137 
138 	spin_lock(lock);
139 	hash_del(&req->hash_node);
140 	spin_unlock(lock);
141 }
142 
143 static void io_poll_req_insert_locked(struct io_kiocb *req)
144 {
145 	struct io_hash_table *table = &req->ctx->cancel_table_locked;
146 	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
147 
148 	lockdep_assert_held(&req->ctx->uring_lock);
149 
150 	hlist_add_head(&req->hash_node, &table->hbs[index].list);
151 }
152 
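All three hash helpers above pick a bucket by hashing the 64-bit user_data with hash_long(), which on 64-bit kernels is a multiplicative (Fibonacci) hash keeping the top hash_bits bits. A standalone sketch of that bucket selection, assuming the kernel's 64-bit golden-ratio constant:

#include <stdint.h>
#include <stdio.h>

/* 2^64 / golden ratio, the constant behind the kernel's hash_64()/hash_long() */
#define GOLDEN_RATIO_64 0x61C8864680B583EBull

static uint32_t bucket_for(uint64_t user_data, unsigned hash_bits)
{
	/* keep the top hash_bits bits of the product, like hash_long() does */
	return (uint32_t)((user_data * GOLDEN_RATIO_64) >> (64 - hash_bits));
}

int main(void)
{
	/* e.g. a cancel_table with 1 << 6 buckets */
	for (uint64_t ud = 1; ud <= 4; ud++)
		printf("user_data %llu -> bucket %u\n",
		       (unsigned long long)ud, bucket_for(ud, 6));
	return 0;
}
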
153 static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
154 {
155 	struct io_ring_ctx *ctx = req->ctx;
156 
157 	if (req->flags & REQ_F_HASH_LOCKED) {
158 		/*
159 		 * ->cancel_table_locked is protected by ->uring_lock in
160 		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
161 		 * already grabbed the mutex for us, but there is a chance it
162 		 * failed.
163 		 */
164 		io_tw_lock(ctx, ts);
165 		hash_del(&req->hash_node);
166 		req->flags &= ~REQ_F_HASH_LOCKED;
167 	} else {
168 		io_poll_req_delete(req, ctx);
169 	}
170 }
171 
172 static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
173 {
174 	poll->head = NULL;
175 #define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
176 	/* mask in events that we always want/need */
177 	poll->events = events | IO_POLL_UNMASK;
178 	INIT_LIST_HEAD(&poll->wait.entry);
179 	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
180 }
181 
182 static inline void io_poll_remove_entry(struct io_poll *poll)
183 {
184 	struct wait_queue_head *head = smp_load_acquire(&poll->head);
185 
186 	if (head) {
187 		spin_lock_irq(&head->lock);
188 		list_del_init(&poll->wait.entry);
189 		poll->head = NULL;
190 		spin_unlock_irq(&head->lock);
191 	}
192 }
193 
194 static void io_poll_remove_entries(struct io_kiocb *req)
195 {
196 	/*
197 	 * Nothing to do if neither of those flags are set. Avoid dipping
198 	 * into the poll/apoll/double cachelines if we can.
199 	 */
200 	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
201 		return;
202 
203 	/*
204 	 * While we hold the waitqueue lock and the waitqueue is nonempty,
205 	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
206 	 * lock in the first place can race with the waitqueue being freed.
207 	 *
208 	 * We solve this as eventpoll does: by taking advantage of the fact that
209 	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
210 	 * we enter rcu_read_lock() and see that the pointer to the queue is
211 	 * non-NULL, we can then lock it without the memory being freed out from
212 	 * under us.
213 	 *
214 	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
215 	 * case the caller deletes the entry from the queue, leaving it empty.
216 	 * In that case, only RCU prevents the queue memory from being freed.
217 	 */
218 	rcu_read_lock();
219 	if (req->flags & REQ_F_SINGLE_POLL)
220 		io_poll_remove_entry(io_poll_get_single(req));
221 	if (req->flags & REQ_F_DOUBLE_POLL)
222 		io_poll_remove_entry(io_poll_get_double(req));
223 	rcu_read_unlock();
224 }
225 
226 enum {
227 	IOU_POLL_DONE = 0,
228 	IOU_POLL_NO_ACTION = 1,
229 	IOU_POLL_REMOVE_POLL_USE_RES = 2,
230 	IOU_POLL_REISSUE = 3,
231 	IOU_POLL_REQUEUE = 4,
232 };
233 
234 static void __io_poll_execute(struct io_kiocb *req, int mask)
235 {
236 	unsigned flags = 0;
237 
238 	io_req_set_res(req, mask, 0);
239 	req->io_task_work.func = io_poll_task_func;
240 
241 	trace_io_uring_task_add(req, mask);
242 
243 	if (!(req->flags & REQ_F_POLL_NO_LAZY))
244 		flags = IOU_F_TWQ_LAZY_WAKE;
245 	__io_req_task_work_add(req, flags);
246 }
247 
248 static inline void io_poll_execute(struct io_kiocb *req, int res)
249 {
250 	if (io_poll_get_ownership(req))
251 		__io_poll_execute(req, res);
252 }
253 
254 /*
255  * All poll tw should go through this. Checks for poll events, manages
256  * references, does rewait, etc.
257  *
258  * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
259  * required, which means either a spurious wakeup or a multishot CQE was served.
260  * IOU_POLL_DONE when it's done with the request, then the mask is stored in
261  * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
262  * poll and that the result is stored in req->cqe.
263  */
264 static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
265 {
266 	int v;
267 
268 	/* req->task == current here, checking PF_EXITING is safe */
269 	if (unlikely(req->task->flags & PF_EXITING))
270 		return -ECANCELED;
271 
272 	do {
273 		v = atomic_read(&req->poll_refs);
274 
275 		if (unlikely(v != 1)) {
276 			/* tw should be the owner and so have some refs */
277 			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
278 				return IOU_POLL_NO_ACTION;
279 			if (v & IO_POLL_CANCEL_FLAG)
280 				return -ECANCELED;
281 			/*
282 			 * cqe.res contains only events of the first wake up
283 			 * and all others are to be lost. Redo vfs_poll() to get
284 			 * up to date state.
285 			 */
286 			if ((v & IO_POLL_REF_MASK) != 1)
287 				req->cqe.res = 0;
288 
289 			if (v & IO_POLL_RETRY_FLAG) {
290 				req->cqe.res = 0;
291 				/*
292 				 * We won't find new events that came in between
293 				 * vfs_poll and the ref put unless we clear the
294 				 * flag in advance.
295 				 */
296 				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
297 				v &= ~IO_POLL_RETRY_FLAG;
298 			}
299 		}
300 
301 		/* the mask was stashed in __io_poll_execute */
302 		if (!req->cqe.res) {
303 			struct poll_table_struct pt = { ._key = req->apoll_events };
304 			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
305 			/*
306 			 * We got woken with a mask, but someone else got to
307 			 * it first. The above vfs_poll() doesn't add us back
308 			 * to the waitqueue, so if we get nothing back, we
309 			 * should be safe and attempt a reissue.
310 			 */
311 			if (unlikely(!req->cqe.res)) {
312 				/* Multishot-armed requests need not reissue */
313 				if (!(req->apoll_events & EPOLLONESHOT))
314 					continue;
315 				return IOU_POLL_REISSUE;
316 			}
317 		}
318 		if (req->apoll_events & EPOLLONESHOT)
319 			return IOU_POLL_DONE;
320 
321 		/* multishot, just fill a CQE and proceed */
322 		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
323 			__poll_t mask = mangle_poll(req->cqe.res &
324 						    req->apoll_events);
325 
326 			if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
327 				io_req_set_res(req, mask, 0);
328 				return IOU_POLL_REMOVE_POLL_USE_RES;
329 			}
330 		} else {
331 			int ret = io_poll_issue(req, ts);
332 			if (ret == IOU_STOP_MULTISHOT)
333 				return IOU_POLL_REMOVE_POLL_USE_RES;
334 			else if (ret == IOU_REQUEUE)
335 				return IOU_POLL_REQUEUE;
336 			if (ret < 0)
337 				return ret;
338 		}
339 
340 		/* force the next iteration to vfs_poll() */
341 		req->cqe.res = 0;
342 
343 		/*
344 		 * Release all references, retry if someone tried to restart
345 		 * task_work while we were executing it.
346 		 */
347 		v &= IO_POLL_REF_MASK;
348 	} while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
349 
350 	return IOU_POLL_NO_ACTION;
351 }
352 
353 void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
354 {
355 	int ret;
356 
357 	ret = io_poll_check_events(req, ts);
358 	if (ret == IOU_POLL_NO_ACTION) {
359 		return;
360 	} else if (ret == IOU_POLL_REQUEUE) {
361 		__io_poll_execute(req, 0);
362 		return;
363 	}
364 	io_poll_remove_entries(req);
365 	io_poll_tw_hash_eject(req, ts);
366 
367 	if (req->opcode == IORING_OP_POLL_ADD) {
368 		if (ret == IOU_POLL_DONE) {
369 			struct io_poll *poll;
370 
371 			poll = io_kiocb_to_cmd(req, struct io_poll);
372 			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
373 		} else if (ret == IOU_POLL_REISSUE) {
374 			io_req_task_submit(req, ts);
375 			return;
376 		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
377 			req->cqe.res = ret;
378 			req_set_fail(req);
379 		}
380 
381 		io_req_set_res(req, req->cqe.res, 0);
382 		io_req_task_complete(req, ts);
383 	} else {
384 		io_tw_lock(req->ctx, ts);
385 
386 		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
387 			io_req_task_complete(req, ts);
388 		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
389 			io_req_task_submit(req, ts);
390 		else
391 			io_req_defer_failed(req, ret);
392 	}
393 }
394 
395 static void io_poll_cancel_req(struct io_kiocb *req)
396 {
397 	io_poll_mark_cancelled(req);
398 	/* kick tw, which should complete the request */
399 	io_poll_execute(req, 0);
400 }
401 
402 #define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)
403 
404 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
405 {
406 	io_poll_mark_cancelled(req);
407 	/* we have to kick tw in case it's not already */
408 	io_poll_execute(req, 0);
409 
410 	/*
411 	 * If the waitqueue is being freed early but someone already
412 	 * holds ownership over it, we have to tear down the request as
413 	 * best we can. That means immediately removing the request from
414 	 * its waitqueue and preventing all further accesses to the
415 	 * waitqueue via the request.
416 	 */
417 	list_del_init(&poll->wait.entry);
418 
419 	/*
420 	 * Careful: this *must* be the last step, since as soon
421 	 * as req->head is NULL'ed out, the request can be
422 	 * completed and freed, since aio_poll_complete_work()
423 	 * will no longer need to take the waitqueue lock.
424 	 */
425 	smp_store_release(&poll->head, NULL);
426 	return 1;
427 }
428 
429 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
430 			void *key)
431 {
432 	struct io_kiocb *req = wqe_to_req(wait);
433 	struct io_poll *poll = container_of(wait, struct io_poll, wait);
434 	__poll_t mask = key_to_poll(key);
435 
436 	if (unlikely(mask & POLLFREE))
437 		return io_pollfree_wake(req, poll);
438 
439 	/* for instances that support it check for an event match first */
440 	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
441 		return 0;
442 
443 	if (io_poll_get_ownership(req)) {
444 		/*
445 		 * If we trigger a multishot poll off our own wakeup path,
446 		 * disable multishot as there is a circular dependency between
447 		 * CQ posting and triggering the event.
448 		 */
449 		if (mask & EPOLL_URING_WAKE)
450 			poll->events |= EPOLLONESHOT;
451 
452 		/* optional, saves extra locking for removal in tw handler */
453 		if (mask && poll->events & EPOLLONESHOT) {
454 			list_del_init(&poll->wait.entry);
455 			poll->head = NULL;
456 			if (wqe_is_double(wait))
457 				req->flags &= ~REQ_F_DOUBLE_POLL;
458 			else
459 				req->flags &= ~REQ_F_SINGLE_POLL;
460 		}
461 		__io_poll_execute(req, mask);
462 	}
463 	return 1;
464 }
465 
466 /* fails only when polling is already completing by the first entry */
467 static bool io_poll_double_prepare(struct io_kiocb *req)
468 {
469 	struct wait_queue_head *head;
470 	struct io_poll *poll = io_poll_get_single(req);
471 
472 	/* head is RCU protected, see io_poll_remove_entries() comments */
473 	rcu_read_lock();
474 	head = smp_load_acquire(&poll->head);
475 	/*
476 	 * poll arm might not hold ownership and so race for req->flags with
477 	 * io_poll_wake(). There is only one poll entry queued, serialise with
478 	 * it by taking its head lock. As we're still arming, the tw handler
479 	 * is not going to be run, so there are no races with it.
480 	 */
481 	if (head) {
482 		spin_lock_irq(&head->lock);
483 		req->flags |= REQ_F_DOUBLE_POLL;
484 		if (req->opcode == IORING_OP_POLL_ADD)
485 			req->flags |= REQ_F_ASYNC_DATA;
486 		spin_unlock_irq(&head->lock);
487 	}
488 	rcu_read_unlock();
489 	return !!head;
490 }
491 
492 static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
493 			    struct wait_queue_head *head,
494 			    struct io_poll **poll_ptr)
495 {
496 	struct io_kiocb *req = pt->req;
497 	unsigned long wqe_private = (unsigned long) req;
498 
499 	/*
500 	 * The file being polled uses multiple waitqueues for poll handling
501 	 * (e.g. one for read, one for write). Setup a separate io_poll
502 	 * (e.g. one for read, one for write). Set up a separate io_poll
503 	 */
504 	if (unlikely(pt->nr_entries)) {
505 		struct io_poll *first = poll;
506 
507 		/* double add on the same waitqueue head, ignore */
508 		if (first->head == head)
509 			return;
510 		/* already have a 2nd entry, fail a third attempt */
511 		if (*poll_ptr) {
512 			if ((*poll_ptr)->head == head)
513 				return;
514 			pt->error = -EINVAL;
515 			return;
516 		}
517 
518 		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
519 		if (!poll) {
520 			pt->error = -ENOMEM;
521 			return;
522 		}
523 
524 		/* mark as double wq entry */
525 		wqe_private |= IO_WQE_F_DOUBLE;
526 		io_init_poll_iocb(poll, first->events);
527 		if (!io_poll_double_prepare(req)) {
528 			/* the request is completing, just back off */
529 			kfree(poll);
530 			return;
531 		}
532 		*poll_ptr = poll;
533 	} else {
534 		/* fine to modify, there is no poll queued to race with us */
535 		req->flags |= REQ_F_SINGLE_POLL;
536 	}
537 
538 	pt->nr_entries++;
539 	poll->head = head;
540 	poll->wait.private = (void *) wqe_private;
541 
542 	if (poll->events & EPOLLEXCLUSIVE) {
543 		add_wait_queue_exclusive(head, &poll->wait);
544 	} else {
545 		add_wait_queue(head, &poll->wait);
546 	}
547 }
548 
549 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
550 			       struct poll_table_struct *p)
551 {
552 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
553 	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
554 
555 	__io_queue_proc(poll, pt, head,
556 			(struct io_poll **) &pt->req->async_data);
557 }
558 
559 static bool io_poll_can_finish_inline(struct io_kiocb *req,
560 				      struct io_poll_table *pt)
561 {
562 	return pt->owning || io_poll_get_ownership(req);
563 }
564 
565 static void io_poll_add_hash(struct io_kiocb *req)
566 {
567 	if (req->flags & REQ_F_HASH_LOCKED)
568 		io_poll_req_insert_locked(req);
569 	else
570 		io_poll_req_insert(req);
571 }
572 
573 /*
574  * Returns 0 when it's handed over for polling. The caller owns the request if
575  * it returns non-zero, but otherwise should not touch it. Negative values
576  * contain an error code. When the result is >0, the polling has completed
577  * inline and ipt.result_mask is set to the mask.
578  */
579 static int __io_arm_poll_handler(struct io_kiocb *req,
580 				 struct io_poll *poll,
581 				 struct io_poll_table *ipt, __poll_t mask,
582 				 unsigned issue_flags)
583 {
584 	INIT_HLIST_NODE(&req->hash_node);
585 	io_init_poll_iocb(poll, mask);
586 	poll->file = req->file;
587 	req->apoll_events = poll->events;
588 
589 	ipt->pt._key = mask;
590 	ipt->req = req;
591 	ipt->error = 0;
592 	ipt->nr_entries = 0;
593 	/*
594 	 * Polling is either completed here or via task_work, so if we're in the
595 	 * task context we're naturally serialised with tw by merit of running
596 	 * the same task. When it's io-wq, take the ownership to prevent tw
597 	 * from running. However, when we're in the task context, skip taking
598 	 * it as an optimisation.
599 	 *
600 	 * Note: even though the request won't be completed/freed, without
601 	 * ownership we still can race with io_poll_wake().
602 	 * io_poll_can_finish_inline() tries to deal with that.
603 	 */
604 	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
605 	atomic_set(&req->poll_refs, (int)ipt->owning);
606 
607 	/* io-wq doesn't hold uring_lock */
608 	if (issue_flags & IO_URING_F_UNLOCKED)
609 		req->flags &= ~REQ_F_HASH_LOCKED;
610 
611 
612 	/*
613 	 * Exclusive waits may only wake a limited amount of entries
614 	 * rather than all of them, this may interfere with lazy
615 	 * wake if someone does wait(events > 1). Ensure we don't do
616 	 * lazy wake for those, as we need to process each one as they
617 	 * come in.
618 	 */
619 	if (poll->events & EPOLLEXCLUSIVE)
620 		req->flags |= REQ_F_POLL_NO_LAZY;
621 
622 	mask = vfs_poll(req->file, &ipt->pt) & poll->events;
623 
624 	if (unlikely(ipt->error || !ipt->nr_entries)) {
625 		io_poll_remove_entries(req);
626 
627 		if (!io_poll_can_finish_inline(req, ipt)) {
628 			io_poll_mark_cancelled(req);
629 			return 0;
630 		} else if (mask && (poll->events & EPOLLET)) {
631 			ipt->result_mask = mask;
632 			return 1;
633 		}
634 		return ipt->error ?: -EINVAL;
635 	}
636 
637 	if (mask &&
638 	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
639 		if (!io_poll_can_finish_inline(req, ipt)) {
640 			io_poll_add_hash(req);
641 			return 0;
642 		}
643 		io_poll_remove_entries(req);
644 		ipt->result_mask = mask;
645 		/* no one else has access to the req, forget about the ref */
646 		return 1;
647 	}
648 
649 	io_poll_add_hash(req);
650 
651 	if (mask && (poll->events & EPOLLET) &&
652 	    io_poll_can_finish_inline(req, ipt)) {
653 		__io_poll_execute(req, mask);
654 		return 0;
655 	}
656 	io_napi_add(req);
657 
658 	if (ipt->owning) {
659 		/*
660 		 * Try to release ownership. If we see a change of state, e.g.
661 		 * poll was woken up, queue up a tw; it'll deal with it.
662 		 */
663 		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
664 			__io_poll_execute(req, 0);
665 	}
666 	return 0;
667 }
668 
669 static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
670 			       struct poll_table_struct *p)
671 {
672 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
673 	struct async_poll *apoll = pt->req->apoll;
674 
675 	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
676 }
677 
678 /*
679  * We can't reliably detect loops where a poll repeatedly triggers and the
680  * issue then keeps failing. But rather than fail these immediately, allow a
681  * certain number of retries before we give up. Given that this condition
682  * should _rarely_ trigger even once, we should be fine with a larger value.
683  */
684 #define APOLL_MAX_RETRY		128
685 
686 static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
687 					     unsigned issue_flags)
688 {
689 	struct io_ring_ctx *ctx = req->ctx;
690 	struct async_poll *apoll;
691 
692 	if (req->flags & REQ_F_POLLED) {
693 		apoll = req->apoll;
694 		kfree(apoll->double_poll);
695 	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
696 		apoll = io_alloc_cache_get(&ctx->apoll_cache);
697 		if (!apoll)
698 			goto alloc_apoll;
699 		apoll->poll.retries = APOLL_MAX_RETRY;
700 	} else {
701 alloc_apoll:
702 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
703 		if (unlikely(!apoll))
704 			return NULL;
705 		apoll->poll.retries = APOLL_MAX_RETRY;
706 	}
707 	apoll->double_poll = NULL;
708 	req->apoll = apoll;
709 	if (unlikely(!--apoll->poll.retries))
710 		return NULL;
711 	return apoll;
712 }
713 
714 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
715 {
716 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
717 	struct async_poll *apoll;
718 	struct io_poll_table ipt;
719 	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
720 	int ret;
721 
722 	/*
723 	 * apoll requests already grab the mutex to complete in the tw handler,
724 	 * so removal from the mutex-backed hash is free, use it by default.
725 	 */
726 	req->flags |= REQ_F_HASH_LOCKED;
727 
728 	if (!def->pollin && !def->pollout)
729 		return IO_APOLL_ABORTED;
730 	if (!io_file_can_poll(req))
731 		return IO_APOLL_ABORTED;
732 	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
733 		mask |= EPOLLONESHOT;
734 
735 	if (def->pollin) {
736 		mask |= EPOLLIN | EPOLLRDNORM;
737 
738 		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
739 		if (req->flags & REQ_F_CLEAR_POLLIN)
740 			mask &= ~EPOLLIN;
741 	} else {
742 		mask |= EPOLLOUT | EPOLLWRNORM;
743 	}
744 	if (def->poll_exclusive)
745 		mask |= EPOLLEXCLUSIVE;
746 
747 	apoll = io_req_alloc_apoll(req, issue_flags);
748 	if (!apoll)
749 		return IO_APOLL_ABORTED;
750 	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
751 	req->flags |= REQ_F_POLLED;
752 	ipt.pt._qproc = io_async_queue_proc;
753 
754 	io_kbuf_recycle(req, issue_flags);
755 
756 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
757 	if (ret)
758 		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
759 	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
760 	return IO_APOLL_OK;
761 }
762 
763 static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
764 					    struct io_hash_table *table,
765 					    bool cancel_all)
766 {
767 	unsigned nr_buckets = 1U << table->hash_bits;
768 	struct hlist_node *tmp;
769 	struct io_kiocb *req;
770 	bool found = false;
771 	int i;
772 
773 	for (i = 0; i < nr_buckets; i++) {
774 		struct io_hash_bucket *hb = &table->hbs[i];
775 
776 		spin_lock(&hb->lock);
777 		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
778 			if (io_match_task_safe(req, tsk, cancel_all)) {
779 				hlist_del_init(&req->hash_node);
780 				io_poll_cancel_req(req);
781 				found = true;
782 			}
783 		}
784 		spin_unlock(&hb->lock);
785 	}
786 	return found;
787 }
788 
789 /*
790  * Returns true if we found and killed one or more poll requests
791  */
792 __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
793 			       bool cancel_all)
794 	__must_hold(&ctx->uring_lock)
795 {
796 	bool ret;
797 
798 	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
799 	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
800 	return ret;
801 }
802 
803 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
804 				     struct io_cancel_data *cd,
805 				     struct io_hash_table *table,
806 				     struct io_hash_bucket **out_bucket)
807 {
808 	struct io_kiocb *req;
809 	u32 index = hash_long(cd->data, table->hash_bits);
810 	struct io_hash_bucket *hb = &table->hbs[index];
811 
812 	*out_bucket = NULL;
813 
814 	spin_lock(&hb->lock);
815 	hlist_for_each_entry(req, &hb->list, hash_node) {
816 		if (cd->data != req->cqe.user_data)
817 			continue;
818 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
819 			continue;
820 		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
821 			if (io_cancel_match_sequence(req, cd->seq))
822 				continue;
823 		}
824 		*out_bucket = hb;
825 		return req;
826 	}
827 	spin_unlock(&hb->lock);
828 	return NULL;
829 }
830 
831 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
832 					  struct io_cancel_data *cd,
833 					  struct io_hash_table *table,
834 					  struct io_hash_bucket **out_bucket)
835 {
836 	unsigned nr_buckets = 1U << table->hash_bits;
837 	struct io_kiocb *req;
838 	int i;
839 
840 	*out_bucket = NULL;
841 
842 	for (i = 0; i < nr_buckets; i++) {
843 		struct io_hash_bucket *hb = &table->hbs[i];
844 
845 		spin_lock(&hb->lock);
846 		hlist_for_each_entry(req, &hb->list, hash_node) {
847 			if (io_cancel_req_match(req, cd)) {
848 				*out_bucket = hb;
849 				return req;
850 			}
851 		}
852 		spin_unlock(&hb->lock);
853 	}
854 	return NULL;
855 }
856 
857 static int io_poll_disarm(struct io_kiocb *req)
858 {
859 	if (!req)
860 		return -ENOENT;
861 	if (!io_poll_get_ownership(req))
862 		return -EALREADY;
863 	io_poll_remove_entries(req);
864 	hash_del(&req->hash_node);
865 	return 0;
866 }
867 
868 static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
869 			    struct io_hash_table *table)
870 {
871 	struct io_hash_bucket *bucket;
872 	struct io_kiocb *req;
873 
874 	if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
875 			 IORING_ASYNC_CANCEL_ANY))
876 		req = io_poll_file_find(ctx, cd, table, &bucket);
877 	else
878 		req = io_poll_find(ctx, false, cd, table, &bucket);
879 
880 	if (req)
881 		io_poll_cancel_req(req);
882 	if (bucket)
883 		spin_unlock(&bucket->lock);
884 	return req ? 0 : -ENOENT;
885 }
886 
887 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
888 		   unsigned issue_flags)
889 {
890 	int ret;
891 
892 	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
893 	if (ret != -ENOENT)
894 		return ret;
895 
896 	io_ring_submit_lock(ctx, issue_flags);
897 	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
898 	io_ring_submit_unlock(ctx, issue_flags);
899 	return ret;
900 }
901 
902 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
903 				     unsigned int flags)
904 {
905 	u32 events;
906 
907 	events = READ_ONCE(sqe->poll32_events);
908 #ifdef __BIG_ENDIAN
909 	events = swahw32(events);
910 #endif
911 	if (!(flags & IORING_POLL_ADD_MULTI))
912 		events |= EPOLLONESHOT;
913 	if (!(flags & IORING_POLL_ADD_LEVEL))
914 		events |= EPOLLET;
915 	return demangle_poll(events) |
916 		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
917 }
918 
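From userspace, IORING_POLL_ADD_MULTI requests multishot mode (the parsing above then leaves EPOLLONESHOT clear) and IORING_POLL_ADD_LEVEL requests level-triggered behaviour. A hedged liburing sketch of a multishot poll, assuming liburing 2.x and a pollable fd; an illustration, not part of this file:

/* build (assumption): cc mshot.c -luring */
#include <liburing.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* multishot poll on stdin: one armed request, one CQE per readiness
	 * event, each carrying IORING_CQE_F_MORE while the poll stays armed */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_multishot(sqe, STDIN_FILENO, POLLIN);
	io_uring_sqe_set_data64(sqe, 0x1234);
	io_uring_submit(&ring);

	for (int i = 0; i < 3; i++) {
		unsigned more;

		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		more = cqe->flags & IORING_CQE_F_MORE;
		printf("res=0x%x more=%d\n", cqe->res, !!more);
		io_uring_cqe_seen(&ring, cqe);
		if (!more)
			break;	/* multishot terminated, re-arm if still needed */
	}
	io_uring_queue_exit(&ring);
	return 0;
}
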
919 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
920 {
921 	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
922 	u32 flags;
923 
924 	if (sqe->buf_index || sqe->splice_fd_in)
925 		return -EINVAL;
926 	flags = READ_ONCE(sqe->len);
927 	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
928 		      IORING_POLL_ADD_MULTI))
929 		return -EINVAL;
930 	/* meaningless without update */
931 	if (flags == IORING_POLL_ADD_MULTI)
932 		return -EINVAL;
933 
934 	upd->old_user_data = READ_ONCE(sqe->addr);
935 	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
936 	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
937 
938 	upd->new_user_data = READ_ONCE(sqe->off);
939 	if (!upd->update_user_data && upd->new_user_data)
940 		return -EINVAL;
941 	if (upd->update_events)
942 		upd->events = io_poll_parse_events(sqe, flags);
943 	else if (sqe->poll32_events)
944 		return -EINVAL;
945 
946 	return 0;
947 }
948 
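io_poll_remove_prep() above also covers updates: sqe->addr carries the user_data of the armed poll, sqe->off the replacement user_data, and sqe->len the IORING_POLL_UPDATE_* flags. A hedged liburing sketch of issuing such an update (the __u64 form of io_uring_prep_poll_update() from liburing >= 2.2 is assumed):

#include <liburing.h>
#include <poll.h>

/*
 * Retarget the armed poll identified by old_ud: watch POLLOUT instead and
 * retag it as new_ud. On the kernel side this is parsed by
 * io_poll_remove_prep() and applied in io_poll_remove().
 */
static int update_poll(struct io_uring *ring, __u64 old_ud, __u64 new_ud)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_poll_update(sqe, old_ud, new_ud, POLLOUT,
				  IORING_POLL_UPDATE_EVENTS |
				  IORING_POLL_UPDATE_USER_DATA);
	return io_uring_submit(ring);
}
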
949 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
950 {
951 	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
952 	u32 flags;
953 
954 	if (sqe->buf_index || sqe->off || sqe->addr)
955 		return -EINVAL;
956 	flags = READ_ONCE(sqe->len);
957 	if (flags & ~IORING_POLL_ADD_MULTI)
958 		return -EINVAL;
959 	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
960 		return -EINVAL;
961 
962 	poll->events = io_poll_parse_events(sqe, flags);
963 	return 0;
964 }
965 
966 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
967 {
968 	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
969 	struct io_poll_table ipt;
970 	int ret;
971 
972 	ipt.pt._qproc = io_poll_queue_proc;
973 
974 	/*
975 	 * If sqpoll or single issuer, there is no contention for ->uring_lock
976 	 * and we'll end up holding it in tw handlers anyway.
977 	 */
978 	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
979 		req->flags |= REQ_F_HASH_LOCKED;
980 
981 	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
982 	if (ret > 0) {
983 		io_req_set_res(req, ipt.result_mask, 0);
984 		return IOU_OK;
985 	}
986 	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
987 }
988 
989 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
990 {
991 	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
992 	struct io_ring_ctx *ctx = req->ctx;
993 	struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
994 	struct io_hash_bucket *bucket;
995 	struct io_kiocb *preq;
996 	int ret2, ret = 0;
997 
998 	io_ring_submit_lock(ctx, issue_flags);
999 	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
1000 	ret2 = io_poll_disarm(preq);
1001 	if (bucket)
1002 		spin_unlock(&bucket->lock);
1003 	if (!ret2)
1004 		goto found;
1005 	if (ret2 != -ENOENT) {
1006 		ret = ret2;
1007 		goto out;
1008 	}
1009 
1010 	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
1011 	ret2 = io_poll_disarm(preq);
1012 	if (bucket)
1013 		spin_unlock(&bucket->lock);
1014 	if (ret2) {
1015 		ret = ret2;
1016 		goto out;
1017 	}
1018 
1019 found:
1020 	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
1021 		ret = -EFAULT;
1022 		goto out;
1023 	}
1024 
1025 	if (poll_update->update_events || poll_update->update_user_data) {
1026 		/* only replace the event mask, keep the behavior flags */
1027 		if (poll_update->update_events) {
1028 			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);
1029 
1030 			poll->events &= ~0xffff;
1031 			poll->events |= poll_update->events & 0xffff;
1032 			poll->events |= IO_POLL_UNMASK;
1033 		}
1034 		if (poll_update->update_user_data)
1035 			preq->cqe.user_data = poll_update->new_user_data;
1036 
1037 		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
1038 		/* successfully updated, don't complete poll request */
1039 		if (!ret2 || ret2 == -EIOCBQUEUED)
1040 			goto out;
1041 	}
1042 
1043 	req_set_fail(preq);
1044 	io_req_set_res(preq, -ECANCELED, 0);
1045 	preq->io_task_work.func = io_req_task_complete;
1046 	io_req_task_work_add(preq);
1047 out:
1048 	io_ring_submit_unlock(ctx, issue_flags);
1049 	if (ret < 0) {
1050 		req_set_fail(req);
1051 		return ret;
1052 	}
1053 	/* complete update request, we're done with it */
1054 	io_req_set_res(req, ret, 0);
1055 	return IOU_OK;
1056 }
1057