xref: /linux/include/linux/io_uring_types.h (revision 021bc4b9)
1 #ifndef IO_URING_TYPES_H
2 #define IO_URING_TYPES_H
3 
4 #include <linux/blkdev.h>
5 #include <linux/task_work.h>
6 #include <linux/bitmap.h>
7 #include <linux/llist.h>
8 #include <uapi/linux/io_uring.h>
9 
10 enum {
11 	/*
12 	 * A hint to not wake right away, but to delay until enough task_work
13 	 * items are queued to match the number of CQEs the task is waiting for.
14 	 *
15 	 * Must not be used with requests generating more than one CQE.
16 	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
17 	 */
18 	IOU_F_TWQ_LAZY_WAKE			= 1,
19 };
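
/*
 * Illustrative only, not part of this header: a task_work producer that posts
 * exactly one CQE per item may pass the hint when queueing, assuming the
 * internal __io_req_task_work_add(req, flags) helper:
 *
 *	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 *
 * Requests that may generate more than one CQE must not pass this flag.
 */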
20 
21 enum io_uring_cmd_flags {
22 	IO_URING_F_COMPLETE_DEFER	= 1,
23 	IO_URING_F_UNLOCKED		= 2,
24 	/* the request is executed from poll, so it should not be freed */
25 	IO_URING_F_MULTISHOT		= 4,
26 	/* executed by io-wq */
27 	IO_URING_F_IOWQ			= 8,
28 	/* int's last bit, sign checks are usually faster than a bit test */
29 	IO_URING_F_NONBLOCK		= INT_MIN,
30 
31 	/* ctx state flags, for URING_CMD */
32 	IO_URING_F_SQE128		= (1 << 8),
33 	IO_URING_F_CQE32		= (1 << 9),
34 	IO_URING_F_IOPOLL		= (1 << 10),
35 
36 	/* set when uring wants to cancel a previously issued command */
37 	IO_URING_F_CANCEL		= (1 << 11),
38 	IO_URING_F_COMPAT		= (1 << 12),
39 };
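
/*
 * Illustrative only: since IO_URING_F_NONBLOCK occupies the sign bit, an issue
 * handler can test it with a sign check rather than a bit test, e.g.:
 *
 *	if ((int) issue_flags < 0)		// IO_URING_F_NONBLOCK is set
 *		return -EAGAIN;
 *
 * which is equivalent to (issue_flags & IO_URING_F_NONBLOCK).
 */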
40 
41 struct io_wq_work_node {
42 	struct io_wq_work_node *next;
43 };
44 
45 struct io_wq_work_list {
46 	struct io_wq_work_node *first;
47 	struct io_wq_work_node *last;
48 };
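
/*
 * Illustrative only: io_wq_work_list is a singly linked list that tracks both
 * ends, so appends are O(1). The real helpers live in io_uring/slist.h; a
 * sketch of the append:
 *
 *	node->next = NULL;
 *	if (list->last)
 *		list->last->next = node;
 *	else
 *		list->first = node;
 *	list->last = node;
 */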
49 
50 struct io_wq_work {
51 	struct io_wq_work_node list;
52 	unsigned flags;
53 	/* place it here instead of io_kiocb as it fills padding and saves 4B */
54 	int cancel_seq;
55 };
56 
57 struct io_fixed_file {
58 	/* file * with additional FFS_* flags */
59 	unsigned long file_ptr;
60 };
61 
62 struct io_file_table {
63 	struct io_fixed_file *files;
64 	unsigned long *bitmap;
65 	unsigned int alloc_hint;
66 };
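
/*
 * Illustrative only: file_ptr stores the struct file pointer with the FFS_*
 * flag bits packed into its low (alignment) bits; the actual FFS_* masks live
 * in io_uring/filetable.h. A sketch of unpacking, assuming FFS_MASK covers the
 * pointer bits:
 *
 *	struct file *file = (struct file *)(slot->file_ptr & FFS_MASK);
 *	unsigned long ffs_flags = slot->file_ptr & ~FFS_MASK;
 */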
67 
68 struct io_hash_bucket {
69 	spinlock_t		lock;
70 	struct hlist_head	list;
71 } ____cacheline_aligned_in_smp;
72 
73 struct io_hash_table {
74 	struct io_hash_bucket	*hbs;
75 	unsigned		hash_bits;
76 };
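
/*
 * Illustrative only: lookups hash a request's user_data down to one of
 * 2^hash_bits buckets, roughly as the poll/cancel code does:
 *
 *	u32 index = hash_long(user_data, table->hash_bits);
 *	struct io_hash_bucket *hb = &table->hbs[index];
 *
 * Each bucket carries its own spinlock so unrelated lookups don't contend.
 */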
77 
78 /*
79  * Arbitrary limit, can be raised if need be
80  */
81 #define IO_RINGFD_REG_MAX 16
82 
83 struct io_uring_task {
84 	/* submission side */
85 	int				cached_refs;
86 	const struct io_ring_ctx 	*last;
87 	struct io_wq			*io_wq;
88 	struct file			*registered_rings[IO_RINGFD_REG_MAX];
89 
90 	struct xarray			xa;
91 	struct wait_queue_head		wait;
92 	atomic_t			in_cancel;
93 	atomic_t			inflight_tracked;
94 	struct percpu_counter		inflight;
95 
96 	struct { /* task_work */
97 		struct llist_head	task_list;
98 		struct callback_head	task_work;
99 	} ____cacheline_aligned_in_smp;
100 };
101 
102 struct io_uring {
103 	u32 head;
104 	u32 tail;
105 };
106 
107 /*
108  * This data is shared with the application through the mmap at offsets
109  * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
110  *
111  * The offsets to the member fields are published through struct
112  * io_sqring_offsets when calling io_uring_setup.
113  */
114 struct io_rings {
115 	/*
116 	 * Head and tail offsets into the ring; the offsets need to be
117 	 * masked to get valid indices.
118 	 *
119 	 * The kernel controls the head of the sq ring and the tail of the cq
120 	 * ring, and the application controls the tail of the sq ring and the
121 	 * head of the cq ring.
122 	 */
123 	struct io_uring		sq, cq;
124 	/*
125 	 * Bitmasks to apply to head and tail offsets (constant, equals
126 	 * ring_entries - 1)
127 	 */
128 	u32			sq_ring_mask, cq_ring_mask;
129 	/* Ring sizes (constant, power of 2) */
130 	u32			sq_ring_entries, cq_ring_entries;
131 	/*
132 	 * Number of invalid entries dropped by the kernel due to an
133 	 * invalid index stored in the array.
134 	 *
135 	 * Written by the kernel, shouldn't be modified by the
136 	 * application (i.e. get number of "new events" by comparing to
137 	 * cached value).
138 	 *
139 	 * After a new SQ head value has been read by the application, this
140 	 * counter includes all submissions that were dropped reaching
141 	 * the new SQ head (and possibly more).
142 	 */
143 	u32			sq_dropped;
144 	/*
145 	 * Runtime SQ flags
146 	 *
147 	 * Written by the kernel, shouldn't be modified by the
148 	 * application.
149 	 *
150 	 * The application needs a full memory barrier before checking
151 	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
152 	 */
153 	atomic_t		sq_flags;
154 	/*
155 	 * Runtime CQ flags
156 	 *
157 	 * Written by the application, shouldn't be modified by the
158 	 * kernel.
159 	 */
160 	u32			cq_flags;
161 	/*
162 	 * Number of completion events lost because the queue was full;
163 	 * this should be avoided by the application by making sure
164 	 * there are not more requests pending than there is space in
165 	 * the completion queue.
166 	 *
167 	 * Written by the kernel, shouldn't be modified by the
168 	 * application (i.e. get number of "new events" by comparing to
169 	 * cached value).
170 	 *
171 	 * As completion events come in out of order this counter is not
172 	 * ordered with any other data.
173 	 */
174 	u32			cq_overflow;
175 	/*
176 	 * Ring buffer of completion events.
177 	 *
178 	 * The kernel writes completion events fresh every time they are
179 	 * produced, so the application is allowed to modify pending
180 	 * entries.
181 	 */
182 	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
183 };
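
/*
 * Illustrative only: a userspace consumer (without liburing) reaps CQEs from
 * the mmapped CQ ring roughly like this, where cq_head, cq_tail, cq_mask and
 * cqes are pointers derived from struct io_cqring_offsets, and
 * smp_load_acquire()/smp_store_release() stand in for the equivalent
 * userspace acquire/release atomics:
 *
 *	unsigned head = *cq_head;
 *
 *	while (head != smp_load_acquire(cq_tail)) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *
 *		handle_completion(cqe);		// application-defined
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);
 */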
184 
185 struct io_restriction {
186 	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
187 	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
188 	u8 sqe_flags_allowed;
189 	u8 sqe_flags_required;
190 	bool registered;
191 };
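
/*
 * Illustrative only: once restrictions are registered, submission checks each
 * SQE against them, roughly as io_check_restriction() does:
 *
 *	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
 *		return false;
 *	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
 *	    ctx->restrictions.sqe_flags_required)
 *		return false;
 *	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
 *			  ctx->restrictions.sqe_flags_required))
 *		return false;
 */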
192 
193 struct io_submit_link {
194 	struct io_kiocb		*head;
195 	struct io_kiocb		*last;
196 };
197 
198 struct io_submit_state {
199 	/* inline/task_work completion list, under ->uring_lock */
200 	struct io_wq_work_node	free_list;
201 	/* batch completion logic */
202 	struct io_wq_work_list	compl_reqs;
203 	struct io_submit_link	link;
204 
205 	bool			plug_started;
206 	bool			need_plug;
207 	unsigned short		submit_nr;
208 	unsigned int		cqes_count;
209 	struct blk_plug		plug;
210 };
211 
212 struct io_ev_fd {
213 	struct eventfd_ctx	*cq_ev_fd;
214 	unsigned int		eventfd_async: 1;
215 	struct rcu_head		rcu;
216 	atomic_t		refs;
217 	atomic_t		ops;
218 };
219 
220 struct io_alloc_cache {
221 	struct io_wq_work_node	list;
222 	unsigned int		nr_cached;
223 	unsigned int		max_cached;
224 	size_t			elem_size;
225 };
226 
227 struct io_ring_ctx {
228 	/* const or read-mostly hot data */
229 	struct {
230 		unsigned int		flags;
231 		unsigned int		drain_next: 1;
232 		unsigned int		restricted: 1;
233 		unsigned int		off_timeout_used: 1;
234 		unsigned int		drain_active: 1;
235 		unsigned int		has_evfd: 1;
236 		/* all CQEs should be posted only by the submitter task */
237 		unsigned int		task_complete: 1;
238 		unsigned int		lockless_cq: 1;
239 		unsigned int		syscall_iopoll: 1;
240 		unsigned int		poll_activated: 1;
241 		unsigned int		drain_disabled: 1;
242 		unsigned int		compat: 1;
243 
244 		struct task_struct	*submitter_task;
245 		struct io_rings		*rings;
246 		struct percpu_ref	refs;
247 
248 		enum task_work_notify_mode	notify_method;
249 	} ____cacheline_aligned_in_smp;
250 
251 	/* submission data */
252 	struct {
253 		struct mutex		uring_lock;
254 
255 		/*
256 		 * Ring buffer of indices into array of io_uring_sqe, which is
257 		 * mmapped by the application using the IORING_OFF_SQES offset.
258 		 *
259 		 * This indirection could e.g. be used to assign fixed
260 		 * io_uring_sqe entries to operations and only submit them to
261 		 * the queue when needed.
262 		 *
263 		 * The kernel modifies neither the indices array nor the entries
264 		 * array.
265 		 */
266 		u32			*sq_array;
267 		struct io_uring_sqe	*sq_sqes;
268 		unsigned		cached_sq_head;
269 		unsigned		sq_entries;
270 
271 		/*
272 		 * Fixed resources fast path, should be accessed only under
273 		 * uring_lock, and updated through io_uring_register(2)
274 		 */
275 		struct io_rsrc_node	*rsrc_node;
276 		atomic_t		cancel_seq;
277 		struct io_file_table	file_table;
278 		unsigned		nr_user_files;
279 		unsigned		nr_user_bufs;
280 		struct io_mapped_ubuf	**user_bufs;
281 
282 		struct io_submit_state	submit_state;
283 
284 		struct io_buffer_list	*io_bl;
285 		struct xarray		io_bl_xa;
286 
287 		struct io_hash_table	cancel_table_locked;
288 		struct io_alloc_cache	apoll_cache;
289 		struct io_alloc_cache	netmsg_cache;
290 
291 		/*
292 		 * ->iopoll_list is protected by the ctx->uring_lock for
293 		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
294 		 * For SQPOLL, only the single threaded io_sq_thread() will
295 		 * manipulate the list, hence no extra locking is needed there.
296 		 */
297 		struct io_wq_work_list	iopoll_list;
298 		bool			poll_multi_queue;
299 
300 		/*
301 		 * Any cancelable uring_cmd is added to this list in
302 		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
303 		 */
304 		struct hlist_head	cancelable_uring_cmd;
305 	} ____cacheline_aligned_in_smp;
306 
307 	struct {
308 		/*
309 		 * We cache a range of free CQEs we can use; once exhausted, it
310 		 * should go through a slower range setup, see __io_get_cqe().
311 		 */
312 		struct io_uring_cqe	*cqe_cached;
313 		struct io_uring_cqe	*cqe_sentinel;
314 
315 		unsigned		cached_cq_tail;
316 		unsigned		cq_entries;
317 		struct io_ev_fd	__rcu	*io_ev_fd;
318 		unsigned		cq_extra;
319 	} ____cacheline_aligned_in_smp;
320 
321 	/*
322 	 * task_work and async notification delivery cacheline. Expected to
323 	 * regularly bounce between CPUs.
324 	 */
325 	struct {
326 		struct llist_head	work_llist;
327 		unsigned long		check_cq;
328 		atomic_t		cq_wait_nr;
329 		atomic_t		cq_timeouts;
330 		struct wait_queue_head	cq_wait;
331 	} ____cacheline_aligned_in_smp;
332 
333 	/* timeouts */
334 	struct {
335 		spinlock_t		timeout_lock;
336 		struct list_head	timeout_list;
337 		struct list_head	ltimeout_list;
338 		unsigned		cq_last_tm_flush;
339 	} ____cacheline_aligned_in_smp;
340 
341 	struct io_uring_cqe	completion_cqes[16];
342 
343 	spinlock_t		completion_lock;
344 
345 	/* IRQ completion list, under ->completion_lock */
346 	struct io_wq_work_list	locked_free_list;
347 	unsigned int		locked_free_nr;
348 
349 	struct list_head	io_buffers_comp;
350 	struct list_head	cq_overflow_list;
351 	struct io_hash_table	cancel_table;
352 
353 	struct hlist_head	waitid_list;
354 
355 #ifdef CONFIG_FUTEX
356 	struct hlist_head	futex_list;
357 	struct io_alloc_cache	futex_cache;
358 #endif
359 
360 	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
361 	struct io_sq_data	*sq_data;	/* if using sq thread polling */
362 
363 	struct wait_queue_head	sqo_sq_wait;
364 	struct list_head	sqd_list;
365 
366 	unsigned int		file_alloc_start;
367 	unsigned int		file_alloc_end;
368 
369 	struct xarray		personalities;
370 	u32			pers_next;
371 
372 	struct list_head	io_buffers_cache;
373 
374 	/* deferred free list, protected by ->uring_lock */
375 	struct hlist_head	io_buf_list;
376 
377 	/* Keep this last, we don't need it for the fast path */
378 	struct wait_queue_head		poll_wq;
379 	struct io_restriction		restrictions;
380 
381 	/* slow path rsrc auxiliary data, used by update/register */
382 	struct io_mapped_ubuf		*dummy_ubuf;
383 	struct io_rsrc_data		*file_data;
384 	struct io_rsrc_data		*buf_data;
385 
386 	/* protected by ->uring_lock */
387 	struct list_head		rsrc_ref_list;
388 	struct io_alloc_cache		rsrc_node_cache;
389 	struct wait_queue_head		rsrc_quiesce_wq;
390 	unsigned			rsrc_quiesce;
391 
392 	/* hashed buffered write serialization */
393 	struct io_wq_hash		*hash_map;
394 
395 	/* Only used for accounting purposes */
396 	struct user_struct		*user;
397 	struct mm_struct		*mm_account;
398 
399 	/* ctx exit and cancelation */
400 	struct llist_head		fallback_llist;
401 	struct delayed_work		fallback_work;
402 	struct work_struct		exit_work;
403 	struct list_head		tctx_list;
404 	struct completion		ref_comp;
405 
406 	/* io-wq management, e.g. thread count */
407 	u32				iowq_limits[2];
408 	bool				iowq_limits_set;
409 
410 	struct callback_head		poll_wq_task_work;
411 	struct list_head		defer_list;
412 	unsigned			sq_thread_idle;
413 	/* protected by ->completion_lock */
414 	unsigned			evfd_last_cq_tail;
415 
416 	/*
417 	 * If IORING_SETUP_NO_MMAP is used, then the below holds
418 	 * the gup'ed pages for the two rings, and the sqes.
419 	 */
420 	unsigned short			n_ring_pages;
421 	unsigned short			n_sqe_pages;
422 	struct page			**ring_pages;
423 	struct page			**sqe_pages;
424 };
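
/*
 * Illustrative only: the sq_array indirection documented in the submission
 * block above means fetching the next SQE is a two-step load, roughly as
 * io_get_sqe() does (ignoring IORING_SETUP_SQE128 and the optional
 * IORING_SETUP_NO_SQARRAY mode that bypasses the indirection):
 *
 *	unsigned head = ctx->cached_sq_head++ & (ctx->sq_entries - 1);
 *	unsigned idx = READ_ONCE(ctx->sq_array[head]);
 *
 *	if (likely(idx < ctx->sq_entries))
 *		sqe = &ctx->sq_sqes[idx];
 *	// otherwise the entry is dropped and rings->sq_dropped is bumped
 */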
425 
426 struct io_tw_state {
427 	/* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
428 	bool locked;
429 };
430 
431 enum {
432 	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
433 	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
434 	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
435 	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
436 	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
437 	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
438 	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,
439 
440 	/* first byte is taken by user flags, shift it to not overlap */
441 	REQ_F_FAIL_BIT		= 8,
442 	REQ_F_INFLIGHT_BIT,
443 	REQ_F_CUR_POS_BIT,
444 	REQ_F_NOWAIT_BIT,
445 	REQ_F_LINK_TIMEOUT_BIT,
446 	REQ_F_NEED_CLEANUP_BIT,
447 	REQ_F_POLLED_BIT,
448 	REQ_F_BUFFER_SELECTED_BIT,
449 	REQ_F_BUFFER_RING_BIT,
450 	REQ_F_REISSUE_BIT,
451 	REQ_F_CREDS_BIT,
452 	REQ_F_REFCOUNT_BIT,
453 	REQ_F_ARM_LTIMEOUT_BIT,
454 	REQ_F_ASYNC_DATA_BIT,
455 	REQ_F_SKIP_LINK_CQES_BIT,
456 	REQ_F_SINGLE_POLL_BIT,
457 	REQ_F_DOUBLE_POLL_BIT,
458 	REQ_F_PARTIAL_IO_BIT,
459 	REQ_F_APOLL_MULTISHOT_BIT,
460 	REQ_F_CLEAR_POLLIN_BIT,
461 	REQ_F_HASH_LOCKED_BIT,
462 	/* keep async read/write and isreg together and in order */
463 	REQ_F_SUPPORT_NOWAIT_BIT,
464 	REQ_F_ISREG_BIT,
465 	REQ_F_POLL_NO_LAZY_BIT,
466 
467 	/* not a real bit, just to check we're not overflowing the space */
468 	__REQ_F_LAST_BIT,
469 };
470 
471 enum {
472 	/* ctx owns file */
473 	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
474 	/* drain existing IO first */
475 	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
476 	/* linked sqes */
477 	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
478 	/* doesn't sever on completion < 0 */
479 	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
480 	/* IOSQE_ASYNC */
481 	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
482 	/* IOSQE_BUFFER_SELECT */
483 	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),
484 	/* IOSQE_CQE_SKIP_SUCCESS */
485 	REQ_F_CQE_SKIP		= BIT(REQ_F_CQE_SKIP_BIT),
486 
487 	/* fail rest of links */
488 	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
489 	/* on inflight list, should be cancelled and waited on exit reliably */
490 	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
491 	/* read/write uses file position */
492 	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
493 	/* must not punt to workers */
494 	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
495 	/* has or had linked timeout */
496 	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
497 	/* needs cleanup */
498 	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
499 	/* already went through poll handler */
500 	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
501 	/* buffer already selected */
502 	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
503 	/* buffer selected from ring, needs commit */
504 	REQ_F_BUFFER_RING	= BIT(REQ_F_BUFFER_RING_BIT),
505 	/* caller should reissue async */
506 	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
507 	/* supports async reads/writes */
508 	REQ_F_SUPPORT_NOWAIT	= BIT(REQ_F_SUPPORT_NOWAIT_BIT),
509 	/* regular file */
510 	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
511 	/* has creds assigned */
512 	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
513 	/* skip refcounting if not set */
514 	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
515 	/* there is a linked timeout that has to be armed */
516 	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
517 	/* ->async_data allocated */
518 	REQ_F_ASYNC_DATA	= BIT(REQ_F_ASYNC_DATA_BIT),
519 	/* don't post CQEs while failing linked requests */
520 	REQ_F_SKIP_LINK_CQES	= BIT(REQ_F_SKIP_LINK_CQES_BIT),
521 	/* single poll may be active */
522 	REQ_F_SINGLE_POLL	= BIT(REQ_F_SINGLE_POLL_BIT),
523 	/* double poll may be active */
524 	REQ_F_DOUBLE_POLL	= BIT(REQ_F_DOUBLE_POLL_BIT),
525 	/* request has already done partial IO */
526 	REQ_F_PARTIAL_IO	= BIT(REQ_F_PARTIAL_IO_BIT),
527 	/* fast poll multishot mode */
528 	REQ_F_APOLL_MULTISHOT	= BIT(REQ_F_APOLL_MULTISHOT_BIT),
529 	/* recvmsg special flag, clear EPOLLIN */
530 	REQ_F_CLEAR_POLLIN	= BIT(REQ_F_CLEAR_POLLIN_BIT),
531 	/* hashed into ->cancel_table_locked, protected by ->uring_lock */
532 	REQ_F_HASH_LOCKED	= BIT(REQ_F_HASH_LOCKED_BIT),
533 	/* don't use lazy poll wake for this request */
534 	REQ_F_POLL_NO_LAZY	= BIT(REQ_F_POLL_NO_LAZY_BIT),
535 };
536 
537 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
538 
539 struct io_task_work {
540 	struct llist_node		node;
541 	io_req_tw_func_t		func;
542 };
543 
544 struct io_cqe {
545 	__u64	user_data;
546 	__s32	res;
547 	/* fd initially, then cflags for completion */
548 	union {
549 		__u32	flags;
550 		int	fd;
551 	};
552 };
553 
554 /*
555  * Each request type overlays its private data structure on top of this one.
556  * They must not exceed this one in size.
557  */
558 struct io_cmd_data {
559 	struct file		*file;
560 	/* each command gets 56 bytes of data */
561 	__u8			data[56];
562 };
563 
564 static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
565 {
566 	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
567 }
568 #define io_kiocb_to_cmd(req, cmd_type) ( \
569 	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
570 	((cmd_type *)&(req)->cmd) \
571 )
572 #define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)
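
/*
 * Illustrative only: an opcode handler overlays its per-request state on the
 * io_cmd_data area via io_kiocb_to_cmd(). io_frob below is a hypothetical
 * per-op struct, not a real io_uring type:
 *
 *	struct io_frob {
 *		struct file	*file;		// must remain the first member
 *		u32		flags;
 *		u64		offset;
 *	};
 *
 *	struct io_frob *frob = io_kiocb_to_cmd(req, struct io_frob);
 *
 * The BUILD_BUG_ON() in io_kiocb_cmd_sz_check() fails the build if the per-op
 * struct ever grows past sizeof(struct io_cmd_data).
 */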
573 
574 struct io_kiocb {
575 	union {
576 		/*
577 		 * NOTE! Each of the io_kiocb union members has the file pointer
578 		 * as the first entry in their struct definition. So you can
579 		 * access the file pointer through any of the sub-structs,
580 		 * or directly as just 'file' in this struct.
581 		 */
582 		struct file		*file;
583 		struct io_cmd_data	cmd;
584 	};
585 
586 	u8				opcode;
587 	/* polled IO has completed */
588 	u8				iopoll_completed;
589 	/*
590 	 * Can be either a fixed buffer index, or used with provided buffers.
591 	 * For the latter, before issue it points to the buffer group ID,
592 	 * and after selection it points to the buffer ID itself.
593 	 */
594 	u16				buf_index;
595 	unsigned int			flags;
596 
597 	struct io_cqe			cqe;
598 
599 	struct io_ring_ctx		*ctx;
600 	struct task_struct		*task;
601 
602 	struct io_rsrc_node		*rsrc_node;
603 
604 	union {
605 		/* store used ubuf, so we can prevent reloading */
606 		struct io_mapped_ubuf	*imu;
607 
608 		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
609 		struct io_buffer	*kbuf;
610 
611 		/*
612 		 * stores buffer ID for ring provided buffers, valid IFF
613 		 * REQ_F_BUFFER_RING is set.
614 		 */
615 		struct io_buffer_list	*buf_list;
616 	};
617 
618 	union {
619 		/* used by request caches, completion batching and iopoll */
620 		struct io_wq_work_node	comp_list;
621 		/* cache ->apoll->events */
622 		__poll_t apoll_events;
623 	};
624 	atomic_t			refs;
625 	atomic_t			poll_refs;
626 	struct io_task_work		io_task_work;
627 	unsigned			nr_tw;
628 	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
629 	struct hlist_node		hash_node;
630 	/* internal polling, see IORING_FEAT_FAST_POLL */
631 	struct async_poll		*apoll;
632 	/* opcode allocated if it needs to store data for async defer */
633 	void				*async_data;
634 	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
635 	struct io_kiocb			*link;
636 	/* custom credentials, valid IFF REQ_F_CREDS is set */
637 	const struct cred		*creds;
638 	struct io_wq_work		work;
639 
640 	struct {
641 		u64			extra1;
642 		u64			extra2;
643 	} big_cqe;
644 };
645 
646 struct io_overflow_cqe {
647 	struct list_head list;
648 	struct io_uring_cqe cqe;
649 };
650 
651 #endif
652