// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	/* buffer group ID */
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	atomic_t refs;

	/* ring mapped provided buffers */
	__u8 is_buf_ring;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
};
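
/*
 * Hedged sketch (not code from this file) of how the union above is
 * discriminated in practice, per the comment on it: the ring flavor
 * selects buf_pages/buf_ring, otherwise the classic ->buf_list is
 * walked. Variable names here are illustrative only.
 *
 *	if (bl->is_buf_ring)
 *		br = bl->buf_ring;			// ring mapped buffers
 *	else
 *		list_for_each_entry(buf, &bl->buf_list, list)
 *			...				// classic provided buffers
 */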

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	/* buffer ID */
	__u16 bid;
	/* buffer group ID */
	__u16 bgid;
};

enum {
	/* may allocate a bigger iovec array if needed */
	KBUF_MODE_EXPAND	= 1,
	/* if a bigger array was allocated, free the old one */
	KBUF_MODE_FREE		= 2,
};

struct buf_sel_arg {
	/* iovec array to fill; may be replaced if KBUF_MODE_EXPAND is set */
	struct iovec *iovs;
	/* total byte length selected, set on return */
	size_t out_len;
	/* upper bound on the total length to select */
	size_t max_len;
	/* capacity of the iovs array */
	int nr_iovs;
	/* KBUF_MODE_* flags */
	int mode;
};
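
/*
 * Illustrative setup of a selection argument, assuming a caller-owned
 * on-stack iovec array; a sketch of how the fields and KBUF_MODE_*
 * flags fit together, not code from this file:
 *
 *	struct iovec iovs[8];
 *	struct buf_sel_arg arg = {
 *		.iovs    = iovs,
 *		.nr_iovs = ARRAY_SIZE(iovs),
 *		.max_len = INT_MAX,
 *		.mode    = KBUF_MODE_EXPAND | KBUF_MODE_FREE,
 *	};
 *
 *	ret = io_buffers_select(req, &arg, issue_flags);
 *	if (arg.iovs != iovs)		// a bigger array was allocated
 *		...			// caller owns it and must free it
 */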

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io; in that case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
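
/*
 * Typical handler-side use (hedged sketch, handler code hypothetical):
 * true is returned only on an issue attempt that still needs a buffer
 * picked, so selection happens at most once per request:
 *
 *	if (io_do_buffer_select(req)) {
 *		buf = io_buffer_select(req, &len, issue_flags);
 *		if (!buf)
 *			return -ENOBUFS;
 *	}
 */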

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}
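
/*
 * Hedged sketch of the usual recycle pattern in an opcode handler: if
 * the request will be retried and nothing was transferred, hand the
 * selected buffer back first. do_the_io() is hypothetical:
 *
 *	ret = do_the_io(req);
 *	if (ret == -EAGAIN) {
 *		io_kbuf_recycle(req, issue_flags);
 *		return -EAGAIN;
 *	}
 */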

static inline void __io_put_kbuf_ring(struct io_kiocb *req, int nr)
{
	struct io_buffer_list *bl = req->buf_list;

	if (bl) {
		if (req->flags & REQ_F_BUFFERS_COMMIT) {
			bl->head += nr;
			req->flags &= ~REQ_F_BUFFERS_COMMIT;
		}
		req->buf_index = bl->bgid;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
}
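
/*
 * For reference, a committed head maps back to a ring slot by masking,
 * since the ring size is a power of two (the real lookup lives in
 * kbuf.c); sketch only:
 *
 *	buf = &bl->buf_ring->bufs[bl->head & bl->mask];
 */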

static inline void __io_put_kbuf_list(struct io_kiocb *req,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req, 1);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

static inline void io_kbuf_drop(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;

	__io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs,
					  unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING)
		__io_put_kbuf_ring(req, nbufs);
	else
		__io_put_kbuf(req, issue_flags);
	return ret;
}
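
/*
 * The returned value ends up in cqe->flags, which is how userspace
 * learns which buffer was consumed; ABI sketch of the decode on the
 * application side, not code from this file:
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */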

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	return __io_put_kbufs(req, 1, issue_flags);
}

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int nbufs,
					unsigned issue_flags)
{
	return __io_put_kbufs(req, nbufs, issue_flags);
}
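
/*
 * Hedged completion-path sketch: fold the buffer info into the CQE
 * flags when posting the result. Assumes the io_req_set_res() helper
 * from io_uring.h; the surrounding handler is hypothetical:
 *
 *	cflags = io_put_kbuf(req, issue_flags);
 *	io_req_set_res(req, ret, cflags);
 */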
#endif