// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>
#include "internal.h"

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

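	/* The store has been committed, so any author keys that are no longer
	 * pinned by dirty or in-flight pages can be discarded.
	 */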
	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  The keys used to author writes are
 * cached on the vnode.  wreq->netfs_priv2 will contain the last writeback key
 * record used or NULL, and if it's set, we need to start from there.
 * wreq->netfs_priv will be set to the key itself or NULL.
 */
static void afs_get_writeback_key(struct netfs_io_request *wreq)
{
	struct afs_wb_key *wbk, *old = wreq->netfs_priv2;
	struct afs_vnode *vnode = AFS_FS_I(wreq->inode);

	key_put(wreq->netfs_priv);
	wreq->netfs_priv = NULL;
	wreq->netfs_priv2 = NULL;

	spin_lock(&vnode->wb_lock);
	if (old)
		wbk = list_next_entry(old, vnode_link);
	else
		wbk = list_first_entry(&vnode->wb_keys, struct afs_wb_key, vnode_link);

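	/* Scan onwards through the cached keys, pinning the first one that
	 * still validates for use by this writeback.
	 */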
	list_for_each_entry_from(wbk, &vnode->wb_keys, vnode_link) {
		_debug("wbk %u", key_serial(wbk->key));
		if (key_validate(wbk->key) == 0) {
			refcount_inc(&wbk->usage);
			wreq->netfs_priv = key_get(wbk->key);
			wreq->netfs_priv2 = wbk;
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}
	}

	spin_unlock(&vnode->wb_lock);

	afs_put_wb_key(old);
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

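	/* Note: the server sets the mtime from what the client supplied, so we
	 * adopt that value as the new ctime when committing the status below.
	 */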
	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (!afs_op_error(op)) {
		afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

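/* Dispatch table for a data store: the AFS or YFS variant of the StoreData
 * RPC is selected according to the protocol the server speaks.
 */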
static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * Prepare a subrequest to write to the server.  This sets the max_len
 * parameter.
 */
void afs_prepare_write(struct netfs_io_subrequest *subreq)
{
	//if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
	//	subreq->max_len = 512 * 1024;
	//else
	subreq->max_len = 256 * 1024 * 1024;
}

/*
 * Issue a subrequest to write to the server.
 */
static void afs_issue_write_worker(struct work_struct *work)
{
	struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
	struct netfs_io_request *wreq = subreq->rreq;
	struct afs_operation *op;
	struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
	unsigned long long pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int ret = -ENOKEY;

	_enter("R=%x[%x],%s{%llx:%llu.%u},%llx,%zx",
	       wreq->debug_id, subreq->debug_index,
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       pos, len);

#if 0 // Error injection
	if (subreq->debug_index == 3)
		return netfs_write_subrequest_terminated(subreq, -ENOANO, false);

	if (!test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
		set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
	}
#endif

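	/* Set up a store operation, authenticating with the writeback key
	 * currently cached on the request.
	 */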
	op = afs_alloc_operation(wreq->netfs_priv, vnode->volume);
	if (IS_ERR(op))
		return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.pos = pos;
	op->store.size = len;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

	afs_begin_vnode_operation(op);

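	/* Attach the source data and propose a file size in case the write
	 * extends beyond the current server-side EOF.
	 */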
	op->store.write_iter = &subreq->io_iter;
	op->store.i_size = umax(pos + len, vnode->netfs.remote_i_size);
	op->mtime = inode_get_mtime(&vnode->netfs.inode);

	afs_wait_for_operation(op);
	ret = afs_put_operation(op);
	switch (ret) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		/* If there are more keys we can try, use the retry algorithm
		 * to rotate the keys.
		 */
		if (wreq->netfs_priv2)
			set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		break;
	}

	netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false);
}

void afs_issue_write(struct netfs_io_subrequest *subreq)
{
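	/* Punt the write off to a worker thread: the store operation waits
	 * synchronously for the server's reply, which the caller's context may
	 * not permit.
	 */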
	subreq->work.func = afs_issue_write_worker;
	if (!queue_work(system_unbound_wq, &subreq->work))
		WARN_ON_ONCE(1);
}

/*
 * Writeback calls this when it finds a folio that needs uploading.  This
 * isn't called if writeback only has copy-to-cache to deal with.
 */
void afs_begin_writeback(struct netfs_io_request *wreq)
{
	afs_get_writeback_key(wreq);
	wreq->io_streams[0].avail = true;
}

/*
 * Prepare to retry the writes in a request.  Use this to try rotating the
 * available writeback keys.
 */
void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream)
{
	struct netfs_io_subrequest *subreq =
		list_first_entry(&stream->subrequests,
				 struct netfs_io_subrequest, rreq_link);

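	/* Only permission and key failures can be cured by switching to
	 * another of the cached writeback keys; if none is left, the stream
	 * has to fail.
	 */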
	switch (subreq->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_get_writeback_key(wreq);
		if (!wreq->netfs_priv)
			stream->failed = true;
		break;
	}
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	int ret;

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.  For non-integrity writeback, just skip the flush
	 * rather than waiting if the lock can't be had immediately.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	ret = netfs_writepages(mapping, wbc);
	up_read(&vnode->validate_lock);
	return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

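	/* Revalidate the vnode with the file's key first so that a revoked
	 * key or a lost callback is reported rather than masked by the flush.
	 */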
	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;

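	/* Check that the vnode and the key that will author the write are
	 * still usable before letting the page be dirtied.
	 */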
	if (afs_validate(AFS_FS_I(file_inode(file)), afs_file_key(file)) < 0)
		return VM_FAULT_SIGBUS;
	return netfs_page_mkwrite(vmf, NULL);
}

/*
 * Prune the keys cached for writeback.  The lock is taken here, so the caller
 * must not hold vnode->wb_lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

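	/* Dispose of the doomed keys outside of the spinlock. */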
	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}