xref: /linux/fs/ntfs3/attrlist.c (revision 021bc4b9)
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/fs.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * al_is_valid_le
 *
 * Return: True if @le is valid.
 */
static inline bool al_is_valid_le(const struct ntfs_inode *ni,
				  struct ATTR_LIST_ENTRY *le)
{
	if (!le || !ni->attr_list.le || !ni->attr_list.size)
		return false;

	return PtrOffset(ni->attr_list.le, le) + le16_to_cpu(le->size) <=
	       ni->attr_list.size;
}

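/*
 * al_destroy
 *
 * Release the in-memory copy of the attribute list: close the run,
 * free the buffer and reset the bookkeeping fields.
 */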
void al_destroy(struct ntfs_inode *ni)
{
	run_close(&ni->attr_list.run);
	kvfree(ni->attr_list.le);
	ni->attr_list.le = NULL;
	ni->attr_list.size = 0;
	ni->attr_list.dirty = false;
}

/*
 * ntfs_load_attr_list
 *
 * Make sure that the attribute list, if present,
 * has been loaded and properly set up.
 */
int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
{
	int err;
	size_t lsize;
	void *le = NULL;

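	/* Already loaded? Then there is nothing to do. */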
	if (ni->attr_list.size)
		return 0;

	if (!attr->non_res) {
		lsize = le32_to_cpu(attr->res.data_size);
		/* attr is resident: lsize < record_size (1K or 4K) */
		le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
		if (!le) {
			err = -ENOMEM;
			goto out;
		}
		memcpy(le, resident_data(attr), lsize);
	} else if (attr->nres.svcn) {
		err = -EINVAL;
		goto out;
	} else {
		u16 run_off = le16_to_cpu(attr->nres.run_off);

		lsize = le64_to_cpu(attr->nres.data_size);

		run_init(&ni->attr_list.run);

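		/* The packed runs must start inside the attribute record. */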
		if (run_off > le32_to_cpu(attr->size)) {
			err = -EINVAL;
			goto out;
		}

		err = run_unpack_ex(&ni->attr_list.run, ni->mi.sbi, ni->mi.rno,
				    0, le64_to_cpu(attr->nres.evcn), 0,
				    Add2Ptr(attr, run_off),
				    le32_to_cpu(attr->size) - run_off);
		if (err < 0)
			goto out;

		/*
		 * Attr is non-resident. The worst case:
		 * a 1T (2^40), extremely fragmented file with a
		 * 4K (2^12) cluster => 2^28 fragments;
		 * 2^9 fragments per record => 2^19 records;
		 * 2^5 bytes of ATTR_LIST_ENTRY per record => 2^24 bytes.
		 *
		 * The result is 16M bytes per attribute list, so use
		 * kvmalloc to allocate in the range of several Kbytes
		 * to a dozen Mbytes.
		 */
		le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
		if (!le) {
			err = -ENOMEM;
			goto out;
		}

		err = ntfs_read_run_nb(ni->mi.sbi, &ni->attr_list.run, 0, le,
				       lsize, NULL);
		if (err)
			goto out;
	}

	ni->attr_list.size = lsize;
	ni->attr_list.le = le;

	return 0;

out:
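	/* Attach the buffer (possibly NULL) so that al_destroy() frees it. */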
	ni->attr_list.le = le;
	al_destroy(ni);

	return err;
}

/*
 * al_enumerate
 *
 * Return:
 * * The next list le.
 * * If @le is NULL then return the first le.
 */
struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
				     struct ATTR_LIST_ENTRY *le)
{
	size_t off;
	u16 sz;
	const unsigned le_min_size = le_size(0);

	if (!le) {
		le = ni->attr_list.le;
	} else {
		sz = le16_to_cpu(le->size);
		if (sz < le_min_size) {
			/* Impossible, because we should never return such an le. */
			return NULL;
		}
		le = Add2Ptr(le, sz);
	}

	/* Check boundary. */
	off = PtrOffset(ni->attr_list.le, le);
	if (off + le_min_size > ni->attr_list.size) {
		/* The regular end of the list. */
		return NULL;
	}

	sz = le16_to_cpu(le->size);

	/* Check le for errors. */
	if (sz < le_min_size || off + sz > ni->attr_list.size ||
	    sz < le->name_off + le->name_len * sizeof(short)) {
		return NULL;
	}

	return le;
}

/*
 * al_find_le
 *
 * Find the first le in the list which matches type, name and VCN.
 *
 * Return: NULL if not found.
 */
struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni,
				   struct ATTR_LIST_ENTRY *le,
				   const struct ATTRIB *attr)
{
	CLST svcn = attr_svcn(attr);

	return al_find_ex(ni, le, attr->type, attr_name(attr), attr->name_len,
			  &svcn);
}

/*
 * al_find_ex
 *
 * Find the first le in the list which matches type, name and VCN.
 *
 * Return: NULL if not found.
 */
struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
				   struct ATTR_LIST_ENTRY *le,
				   enum ATTR_TYPE type, const __le16 *name,
				   u8 name_len, const CLST *vcn)
{
	struct ATTR_LIST_ENTRY *ret = NULL;
	u32 type_in = le32_to_cpu(type);

	while ((le = al_enumerate(ni, le))) {
		u64 le_vcn;
		int diff = le32_to_cpu(le->type) - type_in;

		/* List entries are sorted by type, name and VCN. */
		if (diff < 0)
			continue;

		if (diff > 0)
			return ret;

		if (le->name_len != name_len)
			continue;

		le_vcn = le64_to_cpu(le->vcn);
		if (!le_vcn) {
			/*
			 * Compare entry names only for entries with vcn == 0.
			 */
			diff = ntfs_cmp_names(le_name(le), name_len, name,
					      name_len, ni->mi.sbi->upcase,
					      true);
			if (diff < 0)
				continue;

			if (diff > 0)
				return ret;
		}

		if (!vcn)
			return le;

		if (*vcn == le_vcn)
			return le;

		if (*vcn < le_vcn)
			return ret;

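		/* Remember this entry (vcn below the target) and keep scanning. */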
		ret = le;
	}

	return ret;
}

/*
 * al_find_le_to_insert
 *
 * Find the position at which a new entry with the given
 * type, name and VCN should be inserted.
 */
static struct ATTR_LIST_ENTRY *al_find_le_to_insert(struct ntfs_inode *ni,
						    enum ATTR_TYPE type,
						    const __le16 *name,
						    u8 name_len, CLST vcn)
{
	struct ATTR_LIST_ENTRY *le = NULL, *prev;
	u32 type_in = le32_to_cpu(type);

	/* List entries are sorted by type, name and VCN. */
	while ((le = al_enumerate(ni, prev = le))) {
		int diff = le32_to_cpu(le->type) - type_in;

		if (diff < 0)
			continue;

		if (diff > 0)
			return le;

		if (!le->vcn) {
			/*
			 * Compare entry names only for entries with vcn == 0.
			 */
			diff = ntfs_cmp_names(le_name(le), le->name_len, name,
					      name_len, ni->mi.sbi->upcase,
					      true);
			if (diff < 0)
				continue;

			if (diff > 0)
				return le;
		}

		if (le64_to_cpu(le->vcn) >= vcn)
			return le;
	}

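	/*
	 * No entry sorts after the new one: insert at the end of the
	 * list, or at the very start if the list is empty.
	 */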
	return prev ? Add2Ptr(prev, le16_to_cpu(prev->size)) : ni->attr_list.le;
}

/*
 * al_add_le
 *
 * Add an "attribute list entry" to the list.
 */
int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
	      u8 name_len, CLST svcn, __le16 id, const struct MFT_REF *ref,
	      struct ATTR_LIST_ENTRY **new_le)
{
	int err;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	size_t off;
	u16 sz;
	size_t asize, new_asize, old_size;
	u64 new_size;
	typeof(ni->attr_list) *al = &ni->attr_list;

	/*
	 * Compute the size of the new 'le'.
	 */
	sz = le_size(name_len);
	old_size = al->size;
	new_size = old_size + sz;
	asize = al_aligned(old_size);
	new_asize = al_aligned(new_size);

	/* Scan forward to the point at which the new 'le' should be inserted. */
	le = al_find_le_to_insert(ni, type, name, name_len, svcn);
	off = PtrOffset(al->le, le);

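	/*
	 * If the new entry does not fit into the currently allocated
	 * (aligned) buffer, reallocate and copy the two halves around
	 * the insertion point; otherwise just shift the tail in place.
	 */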
	if (new_size > asize) {
		void *ptr = kmalloc(new_asize, GFP_NOFS);

		if (!ptr)
			return -ENOMEM;

		memcpy(ptr, al->le, off);
		memcpy(Add2Ptr(ptr, off + sz), le, old_size - off);
		le = Add2Ptr(ptr, off);
		kvfree(al->le);
		al->le = ptr;
	} else {
		memmove(Add2Ptr(le, sz), le, old_size - off);
	}
	*new_le = le;

	al->size = new_size;

	le->type = type;
	le->size = cpu_to_le16(sz);
	le->name_len = name_len;
	le->name_off = offsetof(struct ATTR_LIST_ENTRY, name);
	le->vcn = cpu_to_le64(svcn);
	le->ref = *ref;
	le->id = id;
	memcpy(le->name, name, sizeof(short) * name_len);

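	/* Grow the ATTR_LIST attribute itself to the new size. */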
	err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, new_size,
			    &new_size, true, &attr);
	if (err) {
		/* Undo the insert: shift the tail back over the new entry. */
		memmove(le, Add2Ptr(le, sz), old_size - off);
		al->size = old_size;
		return err;
	}

	al->dirty = true;

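	/* A non-resident attribute list is written back to disk right away. */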
	if (attr && attr->non_res) {
		err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
					al->size, 0);
		if (err)
			return err;
		al->dirty = false;
	}

	return 0;
}

/*
 * al_remove_le - Remove @le from the attribute list.
 */
bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le)
{
	u16 size;
	size_t off;
	typeof(ni->attr_list) *al = &ni->attr_list;

	if (!al_is_valid_le(ni, le))
		return false;

	/* Save on stack the size of 'le'. */
	size = le16_to_cpu(le->size);
	off = PtrOffset(al->le, le);

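	/* Shift the tail of the list over the removed entry. */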
	memmove(le, Add2Ptr(le, size), al->size - (off + size));

	al->size -= size;
	al->dirty = true;

	return true;
}

/*
 * al_delete_le - Delete the first le that matches the given parameters.
 */
bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
		  const __le16 *name, u8 name_len, const struct MFT_REF *ref)
{
	u16 size;
	struct ATTR_LIST_ENTRY *le;
	size_t off;
	typeof(ni->attr_list) *al = &ni->attr_list;

	/* Scan forward to the first le that matches the input. */
	le = al_find_ex(ni, NULL, type, name, name_len, &vcn);
	if (!le)
		return false;

	off = PtrOffset(al->le, le);

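	/* Verify each candidate entry and step forward while @ref does not match. */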
next:
	if (off >= al->size)
		return false;
	if (le->type != type)
		return false;
	if (le->name_len != name_len)
		return false;
	if (name_len && ntfs_cmp_names(le_name(le), name_len, name, name_len,
				       ni->mi.sbi->upcase, true))
		return false;
	if (le64_to_cpu(le->vcn) != vcn)
		return false;

	/*
	 * If the caller specified a segment reference, scan through the
	 * matching entries until we find that segment reference or run
	 * out of matching entries.
	 */
	if (ref && memcmp(ref, &le->ref, sizeof(*ref))) {
		off += le16_to_cpu(le->size);
		le = Add2Ptr(al->le, off);
		goto next;
	}

	/* Save on stack the size of 'le'. */
	size = le16_to_cpu(le->size);
	/* Delete the le. */
	memmove(le, Add2Ptr(le, size), al->size - (off + size));

	al->size -= size;
	al->dirty = true;

	return true;
}

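/*
 * al_update
 *
 * Write the in-memory attribute list back to the ATTR_LIST attribute:
 * into the resident data or to the attribute's on-disk run.
 */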
int al_update(struct ntfs_inode *ni, int sync)
{
	int err;
	struct ATTRIB *attr;
	typeof(ni->attr_list) *al = &ni->attr_list;

	if (!al->dirty || !al->size)
		return 0;

	/*
	 * The attribute list is grown on demand in al_add_le;
	 * it is shrunk here.
	 */
	err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, al->size, NULL,
			    false, &attr);
	if (err)
		goto out;

	if (!attr->non_res) {
		memcpy(resident_data(attr), al->le, al->size);
	} else {
		err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
					al->size, sync);
		if (err)
			goto out;

		attr->nres.valid_size = attr->nres.data_size;
	}

	ni->mi.dirty = true;
	al->dirty = false;

out:
	return err;
}