xref: /linux/fs/xfs/xfs_sysfs.c (revision 0be3ff0c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2014 Red Hat, Inc.
4  * All Rights Reserved.
5  */
6 
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sysfs.h"
13 #include "xfs_log.h"
14 #include "xfs_log_priv.h"
15 #include "xfs_mount.h"
16 
/*
 * Generic XFS sysfs attribute: wraps the kernel's struct attribute with
 * XFS-specific show/store callbacks that take only the kobject and buffer.
 */
struct xfs_sysfs_attr {
	struct attribute attr;
	/* format the attribute's value into @buf; NULL for write-only attrs */
	ssize_t (*show)(struct kobject *kobject, char *buf);
	/* parse @buf and apply the value; NULL for read-only attrs */
	ssize_t (*store)(struct kobject *kobject, const char *buf,
			 size_t count);
};
23 
/* Convert a generic sysfs attribute to its containing xfs_sysfs_attr. */
static inline struct xfs_sysfs_attr *
to_attr(struct attribute *attr)
{
	return container_of(attr, struct xfs_sysfs_attr, attr);
}
29 
/*
 * Declare a static xfs_sysfs_attr named xfs_sysfs_attr_<name>, wired to
 * <name>_show and/or <name>_store through the standard __ATTR_* helpers.
 */
#define XFS_SYSFS_ATTR_RW(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
#define XFS_SYSFS_ATTR_RO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
#define XFS_SYSFS_ATTR_WO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)

/* Address of the embedded struct attribute, for attribute list arrays. */
#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr
38 
39 STATIC ssize_t
40 xfs_sysfs_object_show(
41 	struct kobject		*kobject,
42 	struct attribute	*attr,
43 	char			*buf)
44 {
45 	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
46 
47 	return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
48 }
49 
50 STATIC ssize_t
51 xfs_sysfs_object_store(
52 	struct kobject		*kobject,
53 	struct attribute	*attr,
54 	const char		*buf,
55 	size_t			count)
56 {
57 	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
58 
59 	return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
60 }
61 
/* Shared sysfs ops: every XFS kobject dispatches through these wrappers. */
static const struct sysfs_ops xfs_sysfs_ops = {
	.show = xfs_sysfs_object_show,
	.store = xfs_sysfs_object_store,
};
66 
/* The per-mount kobject currently exposes no attributes of its own. */
static struct attribute *xfs_mp_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(xfs_mp);

/* ktype for the per-mount .../fs/xfs/<dev> kobject. */
struct kobj_type xfs_mp_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_mp_groups,
};
77 
78 #ifdef DEBUG
79 /* debug */
80 
81 STATIC ssize_t
82 bug_on_assert_store(
83 	struct kobject		*kobject,
84 	const char		*buf,
85 	size_t			count)
86 {
87 	int			ret;
88 	int			val;
89 
90 	ret = kstrtoint(buf, 0, &val);
91 	if (ret)
92 		return ret;
93 
94 	if (val == 1)
95 		xfs_globals.bug_on_assert = true;
96 	else if (val == 0)
97 		xfs_globals.bug_on_assert = false;
98 	else
99 		return -EINVAL;
100 
101 	return count;
102 }
103 
/* Report the current bug_on_assert setting as 0 or 1. */
STATIC ssize_t
bug_on_assert_show(
	struct kobject		*kobject,
	char			*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.bug_on_assert);
}
XFS_SYSFS_ATTR_RW(bug_on_assert);
112 
113 STATIC ssize_t
114 log_recovery_delay_store(
115 	struct kobject	*kobject,
116 	const char	*buf,
117 	size_t		count)
118 {
119 	int		ret;
120 	int		val;
121 
122 	ret = kstrtoint(buf, 0, &val);
123 	if (ret)
124 		return ret;
125 
126 	if (val < 0 || val > 60)
127 		return -EINVAL;
128 
129 	xfs_globals.log_recovery_delay = val;
130 
131 	return count;
132 }
133 
/* Report the configured log recovery delay in seconds. */
STATIC ssize_t
log_recovery_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.log_recovery_delay);
}
XFS_SYSFS_ATTR_RW(log_recovery_delay);
142 
143 STATIC ssize_t
144 mount_delay_store(
145 	struct kobject	*kobject,
146 	const char	*buf,
147 	size_t		count)
148 {
149 	int		ret;
150 	int		val;
151 
152 	ret = kstrtoint(buf, 0, &val);
153 	if (ret)
154 		return ret;
155 
156 	if (val < 0 || val > 60)
157 		return -EINVAL;
158 
159 	xfs_globals.mount_delay = val;
160 
161 	return count;
162 }
163 
/* Report the configured mount delay in seconds. */
STATIC ssize_t
mount_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.mount_delay);
}
XFS_SYSFS_ATTR_RW(mount_delay);
172 
173 static ssize_t
174 always_cow_store(
175 	struct kobject	*kobject,
176 	const char	*buf,
177 	size_t		count)
178 {
179 	ssize_t		ret;
180 
181 	ret = kstrtobool(buf, &xfs_globals.always_cow);
182 	if (ret < 0)
183 		return ret;
184 	return count;
185 }
186 
/* Report whether "always copy-on-write" mode is enabled (0 or 1). */
static ssize_t
always_cow_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.always_cow);
}
XFS_SYSFS_ATTR_RW(always_cow);
195 
196 #ifdef DEBUG
197 /*
198  * Override how many threads the parallel work queue is allowed to create.
199  * This has to be a debug-only global (instead of an errortag) because one of
200  * the main users of parallel workqueues is mount time quotacheck.
201  */
202 STATIC ssize_t
203 pwork_threads_store(
204 	struct kobject	*kobject,
205 	const char	*buf,
206 	size_t		count)
207 {
208 	int		ret;
209 	int		val;
210 
211 	ret = kstrtoint(buf, 0, &val);
212 	if (ret)
213 		return ret;
214 
215 	if (val < -1 || val > num_possible_cpus())
216 		return -EINVAL;
217 
218 	xfs_globals.pwork_threads = val;
219 
220 	return count;
221 }
222 
/* Report the configured pwork thread count (-1 = use the default). */
STATIC ssize_t
pwork_threads_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.pwork_threads);
}
XFS_SYSFS_ATTR_RW(pwork_threads);
#endif /* DEBUG */
232 
/* Attributes exposed under the debug kobject (DEBUG builds only). */
static struct attribute *xfs_dbg_attrs[] = {
	ATTR_LIST(bug_on_assert),
	ATTR_LIST(log_recovery_delay),
	ATTR_LIST(mount_delay),
	ATTR_LIST(always_cow),
#ifdef DEBUG
	ATTR_LIST(pwork_threads),
#endif
	NULL,
};
ATTRIBUTE_GROUPS(xfs_dbg);

/* ktype for the global .../fs/xfs/debug kobject. */
struct kobj_type xfs_dbg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_dbg_groups,
};
250 
251 #endif /* DEBUG */
252 
253 /* stats */
254 
/* Map a stats kobject back to its containing xstats structure. */
static inline struct xstats *
to_xstats(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xstats, xs_kobj);
}
262 
/* Format the full statistics table into @buf. */
STATIC ssize_t
stats_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xstats	*stats = to_xstats(kobject);

	return xfs_stats_format(stats->xs_stats, buf);
}
XFS_SYSFS_ATTR_RO(stats);
273 
274 STATIC ssize_t
275 stats_clear_store(
276 	struct kobject	*kobject,
277 	const char	*buf,
278 	size_t		count)
279 {
280 	int		ret;
281 	int		val;
282 	struct xstats	*stats = to_xstats(kobject);
283 
284 	ret = kstrtoint(buf, 0, &val);
285 	if (ret)
286 		return ret;
287 
288 	if (val != 1)
289 		return -EINVAL;
290 
291 	xfs_stats_clearall(stats->xs_stats);
292 	return count;
293 }
294 XFS_SYSFS_ATTR_WO(stats_clear);
295 
/* Attributes exposed under each stats kobject. */
static struct attribute *xfs_stats_attrs[] = {
	ATTR_LIST(stats),
	ATTR_LIST(stats_clear),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_stats);

/* ktype for global and per-mount statistics kobjects. */
struct kobj_type xfs_stats_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_stats_groups,
};
308 
309 /* xlog */
310 
/* Map a log kobject back to its containing xlog structure. */
static inline struct xlog *
to_xlog(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xlog, l_kobj);
}
318 
319 STATIC ssize_t
320 log_head_lsn_show(
321 	struct kobject	*kobject,
322 	char		*buf)
323 {
324 	int cycle;
325 	int block;
326 	struct xlog *log = to_xlog(kobject);
327 
328 	spin_lock(&log->l_icloglock);
329 	cycle = log->l_curr_cycle;
330 	block = log->l_curr_block;
331 	spin_unlock(&log->l_icloglock);
332 
333 	return sysfs_emit(buf, "%d:%d\n", cycle, block);
334 }
335 XFS_SYSFS_ATTR_RO(log_head_lsn);
336 
/* Report the current log tail position as "cycle:block". */
STATIC ssize_t
log_tail_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	/* l_tail_lsn is an atomic; crack it into cycle/block parts. */
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
	return sysfs_emit(buf, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_tail_lsn);
350 
351 STATIC ssize_t
352 reserve_grant_head_show(
353 	struct kobject	*kobject,
354 	char		*buf)
355 
356 {
357 	int cycle;
358 	int bytes;
359 	struct xlog *log = to_xlog(kobject);
360 
361 	xlog_crack_grant_head(&log->l_reserve_head.grant, &cycle, &bytes);
362 	return sysfs_emit(buf, "%d:%d\n", cycle, bytes);
363 }
364 XFS_SYSFS_ATTR_RO(reserve_grant_head);
365 
/* Report the write grant head as "cycle:bytes". */
STATIC ssize_t
write_grant_head_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int bytes;
	struct xlog *log = to_xlog(kobject);

	xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &bytes);
	return sysfs_emit(buf, "%d:%d\n", cycle, bytes);
}
XFS_SYSFS_ATTR_RO(write_grant_head);
379 
/* Attributes exposed under the per-mount log kobject. */
static struct attribute *xfs_log_attrs[] = {
	ATTR_LIST(log_head_lsn),
	ATTR_LIST(log_tail_lsn),
	ATTR_LIST(reserve_grant_head),
	ATTR_LIST(write_grant_head),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_log);

/* ktype for the per-mount .../fs/xfs/<dev>/log kobject. */
struct kobj_type xfs_log_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_log_groups,
};
394 
395 /*
396  * Metadata IO error configuration
397  *
398  * The sysfs structure here is:
399  *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
400  *
401  * where <class> allows us to discriminate between data IO and metadata IO,
402  * and any other future type of IO (e.g. special inode or directory error
403  * handling) we care to support.
404  */
/* Map an <errno>-level error kobject back to its error configuration. */
static inline struct xfs_error_cfg *
to_error_cfg(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_error_cfg, kobj);
}
411 
/* Map the top-level error kobject back to its mount. */
static inline struct xfs_mount *
err_to_mp(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_mount, m_error_kobj);
}
418 
419 static ssize_t
420 max_retries_show(
421 	struct kobject	*kobject,
422 	char		*buf)
423 {
424 	int		retries;
425 	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
426 
427 	if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
428 		retries = -1;
429 	else
430 		retries = cfg->max_retries;
431 
432 	return sysfs_emit(buf, "%d\n", retries);
433 }
434 
435 static ssize_t
436 max_retries_store(
437 	struct kobject	*kobject,
438 	const char	*buf,
439 	size_t		count)
440 {
441 	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
442 	int		ret;
443 	int		val;
444 
445 	ret = kstrtoint(buf, 0, &val);
446 	if (ret)
447 		return ret;
448 
449 	if (val < -1)
450 		return -EINVAL;
451 
452 	if (val == -1)
453 		cfg->max_retries = XFS_ERR_RETRY_FOREVER;
454 	else
455 		cfg->max_retries = val;
456 	return count;
457 }
458 XFS_SYSFS_ATTR_RW(max_retries);
459 
/*
 * Report the retry timeout in seconds.  The in-kernel value is stored in
 * jiffies; XFS_ERR_RETRY_FOREVER is presented to userspace as -1.
 */
static ssize_t
retry_timeout_seconds_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		timeout;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
		timeout = -1;
	else
		timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;

	return sysfs_emit(buf, "%d\n", timeout);
}
475 
476 static ssize_t
477 retry_timeout_seconds_store(
478 	struct kobject	*kobject,
479 	const char	*buf,
480 	size_t		count)
481 {
482 	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
483 	int		ret;
484 	int		val;
485 
486 	ret = kstrtoint(buf, 0, &val);
487 	if (ret)
488 		return ret;
489 
490 	/* 1 day timeout maximum, -1 means infinite */
491 	if (val < -1 || val > 86400)
492 		return -EINVAL;
493 
494 	if (val == -1)
495 		cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
496 	else {
497 		cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
498 		ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
499 	}
500 	return count;
501 }
502 XFS_SYSFS_ATTR_RW(retry_timeout_seconds);
503 
/* Report whether errors should stop being retried at unmount (0 or 1). */
static ssize_t
fail_at_unmount_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xfs_mount	*mp = err_to_mp(kobject);

	return sysfs_emit(buf, "%d\n", mp->m_fail_unmount);
}
513 
514 static ssize_t
515 fail_at_unmount_store(
516 	struct kobject	*kobject,
517 	const char	*buf,
518 	size_t		count)
519 {
520 	struct xfs_mount	*mp = err_to_mp(kobject);
521 	int		ret;
522 	int		val;
523 
524 	ret = kstrtoint(buf, 0, &val);
525 	if (ret)
526 		return ret;
527 
528 	if (val < 0 || val > 1)
529 		return -EINVAL;
530 
531 	mp->m_fail_unmount = val;
532 	return count;
533 }
534 XFS_SYSFS_ATTR_RW(fail_at_unmount);
535 
/* Attributes exposed under each per-errno error configuration kobject. */
static struct attribute *xfs_error_attrs[] = {
	ATTR_LIST(max_retries),
	ATTR_LIST(retry_timeout_seconds),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_error);

/* ktype for .../error/<class>/<errno>/ kobjects. */
static struct kobj_type xfs_error_cfg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_error_groups,
};

/* ktype for the attribute-less error/ and error/<class>/ kobjects. */
static struct kobj_type xfs_error_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
};
553 
554 /*
555  * Error initialization tables. These need to be ordered in the same
556  * order as the enums used to index the array. All class init tables need to
557  * define a "default" behaviour as the first entry, all other entries can be
558  * empty.
559  */
/* Initial per-errno error configuration (sysfs name plus defaults). */
struct xfs_error_init {
	char		*name;		/* sysfs directory name */
	int		max_retries;	/* -1/FOREVER or a retry count */
	int		retry_timeout;	/* in seconds */
};
565 
/* Metadata IO error defaults, indexed by the XFS_ERR_* errno enum. */
static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
	{ .name = "default",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "EIO",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENOSPC",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENODEV",
	  .max_retries = 0,	/* We can't recover from devices disappearing */
	  .retry_timeout = 0,
	},
};
584 
/*
 * Create the sysfs kobjects for one error class: the <class> directory
 * itself plus one <errno> child per entry in @init, seeding each child's
 * runtime config from the init table.  On failure, already-created
 * children and the class kobject are torn down again.
 */
static int
xfs_error_sysfs_init_class(
	struct xfs_mount	*mp,
	int			class,
	const char		*parent_name,
	struct xfs_kobj		*parent_kobj,
	const struct xfs_error_init init[])
{
	struct xfs_error_cfg	*cfg;
	int			error;
	int			i;

	ASSERT(class < XFS_ERR_CLASS_MAX);

	/* Create the .../error/<class>/ directory under m_error_kobj. */
	error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
				&mp->m_error_kobj, parent_name);
	if (error)
		return error;

	for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
		cfg = &mp->m_error_cfg[class][i];
		error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
					parent_kobj, init[i].name);
		if (error)
			goto out_error;

		/* Seed the live config; timeout is kept in jiffies. */
		cfg->max_retries = init[i].max_retries;
		if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
			cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
		else
			cfg->retry_timeout = msecs_to_jiffies(
					init[i].retry_timeout * MSEC_PER_SEC);
	}
	return 0;

out_error:
	/* unwind the entries that succeeded */
	for (i--; i >= 0; i--) {
		cfg = &mp->m_error_cfg[class][i];
		xfs_sysfs_del(&cfg->kobj);
	}
	xfs_sysfs_del(parent_kobj);
	return error;
}
629 
/*
 * Build the per-mount error configuration sysfs hierarchy:
 * .../xfs/<dev>/error/, its fail_at_unmount attribute, and the metadata
 * error class underneath it.  Returns 0 or a negative errno; on failure
 * everything created here is removed again.
 */
int
xfs_error_sysfs_init(
	struct xfs_mount	*mp)
{
	int			error;

	/* .../xfs/<dev>/error/ */
	error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
				&mp->m_kobj, "error");
	if (error)
		return error;

	error = sysfs_create_file(&mp->m_error_kobj.kobject,
				  ATTR_LIST(fail_at_unmount));

	if (error)
		goto out_error;

	/* .../xfs/<dev>/error/metadata/ */
	error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
				"metadata", &mp->m_error_meta_kobj,
				xfs_error_meta_init);
	if (error)
		goto out_error;

	return 0;

out_error:
	/* init_class unwinds its own children; remove the parent here. */
	xfs_sysfs_del(&mp->m_error_kobj);
	return error;
}
661 
662 void
663 xfs_error_sysfs_del(
664 	struct xfs_mount	*mp)
665 {
666 	struct xfs_error_cfg	*cfg;
667 	int			i, j;
668 
669 	for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
670 		for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
671 			cfg = &mp->m_error_cfg[i][j];
672 
673 			xfs_sysfs_del(&cfg->kobj);
674 		}
675 	}
676 	xfs_sysfs_del(&mp->m_error_meta_kobj);
677 	xfs_sysfs_del(&mp->m_error_kobj);
678 }
679 
680 struct xfs_error_cfg *
681 xfs_error_get_cfg(
682 	struct xfs_mount	*mp,
683 	int			error_class,
684 	int			error)
685 {
686 	struct xfs_error_cfg	*cfg;
687 
688 	if (error < 0)
689 		error = -error;
690 
691 	switch (error) {
692 	case EIO:
693 		cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
694 		break;
695 	case ENOSPC:
696 		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
697 		break;
698 	case ENODEV:
699 		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
700 		break;
701 	default:
702 		cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
703 		break;
704 	}
705 
706 	return cfg;
707 }
708