xref: /linux/kernel/ksysfs.c (revision dd093fb0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which
 *		     are not related to any other subsystem
 *
 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
 */

#include <asm/byteorder.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/compiler.h>

#include <linux/rcupdate.h>	/* rcu_expedited and rcu_normal */

#if defined(__LITTLE_ENDIAN)
#define CPU_BYTEORDER_STRING	"little"
#elif defined(__BIG_ENDIAN)
#define CPU_BYTEORDER_STRING	"big"
#else
#error Unknown byteorder
#endif

#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
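
/*
 * These wrappers just name each attribute after its show/store handlers.
 * As a rough sketch (the exact expansion lives in <linux/sysfs.h>),
 * KERNEL_ATTR_RO(uevent_seqnum) becomes something like:
 *
 *	static struct kobj_attribute uevent_seqnum_attr = {
 *		.attr = { .name = "uevent_seqnum", .mode = 0444 },
 *		.show = uevent_seqnum_show,
 *	};
 *
 * so each attribute below only has to supply the handler functions.
 */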

/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)uevent_seqnum);
}
KERNEL_ATTR_RO(uevent_seqnum);

/* cpu byteorder */
static ssize_t cpu_byteorder_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", CPU_BYTEORDER_STRING);
}
KERNEL_ATTR_RO(cpu_byteorder);
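
/*
 * Note: sysfs_emit() is the preferred helper for show() callbacks; unlike
 * the plain sprintf() calls elsewhere in this file, it knows the output
 * buffer is a single PAGE_SIZE sysfs page and will not write past its end.
 */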

#ifdef CONFIG_UEVENT_HELPER
/* uevent helper program, used during early boot */
static ssize_t uevent_helper_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", uevent_helper);
}
static ssize_t uevent_helper_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	if (count+1 > UEVENT_HELPER_PATH_LEN)
		return -ENOENT;
	memcpy(uevent_helper, buf, count);
	uevent_helper[count] = '\0';
	if (count && uevent_helper[count-1] == '\n')
		uevent_helper[count-1] = '\0';
	return count;
}
KERNEL_ATTR_RW(uevent_helper);
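
/*
 * Usage sketch: the uevent_helper buffer (defaulting to
 * CONFIG_UEVENT_HELPER_PATH) also backs the kernel.hotplug sysctl, so e.g.
 *
 *	echo "" > /sys/kernel/uevent_helper
 *
 * disables the usermode helper once netlink-based uevent delivery (udev)
 * has taken over.
 */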
#endif

#ifdef CONFIG_PROFILING
static ssize_t profiling_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", prof_on);
}
static ssize_t profiling_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int ret;

	if (prof_on)
		return -EEXIST;
	/*
	 * This eventually calls into get_option() which
	 * has a ton of callers and is not const.  It is
	 * easiest to cast it away here.
	 */
	profile_setup((char *)buf);
	ret = profile_init();
	if (ret)
		return ret;
	ret = create_proc_profile();
	if (ret)
		return ret;
	return count;
}
KERNEL_ATTR_RW(profiling);
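
/*
 * The store handler accepts the same strings as the "profile=" boot
 * parameter; roughly, writing "2" enables CPU profiling with a shift of 2
 * and writing "schedule" enables scheduler profiling.  Profiling cannot be
 * switched off again once enabled (hence the -EEXIST above), and the
 * collected histogram is read back from /proc/profile, e.g. with
 * readprofile(1).
 */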
#endif

#ifdef CONFIG_KEXEC_CORE
static ssize_t kexec_loaded_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", !!kexec_image);
}
KERNEL_ATTR_RO(kexec_loaded);

static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kexec_crash_loaded());
}
KERNEL_ATTR_RO(kexec_crash_loaded);

static ssize_t kexec_crash_size_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	ssize_t size = crash_get_memory_size();

	if (size < 0)
		return size;

	return sprintf(buf, "%zd\n", size);
}
static ssize_t kexec_crash_size_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned long cnt;
	int ret;

	if (kstrtoul(buf, 0, &cnt))
		return -EINVAL;

	ret = crash_shrink_memory(cnt);
	return ret < 0 ? ret : count;
}
KERNEL_ATTR_RW(kexec_crash_size);
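
/*
 * Rough usage note: crash_shrink_memory() can only shrink the crashkernel
 * reservation, never grow it, and refuses while a crash kernel is loaded.
 * Writing zero, e.g.
 *
 *	echo 0 > /sys/kernel/kexec_crash_size
 *
 * releases the whole reserved region back to the system.
 */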

#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_CORE

static ssize_t vmcoreinfo_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	phys_addr_t vmcore_base = paddr_vmcoreinfo_note();
	return sprintf(buf, "%pa %x\n", &vmcore_base,
			(unsigned int)VMCOREINFO_NOTE_SIZE);
}
KERNEL_ATTR_RO(vmcoreinfo);
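
/*
 * The two values above are the physical address and size of the vmcoreinfo
 * ELF note; kdump userspace (e.g. kexec-tools) reads /sys/kernel/vmcoreinfo
 * so the note can be made visible to the crash-capture environment via
 * /proc/vmcore.
 */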

#endif /* CONFIG_CRASH_CORE */

/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", file_caps_enabled);
}
KERNEL_ATTR_RO(fscaps);

#ifndef CONFIG_TINY_RCU
int rcu_expedited;
static ssize_t rcu_expedited_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", READ_ONCE(rcu_expedited));
}
static ssize_t rcu_expedited_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	if (kstrtoint(buf, 0, &rcu_expedited))
		return -EINVAL;

	return count;
}
KERNEL_ATTR_RW(rcu_expedited);

int rcu_normal;
static ssize_t rcu_normal_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", READ_ONCE(rcu_normal));
}
static ssize_t rcu_normal_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	if (kstrtoint(buf, 0, &rcu_normal))
		return -EINVAL;

	return count;
}
KERNEL_ATTR_RW(rcu_normal);
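
/*
 * These two flags are the runtime counterparts of the rcupdate.rcu_expedited
 * and rcupdate.rcu_normal boot parameters: roughly, a nonzero rcu_expedited
 * makes synchronize_rcu() use expedited grace periods, while a nonzero
 * rcu_normal forces normal grace periods even where expedited ones were
 * requested (rcu_normal wins if both are set).
 */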
#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
 */
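/*
 * Userspace tools such as perf read /sys/kernel/notes to pick the running
 * kernel's NT_GNU_BUILD_ID out of this note blob without needing vmlinux.
 */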
extern const void __start_notes __weak;
extern const void __stop_notes __weak;
#define	notes_size (&__stop_notes - &__start_notes)

static ssize_t notes_read(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	memcpy(buf, &__start_notes + off, count);
	return count;
}

static struct bin_attribute notes_attr __ro_after_init = {
	.attr = {
		.name = "notes",
		.mode = S_IRUGO,
	},
	.read = &notes_read,
};

struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);
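
/*
 * kernel_kobj is the kobject behind /sys/kernel.  It is exported so that
 * other subsystems can hang their own directories off it, e.g. /sys/kernel/mm
 * and /sys/kernel/slab.
 */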

static struct attribute *kernel_attrs[] = {
	&fscaps_attr.attr,
	&uevent_seqnum_attr.attr,
	&cpu_byteorder_attr.attr,
#ifdef CONFIG_UEVENT_HELPER
	&uevent_helper_attr.attr,
#endif
#ifdef CONFIG_PROFILING
	&profiling_attr.attr,
#endif
#ifdef CONFIG_KEXEC_CORE
	&kexec_loaded_attr.attr,
	&kexec_crash_loaded_attr.attr,
	&kexec_crash_size_attr.attr,
#endif
#ifdef CONFIG_CRASH_CORE
	&vmcoreinfo_attr.attr,
#endif
#ifndef CONFIG_TINY_RCU
	&rcu_expedited_attr.attr,
	&rcu_normal_attr.attr,
#endif
	NULL
};

static const struct attribute_group kernel_attr_group = {
	.attrs = kernel_attrs,
};

static int __init ksysfs_init(void)
{
	int error;

	kernel_kobj = kobject_create_and_add("kernel", NULL);
	if (!kernel_kobj) {
		error = -ENOMEM;
		goto exit;
	}
	error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
	if (error)
		goto kset_exit;

	if (notes_size > 0) {
		notes_attr.size = notes_size;
		error = sysfs_create_bin_file(kernel_kobj, &notes_attr);
		if (error)
			goto group_exit;
	}

	return 0;

group_exit:
	sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
	kobject_put(kernel_kobj);
exit:
	return error;
}

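/*
 * core_initcall() runs at one of the earliest initcall levels, so
 * /sys/kernel (and the exported kernel_kobj) is in place before the
 * postcore/subsys/device initcalls that create entries beneath it.
 */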
core_initcall(ksysfs_init);