xref: /linux/mm/damon/sysfs.c (revision 0be3ff0c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DAMON sysfs Interface
4  *
5  * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
6  */
7 
8 #include <linux/damon.h>
9 #include <linux/kobject.h>
10 #include <linux/pid.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 
/*
 * Serializes updates to the DAMON sysfs hierarchy; taken via
 * mutex_trylock() in the nr_* store handlers below.
 */
static DEFINE_MUTEX(damon_sysfs_lock);
15 
16 /*
17  * unsigned long range directory
18  */
19 
struct damon_sysfs_ul_range {
	struct kobject kobj;	/* embedded kobject backing this directory */
	unsigned long min;	/* value exposed via the "min" file */
	unsigned long max;	/* value exposed via the "max" file */
};
25 
26 static struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
27 		unsigned long min,
28 		unsigned long max)
29 {
30 	struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
31 			GFP_KERNEL);
32 
33 	if (!range)
34 		return NULL;
35 	range->kobj = (struct kobject){};
36 	range->min = min;
37 	range->max = max;
38 
39 	return range;
40 }
41 
42 static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr,
43 		char *buf)
44 {
45 	struct damon_sysfs_ul_range *range = container_of(kobj,
46 			struct damon_sysfs_ul_range, kobj);
47 
48 	return sysfs_emit(buf, "%lu\n", range->min);
49 }
50 
51 static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
52 		const char *buf, size_t count)
53 {
54 	struct damon_sysfs_ul_range *range = container_of(kobj,
55 			struct damon_sysfs_ul_range, kobj);
56 	unsigned long min;
57 	int err;
58 
59 	err = kstrtoul(buf, 0, &min);
60 	if (err)
61 		return -EINVAL;
62 
63 	range->min = min;
64 	return count;
65 }
66 
67 static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr,
68 		char *buf)
69 {
70 	struct damon_sysfs_ul_range *range = container_of(kobj,
71 			struct damon_sysfs_ul_range, kobj);
72 
73 	return sysfs_emit(buf, "%lu\n", range->max);
74 }
75 
76 static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
77 		const char *buf, size_t count)
78 {
79 	struct damon_sysfs_ul_range *range = container_of(kobj,
80 			struct damon_sysfs_ul_range, kobj);
81 	unsigned long max;
82 	int err;
83 
84 	err = kstrtoul(buf, 0, &max);
85 	if (err)
86 		return -EINVAL;
87 
88 	range->max = max;
89 	return count;
90 }
91 
92 static void damon_sysfs_ul_range_release(struct kobject *kobj)
93 {
94 	kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj));
95 }
96 
/* "min" and "max" files, readable and writable by the owner (0600). */
static struct kobj_attribute damon_sysfs_ul_range_min_attr =
		__ATTR_RW_MODE(min, 0600);

static struct kobj_attribute damon_sysfs_ul_range_max_attr =
		__ATTR_RW_MODE(max, 0600);

static struct attribute *damon_sysfs_ul_range_attrs[] = {
	&damon_sysfs_ul_range_min_attr.attr,
	&damon_sysfs_ul_range_max_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ul_range);

/* kobj_type for ul_range directories; release frees the object. */
static struct kobj_type damon_sysfs_ul_range_ktype = {
	.release = damon_sysfs_ul_range_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ul_range_groups,
};
115 
116 /*
117  * schemes/stats directory
118  */
119 
struct damon_sysfs_stats {
	struct kobject kobj;		/* embedded kobject backing this directory */
	unsigned long nr_tried;		/* shown via "nr_tried" */
	unsigned long sz_tried;		/* shown via "sz_tried" */
	unsigned long nr_applied;	/* shown via "nr_applied" */
	unsigned long sz_applied;	/* shown via "sz_applied" */
	unsigned long qt_exceeds;	/* shown via "qt_exceeds" */
};
128 
129 static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void)
130 {
131 	return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL);
132 }
133 
134 static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
135 		char *buf)
136 {
137 	struct damon_sysfs_stats *stats = container_of(kobj,
138 			struct damon_sysfs_stats, kobj);
139 
140 	return sysfs_emit(buf, "%lu\n", stats->nr_tried);
141 }
142 
143 static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
144 		char *buf)
145 {
146 	struct damon_sysfs_stats *stats = container_of(kobj,
147 			struct damon_sysfs_stats, kobj);
148 
149 	return sysfs_emit(buf, "%lu\n", stats->sz_tried);
150 }
151 
152 static ssize_t nr_applied_show(struct kobject *kobj,
153 		struct kobj_attribute *attr, char *buf)
154 {
155 	struct damon_sysfs_stats *stats = container_of(kobj,
156 			struct damon_sysfs_stats, kobj);
157 
158 	return sysfs_emit(buf, "%lu\n", stats->nr_applied);
159 }
160 
161 static ssize_t sz_applied_show(struct kobject *kobj,
162 		struct kobj_attribute *attr, char *buf)
163 {
164 	struct damon_sysfs_stats *stats = container_of(kobj,
165 			struct damon_sysfs_stats, kobj);
166 
167 	return sysfs_emit(buf, "%lu\n", stats->sz_applied);
168 }
169 
170 static ssize_t qt_exceeds_show(struct kobject *kobj,
171 		struct kobj_attribute *attr, char *buf)
172 {
173 	struct damon_sysfs_stats *stats = container_of(kobj,
174 			struct damon_sysfs_stats, kobj);
175 
176 	return sysfs_emit(buf, "%lu\n", stats->qt_exceeds);
177 }
178 
179 static void damon_sysfs_stats_release(struct kobject *kobj)
180 {
181 	kfree(container_of(kobj, struct damon_sysfs_stats, kobj));
182 }
183 
/* All stats files are read-only for the owner (0400). */
static struct kobj_attribute damon_sysfs_stats_nr_tried_attr =
		__ATTR_RO_MODE(nr_tried, 0400);

static struct kobj_attribute damon_sysfs_stats_sz_tried_attr =
		__ATTR_RO_MODE(sz_tried, 0400);

static struct kobj_attribute damon_sysfs_stats_nr_applied_attr =
		__ATTR_RO_MODE(nr_applied, 0400);

static struct kobj_attribute damon_sysfs_stats_sz_applied_attr =
		__ATTR_RO_MODE(sz_applied, 0400);

static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr =
		__ATTR_RO_MODE(qt_exceeds, 0400);

static struct attribute *damon_sysfs_stats_attrs[] = {
	&damon_sysfs_stats_nr_tried_attr.attr,
	&damon_sysfs_stats_sz_tried_attr.attr,
	&damon_sysfs_stats_nr_applied_attr.attr,
	&damon_sysfs_stats_sz_applied_attr.attr,
	&damon_sysfs_stats_qt_exceeds_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_stats);

/* kobj_type for stats directories; release frees the object. */
static struct kobj_type damon_sysfs_stats_ktype = {
	.release = damon_sysfs_stats_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_stats_groups,
};
214 
215 /*
216  * watermarks directory
217  */
218 
struct damon_sysfs_watermarks {
	struct kobject kobj;		/* embedded kobject backing this directory */
	enum damos_wmark_metric metric;	/* shown/set via "metric" as a string */
	unsigned long interval_us;	/* shown/set via "interval_us" */
	unsigned long high;		/* shown/set via "high" */
	unsigned long mid;		/* shown/set via "mid" */
	unsigned long low;		/* shown/set via "low" */
};
227 
228 static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
229 		enum damos_wmark_metric metric, unsigned long interval_us,
230 		unsigned long high, unsigned long mid, unsigned long low)
231 {
232 	struct damon_sysfs_watermarks *watermarks = kmalloc(
233 			sizeof(*watermarks), GFP_KERNEL);
234 
235 	if (!watermarks)
236 		return NULL;
237 	watermarks->kobj = (struct kobject){};
238 	watermarks->metric = metric;
239 	watermarks->interval_us = interval_us;
240 	watermarks->high = high;
241 	watermarks->mid = mid;
242 	watermarks->low = low;
243 	return watermarks;
244 }
245 
/*
 * Human-readable names for the watermark metrics, used by metric_show() and
 * metric_store().  Must stay index-aligned with enum damos_wmark_metric.
 */
static const char * const damon_sysfs_wmark_metric_strs[] = {
	"none",
	"free_mem_rate",
};
251 
252 static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr,
253 		char *buf)
254 {
255 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
256 			struct damon_sysfs_watermarks, kobj);
257 
258 	return sysfs_emit(buf, "%s\n",
259 			damon_sysfs_wmark_metric_strs[watermarks->metric]);
260 }
261 
262 static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr,
263 		const char *buf, size_t count)
264 {
265 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
266 			struct damon_sysfs_watermarks, kobj);
267 	enum damos_wmark_metric metric;
268 
269 	for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) {
270 		if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) {
271 			watermarks->metric = metric;
272 			return count;
273 		}
274 	}
275 	return -EINVAL;
276 }
277 
278 static ssize_t interval_us_show(struct kobject *kobj,
279 		struct kobj_attribute *attr, char *buf)
280 {
281 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
282 			struct damon_sysfs_watermarks, kobj);
283 
284 	return sysfs_emit(buf, "%lu\n", watermarks->interval_us);
285 }
286 
287 static ssize_t interval_us_store(struct kobject *kobj,
288 		struct kobj_attribute *attr, const char *buf, size_t count)
289 {
290 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
291 			struct damon_sysfs_watermarks, kobj);
292 	int err = kstrtoul(buf, 0, &watermarks->interval_us);
293 
294 	if (err)
295 		return -EINVAL;
296 	return count;
297 }
298 
299 static ssize_t high_show(struct kobject *kobj,
300 		struct kobj_attribute *attr, char *buf)
301 {
302 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
303 			struct damon_sysfs_watermarks, kobj);
304 
305 	return sysfs_emit(buf, "%lu\n", watermarks->high);
306 }
307 
308 static ssize_t high_store(struct kobject *kobj,
309 		struct kobj_attribute *attr, const char *buf, size_t count)
310 {
311 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
312 			struct damon_sysfs_watermarks, kobj);
313 	int err = kstrtoul(buf, 0, &watermarks->high);
314 
315 	if (err)
316 		return -EINVAL;
317 	return count;
318 }
319 
320 static ssize_t mid_show(struct kobject *kobj,
321 		struct kobj_attribute *attr, char *buf)
322 {
323 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
324 			struct damon_sysfs_watermarks, kobj);
325 
326 	return sysfs_emit(buf, "%lu\n", watermarks->mid);
327 }
328 
329 static ssize_t mid_store(struct kobject *kobj,
330 		struct kobj_attribute *attr, const char *buf, size_t count)
331 {
332 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
333 			struct damon_sysfs_watermarks, kobj);
334 	int err = kstrtoul(buf, 0, &watermarks->mid);
335 
336 	if (err)
337 		return -EINVAL;
338 	return count;
339 }
340 
341 static ssize_t low_show(struct kobject *kobj,
342 		struct kobj_attribute *attr, char *buf)
343 {
344 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
345 			struct damon_sysfs_watermarks, kobj);
346 
347 	return sysfs_emit(buf, "%lu\n", watermarks->low);
348 }
349 
350 static ssize_t low_store(struct kobject *kobj,
351 		struct kobj_attribute *attr, const char *buf, size_t count)
352 {
353 	struct damon_sysfs_watermarks *watermarks = container_of(kobj,
354 			struct damon_sysfs_watermarks, kobj);
355 	int err = kstrtoul(buf, 0, &watermarks->low);
356 
357 	if (err)
358 		return -EINVAL;
359 	return count;
360 }
361 
362 static void damon_sysfs_watermarks_release(struct kobject *kobj)
363 {
364 	kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj));
365 }
366 
/* All watermark files are owner read/write (0600). */
static struct kobj_attribute damon_sysfs_watermarks_metric_attr =
		__ATTR_RW_MODE(metric, 0600);

static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr =
		__ATTR_RW_MODE(interval_us, 0600);

static struct kobj_attribute damon_sysfs_watermarks_high_attr =
		__ATTR_RW_MODE(high, 0600);

static struct kobj_attribute damon_sysfs_watermarks_mid_attr =
		__ATTR_RW_MODE(mid, 0600);

static struct kobj_attribute damon_sysfs_watermarks_low_attr =
		__ATTR_RW_MODE(low, 0600);

static struct attribute *damon_sysfs_watermarks_attrs[] = {
	&damon_sysfs_watermarks_metric_attr.attr,
	&damon_sysfs_watermarks_interval_us_attr.attr,
	&damon_sysfs_watermarks_high_attr.attr,
	&damon_sysfs_watermarks_mid_attr.attr,
	&damon_sysfs_watermarks_low_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_watermarks);

/* kobj_type for watermarks directories; release frees the object. */
static struct kobj_type damon_sysfs_watermarks_ktype = {
	.release = damon_sysfs_watermarks_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_watermarks_groups,
};
397 
398 /*
399  * scheme/weights directory
400  */
401 
struct damon_sysfs_weights {
	struct kobject kobj;		/* embedded kobject backing this directory */
	unsigned int sz;		/* shown/set via "sz_permil" */
	unsigned int nr_accesses;	/* shown/set via "nr_accesses_permil" */
	unsigned int age;		/* shown/set via "age_permil" */
};
408 
409 static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz,
410 		unsigned int nr_accesses, unsigned int age)
411 {
412 	struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights),
413 			GFP_KERNEL);
414 
415 	if (!weights)
416 		return NULL;
417 	weights->kobj = (struct kobject){};
418 	weights->sz = sz;
419 	weights->nr_accesses = nr_accesses;
420 	weights->age = age;
421 	return weights;
422 }
423 
424 static ssize_t sz_permil_show(struct kobject *kobj,
425 		struct kobj_attribute *attr, char *buf)
426 {
427 	struct damon_sysfs_weights *weights = container_of(kobj,
428 			struct damon_sysfs_weights, kobj);
429 
430 	return sysfs_emit(buf, "%u\n", weights->sz);
431 }
432 
433 static ssize_t sz_permil_store(struct kobject *kobj,
434 		struct kobj_attribute *attr, const char *buf, size_t count)
435 {
436 	struct damon_sysfs_weights *weights = container_of(kobj,
437 			struct damon_sysfs_weights, kobj);
438 	int err = kstrtouint(buf, 0, &weights->sz);
439 
440 	if (err)
441 		return -EINVAL;
442 	return count;
443 }
444 
445 static ssize_t nr_accesses_permil_show(struct kobject *kobj,
446 		struct kobj_attribute *attr, char *buf)
447 {
448 	struct damon_sysfs_weights *weights = container_of(kobj,
449 			struct damon_sysfs_weights, kobj);
450 
451 	return sysfs_emit(buf, "%u\n", weights->nr_accesses);
452 }
453 
454 static ssize_t nr_accesses_permil_store(struct kobject *kobj,
455 		struct kobj_attribute *attr, const char *buf, size_t count)
456 {
457 	struct damon_sysfs_weights *weights = container_of(kobj,
458 			struct damon_sysfs_weights, kobj);
459 	int err = kstrtouint(buf, 0, &weights->nr_accesses);
460 
461 	if (err)
462 		return -EINVAL;
463 	return count;
464 }
465 
466 static ssize_t age_permil_show(struct kobject *kobj,
467 		struct kobj_attribute *attr, char *buf)
468 {
469 	struct damon_sysfs_weights *weights = container_of(kobj,
470 			struct damon_sysfs_weights, kobj);
471 
472 	return sysfs_emit(buf, "%u\n", weights->age);
473 }
474 
475 static ssize_t age_permil_store(struct kobject *kobj,
476 		struct kobj_attribute *attr, const char *buf, size_t count)
477 {
478 	struct damon_sysfs_weights *weights = container_of(kobj,
479 			struct damon_sysfs_weights, kobj);
480 	int err = kstrtouint(buf, 0, &weights->age);
481 
482 	if (err)
483 		return -EINVAL;
484 	return count;
485 }
486 
487 static void damon_sysfs_weights_release(struct kobject *kobj)
488 {
489 	kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
490 }
491 
/* All weight files are owner read/write (0600). */
static struct kobj_attribute damon_sysfs_weights_sz_attr =
		__ATTR_RW_MODE(sz_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
		__ATTR_RW_MODE(nr_accesses_permil, 0600);

static struct kobj_attribute damon_sysfs_weights_age_attr =
		__ATTR_RW_MODE(age_permil, 0600);

static struct attribute *damon_sysfs_weights_attrs[] = {
	&damon_sysfs_weights_sz_attr.attr,
	&damon_sysfs_weights_nr_accesses_attr.attr,
	&damon_sysfs_weights_age_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_weights);

/* kobj_type for weights directories; release frees the object. */
static struct kobj_type damon_sysfs_weights_ktype = {
	.release = damon_sysfs_weights_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_weights_groups,
};
514 
515 /*
516  * quotas directory
517  */
518 
struct damon_sysfs_quotas {
	struct kobject kobj;		/* embedded kobject backing this directory */
	struct damon_sysfs_weights *weights;	/* "weights" child directory */
	unsigned long ms;		/* shown/set via "ms" */
	unsigned long sz;		/* shown/set via "bytes" */
	unsigned long reset_interval_ms;	/* shown/set via "reset_interval_ms" */
};
526 
527 static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
528 {
529 	return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
530 }
531 
532 static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
533 {
534 	struct damon_sysfs_weights *weights;
535 	int err;
536 
537 	weights = damon_sysfs_weights_alloc(0, 0, 0);
538 	if (!weights)
539 		return -ENOMEM;
540 
541 	err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
542 			&quotas->kobj, "weights");
543 	if (err)
544 		kobject_put(&weights->kobj);
545 	else
546 		quotas->weights = weights;
547 	return err;
548 }
549 
550 static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
551 {
552 	kobject_put(&quotas->weights->kobj);
553 }
554 
555 static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
556 		char *buf)
557 {
558 	struct damon_sysfs_quotas *quotas = container_of(kobj,
559 			struct damon_sysfs_quotas, kobj);
560 
561 	return sysfs_emit(buf, "%lu\n", quotas->ms);
562 }
563 
564 static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
565 		const char *buf, size_t count)
566 {
567 	struct damon_sysfs_quotas *quotas = container_of(kobj,
568 			struct damon_sysfs_quotas, kobj);
569 	int err = kstrtoul(buf, 0, &quotas->ms);
570 
571 	if (err)
572 		return -EINVAL;
573 	return count;
574 }
575 
576 static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
577 		char *buf)
578 {
579 	struct damon_sysfs_quotas *quotas = container_of(kobj,
580 			struct damon_sysfs_quotas, kobj);
581 
582 	return sysfs_emit(buf, "%lu\n", quotas->sz);
583 }
584 
585 static ssize_t bytes_store(struct kobject *kobj,
586 		struct kobj_attribute *attr, const char *buf, size_t count)
587 {
588 	struct damon_sysfs_quotas *quotas = container_of(kobj,
589 			struct damon_sysfs_quotas, kobj);
590 	int err = kstrtoul(buf, 0, &quotas->sz);
591 
592 	if (err)
593 		return -EINVAL;
594 	return count;
595 }
596 
597 static ssize_t reset_interval_ms_show(struct kobject *kobj,
598 		struct kobj_attribute *attr, char *buf)
599 {
600 	struct damon_sysfs_quotas *quotas = container_of(kobj,
601 			struct damon_sysfs_quotas, kobj);
602 
603 	return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
604 }
605 
606 static ssize_t reset_interval_ms_store(struct kobject *kobj,
607 		struct kobj_attribute *attr, const char *buf, size_t count)
608 {
609 	struct damon_sysfs_quotas *quotas = container_of(kobj,
610 			struct damon_sysfs_quotas, kobj);
611 	int err = kstrtoul(buf, 0, &quotas->reset_interval_ms);
612 
613 	if (err)
614 		return -EINVAL;
615 	return count;
616 }
617 
618 static void damon_sysfs_quotas_release(struct kobject *kobj)
619 {
620 	kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
621 }
622 
/* All quota files are owner read/write (0600). */
static struct kobj_attribute damon_sysfs_quotas_ms_attr =
		__ATTR_RW_MODE(ms, 0600);

static struct kobj_attribute damon_sysfs_quotas_sz_attr =
		__ATTR_RW_MODE(bytes, 0600);

static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
		__ATTR_RW_MODE(reset_interval_ms, 0600);

static struct attribute *damon_sysfs_quotas_attrs[] = {
	&damon_sysfs_quotas_ms_attr.attr,
	&damon_sysfs_quotas_sz_attr.attr,
	&damon_sysfs_quotas_reset_interval_ms_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_quotas);

/* kobj_type for quotas directories; release frees the object. */
static struct kobj_type damon_sysfs_quotas_ktype = {
	.release = damon_sysfs_quotas_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_quotas_groups,
};
645 
646 /*
647  * access_pattern directory
648  */
649 
struct damon_sysfs_access_pattern {
	struct kobject kobj;	/* embedded kobject backing this directory */
	struct damon_sysfs_ul_range *sz;		/* "sz" child directory */
	struct damon_sysfs_ul_range *nr_accesses;	/* "nr_accesses" child directory */
	struct damon_sysfs_ul_range *age;		/* "age" child directory */
};
656 
657 static
658 struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
659 {
660 	struct damon_sysfs_access_pattern *access_pattern =
661 		kmalloc(sizeof(*access_pattern), GFP_KERNEL);
662 
663 	if (!access_pattern)
664 		return NULL;
665 	access_pattern->kobj = (struct kobject){};
666 	return access_pattern;
667 }
668 
669 static int damon_sysfs_access_pattern_add_range_dir(
670 		struct damon_sysfs_access_pattern *access_pattern,
671 		struct damon_sysfs_ul_range **range_dir_ptr,
672 		char *name)
673 {
674 	struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
675 	int err;
676 
677 	if (!range)
678 		return -ENOMEM;
679 	err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
680 			&access_pattern->kobj, name);
681 	if (err)
682 		kobject_put(&range->kobj);
683 	else
684 		*range_dir_ptr = range;
685 	return err;
686 }
687 
688 static int damon_sysfs_access_pattern_add_dirs(
689 		struct damon_sysfs_access_pattern *access_pattern)
690 {
691 	int err;
692 
693 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
694 			&access_pattern->sz, "sz");
695 	if (err)
696 		goto put_sz_out;
697 
698 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
699 			&access_pattern->nr_accesses, "nr_accesses");
700 	if (err)
701 		goto put_nr_accesses_sz_out;
702 
703 	err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
704 			&access_pattern->age, "age");
705 	if (err)
706 		goto put_age_nr_accesses_sz_out;
707 	return 0;
708 
709 put_age_nr_accesses_sz_out:
710 	kobject_put(&access_pattern->age->kobj);
711 	access_pattern->age = NULL;
712 put_nr_accesses_sz_out:
713 	kobject_put(&access_pattern->nr_accesses->kobj);
714 	access_pattern->nr_accesses = NULL;
715 put_sz_out:
716 	kobject_put(&access_pattern->sz->kobj);
717 	access_pattern->sz = NULL;
718 	return err;
719 }
720 
721 static void damon_sysfs_access_pattern_rm_dirs(
722 		struct damon_sysfs_access_pattern *access_pattern)
723 {
724 	kobject_put(&access_pattern->sz->kobj);
725 	kobject_put(&access_pattern->nr_accesses->kobj);
726 	kobject_put(&access_pattern->age->kobj);
727 }
728 
729 static void damon_sysfs_access_pattern_release(struct kobject *kobj)
730 {
731 	kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj));
732 }
733 
/* No plain files: the sz/nr_accesses/age children are directories. */
static struct attribute *damon_sysfs_access_pattern_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);

/* kobj_type for access_pattern directories; release frees the object. */
static struct kobj_type damon_sysfs_access_pattern_ktype = {
	.release = damon_sysfs_access_pattern_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_access_pattern_groups,
};
744 
745 /*
746  * scheme directory
747  */
748 
struct damon_sysfs_scheme {
	struct kobject kobj;		/* embedded kobject backing this directory */
	enum damos_action action;	/* shown/set via "action" as a string */
	struct damon_sysfs_access_pattern *access_pattern;	/* child dir */
	struct damon_sysfs_quotas *quotas;		/* child dir */
	struct damon_sysfs_watermarks *watermarks;	/* child dir */
	struct damon_sysfs_stats *stats;		/* child dir */
};
757 
/*
 * Human-readable names for the DAMOS actions, used by action_show() and
 * action_store().  Must stay index-aligned with enum damos_action.
 */
static const char * const damon_sysfs_damos_action_strs[] = {
	"willneed",
	"cold",
	"pageout",
	"hugepage",
	"nohugepage",
	"stat",
};
767 
768 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
769 		enum damos_action action)
770 {
771 	struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
772 				GFP_KERNEL);
773 
774 	if (!scheme)
775 		return NULL;
776 	scheme->kobj = (struct kobject){};
777 	scheme->action = action;
778 	return scheme;
779 }
780 
781 static int damon_sysfs_scheme_set_access_pattern(
782 		struct damon_sysfs_scheme *scheme)
783 {
784 	struct damon_sysfs_access_pattern *access_pattern;
785 	int err;
786 
787 	access_pattern = damon_sysfs_access_pattern_alloc();
788 	if (!access_pattern)
789 		return -ENOMEM;
790 	err = kobject_init_and_add(&access_pattern->kobj,
791 			&damon_sysfs_access_pattern_ktype, &scheme->kobj,
792 			"access_pattern");
793 	if (err)
794 		goto out;
795 	err = damon_sysfs_access_pattern_add_dirs(access_pattern);
796 	if (err)
797 		goto out;
798 	scheme->access_pattern = access_pattern;
799 	return 0;
800 
801 out:
802 	kobject_put(&access_pattern->kobj);
803 	return err;
804 }
805 
806 static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
807 {
808 	struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
809 	int err;
810 
811 	if (!quotas)
812 		return -ENOMEM;
813 	err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype,
814 			&scheme->kobj, "quotas");
815 	if (err)
816 		goto out;
817 	err = damon_sysfs_quotas_add_dirs(quotas);
818 	if (err)
819 		goto out;
820 	scheme->quotas = quotas;
821 	return 0;
822 
823 out:
824 	kobject_put(&quotas->kobj);
825 	return err;
826 }
827 
828 static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
829 {
830 	struct damon_sysfs_watermarks *watermarks =
831 		damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0);
832 	int err;
833 
834 	if (!watermarks)
835 		return -ENOMEM;
836 	err = kobject_init_and_add(&watermarks->kobj,
837 			&damon_sysfs_watermarks_ktype, &scheme->kobj,
838 			"watermarks");
839 	if (err)
840 		kobject_put(&watermarks->kobj);
841 	else
842 		scheme->watermarks = watermarks;
843 	return err;
844 }
845 
846 static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
847 {
848 	struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
849 	int err;
850 
851 	if (!stats)
852 		return -ENOMEM;
853 	err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype,
854 			&scheme->kobj, "stats");
855 	if (err)
856 		kobject_put(&stats->kobj);
857 	else
858 		scheme->stats = stats;
859 	return err;
860 }
861 
/*
 * Create all child directories of a scheme directory: access_pattern,
 * quotas, watermarks and stats, in that order.
 *
 * The labels form a fall-through cleanup ladder: each failure jumps to the
 * label that releases only the directories created BEFORE the failing step
 * (the failing set_*() helper already cleaned up after itself), and resets
 * the corresponding pointers to NULL.
 */
static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
{
	int err;

	err = damon_sysfs_scheme_set_access_pattern(scheme);
	if (err)
		return err;
	err = damon_sysfs_scheme_set_quotas(scheme);
	if (err)
		goto put_access_pattern_out;
	err = damon_sysfs_scheme_set_watermarks(scheme);
	if (err)
		goto put_quotas_access_pattern_out;
	err = damon_sysfs_scheme_set_stats(scheme);
	if (err)
		goto put_watermarks_quotas_access_pattern_out;
	return 0;

put_watermarks_quotas_access_pattern_out:
	kobject_put(&scheme->watermarks->kobj);
	scheme->watermarks = NULL;
put_quotas_access_pattern_out:
	kobject_put(&scheme->quotas->kobj);
	scheme->quotas = NULL;
put_access_pattern_out:
	kobject_put(&scheme->access_pattern->kobj);
	scheme->access_pattern = NULL;
	return err;
}
891 
892 static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
893 {
894 	damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
895 	kobject_put(&scheme->access_pattern->kobj);
896 	damon_sysfs_quotas_rm_dirs(scheme->quotas);
897 	kobject_put(&scheme->quotas->kobj);
898 	kobject_put(&scheme->watermarks->kobj);
899 	kobject_put(&scheme->stats->kobj);
900 }
901 
902 static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr,
903 		char *buf)
904 {
905 	struct damon_sysfs_scheme *scheme = container_of(kobj,
906 			struct damon_sysfs_scheme, kobj);
907 
908 	return sysfs_emit(buf, "%s\n",
909 			damon_sysfs_damos_action_strs[scheme->action]);
910 }
911 
912 static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
913 		const char *buf, size_t count)
914 {
915 	struct damon_sysfs_scheme *scheme = container_of(kobj,
916 			struct damon_sysfs_scheme, kobj);
917 	enum damos_action action;
918 
919 	for (action = 0; action < NR_DAMOS_ACTIONS; action++) {
920 		if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) {
921 			scheme->action = action;
922 			return count;
923 		}
924 	}
925 	return -EINVAL;
926 }
927 
928 static void damon_sysfs_scheme_release(struct kobject *kobj)
929 {
930 	kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
931 }
932 
/* "action" file, owner read/write (0600). */
static struct kobj_attribute damon_sysfs_scheme_action_attr =
		__ATTR_RW_MODE(action, 0600);

static struct attribute *damon_sysfs_scheme_attrs[] = {
	&damon_sysfs_scheme_action_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme);

/* kobj_type for scheme directories; release frees the object. */
static struct kobj_type damon_sysfs_scheme_ktype = {
	.release = damon_sysfs_scheme_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_scheme_groups,
};
947 
948 /*
949  * schemes directory
950  */
951 
struct damon_sysfs_schemes {
	struct kobject kobj;	/* embedded kobject backing this directory */
	struct damon_sysfs_scheme **schemes_arr;	/* "0", "1", ... child dirs */
	int nr;			/* number of valid entries in schemes_arr */
};
957 
958 static struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void)
959 {
960 	return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL);
961 }
962 
963 static void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes)
964 {
965 	struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr;
966 	int i;
967 
968 	for (i = 0; i < schemes->nr; i++) {
969 		damon_sysfs_scheme_rm_dirs(schemes_arr[i]);
970 		kobject_put(&schemes_arr[i]->kobj);
971 	}
972 	schemes->nr = 0;
973 	kfree(schemes_arr);
974 	schemes->schemes_arr = NULL;
975 }
976 
/*
 * Replace the current scheme directories with @nr_schemes fresh ones named
 * "0" .. "nr_schemes - 1", each populated via damon_sysfs_scheme_add_dirs().
 *
 * Existing directories are always torn down first; passing 0 therefore just
 * empties the directory.  On any mid-loop failure, the directories created
 * so far (tracked by schemes->nr) are removed and the current, not yet
 * registered scheme is put separately.
 */
static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
		int nr_schemes)
{
	struct damon_sysfs_scheme **schemes_arr, *scheme;
	int err, i;

	damon_sysfs_schemes_rm_dirs(schemes);
	if (!nr_schemes)
		return 0;

	schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!schemes_arr)
		return -ENOMEM;
	schemes->schemes_arr = schemes_arr;

	for (i = 0; i < nr_schemes; i++) {
		/* DAMOS_STAT is the no-op default action */
		scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
		if (!scheme) {
			damon_sysfs_schemes_rm_dirs(schemes);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&scheme->kobj,
				&damon_sysfs_scheme_ktype, &schemes->kobj,
				"%d", i);
		if (err)
			goto out;
		err = damon_sysfs_scheme_add_dirs(scheme);
		if (err)
			goto out;

		schemes_arr[i] = scheme;
		schemes->nr++;
	}
	return 0;

out:
	damon_sysfs_schemes_rm_dirs(schemes);
	/* 'scheme' is not in schemes_arr yet, so put it explicitly */
	kobject_put(&scheme->kobj);
	return err;
}
1019 
1020 static ssize_t nr_schemes_show(struct kobject *kobj,
1021 		struct kobj_attribute *attr, char *buf)
1022 {
1023 	struct damon_sysfs_schemes *schemes = container_of(kobj,
1024 			struct damon_sysfs_schemes, kobj);
1025 
1026 	return sysfs_emit(buf, "%d\n", schemes->nr);
1027 }
1028 
1029 static ssize_t nr_schemes_store(struct kobject *kobj,
1030 		struct kobj_attribute *attr, const char *buf, size_t count)
1031 {
1032 	struct damon_sysfs_schemes *schemes = container_of(kobj,
1033 			struct damon_sysfs_schemes, kobj);
1034 	int nr, err = kstrtoint(buf, 0, &nr);
1035 
1036 	if (err)
1037 		return err;
1038 	if (nr < 0)
1039 		return -EINVAL;
1040 
1041 	if (!mutex_trylock(&damon_sysfs_lock))
1042 		return -EBUSY;
1043 	err = damon_sysfs_schemes_add_dirs(schemes, nr);
1044 	mutex_unlock(&damon_sysfs_lock);
1045 	if (err)
1046 		return err;
1047 	return count;
1048 }
1049 
1050 static void damon_sysfs_schemes_release(struct kobject *kobj)
1051 {
1052 	kfree(container_of(kobj, struct damon_sysfs_schemes, kobj));
1053 }
1054 
/* "nr_schemes" file, owner read/write (0600). */
static struct kobj_attribute damon_sysfs_schemes_nr_attr =
		__ATTR_RW_MODE(nr_schemes, 0600);

static struct attribute *damon_sysfs_schemes_attrs[] = {
	&damon_sysfs_schemes_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_schemes);

/* kobj_type for schemes directories; release frees the object. */
static struct kobj_type damon_sysfs_schemes_ktype = {
	.release = damon_sysfs_schemes_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_schemes_groups,
};
1069 
1070 /*
1071  * init region directory
1072  */
1073 
struct damon_sysfs_region {
	struct kobject kobj;	/* embedded kobject backing this directory */
	unsigned long start;	/* shown/set via "start" */
	unsigned long end;	/* shown/set via "end" */
};
1079 
1080 static struct damon_sysfs_region *damon_sysfs_region_alloc(
1081 		unsigned long start,
1082 		unsigned long end)
1083 {
1084 	struct damon_sysfs_region *region = kmalloc(sizeof(*region),
1085 			GFP_KERNEL);
1086 
1087 	if (!region)
1088 		return NULL;
1089 	region->kobj = (struct kobject){};
1090 	region->start = start;
1091 	region->end = end;
1092 	return region;
1093 }
1094 
/* Show the start address of the region. */
static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->start);
}

/* Store the start address of the region.  Parse errors become -EINVAL. */
static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);
	int err = kstrtoul(buf, 0, &region->start);

	if (err)
		return -EINVAL;
	return count;
}

/* Show the end address of the region. */
static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);

	return sysfs_emit(buf, "%lu\n", region->end);
}

/* Store the end address of the region.  Parse errors become -EINVAL. */
static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_region *region = container_of(kobj,
			struct damon_sysfs_region, kobj);
	int err = kstrtoul(buf, 0, &region->end);

	if (err)
		return -EINVAL;
	return count;
}
1136 
/* Free the region object when its kobject refcount drops to 0. */
static void damon_sysfs_region_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_region, kobj));
}

static struct kobj_attribute damon_sysfs_region_start_attr =
		__ATTR_RW_MODE(start, 0600);

static struct kobj_attribute damon_sysfs_region_end_attr =
		__ATTR_RW_MODE(end, 0600);

static struct attribute *damon_sysfs_region_attrs[] = {
	&damon_sysfs_region_start_attr.attr,
	&damon_sysfs_region_end_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_region);

static struct kobj_type damon_sysfs_region_ktype = {
	.release = damon_sysfs_region_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_region_groups,
};
1160 
1161 /*
1162  * init_regions directory
1163  */
1164 
/* Container for per-target initial monitoring region directories. */
struct damon_sysfs_regions {
	struct kobject kobj;
	struct damon_sysfs_region **regions_arr;	/* 'nr' region objects */
	int nr;						/* number of regions */
};

/* Allocate a zeroed regions container (no region directories yet). */
static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
}
1175 
/*
 * Remove all region sub-directories and free the pointer array.  Only the
 * first regions->nr entries of the array are valid, so partially-built
 * arrays (as left by a failed add) are handled correctly.
 */
static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
{
	struct damon_sysfs_region **regions_arr = regions->regions_arr;
	int i;

	for (i = 0; i < regions->nr; i++)
		kobject_put(&regions_arr[i]->kobj);
	regions->nr = 0;
	kfree(regions_arr);
	regions->regions_arr = NULL;
}
1187 
/*
 * Replace existing region directories with @nr_regions fresh ones named
 * "0" .. "nr_regions - 1".  On any mid-loop failure, everything created so
 * far is torn down and an error is returned.
 */
static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
		int nr_regions)
{
	struct damon_sysfs_region **regions_arr, *region;
	int err, i;

	damon_sysfs_regions_rm_dirs(regions);
	if (!nr_regions)
		return 0;

	regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!regions_arr)
		return -ENOMEM;
	regions->regions_arr = regions_arr;

	for (i = 0; i < nr_regions; i++) {
		region = damon_sysfs_region_alloc(0, 0);
		if (!region) {
			damon_sysfs_regions_rm_dirs(regions);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&region->kobj,
				&damon_sysfs_region_ktype, &regions->kobj,
				"%d", i);
		if (err) {
			/* the failed kobject still owns a ref on itself */
			kobject_put(&region->kobj);
			damon_sysfs_regions_rm_dirs(regions);
			return err;
		}

		regions_arr[i] = region;
		regions->nr++;
	}
	return 0;
}
1225 
1226 static ssize_t nr_regions_show(struct kobject *kobj,
1227 		struct kobj_attribute *attr, char *buf)
1228 {
1229 	struct damon_sysfs_regions *regions = container_of(kobj,
1230 			struct damon_sysfs_regions, kobj);
1231 
1232 	return sysfs_emit(buf, "%d\n", regions->nr);
1233 }
1234 
/*
 * Store handler for 'nr_regions': (re)create that many region directories.
 * Uses mutex_trylock() to avoid blocking in sysfs context; concurrent
 * users get -EBUSY.
 */
static ssize_t nr_regions_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_regions *regions = container_of(kobj,
			struct damon_sysfs_regions, kobj);
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_regions_add_dirs(regions, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
1256 
/* Free the regions container when its kobject refcount drops to 0. */
static void damon_sysfs_regions_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
}

static struct kobj_attribute damon_sysfs_regions_nr_attr =
		__ATTR_RW_MODE(nr_regions, 0600);

static struct attribute *damon_sysfs_regions_attrs[] = {
	&damon_sysfs_regions_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_regions);

static struct kobj_type damon_sysfs_regions_ktype = {
	.release = damon_sysfs_regions_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_regions_groups,
};
1276 
1277 /*
1278  * target directory
1279  */
1280 
/* A monitoring target: its pid (for vaddr ops) and initial regions. */
struct damon_sysfs_target {
	struct kobject kobj;
	struct damon_sysfs_regions *regions;	/* 'regions' sub-directory */
	int pid;				/* target process id */
};

/* Allocate a zeroed target object (sub-directories added separately). */
static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
}
1291 
/* Create the 'regions' sub-directory under a target directory. */
static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
{
	struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
	int err;

	if (!regions)
		return -ENOMEM;

	err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
			&target->kobj, "regions");
	if (err)
		kobject_put(&regions->kobj);
	else
		target->regions = regions;
	return err;
}
1308 
/* Tear down the 'regions' sub-directory of a target directory. */
static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
{
	damon_sysfs_regions_rm_dirs(target->regions);
	kobject_put(&target->regions->kobj);
}
1314 
/* Show the pid of the target. */
static ssize_t pid_target_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_target *target = container_of(kobj,
			struct damon_sysfs_target, kobj);

	return sysfs_emit(buf, "%d\n", target->pid);
}

/* Store the pid of the target.  Parse errors become -EINVAL. */
static ssize_t pid_target_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_target *target = container_of(kobj,
			struct damon_sysfs_target, kobj);
	int err = kstrtoint(buf, 0, &target->pid);

	if (err)
		return -EINVAL;
	return count;
}
1335 
/* Free the target object when its kobject refcount drops to 0. */
static void damon_sysfs_target_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_target, kobj));
}

/* Exposed as 'pid_target' in sysfs. */
static struct kobj_attribute damon_sysfs_target_pid_attr =
		__ATTR_RW_MODE(pid_target, 0600);

static struct attribute *damon_sysfs_target_attrs[] = {
	&damon_sysfs_target_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_target);

static struct kobj_type damon_sysfs_target_ktype = {
	.release = damon_sysfs_target_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_target_groups,
};
1355 
1356 /*
1357  * targets directory
1358  */
1359 
/* Container for the per-context monitoring target directories. */
struct damon_sysfs_targets {
	struct kobject kobj;
	struct damon_sysfs_target **targets_arr;	/* 'nr' target objects */
	int nr;						/* number of targets */
};

/* Allocate a zeroed targets container (no target directories yet). */
static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
}
1370 
/*
 * Remove all target sub-directories (including each target's own nested
 * directories) and free the pointer array.
 */
static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
{
	struct damon_sysfs_target **targets_arr = targets->targets_arr;
	int i;

	for (i = 0; i < targets->nr; i++) {
		damon_sysfs_target_rm_dirs(targets_arr[i]);
		kobject_put(&targets_arr[i]->kobj);
	}
	targets->nr = 0;
	kfree(targets_arr);
	targets->targets_arr = NULL;
}
1384 
/*
 * Replace existing target directories with @nr_targets fresh ones named
 * "0" .. "nr_targets - 1", each populated with its sub-directories.
 */
static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
		int nr_targets)
{
	struct damon_sysfs_target **targets_arr, *target;
	int err, i;

	damon_sysfs_targets_rm_dirs(targets);
	if (!nr_targets)
		return 0;

	targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!targets_arr)
		return -ENOMEM;
	targets->targets_arr = targets_arr;

	for (i = 0; i < nr_targets; i++) {
		target = damon_sysfs_target_alloc();
		if (!target) {
			damon_sysfs_targets_rm_dirs(targets);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&target->kobj,
				&damon_sysfs_target_ktype, &targets->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_target_add_dirs(target);
		if (err)
			goto out;

		targets_arr[i] = target;
		targets->nr++;
	}
	return 0;

out:
	/*
	 * The failing target is not counted in targets->nr yet, so rm_dirs
	 * skips it; drop its reference separately afterwards.
	 */
	damon_sysfs_targets_rm_dirs(targets);
	kobject_put(&target->kobj);
	return err;
}
1428 
/* Show the number of target directories. */
static ssize_t nr_targets_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_targets *targets = container_of(kobj,
			struct damon_sysfs_targets, kobj);

	return sysfs_emit(buf, "%d\n", targets->nr);
}

/*
 * Store handler for 'nr_targets': (re)create that many target directories
 * under damon_sysfs_lock (trylock; -EBUSY if contended).
 */
static ssize_t nr_targets_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_targets *targets = container_of(kobj,
			struct damon_sysfs_targets, kobj);
	int nr, err = kstrtoint(buf, 0, &nr);

	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_targets_add_dirs(targets, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
1459 
/* Free the targets container when its kobject refcount drops to 0. */
static void damon_sysfs_targets_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
}

static struct kobj_attribute damon_sysfs_targets_nr_attr =
		__ATTR_RW_MODE(nr_targets, 0600);

static struct attribute *damon_sysfs_targets_attrs[] = {
	&damon_sysfs_targets_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_targets);

static struct kobj_type damon_sysfs_targets_ktype = {
	.release = damon_sysfs_targets_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_targets_groups,
};
1479 
1480 /*
1481  * intervals directory
1482  */
1483 
/* DAMON monitoring intervals, all in microseconds. */
struct damon_sysfs_intervals {
	struct kobject kobj;
	unsigned long sample_us;	/* sampling interval */
	unsigned long aggr_us;		/* aggregation interval */
	unsigned long update_us;	/* ops update interval */
};

/* Allocate an intervals object initialized with the given values. */
static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
		unsigned long sample_us, unsigned long aggr_us,
		unsigned long update_us)
{
	struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
			GFP_KERNEL);

	if (!intervals)
		return NULL;

	intervals->kobj = (struct kobject){};
	intervals->sample_us = sample_us;
	intervals->aggr_us = aggr_us;
	intervals->update_us = update_us;
	return intervals;
}
1507 
/* Show the sampling interval in microseconds. */
static ssize_t sample_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->sample_us);
}
1516 
1517 static ssize_t sample_us_store(struct kobject *kobj,
1518 		struct kobj_attribute *attr, const char *buf, size_t count)
1519 {
1520 	struct damon_sysfs_intervals *intervals = container_of(kobj,
1521 			struct damon_sysfs_intervals, kobj);
1522 	unsigned long us;
1523 	int err = kstrtoul(buf, 0, &us);
1524 
1525 	if (err)
1526 		return -EINVAL;
1527 
1528 	intervals->sample_us = us;
1529 	return count;
1530 }
1531 
/* Show the aggregation interval in microseconds. */
static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
}

/* Store the aggregation interval; bad input yields -EINVAL. */
static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return -EINVAL;

	intervals->aggr_us = us;
	return count;
}

/* Show the ops update interval in microseconds. */
static ssize_t update_us_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);

	return sysfs_emit(buf, "%lu\n", intervals->update_us);
}

/* Store the ops update interval; bad input yields -EINVAL. */
static ssize_t update_us_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_intervals *intervals = container_of(kobj,
			struct damon_sysfs_intervals, kobj);
	unsigned long us;
	int err = kstrtoul(buf, 0, &us);

	if (err)
		return -EINVAL;

	intervals->update_us = us;
	return count;
}
1579 
/* Free the intervals object when its kobject refcount drops to 0. */
static void damon_sysfs_intervals_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
}

static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
		__ATTR_RW_MODE(sample_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
		__ATTR_RW_MODE(aggr_us, 0600);

static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
		__ATTR_RW_MODE(update_us, 0600);

static struct attribute *damon_sysfs_intervals_attrs[] = {
	&damon_sysfs_intervals_sample_us_attr.attr,
	&damon_sysfs_intervals_aggr_us_attr.attr,
	&damon_sysfs_intervals_update_us_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_intervals);

static struct kobj_type damon_sysfs_intervals_ktype = {
	.release = damon_sysfs_intervals_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_intervals_groups,
};
1607 
1608 /*
1609  * monitoring_attrs directory
1610  */
1611 
/* Monitoring attributes: intervals and the min/max number of regions. */
struct damon_sysfs_attrs {
	struct kobject kobj;
	struct damon_sysfs_intervals *intervals;	/* 'intervals' dir */
	struct damon_sysfs_ul_range *nr_regions_range;	/* 'nr_regions' dir */
};

/* Allocate an attrs object; sub-directory pointers are set by add_dirs. */
static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
{
	struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);

	if (!attrs)
		return NULL;
	attrs->kobj = (struct kobject){};
	return attrs;
}
1627 
/*
 * Create the 'intervals' and 'nr_regions' sub-directories, seeded with
 * DAMON's default values (5ms sampling, 100ms aggregation, 60s update,
 * 10..1000 regions).  Uses goto-based cleanup on failure.
 */
static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
{
	struct damon_sysfs_intervals *intervals;
	struct damon_sysfs_ul_range *nr_regions_range;
	int err;

	intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
	if (!intervals)
		return -ENOMEM;

	err = kobject_init_and_add(&intervals->kobj,
			&damon_sysfs_intervals_ktype, &attrs->kobj,
			"intervals");
	if (err)
		goto put_intervals_out;
	attrs->intervals = intervals;

	nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
	if (!nr_regions_range) {
		err = -ENOMEM;
		goto put_intervals_out;
	}

	err = kobject_init_and_add(&nr_regions_range->kobj,
			&damon_sysfs_ul_range_ktype, &attrs->kobj,
			"nr_regions");
	if (err)
		goto put_nr_regions_intervals_out;
	attrs->nr_regions_range = nr_regions_range;
	return 0;

put_nr_regions_intervals_out:
	kobject_put(&nr_regions_range->kobj);
	attrs->nr_regions_range = NULL;
put_intervals_out:
	kobject_put(&intervals->kobj);
	attrs->intervals = NULL;
	return err;
}
1667 
/* Drop the references to the two attrs sub-directories. */
static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
{
	kobject_put(&attrs->nr_regions_range->kobj);
	kobject_put(&attrs->intervals->kobj);
}

/* Free the attrs object when its kobject refcount drops to 0. */
static void damon_sysfs_attrs_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
}

/* The directory itself has no files, only sub-directories. */
static struct attribute *damon_sysfs_attrs_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_attrs);

static struct kobj_type damon_sysfs_attrs_ktype = {
	.release = damon_sysfs_attrs_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_attrs_groups,
};
1689 
1690 /*
1691  * context directory
1692  */
1693 
/* This should match with enum damon_ops_id */
static const char * const damon_sysfs_ops_strs[] = {
	"vaddr",
	"paddr",
};

/* A monitoring context: operations set plus attrs/targets/schemes dirs. */
struct damon_sysfs_context {
	struct kobject kobj;
	enum damon_ops_id ops_id;		/* selected monitoring ops */
	struct damon_sysfs_attrs *attrs;	/* 'monitoring_attrs' dir */
	struct damon_sysfs_targets *targets;	/* 'targets' dir */
	struct damon_sysfs_schemes *schemes;	/* 'schemes' dir */
};
1707 
1708 static struct damon_sysfs_context *damon_sysfs_context_alloc(
1709 		enum damon_ops_id ops_id)
1710 {
1711 	struct damon_sysfs_context *context = kmalloc(sizeof(*context),
1712 				GFP_KERNEL);
1713 
1714 	if (!context)
1715 		return NULL;
1716 	context->kobj = (struct kobject){};
1717 	context->ops_id = ops_id;
1718 	return context;
1719 }
1720 
/* Create and populate the 'monitoring_attrs' sub-directory of a context. */
static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
{
	struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
	int err;

	if (!attrs)
		return -ENOMEM;
	err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
			&context->kobj, "monitoring_attrs");
	if (err)
		goto out;
	err = damon_sysfs_attrs_add_dirs(attrs);
	if (err)
		goto out;
	context->attrs = attrs;
	return 0;

out:
	kobject_put(&attrs->kobj);
	return err;
}
1742 
/* Create the 'targets' sub-directory of a context. */
static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
{
	struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
	int err;

	if (!targets)
		return -ENOMEM;
	err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
			&context->kobj, "targets");
	if (err) {
		kobject_put(&targets->kobj);
		return err;
	}
	context->targets = targets;
	return 0;
}

/* Create the 'schemes' sub-directory of a context. */
static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
{
	struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
	int err;

	if (!schemes)
		return -ENOMEM;
	err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
			&context->kobj, "schemes");
	if (err) {
		kobject_put(&schemes->kobj);
		return err;
	}
	context->schemes = schemes;
	return 0;
}
1776 
/*
 * Create all sub-directories of a context directory, unwinding already
 * created ones on failure.
 */
static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
{
	int err;

	err = damon_sysfs_context_set_attrs(context);
	if (err)
		return err;

	err = damon_sysfs_context_set_targets(context);
	if (err)
		goto put_attrs_out;

	err = damon_sysfs_context_set_schemes(context);
	if (err)
		goto put_targets_attrs_out;
	return 0;

put_targets_attrs_out:
	kobject_put(&context->targets->kobj);
	context->targets = NULL;
put_attrs_out:
	kobject_put(&context->attrs->kobj);
	context->attrs = NULL;
	return err;
}
1802 
/* Tear down all sub-directories of a context directory. */
static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
{
	damon_sysfs_attrs_rm_dirs(context->attrs);
	kobject_put(&context->attrs->kobj);
	damon_sysfs_targets_rm_dirs(context->targets);
	kobject_put(&context->targets->kobj);
	damon_sysfs_schemes_rm_dirs(context->schemes);
	kobject_put(&context->schemes->kobj);
}
1812 
/* Show the name of the currently selected monitoring operations set. */
static ssize_t operations_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_context *context = container_of(kobj,
			struct damon_sysfs_context, kobj);

	return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
}

/*
 * Select the monitoring operations set by name ("vaddr" or "paddr").
 * Unknown names get -EINVAL.
 */
static ssize_t operations_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_context *context = container_of(kobj,
			struct damon_sysfs_context, kobj);
	enum damon_ops_id id;

	for (id = 0; id < NR_DAMON_OPS; id++) {
		if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
			context->ops_id = id;
			return count;
		}
	}
	return -EINVAL;
}
1837 
/* Free the context object when its kobject refcount drops to 0. */
static void damon_sysfs_context_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_context, kobj));
}

static struct kobj_attribute damon_sysfs_context_operations_attr =
		__ATTR_RW_MODE(operations, 0600);

static struct attribute *damon_sysfs_context_attrs[] = {
	&damon_sysfs_context_operations_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_context);

static struct kobj_type damon_sysfs_context_ktype = {
	.release = damon_sysfs_context_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_context_groups,
};
1857 
1858 /*
1859  * contexts directory
1860  */
1861 
/* Container for the per-kdamond context directories. */
struct damon_sysfs_contexts {
	struct kobject kobj;
	struct damon_sysfs_context **contexts_arr;	/* 'nr' contexts */
	int nr;						/* number of contexts */
};

/* Allocate a zeroed contexts container (no context directories yet). */
static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
}
1872 
/* Remove all context sub-directories and free the pointer array. */
static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
{
	struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
	int i;

	for (i = 0; i < contexts->nr; i++) {
		damon_sysfs_context_rm_dirs(contexts_arr[i]);
		kobject_put(&contexts_arr[i]->kobj);
	}
	contexts->nr = 0;
	kfree(contexts_arr);
	contexts->contexts_arr = NULL;
}
1886 
/*
 * Replace existing context directories with @nr_contexts fresh ones,
 * each defaulting to the vaddr operations set.
 */
static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
		int nr_contexts)
{
	struct damon_sysfs_context **contexts_arr, *context;
	int err, i;

	damon_sysfs_contexts_rm_dirs(contexts);
	if (!nr_contexts)
		return 0;

	contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!contexts_arr)
		return -ENOMEM;
	contexts->contexts_arr = contexts_arr;

	for (i = 0; i < nr_contexts; i++) {
		context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
		if (!context) {
			damon_sysfs_contexts_rm_dirs(contexts);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&context->kobj,
				&damon_sysfs_context_ktype, &contexts->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_context_add_dirs(context);
		if (err)
			goto out;

		contexts_arr[i] = context;
		contexts->nr++;
	}
	return 0;

out:
	/* the failing context is not in contexts->nr; put it separately */
	damon_sysfs_contexts_rm_dirs(contexts);
	kobject_put(&context->kobj);
	return err;
}
1930 
/* Show the number of context directories. */
static ssize_t nr_contexts_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_contexts *contexts = container_of(kobj,
			struct damon_sysfs_contexts, kobj);

	return sysfs_emit(buf, "%d\n", contexts->nr);
}

/*
 * Store handler for 'nr_contexts'.  Only 0 or 1 is accepted for now, as
 * each kdamond supports a single context.
 */
static ssize_t nr_contexts_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_contexts *contexts = container_of(kobj,
			struct damon_sysfs_contexts, kobj);
	int nr, err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	/* TODO: support multiple contexts per kdamond */
	if (nr < 0 || 1 < nr)
		return -EINVAL;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_contexts_add_dirs(contexts, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}
1963 
/* Free the contexts container when its kobject refcount drops to 0. */
static void damon_sysfs_contexts_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
}

static struct kobj_attribute damon_sysfs_contexts_nr_attr
		= __ATTR_RW_MODE(nr_contexts, 0600);

static struct attribute *damon_sysfs_contexts_attrs[] = {
	&damon_sysfs_contexts_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_contexts);

static struct kobj_type damon_sysfs_contexts_ktype = {
	.release = damon_sysfs_contexts_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_contexts_groups,
};
1983 
1984 /*
1985  * kdamond directory
1986  */
1987 
/* A kdamond directory: its contexts and the running DAMON context. */
struct damon_sysfs_kdamond {
	struct kobject kobj;
	struct damon_sysfs_contexts *contexts;	/* 'contexts' sub-directory */
	struct damon_ctx *damon_ctx;		/* DAMON context, if started */
};

/* Allocate a zeroed kdamond directory object. */
static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
}
1998 
1999 static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
2000 {
2001 	struct damon_sysfs_contexts *contexts;
2002 	int err;
2003 
2004 	contexts = damon_sysfs_contexts_alloc();
2005 	if (!contexts)
2006 		return -ENOMEM;
2007 
2008 	err = kobject_init_and_add(&contexts->kobj,
2009 			&damon_sysfs_contexts_ktype, &kdamond->kobj,
2010 			"contexts");
2011 	if (err) {
2012 		kobject_put(&contexts->kobj);
2013 		return err;
2014 	}
2015 	kdamond->contexts = contexts;
2016 
2017 	return err;
2018 }
2019 
/* Tear down the 'contexts' sub-directory of a kdamond directory. */
static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
{
	damon_sysfs_contexts_rm_dirs(kdamond->contexts);
	kobject_put(&kdamond->contexts->kobj);
}
2025 
2026 static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
2027 {
2028 	bool running;
2029 
2030 	mutex_lock(&ctx->kdamond_lock);
2031 	running = ctx->kdamond != NULL;
2032 	mutex_unlock(&ctx->kdamond_lock);
2033 	return running;
2034 }
2035 
/*
 * Show "on" if this kdamond's DAMON context is running, "off" otherwise.
 * NOTE(review): kdamond->damon_ctx is read without damon_sysfs_lock here;
 * presumably racing with turn on/off is considered harmless — confirm.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx = kdamond->damon_ctx;
	bool running;

	if (!ctx)
		running = false;
	else
		running = damon_sysfs_ctx_running(ctx);

	return sysfs_emit(buf, "%s\n", running ? "on" : "off");
}
2051 
/* Apply the sysfs-specified monitoring attributes to @ctx. */
static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
		struct damon_sysfs_attrs *sys_attrs)
{
	struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
	struct damon_sysfs_ul_range *sys_nr_regions =
		sys_attrs->nr_regions_range;

	return damon_set_attrs(ctx, sys_intervals->sample_us,
			sys_intervals->aggr_us, sys_intervals->update_us,
			sys_nr_regions->min, sys_nr_regions->max);
}
2063 
/*
 * Destroy all monitoring targets of @ctx, dropping the pid references
 * that the vaddr operations set holds.
 */
static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	damon_for_each_target_safe(t, next, ctx) {
		if (ctx->ops.id == DAMON_OPS_VADDR)
			put_pid(t->pid);
		damon_destroy_target(t);
	}
}
2074 
/*
 * Build the initial monitoring regions of @t from the sysfs input.
 * Regions must be well-formed (start <= end) and sorted without overlap;
 * otherwise -EINVAL is returned.  Already-added regions are left on @t
 * for the caller to clean up.
 */
static int damon_sysfs_set_regions(struct damon_target *t,
		struct damon_sysfs_regions *sysfs_regions)
{
	int i;

	for (i = 0; i < sysfs_regions->nr; i++) {
		struct damon_sysfs_region *sys_region =
			sysfs_regions->regions_arr[i];
		struct damon_region *prev, *r;

		if (sys_region->start > sys_region->end)
			return -EINVAL;
		r = damon_new_region(sys_region->start, sys_region->end);
		if (!r)
			return -ENOMEM;
		damon_add_region(r, t);
		if (damon_nr_regions(t) > 1) {
			/* reject overlap with the previously added region */
			prev = damon_prev_region(r);
			if (prev->ar.end > r->ar.start) {
				damon_destroy_region(r, t);
				return -EINVAL;
			}
		}
	}
	return 0;
}
2101 
2102 static int damon_sysfs_set_targets(struct damon_ctx *ctx,
2103 		struct damon_sysfs_targets *sysfs_targets)
2104 {
2105 	int i, err;
2106 
2107 	for (i = 0; i < sysfs_targets->nr; i++) {
2108 		struct damon_sysfs_target *sys_target =
2109 			sysfs_targets->targets_arr[i];
2110 		struct damon_target *t = damon_new_target();
2111 
2112 		if (!t) {
2113 			damon_sysfs_destroy_targets(ctx);
2114 			return -ENOMEM;
2115 		}
2116 		if (ctx->ops.id == DAMON_OPS_VADDR) {
2117 			t->pid = find_get_pid(sys_target->pid);
2118 			if (!t->pid) {
2119 				damon_sysfs_destroy_targets(ctx);
2120 				return -EINVAL;
2121 			}
2122 		}
2123 		damon_add_target(ctx, t);
2124 		err = damon_sysfs_set_regions(t, sys_target->regions);
2125 		if (err) {
2126 			damon_sysfs_destroy_targets(ctx);
2127 			return err;
2128 		}
2129 	}
2130 	return 0;
2131 }
2132 
/*
 * Construct a DAMON-based operation scheme (damos) from its sysfs
 * representation.  Returns NULL on allocation failure.
 */
static struct damos *damon_sysfs_mk_scheme(
		struct damon_sysfs_scheme *sysfs_scheme)
{
	struct damon_sysfs_access_pattern *pattern =
		sysfs_scheme->access_pattern;
	struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
	struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
	struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
	struct damos_quota quota = {
		.ms = sysfs_quotas->ms,
		.sz = sysfs_quotas->sz,
		.reset_interval = sysfs_quotas->reset_interval_ms,
		.weight_sz = sysfs_weights->sz,
		.weight_nr_accesses = sysfs_weights->nr_accesses,
		.weight_age = sysfs_weights->age,
	};
	struct damos_watermarks wmarks = {
		.metric = sysfs_wmarks->metric,
		.interval = sysfs_wmarks->interval_us,
		.high = sysfs_wmarks->high,
		.mid = sysfs_wmarks->mid,
		.low = sysfs_wmarks->low,
	};

	return damon_new_scheme(pattern->sz->min, pattern->sz->max,
			pattern->nr_accesses->min, pattern->nr_accesses->max,
			pattern->age->min, pattern->age->max,
			sysfs_scheme->action, &quota, &wmarks);
}
2162 
/*
 * Build the schemes of @ctx from the sysfs input.  On allocation failure,
 * all already-added schemes are destroyed before returning -ENOMEM.
 */
static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
		struct damon_sysfs_schemes *sysfs_schemes)
{
	int i;

	for (i = 0; i < sysfs_schemes->nr; i++) {
		struct damos *scheme, *next;

		scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
		if (!scheme) {
			damon_for_each_scheme_safe(scheme, next, ctx)
				damon_destroy_scheme(scheme);
			return -ENOMEM;
		}
		damon_add_scheme(ctx, scheme);
	}
	return 0;
}
2181 
/*
 * Callback invoked by the kdamond before it terminates.  For vaddr
 * monitoring, drop the pid references and destroy the targets under
 * kdamond_lock.
 */
static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	if (ctx->ops.id != DAMON_OPS_VADDR)
		return;

	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_target_safe(t, next, ctx) {
		put_pid(t->pid);
		damon_destroy_target(t);
	}
	mutex_unlock(&ctx->kdamond_lock);
}
2196 
/*
 * Build a fully configured DAMON context from a sysfs context directory.
 * Returns an ERR_PTR on failure; the partially built context is destroyed.
 */
static struct damon_ctx *damon_sysfs_build_ctx(
		struct damon_sysfs_context *sys_ctx)
{
	struct damon_ctx *ctx = damon_new_ctx();
	int err;

	if (!ctx)
		return ERR_PTR(-ENOMEM);

	err = damon_select_ops(ctx, sys_ctx->ops_id);
	if (err)
		goto out;
	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
	if (err)
		goto out;
	err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
	if (err)
		goto out;
	err = damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
	if (err)
		goto out;

	ctx->callback.before_terminate = damon_sysfs_before_terminate;
	return ctx;

out:
	damon_destroy_ctx(ctx);
	return ERR_PTR(err);
}
2226 
/*
 * Handle writing 'on' to a kdamond's 'state' file: build a context from
 * the sysfs input and start a kdamond thread for it.
 *
 * Returns 0 on success, -EBUSY if this kdamond is already running,
 * -EINVAL if the kdamond does not have exactly one context, or a
 * negative error code from context construction or damon_start().
 */
static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx;
	int err;

	if (kdamond->damon_ctx &&
			damon_sysfs_ctx_running(kdamond->damon_ctx))
		return -EBUSY;
	/* TODO: support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	/* free the context kept from the previous (turned-off) run, if any */
	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kdamond->damon_ctx = NULL;

	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	err = damon_start(&ctx, 1, false);
	if (err) {
		damon_destroy_ctx(ctx);
		return err;
	}
	kdamond->damon_ctx = ctx;
	return err;
}
2254 
2255 static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
2256 {
2257 	if (!kdamond->damon_ctx)
2258 		return -EINVAL;
2259 	return damon_stop(&kdamond->damon_ctx, 1);
2260 	/*
2261 	 * To allow users show final monitoring results of already turned-off
2262 	 * DAMON, we free kdamond->damon_ctx in next
2263 	 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
2264 	 */
2265 }
2266 
/*
 * Copy the per-scheme runtime statistics from the kdamond's context into
 * the corresponding sysfs 'stats' files, so users can read them.
 *
 * The sysfs schemes array and the context's scheme list are walked in
 * parallel by index; they are assumed to be in the same order since both
 * were built from the same sysfs input (see damon_sysfs_set_schemes()).
 * Returns 0 on success, -EINVAL if the kdamond has no context.
 */
static int damon_sysfs_update_schemes_stats(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx = kdamond->damon_ctx;
	struct damos *scheme;
	int schemes_idx = 0;

	if (!ctx)
		return -EINVAL;
	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_schemes *sysfs_schemes;
		struct damon_sysfs_stats *sysfs_stats;

		/* contexts_arr[0]: only one context per kdamond is supported */
		sysfs_schemes = kdamond->contexts->contexts_arr[0]->schemes;
		sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
		sysfs_stats->nr_tried = scheme->stat.nr_tried;
		sysfs_stats->sz_tried = scheme->stat.sz_tried;
		sysfs_stats->nr_applied = scheme->stat.nr_applied;
		sysfs_stats->sz_applied = scheme->stat.sz_applied;
		sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
	}
	mutex_unlock(&ctx->kdamond_lock);
	return 0;
}
2291 
2292 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
2293 		const char *buf, size_t count)
2294 {
2295 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2296 			struct damon_sysfs_kdamond, kobj);
2297 	ssize_t ret;
2298 
2299 	if (!mutex_trylock(&damon_sysfs_lock))
2300 		return -EBUSY;
2301 	if (sysfs_streq(buf, "on"))
2302 		ret = damon_sysfs_turn_damon_on(kdamond);
2303 	else if (sysfs_streq(buf, "off"))
2304 		ret = damon_sysfs_turn_damon_off(kdamond);
2305 	else if (sysfs_streq(buf, "update_schemes_stats"))
2306 		ret = damon_sysfs_update_schemes_stats(kdamond);
2307 	else
2308 		ret = -EINVAL;
2309 	mutex_unlock(&damon_sysfs_lock);
2310 	if (!ret)
2311 		ret = count;
2312 	return ret;
2313 }
2314 
2315 static ssize_t pid_show(struct kobject *kobj,
2316 		struct kobj_attribute *attr, char *buf)
2317 {
2318 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2319 			struct damon_sysfs_kdamond, kobj);
2320 	struct damon_ctx *ctx;
2321 	int pid;
2322 
2323 	if (!mutex_trylock(&damon_sysfs_lock))
2324 		return -EBUSY;
2325 	ctx = kdamond->damon_ctx;
2326 	if (!ctx) {
2327 		pid = -1;
2328 		goto out;
2329 	}
2330 	mutex_lock(&ctx->kdamond_lock);
2331 	if (!ctx->kdamond)
2332 		pid = -1;
2333 	else
2334 		pid = ctx->kdamond->pid;
2335 	mutex_unlock(&ctx->kdamond_lock);
2336 out:
2337 	mutex_unlock(&damon_sysfs_lock);
2338 	return sysfs_emit(buf, "%d\n", pid);
2339 }
2340 
2341 static void damon_sysfs_kdamond_release(struct kobject *kobj)
2342 {
2343 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2344 			struct damon_sysfs_kdamond, kobj);
2345 
2346 	if (kdamond->damon_ctx)
2347 		damon_destroy_ctx(kdamond->damon_ctx);
2348 	kfree(kdamond);
2349 }
2350 
/* 'state' file: owner read/write; controls and queries the kdamond */
static struct kobj_attribute damon_sysfs_kdamond_state_attr =
		__ATTR_RW_MODE(state, 0600);

/* 'pid' file: owner read-only; pid of the kdamond thread, or -1 */
static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
		__ATTR_RO_MODE(pid, 0400);

static struct attribute *damon_sysfs_kdamond_attrs[] = {
	&damon_sysfs_kdamond_state_attr.attr,
	&damon_sysfs_kdamond_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);

static struct kobj_type damon_sysfs_kdamond_ktype = {
	.release = damon_sysfs_kdamond_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamond_groups,
};
2369 
2370 /*
2371  * kdamonds directory
2372  */
2373 
/* State backing the 'kdamonds' sysfs directory. */
struct damon_sysfs_kdamonds {
	struct kobject kobj;	/* the 'kdamonds' directory kobject */
	struct damon_sysfs_kdamond **kdamonds_arr;	/* 'nr' children */
	int nr;			/* number of kdamond sub-directories */
};
2379 
2380 static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
2381 {
2382 	return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
2383 }
2384 
2385 static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
2386 {
2387 	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
2388 	int i;
2389 
2390 	for (i = 0; i < kdamonds->nr; i++) {
2391 		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
2392 		kobject_put(&kdamonds_arr[i]->kobj);
2393 	}
2394 	kdamonds->nr = 0;
2395 	kfree(kdamonds_arr);
2396 	kdamonds->kdamonds_arr = NULL;
2397 }
2398 
2399 static int damon_sysfs_nr_running_ctxs(struct damon_sysfs_kdamond **kdamonds,
2400 		int nr_kdamonds)
2401 {
2402 	int nr_running_ctxs = 0;
2403 	int i;
2404 
2405 	for (i = 0; i < nr_kdamonds; i++) {
2406 		struct damon_ctx *ctx = kdamonds[i]->damon_ctx;
2407 
2408 		if (!ctx)
2409 			continue;
2410 		mutex_lock(&ctx->kdamond_lock);
2411 		if (ctx->kdamond)
2412 			nr_running_ctxs++;
2413 		mutex_unlock(&ctx->kdamond_lock);
2414 	}
2415 	return nr_running_ctxs;
2416 }
2417 
/*
 * Resize the 'kdamonds' directory to have 'nr_kdamonds' numbered kdamond
 * sub-directories.  The existing sub-directories are removed first.
 *
 * Returns 0 on success, -EBUSY if any existing kdamond is still running,
 * -ENOMEM on allocation failure, or the error from kobject setup.
 */
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
		int nr_kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
	int err, i;

	/* refuse to tear down directories of running kdamonds */
	if (damon_sysfs_nr_running_ctxs(kdamonds->kdamonds_arr, kdamonds->nr))
		return -EBUSY;

	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	if (!nr_kdamonds)
		return 0;

	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!kdamonds_arr)
		return -ENOMEM;
	kdamonds->kdamonds_arr = kdamonds_arr;

	for (i = 0; i < nr_kdamonds; i++) {
		kdamond = damon_sysfs_kdamond_alloc();
		if (!kdamond) {
			damon_sysfs_kdamonds_rm_dirs(kdamonds);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&kdamond->kobj,
				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_kdamond_add_dirs(kdamond);
		if (err)
			goto out;

		/* publish only after full setup, so rm_dirs() sees it */
		kdamonds_arr[i] = kdamond;
		kdamonds->nr++;
	}
	return 0;

out:
	/*
	 * The failing 'kdamond' is not counted in kdamonds->nr yet, so
	 * rm_dirs() will not touch it; drop its reference separately.  The
	 * put is required even when kobject_init_and_add() itself failed.
	 */
	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	kobject_put(&kdamond->kobj);
	return err;
}
2464 
2465 static ssize_t nr_kdamonds_show(struct kobject *kobj,
2466 		struct kobj_attribute *attr, char *buf)
2467 {
2468 	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2469 			struct damon_sysfs_kdamonds, kobj);
2470 
2471 	return sysfs_emit(buf, "%d\n", kdamonds->nr);
2472 }
2473 
2474 static ssize_t nr_kdamonds_store(struct kobject *kobj,
2475 		struct kobj_attribute *attr, const char *buf, size_t count)
2476 {
2477 	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2478 			struct damon_sysfs_kdamonds, kobj);
2479 	int nr, err;
2480 
2481 	err = kstrtoint(buf, 0, &nr);
2482 	if (err)
2483 		return err;
2484 	if (nr < 0)
2485 		return -EINVAL;
2486 
2487 	if (!mutex_trylock(&damon_sysfs_lock))
2488 		return -EBUSY;
2489 	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
2490 	mutex_unlock(&damon_sysfs_lock);
2491 	if (err)
2492 		return err;
2493 
2494 	return count;
2495 }
2496 
2497 static void damon_sysfs_kdamonds_release(struct kobject *kobj)
2498 {
2499 	kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
2500 }
2501 
/* 'nr_kdamonds' file: owner read/write; count of kdamond directories */
static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
		__ATTR_RW_MODE(nr_kdamonds, 0600);

static struct attribute *damon_sysfs_kdamonds_attrs[] = {
	&damon_sysfs_kdamonds_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);

static struct kobj_type damon_sysfs_kdamonds_ktype = {
	.release = damon_sysfs_kdamonds_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamonds_groups,
};
2516 
2517 /*
2518  * damon user interface directory
2519  */
2520 
/* State backing the top-level 'admin' user interface directory. */
struct damon_sysfs_ui_dir {
	struct kobject kobj;	/* the 'admin' directory kobject */
	struct damon_sysfs_kdamonds *kdamonds;	/* 'kdamonds' child dir */
};
2525 
2526 static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
2527 {
2528 	return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
2529 }
2530 
2531 static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
2532 {
2533 	struct damon_sysfs_kdamonds *kdamonds;
2534 	int err;
2535 
2536 	kdamonds = damon_sysfs_kdamonds_alloc();
2537 	if (!kdamonds)
2538 		return -ENOMEM;
2539 
2540 	err = kobject_init_and_add(&kdamonds->kobj,
2541 			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
2542 			"kdamonds");
2543 	if (err) {
2544 		kobject_put(&kdamonds->kobj);
2545 		return err;
2546 	}
2547 	ui_dir->kdamonds = kdamonds;
2548 	return err;
2549 }
2550 
2551 static void damon_sysfs_ui_dir_release(struct kobject *kobj)
2552 {
2553 	kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
2554 }
2555 
/* the 'admin' directory exposes no files of its own */
static struct attribute *damon_sysfs_ui_dir_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);

static struct kobj_type damon_sysfs_ui_dir_ktype = {
	.release = damon_sysfs_ui_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ui_dir_groups,
};
2566 
/*
 * Create '/sys/kernel/mm/damon' and its 'admin' and 'admin/kdamonds'
 * sub-directories at boot.  Returns 0 on success or a negative error
 * code; on failure all created kobjects are released.
 */
static int __init damon_sysfs_init(void)
{
	struct kobject *damon_sysfs_root;
	struct damon_sysfs_ui_dir *admin;
	int err;

	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
	if (!damon_sysfs_root)
		return -ENOMEM;

	admin = damon_sysfs_ui_dir_alloc();
	if (!admin) {
		kobject_put(damon_sysfs_root);
		return -ENOMEM;
	}
	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
			damon_sysfs_root, "admin");
	if (err)
		goto out;
	err = damon_sysfs_ui_dir_add_dirs(admin);
	if (err)
		goto out;
	return 0;

out:
	/* the put on admin->kobj frees 'admin' via its release callback */
	kobject_put(&admin->kobj);
	kobject_put(damon_sysfs_root);
	return err;
}
subsys_initcall(damon_sysfs_init);
2597