/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT   0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
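
/*
 * Usage sketch (illustrative, not new API): count_vm_event() is safe from
 * any context; the __ variant trades that safety for a cheaper increment
 * and suits callers already serialized per cpu (or willing to accept the
 * race, as the comment above allows). PGFAULT is an existing
 * vm_event_item.
 *
 *	count_vm_event(PGFAULT);	// preemption/irq safe
 *	__count_vm_event(PGFAULT);	// cheaper; caller handles the context
 */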

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x)     do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_PER_VMA_LOCK_STATS
#define count_vm_vma_lock_event(x) count_vm_event(x)
#else
#define count_vm_vma_lock_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
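
/*
 * Worked example (a sketch of the expansion; nothing new is defined):
 * the per-zone events are laid out in zone order, so indexing relative
 * to the _NORMAL item selects the right zone's event. For instance,
 *
 *	__count_zid_vm_events(PGALLOC, ZONE_DMA, 1)
 * expands to
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA, 1)
 *
 * which resolves to PGALLOC_DMA as long as the PGALLOC_* items keep the
 * same relative order as enum zone_type.
 */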

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}
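
/*
 * The readers below clamp negative values to zero: with CONFIG_SMP the
 * per-cpu diffs are only folded into these atomics periodically, so a
 * counter can transiently appear negative when decrements are folded
 * before their matching increments.
 */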
static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
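
/*
 * Usage sketch (illustrative only): the plain reader is cheap but can lag
 * by up to the per-cpu threshold on each CPU; the snapshot sums the
 * pending diffs and is preferred when a caller is about to act on a value
 * near a watermark.
 *
 *	unsigned long fast  = zone_page_state(zone, NR_FREE_PAGES);
 *	unsigned long close = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */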

#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}
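
/*
 * Illustrative sketch: the page allocator's zone statistics path credits
 * batches of pages roughly like this (z, preferred_zone and nr_pages are
 * assumed from the caller's context):
 *
 *	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
 *		__count_numa_events(z, NUMA_HIT, nr_pages);
 *	else
 *		__count_numa_events(z, NUMA_MISS, nr_pages);
 */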

extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}
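
/*
 * Illustrative sketch: byte-granular items carry a _B suffix and must be
 * passed byte deltas that are whole pages at this level, e.g. for slab
 * accounting (the item name exists in enum node_stat_item):
 *
 *	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);
 *
 * A delta that is not page-aligned would trip the VM_WARN_ON_ONCE above.
 */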

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif		/* CONFIG_SMP */

static inline void __zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	__mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void __zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void __zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void __node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	__mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void __node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void __node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}
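
/*
 * Usage sketch (illustrative; old_folio/new_folio assumed from a
 * migration-like context): the add/sub pairs scale by folio_nr_pages(),
 * so a large folio moving between nodes keeps the counters balanced:
 *
 *	node_stat_sub_folio(old_folio, NR_FILE_PAGES);
 *	node_stat_add_folio(new_folio, NR_FILE_PAGES);
 */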

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
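
/*
 * Usage sketch (order and migratetype assumed from the caller's context):
 * the buddy allocator passes a positive count when freeing pages and a
 * negative one when allocating, so CMA pageblocks stay accounted in
 * NR_FREE_CMA_PAGES alongside NR_FREE_PAGES:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 */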

extern const char * const vmstat_text[];
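
/*
 * Note (summarizing the index math below): vmstat_text is one flat array
 * of names ordered as zone stat items, then NUMA event items, then node
 * stat items, then writeback stat items, then vm event items. The helpers
 * that follow simply offset into that array.
 */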

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __lruvec_stat_mod_folio(struct folio *folio,
			     enum node_stat_item idx, int val);

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__lruvec_stat_mod_folio(folio, idx, val);
	local_irq_restore(flags);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	lruvec_stat_mod_folio(page_folio(page), idx, val);
}
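
/*
 * Usage sketch (illustrative): with CONFIG_MEMCG these update both the
 * memcg-aware lruvec counters and the node counters. The double-underscore
 * forms expect interrupts to be disabled already; the plain forms wrap the
 * update in local_irq_save()/local_irq_restore(). For example, dirty page
 * accounting from a context that may be interrupted:
 *
 *	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, folio_nr_pages(folio));
 */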

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	__mod_node_page_state(folio_pgdat(folio), idx, val);
}

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(folio_pgdat(folio), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */

static inline void __lruvec_stat_add_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void __lruvec_stat_sub_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

static inline void lruvec_stat_add_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void lruvec_stat_sub_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
#endif /* _LINUX_VMSTAT_H */