Searched refs:memcg (Results 1 – 25 of 64) sorted by relevance

/linux/mm/
memcontrol.c
504 for (; memcg; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_update_tree()
1002 memcg = pn->memcg; in __mod_memcg_lruvec_state()
1959 .memcg = memcg, in mem_cgroup_out_of_memory()
2152 if (memcg && memcg->under_oom) in memcg_oom_recover()
2241 owait.memcg = memcg; in mem_cgroup_oom_synchronize()
2308 for (; memcg; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_get_oom_group()
4163 objcg->memcg = memcg; in memcg_online_kmem()
5293 event->memcg = memcg; in memcg_write_event_control()
5655 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
7685 if (ug->memcg != memcg) { in uncharge_folio()
[all …]
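
The hits at source lines 504 and 2308 share the same upward-walk idiom: start from a memcg and follow parent_mem_cgroup() until the root has been visited. A minimal sketch of the pattern, assuming the caller already holds a reference on the starting memcg (propagate_to_ancestors() is a hypothetical name):

#include <linux/memcontrol.h>

/* Hypothetical helper showing the parent-walk idiom seen in
 * mem_cgroup_update_tree() and mem_cgroup_get_oom_group(): visit a
 * memcg and every ancestor up to and including the root. */
static void propagate_to_ancestors(struct mem_cgroup *memcg)
{
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		/* per-level work goes here */
	}
}
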
shrinker.c
69 pn = memcg->nodeinfo[nid]; in free_shrinker_info()
100 free_shrinker_info(memcg); in alloc_shrinker_info()
153 struct mem_cgroup *memcg; in expand_shrinker_info() local
171 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); in expand_shrinker_info()
196 if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { in set_shrinker_bit()
342 if (sc->memcg && in xchg_nr_deferred()
345 sc->memcg); in xchg_nr_deferred()
359 if (sc->memcg && in add_nr_deferred()
362 sc->memcg); in add_nr_deferred()
529 .memcg = memcg, in shrink_slab_memcg()
[all …]
shrinker_debug.c
19 struct mem_cgroup *memcg, in shrinker_count_objects() argument
30 .memcg = memcg, in shrinker_count_objects()
51 struct mem_cgroup *memcg; in shrinker_debugfs_count_show() local
66 if (memcg && !mem_cgroup_online(memcg)) in shrinker_debugfs_count_show()
89 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); in shrinker_debugfs_count_show()
113 struct mem_cgroup *memcg = NULL; in shrinker_debugfs_scan_write() local
133 if (!memcg || IS_ERR(memcg)) in shrinker_debugfs_scan_write()
136 if (!mem_cgroup_online(memcg)) { in shrinker_debugfs_scan_write()
137 mem_cgroup_put(memcg); in shrinker_debugfs_scan_write()
145 sc.memcg = memcg; in shrinker_debugfs_scan_write()
[all …]
list_lru.c
89 struct mem_cgroup *memcg) in list_lru_add() argument
116 return list_lru_add(lru, item, nid, memcg); in list_lru_add_obj()
121 struct mem_cgroup *memcg) in list_lru_del() argument
146 return list_lru_del(lru, item, nid, memcg); in list_lru_del_obj()
166 int nid, struct mem_cgroup *memcg) in list_lru_count_one() argument
424 int src_idx = memcg->kmemcg_id; in memcg_reparent_list_lrus()
457 int idx = memcg->kmemcg_id; in memcg_list_lru_allocated()
469 struct mem_cgroup *memcg; in memcg_list_lru_alloc() member
486 for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) { in memcg_list_lru_alloc()
487 if (memcg_list_lru_allocated(memcg, lru)) in memcg_list_lru_alloc()
[all …]
vmpressure.c
77 struct mem_cgroup *memcg = vmpressure_to_memcg(vmpr); in vmpressure_parent() local
79 memcg = parent_mem_cgroup(memcg); in vmpressure_parent()
80 if (!memcg) in vmpressure_parent()
82 return memcg_to_vmpressure(memcg); in vmpressure_parent()
255 vmpr = memcg_to_vmpressure(memcg); in vmpressure()
295 if (!memcg || mem_cgroup_is_root(memcg)) in vmpressure()
319 WRITE_ONCE(memcg->socket_pressure, jiffies + HZ); in vmpressure()
351 vmpressure(gfp, memcg, true, vmpressure_win, 0); in vmpressure_prio()
374 int vmpressure_register_event(struct mem_cgroup *memcg, in vmpressure_register_event() argument
377 struct vmpressure *vmpr = memcg_to_vmpressure(memcg); in vmpressure_register_event()
[all …]
zswap.c
716 struct mem_cgroup *memcg; in zswap_lru_add() local
754 struct mem_cgroup *memcg; in zswap_lru_del() local
1216 struct mem_cgroup *memcg = sc->memcg; in zswap_shrinker_count() local
1301 if (memcg && !mem_cgroup_online(memcg)) in shrink_memcg()
1315 struct mem_cgroup *memcg; in shrink_worker() local
1339 if (!memcg) { in shrink_worker()
1362 mem_cgroup_put(memcg); in shrink_worker()
1437 mem_cgroup_put(memcg); in zswap_store()
1440 mem_cgroup_put(memcg); in zswap_store()
1468 mem_cgroup_put(memcg); in zswap_store()
[all …]
vmscan.c
2610 if (memcg) { in get_lruvec()
2761 if (memcg) in get_mm_list()
2801 mm->lru_gen.memcg = memcg; in lru_gen_add_mm()
2829 memcg = mm->lru_gen.memcg; in lru_gen_del_mm()
2878 if (memcg == mm->lru_gen.memcg) in lru_gen_migrate_mm()
3975 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); in lru_gen_age_node()
5094 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); in lru_gen_change_state()
5205 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); in lru_gen_seq_start()
5228 memcg = mem_cgroup_iter(NULL, memcg, NULL); in lru_gen_seq_next()
5309 if (memcg) in lru_gen_seq_show()
[all …]
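
Several hits above share the mem_cgroup_iter() idiom for visiting every memcg under a root. The iterator takes a reference on the cgroup it returns and drops the one it was handed, so a loop that runs to completion needs no explicit puts (an early exit must go through mem_cgroup_iter_break() instead). A minimal sketch:

#include <linux/memcontrol.h>

/* Walk the whole hierarchy, as lru_gen_age_node() and
 * expand_shrinker_info() do above. */
static void visit_all_memcgs(void)
{
	struct mem_cgroup *memcg = mem_cgroup_iter(NULL, NULL, NULL);

	do {
		/* per-memcg work goes here */
	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
}
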
workingset.c
248 lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_eviction()
268 struct mem_cgroup *memcg; in lru_gen_test_recent() local
273 memcg = mem_cgroup_from_id(memcg_id); in lru_gen_test_recent()
274 *lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_test_recent()
534 struct mem_cgroup *memcg; in workingset_refault() local
555 memcg = folio_memcg(folio); in workingset_refault()
557 lruvec = mem_cgroup_lruvec(memcg, pgdat); in workingset_refault()
586 struct mem_cgroup *memcg; in workingset_activation() local
596 memcg = folio_memcg_rcu(folio); in workingset_activation()
597 if (!mem_cgroup_disabled() && !memcg) in workingset_activation()
[all …]
mmap_lock.c
202 struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); in get_mm_memcg_path() local
204 if (memcg == NULL) in get_mm_memcg_path()
206 if (unlikely(memcg->css.cgroup == NULL)) in get_mm_memcg_path()
213 cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE); in get_mm_memcg_path()
216 css_put(&memcg->css); in get_mm_memcg_path()
oom_kill.c
74 return oc->memcg != NULL; in is_memcg_oom()
261 oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1; in constrained_alloc()
370 mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); in select_bad_process()
431 mem_cgroup_scan_tasks(oc->memcg, dump_task, oc); in dump_tasks()
449 mem_cgroup_print_oom_context(oc->memcg, victim); in dump_oom_victim()
464 mem_cgroup_print_oom_meminfo(oc->memcg); in dump_header()
1044 oom_group = mem_cgroup_get_oom_group(victim, oc->memcg); in oom_kill_process()
/linux/include/linux/
memcontrol.h
748 if (!memcg) in mem_cgroup_lruvec()
827 return !memcg || css_tryget(&memcg->css); in mem_cgroup_tryget()
832 return !memcg || css_tryget_online(&memcg->css); in mem_cgroup_tryget_online()
837 if (memcg) in mem_cgroup_put()
863 return memcg ? cgroup_ino(memcg->css.cgroup) : 0; in mem_cgroup_ino()
1018 if (memcg) in mod_memcg_page_state()
1062 if (memcg) in count_memcg_folio_events()
1102 } while ((memcg = parent_mem_cgroup(memcg)) && in memcg_memory_event()
1617 memcg = parent_mem_cgroup(memcg); in parent_lruvec()
1742 } while ((memcg = parent_mem_cgroup(memcg))); in mem_cgroup_under_socket_pressure()
[all …]
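
Source lines 827-837 above are the reference-counting helpers; note that mem_cgroup_tryget() deliberately treats a NULL memcg as success, and mem_cgroup_put() is a no-op on NULL, so the guard-and-release pattern composes cleanly. A minimal sketch:

#include <linux/memcontrol.h>

static void use_memcg_safely(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_tryget(memcg))
		return;	/* cgroup is on its way out */

	/* memcg is pinned here (or NULL, which the helpers tolerate) */

	mem_cgroup_put(memcg);
}
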
vmpressure.h
33 extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
35 extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
39 extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
41 extern int vmpressure_register_event(struct mem_cgroup *memcg,
44 extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
47 static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, in vmpressure() argument
49 static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, in vmpressure_prio() argument
list_lru.h
70 int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
72 void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);
93 struct mem_cgroup *memcg);
122 struct mem_cgroup *memcg);
150 int nid, struct mem_cgroup *memcg);
156 return list_lru_count_one(lru, sc->nid, sc->memcg); in list_lru_shrink_count()
200 int nid, struct mem_cgroup *memcg,
217 int nid, struct mem_cgroup *memcg,
228 return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg, in list_lru_shrink_walk()
236 return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg, in list_lru_shrink_walk_irq()
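
The wrappers at source lines 156 and 228 are the glue between a shrinker and its list_lru: sc->nid and sc->memcg select exactly one per-node, per-memcg sublist. A count callback for a memcg-aware shrinker therefore reduces to a single call (a minimal sketch; demo_lru is hypothetical and assumed initialized elsewhere):

#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru demo_lru;	/* hypothetical, set up elsewhere */

static unsigned long demo_count_objects(struct shrinker *s,
					struct shrink_control *sc)
{
	/* counts only the (sc->nid, sc->memcg) sublist */
	return list_lru_shrink_count(&demo_lru, sc);
}
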
zswap.h
35 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
59 static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {} in zswap_memcg_offline_cleanup() argument
swap.h
407 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
607 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) in mem_cgroup_swappiness() argument
614 if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg)) in mem_cgroup_swappiness()
617 return READ_ONCE(memcg->swappiness); in mem_cgroup_swappiness()
659 extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
677 static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) in mem_cgroup_get_nr_swap_pages() argument
mmzone.h
562 void lru_gen_init_memcg(struct mem_cgroup *memcg);
563 void lru_gen_exit_memcg(struct mem_cgroup *memcg);
564 void lru_gen_online_memcg(struct mem_cgroup *memcg);
565 void lru_gen_offline_memcg(struct mem_cgroup *memcg);
566 void lru_gen_release_memcg(struct mem_cgroup *memcg);
567 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
583 static inline void lru_gen_init_memcg(struct mem_cgroup *memcg) in lru_gen_init_memcg() argument
587 static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg) in lru_gen_exit_memcg() argument
591 static inline void lru_gen_online_memcg(struct mem_cgroup *memcg) in lru_gen_online_memcg() argument
595 static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg) in lru_gen_offline_memcg() argument
[all …]
/linux/tools/testing/selftests/cgroup/
test_memcontrol.c
170 char *memcg; in test_memcg_current() local
173 if (!memcg) in test_memcg_current()
193 free(memcg); in test_memcg_current()
507 if (!memcg) in test_memcg_high()
571 if (!memcg) in test_memcg_high_sync()
632 if (!memcg) in test_memcg_max()
726 if (!memcg) in test_memcg_reclaim()
832 if (!memcg) in test_memcg_swap_max()
892 if (!memcg) in test_memcg_oom_events()
1053 if (!memcg) in test_memcg_sock()
[all …]
/linux/tools/cgroup/
memcg_slabinfo.py
42 memcg = container_of(css, 'struct mem_cgroup', 'css')
43 MEMCGS[css.cgroup.kn.id.value_()] = memcg
170 memcg = MEMCGS[cgroup_id]
186 obj_cgroups.add(memcg.objcg.value_())
188 memcg.objcg_list.address_of_(),
220 memcg.kmem_caches.address_of_(),
/linux/Documentation/admin-guide/cgroup-v1/
memcg_test.rst
9 Because the VM is getting complex (one of the reasons is memcg...), memcg's behavior
10 is complex. This is a document for memcg's internal behavior.
61 At commit(), the page is associated with the memcg.
114 But brief explanation of the behavior of memcg around shmem will be
136 Each memcg has its own vector of LRUs (inactive anon, active anon,
138 each LRU handled under a single lru_lock for that memcg and node.
145 9.1 Small limit to memcg.
248 Besides management of swap is one of complicated parts of memcg,
275 Out-of-memory caused by memcg's limit will kill tasks under
276 the memcg. When hierarchy is used, a task under hierarchy
[all …]
/linux/Documentation/translations/zh_CN/mm/
hwpoison.rst
119 corrupt-filter-memcg
120 Limit injection to pages owned by the memgroup, specified by the memcg's inode number.
130 echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
/linux/include/linux/sched/
mm.h
505 set_active_memcg(struct mem_cgroup *memcg) in set_active_memcg() argument
511 this_cpu_write(int_active_memcg, memcg); in set_active_memcg()
514 current->active_memcg = memcg; in set_active_memcg()
521 set_active_memcg(struct mem_cgroup *memcg) in set_active_memcg() argument
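
set_active_memcg() returns the previously active memcg, so callers save and restore it around accounted allocations; the bpf/memalloc.c hits below follow exactly this shape. A minimal sketch, assuming the caller holds a reference on memcg (alloc_charged_to() is a hypothetical name):

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_charged_to(struct mem_cgroup *memcg, size_t size)
{
	struct mem_cgroup *old_memcg;
	void *p;

	old_memcg = set_active_memcg(memcg);
	/* __GFP_ACCOUNT charges the allocation to the active memcg */
	p = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	return p;
}
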
/linux/mm/damon/
paddr.c
202 struct mem_cgroup *memcg; in __damos_pa_filter_out() local
210 memcg = folio_memcg_check(folio); in __damos_pa_filter_out()
211 if (!memcg) in __damos_pa_filter_out()
214 matched = filter->memcg_id == mem_cgroup_id(memcg); in __damos_pa_filter_out()
/linux/Documentation/admin-guide/mm/
shrinker_debugfs.rst
14 trigger *count_objects()* and *scan_objects()* callbacks for each memcg and
59 If the shrinker is not memcg-aware or CONFIG_MEMCG is off, 0 is printed
112 For a non-memcg-aware shrinker or on a system with no memory
/linux/kernel/bpf/
memalloc.c
208 struct mem_cgroup *memcg = NULL, *old_memcg; in alloc_bulk() local
239 memcg = get_memcg(c); in alloc_bulk()
240 old_memcg = set_active_memcg(memcg); in alloc_bulk()
253 mem_cgroup_put(memcg); in alloc_bulk()
998 struct mem_cgroup *memcg, *old_memcg; in bpf_mem_cache_alloc_flags() local
1000 memcg = get_memcg(c); in bpf_mem_cache_alloc_flags()
1001 old_memcg = set_active_memcg(memcg); in bpf_mem_cache_alloc_flags()
1006 mem_cgroup_put(memcg); in bpf_mem_cache_alloc_flags()
/linux/Documentation/mm/
multigen_lru.rst
162 An ``mm_struct`` list is maintained for each memcg, and an
163 ``mm_struct`` follows its owner task to the new memcg when this task
173 ``mm_struct`` was migrated, pages left in the previous memcg will be
174 ignored when the current memcg is under reclaim. Similarly, page table
225 A memcg LRU is a per-node LRU of memcgs. It is also an LRU of LRUs,
226 since each node and memcg combination has an LRU of folios (see
229 data centers. Note that the memcg LRU only applies to global reclaim.
231 The basic structure of a memcg LRU can be understood by an analogy to
238 3. Other events trigger similar operations, e.g., offlining a memcg
243 1. Sharding, which allows each thread to start at a random memcg (in
