mm: separate folio_split_memcg_refs() from split_page_memcg()

Patch series "Minor memcg cleanups & prep for memdescs", v2.

Separate the handling of accounted folios and GFP_ACCOUNT pages to make
the code easier to understand.  For more detail, see
https://lore.kernel.org/linux-mm/Z9LwTOudOlCGny3f@casper.infradead.org/


This patch (of 5):

Folios always use memcg_data to refer to the mem_cgroup, while pages
allocated with GFP_ACCOUNT have a pointer to the obj_cgroup.  Since the
caller already knows which of the two it has, split the function into two
so that neither needs to check at run time.
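
To illustrate the distinction, here is a self-contained userspace sketch
(not kernel source; the structures are stand-ins, and only the idea of a
MEMCG_DATA_KMEM-style flag bit in the low bits of memcg_data is borrowed
from the kernel):

#include <assert.h>
#include <stdint.h>

/* Stand-ins for the kernel structures. */
struct mem_cgroup { int id; };
struct obj_cgroup { int id; };

/* Model of the kernel convention: a low flag bit marks a GFP_ACCOUNT
 * page whose pointer is an obj_cgroup rather than a mem_cgroup. */
#define DATA_KMEM 0x2UL

int main(void)
{
	struct mem_cgroup cg = { 1 };
	struct obj_cgroup ocg = { 2 };

	/* LRU folio: memcg_data is a plain mem_cgroup pointer. */
	uintptr_t folio_data = (uintptr_t)&cg;
	/* GFP_ACCOUNT page: an obj_cgroup pointer with the flag set. */
	uintptr_t kmem_data = (uintptr_t)&ocg | DATA_KMEM;

	/* The old split_page_memcg() tested this flag at run time;
	 * after this patch each caller already knows its case. */
	assert(!(folio_data & DATA_KMEM));
	assert(kmem_data & DATA_KMEM);
	return 0;
}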

Move the assignment of the split folio's memcg_data to the point where we
set up the other parts of the new folio.  That leaves
folio_split_memcg_refs() handling only the memcg accounting.
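
A toy model of the resulting division of labour (userspace C with
hypothetical names; the refcount rule mirrors the new
folio_split_memcg_refs()): copying memcg_data is part of setting up each
new folio, and the accounting step only takes one extra reference per
additional folio:

#include <assert.h>

struct memcg { int refcount; };
struct folio { struct memcg *memcg_data; };

/* Model of __split_folio_to_order(): each new (tail) folio copies the
 * head folio's memcg_data as part of its normal setup. */
static void split_to_order(struct folio *head, struct folio *tails,
			   int nr_tails)
{
	for (int i = 0; i < nr_tails; i++)
		tails[i].memcg_data = head->memcg_data;
}

/* Model of folio_split_memcg_refs(): the head already holds one
 * reference, so take one more for each extra folio produced. */
static void split_memcg_refs(struct folio *head, unsigned old_order,
			     unsigned new_order)
{
	if (!head->memcg_data)
		return;
	head->memcg_data->refcount += (1 << (old_order - new_order)) - 1;
}

int main(void)
{
	struct memcg cg = { .refcount = 1 };
	struct folio head = { .memcg_data = &cg };
	struct folio tails[7];	/* order 3 -> order 0: 7 extra folios */

	split_to_order(&head, tails, 7);
	split_memcg_refs(&head, 3, 0);
	assert(cg.refcount == 8);	/* one reference per folio */
	return 0;
}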

Link: https://lkml.kernel.org/r/20250314133617.138071-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20250314133617.138071-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Zi Yan <ziy@nvidia.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1039,6 +1039,8 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 }
 
 void split_page_memcg(struct page *head, int old_order, int new_order);
+void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
+		unsigned new_order);
 
 static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
 {
@@ -1463,6 +1465,11 @@ static inline void split_page_memcg(struct page *head, int old_order, int new_or
 {
 }
 
+static inline void folio_split_memcg_refs(struct folio *folio,
+		unsigned old_order, unsigned new_order)
+{
+}
+
 static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
 {
 	return 0;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3394,6 +3394,9 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
 			folio_set_young(new_folio);
 		if (folio_test_idle(folio))
 			folio_set_idle(new_folio);
+#ifdef CONFIG_MEMCG
+		new_folio->memcg_data = folio->memcg_data;
+#endif
 
 		folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
 	}
@ -3525,18 +3528,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
}
}
/*
* Reset any memcg data overlay in the tail pages.
* folio_nr_pages() is unreliable until prep_compound_page()
* was called again.
*/
#ifdef NR_PAGES_IN_LARGE_FOLIO
folio->_nr_pages = 0;
#endif
/* complete memcg works before add pages to LRU */
split_page_memcg(&folio->page, old_order, split_order);
folio_split_memcg_refs(folio, old_order, split_order);
split_page_owner(&folio->page, old_order, split_order);
pgalloc_tag_split(folio, old_order, split_order);

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3081,10 +3081,19 @@ void split_page_memcg(struct page *head, int old_order, int new_order)
 	for (i = new_nr; i < old_nr; i += new_nr)
 		folio_page(folio, i)->memcg_data = folio->memcg_data;
 
-	if (folio_memcg_kmem(folio))
-		obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
-	else
-		css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1);
+	obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
+}
+
+void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
+		unsigned new_order)
+{
+	unsigned new_refs;
+
+	if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
+		return;
+
+	new_refs = (1 << (old_order - new_order)) - 1;
+	css_get_many(&__folio_memcg(folio)->css, new_refs);
 }
 
 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
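
To make the arithmetic concrete (a worked example, not part of the
patch): splitting order 9 down to order 0 produces 512 folios, and
new_refs = (1 << 9) - 1 = 511 extra css references, since the original
charge already holds one.  The same count appears as old_nr / new_nr - 1
in split_page_memcg(), whose loop steps through the old compound page in
new-size chunks.  A quick standalone check:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned old_order = 2, new_order = 0;
	unsigned old_nr = 1u << old_order;	/* 4 pages          */
	unsigned new_nr = 1u << new_order;	/* 1 page per chunk */

	/* Same stepping as the copy loop in split_page_memcg(). */
	for (unsigned i = new_nr; i < old_nr; i += new_nr)
		printf("copy memcg_data to page %u\n", i);	/* 1, 2, 3 */

	/* Both expressions count the pieces beyond the original head. */
	assert(old_nr / new_nr - 1 == 3);
	assert((1u << (old_order - new_order)) - 1 == 3);
	return 0;
}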