mm/hugetlb: move hugetlb CMA code in to its own file
hugetlb.c contained a number of CONFIG_CMA ifdefs, and the code inside them
was large enough to merit being in its own file, so move it, cleaning up
things a bit.

Hide some direct variable access behind functions to accommodate the move.

No functional change intended.

Link: https://lkml.kernel.org/r/20250228182928.2645936-28-fvdl@google.com
Signed-off-by: Frank van der Linden <fvdl@google.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin (Cruise) <roman.gushchin@linux.dev>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 474fe91f21 (parent d2d7867140)
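The "hide some direct variable access behind functions" part of the change comes down to mm/hugetlb.c no longer reading the CMA bookkeeping variables itself; it calls small accessors provided by mm/hugetlb_cma.c (or by the CONFIG_CMA=n stubs in mm/hugetlb_cma.h). A condensed, illustrative sketch of that pattern, using names taken from the hunks below (example_caller is a made-up stand-in for real call sites such as hugetlb_hstate_alloc_pages()):

/* mm/hugetlb_cma.c: the CMA state becomes file-local ... */
static unsigned long hugetlb_cma_size __initdata;
static bool hugetlb_cma_only;

unsigned long __init hugetlb_cma_total_size(void)
{
    return hugetlb_cma_size;
}

bool hugetlb_cma_exclusive_alloc(void)
{
    return hugetlb_cma_only;
}

/* ... and mm/hugetlb.c consumes it only through the accessors. */
static void __init example_caller(struct hstate *h)
{
    if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
        !hugetlb_early_cma(h))
        pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
}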
MAINTAINERS
@@ -10708,6 +10708,8 @@ F: fs/hugetlbfs/
 F: include/linux/hugetlb.h
 F: include/trace/events/hugetlbfs.h
 F: mm/hugetlb.c
+F: mm/hugetlb_cma.c
+F: mm/hugetlb_cma.h
 F: mm/hugetlb_vmemmap.c
 F: mm/hugetlb_vmemmap.h
 F: tools/testing/selftests/cgroup/test_hugetlb_memcg.c
mm/Makefile
@@ -79,6 +79,9 @@ obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_slots.o
 obj-$(CONFIG_ZSWAP) += zswap.o
 obj-$(CONFIG_HAS_DMA) += dmapool.o
 obj-$(CONFIG_HUGETLBFS) += hugetlb.o
+ifdef CONFIG_CMA
+obj-$(CONFIG_HUGETLBFS) += hugetlb_cma.o
+endif
 obj-$(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP) += hugetlb_vmemmap.o
 obj-$(CONFIG_NUMA) += mempolicy.o
 obj-$(CONFIG_SPARSEMEM) += sparse.o
mm/hugetlb.c (269 changed lines)
@@ -49,19 +49,13 @@
 #include <linux/page_owner.h>
 #include "internal.h"
 #include "hugetlb_vmemmap.h"
+#include "hugetlb_cma.h"
 #include <linux/page-isolation.h>
 
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
 
-#ifdef CONFIG_CMA
-static struct cma *hugetlb_cma[MAX_NUMNODES];
-static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
-#endif
-static bool hugetlb_cma_only;
-static unsigned long hugetlb_cma_size __initdata;
-
 __initdata struct list_head huge_boot_pages[MAX_NUMNODES];
 static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;
 
@@ -128,14 +122,11 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
 
 static void hugetlb_free_folio(struct folio *folio)
 {
-#ifdef CONFIG_CMA
-    int nid = folio_nid(folio);
-
     if (folio_test_hugetlb_cma(folio)) {
-        WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
+        hugetlb_cma_free_folio(folio);
         return;
     }
-#endif
 
     folio_put(folio);
 }
@@ -1492,31 +1483,9 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
     if (nid == NUMA_NO_NODE)
         nid = numa_mem_id();
 retry:
-    folio = NULL;
-#ifdef CONFIG_CMA
-    {
-        int node;
-
-        if (hugetlb_cma[nid])
-            folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);
-
-        if (!folio && !(gfp_mask & __GFP_THISNODE)) {
-            for_each_node_mask(node, *nodemask) {
-                if (node == nid || !hugetlb_cma[node])
-                    continue;
-
-                folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
-                if (folio)
-                    break;
-            }
-        }
-
-        if (folio)
-            folio_set_hugetlb_cma(folio);
-    }
-#endif
+    folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
     if (!folio) {
-        if (hugetlb_cma_only)
+        if (hugetlb_cma_exclusive_alloc())
             return NULL;
 
         folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
@@ -3191,47 +3160,14 @@ out_end_reservation:
     return ERR_PTR(-ENOSPC);
 }
 
-static bool __init hugetlb_early_cma(struct hstate *h)
-{
-    if (arch_has_huge_bootmem_alloc())
-        return false;
-
-    return (hstate_is_gigantic(h) && hugetlb_cma_only);
-}
-
 static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
 {
     struct huge_bootmem_page *m;
-    unsigned long flags;
-    struct cma *cma;
     int listnode = nid;
 
-#ifdef CONFIG_CMA
-    if (hugetlb_early_cma(h)) {
-        flags = HUGE_BOOTMEM_CMA;
-        cma = hugetlb_cma[nid];
-        m = cma_reserve_early(cma, huge_page_size(h));
-        if (!m) {
-            int node;
-
-            if (node_exact)
-                return NULL;
-            for_each_online_node(node) {
-                cma = hugetlb_cma[node];
-                if (!cma || node == nid)
-                    continue;
-                m = cma_reserve_early(cma, huge_page_size(h));
-                if (m) {
-                    listnode = node;
-                    break;
-                }
-            }
-        }
-    } else
-#endif
-    {
-        flags = 0;
-        cma = NULL;
+    if (hugetlb_early_cma(h))
+        m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact);
+    else {
         if (node_exact)
             m = memblock_alloc_exact_nid_raw(huge_page_size(h),
                 huge_page_size(h), 0,
@@ -3250,6 +3186,11 @@ static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
             if (m)
                 listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
         }
+
+        if (m) {
+            m->flags = 0;
+            m->cma = NULL;
+        }
     }
 
     if (m) {
@@ -3264,8 +3205,6 @@ static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
         INIT_LIST_HEAD(&m->list);
         list_add(&m->list, &huge_boot_pages[listnode]);
         m->hstate = h;
-        m->flags = flags;
-        m->cma = cma;
     }
 
     return m;
@@ -3715,7 +3654,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
      * Skip gigantic hugepages allocation if early CMA
      * reservations are not available.
      */
-    if (hstate_is_gigantic(h) && hugetlb_cma_size && !hugetlb_early_cma(h)) {
+    if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
+        !hugetlb_early_cma(h)) {
         pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
         return;
     }
@@ -3752,7 +3692,7 @@ static void __init hugetlb_init_hstates(void)
          */
         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
             continue;
-        if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
+        if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER)
             continue;
         for_each_hstate(h2) {
             if (h2 == h)
@@ -4654,14 +4594,6 @@ static void hugetlb_register_all_nodes(void) { }
 
 #endif
 
-#ifdef CONFIG_CMA
-static void __init hugetlb_cma_check(void);
-#else
-static inline __init void hugetlb_cma_check(void)
-{
-}
-#endif
-
 static void __init hugetlb_sysfs_init(void)
 {
     struct hstate *h;
@@ -4845,8 +4777,7 @@ static __init void hugetlb_parse_params(void)
         hcp->setup(hcp->val);
     }
 
-    if (!hugetlb_cma_size)
-        hugetlb_cma_only = false;
+    hugetlb_cma_validate_params();
 }
 
 /*
@@ -7916,169 +7847,3 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
     hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
             ALIGN_DOWN(vma->vm_end, PUD_SIZE));
 }
-
-#ifdef CONFIG_CMA
-static bool cma_reserve_called __initdata;
-
-static int __init cmdline_parse_hugetlb_cma(char *p)
-{
-    int nid, count = 0;
-    unsigned long tmp;
-    char *s = p;
-
-    while (*s) {
-        if (sscanf(s, "%lu%n", &tmp, &count) != 1)
-            break;
-
-        if (s[count] == ':') {
-            if (tmp >= MAX_NUMNODES)
-                break;
-            nid = array_index_nospec(tmp, MAX_NUMNODES);
-
-            s += count + 1;
-            tmp = memparse(s, &s);
-            hugetlb_cma_size_in_node[nid] = tmp;
-            hugetlb_cma_size += tmp;
-
-            /*
-             * Skip the separator if have one, otherwise
-             * break the parsing.
-             */
-            if (*s == ',')
-                s++;
-            else
-                break;
-        } else {
-            hugetlb_cma_size = memparse(p, &p);
-            break;
-        }
-    }
-
-    return 0;
-}
-
-early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
-
-static int __init cmdline_parse_hugetlb_cma_only(char *p)
-{
-    return kstrtobool(p, &hugetlb_cma_only);
-}
-
-early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only);
-
-void __init hugetlb_cma_reserve(int order)
-{
-    unsigned long size, reserved, per_node;
-    bool node_specific_cma_alloc = false;
-    int nid;
-
-    /*
-     * HugeTLB CMA reservation is required for gigantic
-     * huge pages which could not be allocated via the
-     * page allocator. Just warn if there is any change
-     * breaking this assumption.
-     */
-    VM_WARN_ON(order <= MAX_PAGE_ORDER);
-    cma_reserve_called = true;
-
-    if (!hugetlb_cma_size)
-        return;
-
-    for (nid = 0; nid < MAX_NUMNODES; nid++) {
-        if (hugetlb_cma_size_in_node[nid] == 0)
-            continue;
-
-        if (!node_online(nid)) {
-            pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
-            hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
-            hugetlb_cma_size_in_node[nid] = 0;
-            continue;
-        }
-
-        if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
-            pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
-                nid, (PAGE_SIZE << order) / SZ_1M);
-            hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
-            hugetlb_cma_size_in_node[nid] = 0;
-        } else {
-            node_specific_cma_alloc = true;
-        }
-    }
-
-    /* Validate the CMA size again in case some invalid nodes specified. */
-    if (!hugetlb_cma_size)
-        return;
-
-    if (hugetlb_cma_size < (PAGE_SIZE << order)) {
-        pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
-            (PAGE_SIZE << order) / SZ_1M);
-        hugetlb_cma_size = 0;
-        return;
-    }
-
-    if (!node_specific_cma_alloc) {
-        /*
-         * If 3 GB area is requested on a machine with 4 numa nodes,
-         * let's allocate 1 GB on first three nodes and ignore the last one.
-         */
-        per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
-        pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
-            hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
-    }
-
-    reserved = 0;
-    for_each_online_node(nid) {
-        int res;
-        char name[CMA_MAX_NAME];
-
-        if (node_specific_cma_alloc) {
-            if (hugetlb_cma_size_in_node[nid] == 0)
-                continue;
-
-            size = hugetlb_cma_size_in_node[nid];
-        } else {
-            size = min(per_node, hugetlb_cma_size - reserved);
-        }
-
-        size = round_up(size, PAGE_SIZE << order);
-
-        snprintf(name, sizeof(name), "hugetlb%d", nid);
-        /*
-         * Note that 'order per bit' is based on smallest size that
-         * may be returned to CMA allocator in the case of
-         * huge page demotion.
-         */
-        res = cma_declare_contiguous_multi(size, PAGE_SIZE << order,
-                    HUGETLB_PAGE_ORDER, name,
-                    &hugetlb_cma[nid], nid);
-        if (res) {
-            pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
-                res, nid);
-            continue;
-        }
-
-        reserved += size;
-        pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
-            size / SZ_1M, nid);
-
-        if (reserved >= hugetlb_cma_size)
-            break;
-    }
-
-    if (!reserved)
-        /*
-         * hugetlb_cma_size is used to determine if allocations from
-         * cma are possible. Set to zero if no cma regions are set up.
-         */
-        hugetlb_cma_size = 0;
-}
-
-static void __init hugetlb_cma_check(void)
-{
-    if (!hugetlb_cma_size || cma_reserve_called)
-        return;
-
-    pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
-}
-
-#endif /* CONFIG_CMA */
mm/hugetlb_cma.c (new file, 275 lines)
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/mm.h>
#include <linux/cma.h>
#include <linux/compiler.h>
#include <linux/mm_inline.h>

#include <asm/page.h>
#include <asm/setup.h>

#include <linux/hugetlb.h>
#include "internal.h"
#include "hugetlb_cma.h"


static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_only;
static unsigned long hugetlb_cma_size __initdata;

void hugetlb_cma_free_folio(struct folio *folio)
{
    int nid = folio_nid(folio);

    WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
}


struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
                      int nid, nodemask_t *nodemask)
{
    int node;
    int order = huge_page_order(h);
    struct folio *folio = NULL;

    if (hugetlb_cma[nid])
        folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);

    if (!folio && !(gfp_mask & __GFP_THISNODE)) {
        for_each_node_mask(node, *nodemask) {
            if (node == nid || !hugetlb_cma[node])
                continue;

            folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
            if (folio)
                break;
        }
    }

    if (folio)
        folio_set_hugetlb_cma(folio);

    return folio;
}

struct huge_bootmem_page * __init
hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, bool node_exact)
{
    struct cma *cma;
    struct huge_bootmem_page *m;
    int node = *nid;

    cma = hugetlb_cma[*nid];
    m = cma_reserve_early(cma, huge_page_size(h));
    if (!m) {
        if (node_exact)
            return NULL;

        for_each_online_node(node) {
            cma = hugetlb_cma[node];
            if (!cma || node == *nid)
                continue;
            m = cma_reserve_early(cma, huge_page_size(h));
            if (m) {
                *nid = node;
                break;
            }
        }
    }

    if (m) {
        m->flags = HUGE_BOOTMEM_CMA;
        m->cma = cma;
    }

    return m;
}


static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)
{
    int nid, count = 0;
    unsigned long tmp;
    char *s = p;

    while (*s) {
        if (sscanf(s, "%lu%n", &tmp, &count) != 1)
            break;

        if (s[count] == ':') {
            if (tmp >= MAX_NUMNODES)
                break;
            nid = array_index_nospec(tmp, MAX_NUMNODES);

            s += count + 1;
            tmp = memparse(s, &s);
            hugetlb_cma_size_in_node[nid] = tmp;
            hugetlb_cma_size += tmp;

            /*
             * Skip the separator if have one, otherwise
             * break the parsing.
             */
            if (*s == ',')
                s++;
            else
                break;
        } else {
            hugetlb_cma_size = memparse(p, &p);
            break;
        }
    }

    return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);

static int __init cmdline_parse_hugetlb_cma_only(char *p)
{
    return kstrtobool(p, &hugetlb_cma_only);
}

early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only);

void __init hugetlb_cma_reserve(int order)
{
    unsigned long size, reserved, per_node;
    bool node_specific_cma_alloc = false;
    int nid;

    /*
     * HugeTLB CMA reservation is required for gigantic
     * huge pages which could not be allocated via the
     * page allocator. Just warn if there is any change
     * breaking this assumption.
     */
    VM_WARN_ON(order <= MAX_PAGE_ORDER);
    cma_reserve_called = true;

    if (!hugetlb_cma_size)
        return;

    for (nid = 0; nid < MAX_NUMNODES; nid++) {
        if (hugetlb_cma_size_in_node[nid] == 0)
            continue;

        if (!node_online(nid)) {
            pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
            hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
            hugetlb_cma_size_in_node[nid] = 0;
            continue;
        }

        if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
            pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
                nid, (PAGE_SIZE << order) / SZ_1M);
            hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
            hugetlb_cma_size_in_node[nid] = 0;
        } else {
            node_specific_cma_alloc = true;
        }
    }

    /* Validate the CMA size again in case some invalid nodes specified. */
    if (!hugetlb_cma_size)
        return;

    if (hugetlb_cma_size < (PAGE_SIZE << order)) {
        pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
            (PAGE_SIZE << order) / SZ_1M);
        hugetlb_cma_size = 0;
        return;
    }

    if (!node_specific_cma_alloc) {
        /*
         * If 3 GB area is requested on a machine with 4 numa nodes,
         * let's allocate 1 GB on first three nodes and ignore the last one.
         */
        per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
        pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
            hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
    }

    reserved = 0;
    for_each_online_node(nid) {
        int res;
        char name[CMA_MAX_NAME];

        if (node_specific_cma_alloc) {
            if (hugetlb_cma_size_in_node[nid] == 0)
                continue;

            size = hugetlb_cma_size_in_node[nid];
        } else {
            size = min(per_node, hugetlb_cma_size - reserved);
        }

        size = round_up(size, PAGE_SIZE << order);

        snprintf(name, sizeof(name), "hugetlb%d", nid);
        /*
         * Note that 'order per bit' is based on smallest size that
         * may be returned to CMA allocator in the case of
         * huge page demotion.
         */
        res = cma_declare_contiguous_multi(size, PAGE_SIZE << order,
                    HUGETLB_PAGE_ORDER, name,
                    &hugetlb_cma[nid], nid);
        if (res) {
            pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
                res, nid);
            continue;
        }

        reserved += size;
        pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
            size / SZ_1M, nid);

        if (reserved >= hugetlb_cma_size)
            break;
    }

    if (!reserved)
        /*
         * hugetlb_cma_size is used to determine if allocations from
         * cma are possible. Set to zero if no cma regions are set up.
         */
        hugetlb_cma_size = 0;
}

void __init hugetlb_cma_check(void)
{
    if (!hugetlb_cma_size || cma_reserve_called)
        return;

    pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

bool hugetlb_cma_exclusive_alloc(void)
{
    return hugetlb_cma_only;
}

unsigned long __init hugetlb_cma_total_size(void)
{
    return hugetlb_cma_size;
}

void __init hugetlb_cma_validate_params(void)
{
    if (!hugetlb_cma_size)
        hugetlb_cma_only = false;
}

bool __init hugetlb_early_cma(struct hstate *h)
{
    if (arch_has_huge_bootmem_alloc())
        return false;

    return hstate_is_gigantic(h) && hugetlb_cma_only;
}
mm/hugetlb_cma.h (new file, 57 lines)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_CMA_H
#define _LINUX_HUGETLB_CMA_H

#ifdef CONFIG_CMA
void hugetlb_cma_free_folio(struct folio *folio);
struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
                      int nid, nodemask_t *nodemask);
struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
                              bool node_exact);
void hugetlb_cma_check(void);
bool hugetlb_cma_exclusive_alloc(void);
unsigned long hugetlb_cma_total_size(void);
void hugetlb_cma_validate_params(void);
bool hugetlb_early_cma(struct hstate *h);
#else
static inline void hugetlb_cma_free_folio(struct folio *folio)
{
}

static inline struct folio *hugetlb_cma_alloc_folio(struct hstate *h,
        gfp_t gfp_mask, int nid, nodemask_t *nodemask)
{
    return NULL;
}

static inline
struct huge_bootmem_page *hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid,
                              bool node_exact)
{
    return NULL;
}

static inline void hugetlb_cma_check(void)
{
}

static inline bool hugetlb_cma_exclusive_alloc(void)
{
    return false;
}

static inline unsigned long hugetlb_cma_total_size(void)
{
    return 0;
}

static inline void hugetlb_cma_validate_params(void)
{
}

static inline bool hugetlb_early_cma(struct hstate *h)
{
    return false;
}
#endif
#endif
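With CONFIG_CMA=n the static inline stubs above compile down to constants, which is what lets the callers in mm/hugetlb.c drop their #ifdef CONFIG_CMA blocks. A condensed, illustrative view of the alloc_gigantic_folio() call site rewritten by this commit (alloc_gigantic_folio_sketch is a stand-in name; the real function also handles retries and node iteration):

static struct folio *alloc_gigantic_folio_sketch(struct hstate *h,
        gfp_t gfp_mask, int nid, nodemask_t *nodemask)
{
    int order = huge_page_order(h);
    struct folio *folio;

    /* NULL when CONFIG_CMA=n (stub) or when the CMA areas have no memory. */
    folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask);
    if (!folio) {
        /* Always false when CONFIG_CMA=n, so we fall back to the normal path. */
        if (hugetlb_cma_exclusive_alloc())
            return NULL;
        folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask);
    }
    return folio;
}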