Commit 394e31d

Xishi Qiu authored and torvalds committed
mem-hotplug: alloc new page from a nearest neighbor node when mem-offline
If we offline a node, allocate the new page from the nearest neighbor node instead of the current node or other remote nodes, because re-migrating is a waste of time and the distance to remote nodes is often very large.

Also use GFP_HIGHUSER_MOVABLE to allocate the new page if the zone is a movable zone or a highmem zone.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Xishi Qiu <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 3fa6c50 commit 394e31d
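
For readers skimming the patch below, the destination-node choice boils down to two steps: drop the node being offlined from the online nodemask, then take the next online node (with wrap-around) as the fallback target. The minimal userspace sketch that follows mimics that logic; it is an illustration only, not kernel code. MAX_NODES, the plain unsigned bitmask, and next_node_in_mask() are stand-ins for the kernel's MAX_NUMNODES, nodemask_t, node_clear() and next_node_in().

#include <stdio.h>

#define MAX_NODES 8	/* illustrative only; the kernel uses MAX_NUMNODES */

/*
 * Hypothetical stand-in for next_node_in(): wrap-around search for the
 * next online node after 'nid'.
 */
static int next_node_in_mask(int nid, unsigned int online_mask)
{
	int i;

	for (i = 1; i < MAX_NODES; i++) {
		int candidate = (nid + i) % MAX_NODES;

		if (online_mask & (1u << candidate))
			return candidate;
	}
	return nid;	/* no other online node: keep the current one */
}

int main(void)
{
	unsigned int online = 0x0f;	/* nodes 0-3 online */
	int offlining = 2;		/* node being taken down */

	/* analogue of node_clear(): drop the offlining node from the mask */
	unsigned int nmask = online & ~(1u << offlining);

	printf("migrate pages from node %d to node %d\n",
	       offlining, next_node_in_mask(offlining, nmask));
	return 0;
}

For the non-hugepage path the actual patch is less rigid than this sketch: it passes the trimmed nodemask together with the offlining node's zonelist to __alloc_pages_nodemask(), so the allocator walks the zonelist in distance order and naturally lands on the nearest remaining node, only falling back to an unrestricted __alloc_pages() if that allocation fails.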

File tree

1 file changed: +33 −5 lines changed

mm/memory_hotplug.c

Lines changed: 33 additions & 5 deletions
@@ -1548,6 +1548,37 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 	return 0;
 }
 
+static struct page *new_node_page(struct page *page, unsigned long private,
+		int **result)
+{
+	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+	int nid = page_to_nid(page);
+	nodemask_t nmask = node_online_map;
+	struct page *new_page;
+
+	/*
+	 * TODO: allocate a destination hugepage from a nearest neighbor node,
+	 * accordance with memory policy of the user process if possible. For
+	 * now as a simple work-around, we use the next node for destination.
+	 */
+	if (PageHuge(page))
+		return alloc_huge_page_node(page_hstate(compound_head(page)),
+					next_node_in(nid, nmask));
+
+	node_clear(nid, nmask);
+	if (PageHighMem(page)
+	    || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
+		gfp_mask |= __GFP_HIGHMEM;
+
+	new_page = __alloc_pages_nodemask(gfp_mask, 0,
+				node_zonelist(nid, gfp_mask), &nmask);
+	if (!new_page)
+		new_page = __alloc_pages(gfp_mask, 0,
+				node_zonelist(nid, gfp_mask));
+
+	return new_page;
+}
+
 #define NR_OFFLINE_AT_ONCE_PAGES	(256)
 static int
 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
@@ -1611,11 +1642,8 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			goto out;
 		}
 
-		/*
-		 * alloc_migrate_target should be improooooved!!
-		 * migrate_pages returns # of failed pages.
-		 */
-		ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
+		/* Allocate a new page from the nearest neighbor node */
+		ret = migrate_pages(&source, new_node_page, NULL, 0,
 					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 		if (ret)
 			putback_movable_pages(&source);
