
Commit e632a93

Naoya Horiguchi authored and torvalds committed
mm: migrate: add hugepage migration code to move_pages()
Extend move_pages() to handle vmas with VM_HUGETLB set. We will be able to migrate hugepages with move_pages(2) after applying the enablement patch which comes later in this series.

We avoid taking a refcount on tail pages of a hugepage, because unlike thp, a hugepage is not split and we need not care about races with splitting.

Migration of larger (1GB on x86_64) hugepages is not enabled.

Signed-off-by: Naoya Horiguchi <[email protected]>
Acked-by: Andi Kleen <[email protected]>
Reviewed-by: Wanpeng Li <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: "Aneesh Kumar K.V" <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent e2d8cf4 commit e632a93
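
As a rough illustration of the interface this series enables, below is a minimal userspace sketch of moving one hugetlb page with move_pages(2). It is not part of this commit: it assumes libnuma's <numaif.h> wrapper, a 2MB hugepage size, free hugepages reserved on the system, and destination node 1; those values are illustrative only.

/*
 * Hedged sketch (not from this commit): move one 2MB hugetlb page of the
 * calling process to NUMA node 1 via move_pages(2). Assumes libnuma
 * (<numaif.h>), reserved hugepages, and a kernel with hugepage migration
 * enabled; the page size and node number are illustrative.
 */
#define _GNU_SOURCE
#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE */
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t huge_sz = 2UL << 20;	/* 2MB hugepage on x86_64 */
	void *addr = mmap(NULL, huge_sz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return EXIT_FAILURE;
	}
	*(volatile char *)addr = 1;	/* touch the page so it is allocated */

	void *pages[1]  = { addr };
	int   nodes[1]  = { 1 };	/* destination NUMA node */
	int   status[1] = { -1 };

	/* pid 0 means "the calling process". */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
		perror("move_pages");
	else
		printf("page is now on node %d\n", status[0]);

	munmap(addr, huge_sz);
	return 0;
}

Build with something like: gcc demo.c -lnuma. After a successful call, status[0] holds the node the page ended up on, or a negative errno value if that page could not be moved.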

2 files changed: 26 additions and 4 deletions


mm/memory.c

Lines changed: 15 additions & 2 deletions
@@ -1481,7 +1481,8 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 	if (pud_none(*pud))
 		goto no_page_table;
 	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
-		BUG_ON(flags & FOLL_GET);
+		if (flags & FOLL_GET)
+			goto out;
 		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
 		goto out;
 	}
@@ -1492,8 +1493,20 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 	if (pmd_none(*pmd))
 		goto no_page_table;
 	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
-		BUG_ON(flags & FOLL_GET);
 		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
+		if (flags & FOLL_GET) {
+			/*
+			 * Refcount on tail pages are not well-defined and
+			 * shouldn't be taken. The caller should handle a NULL
+			 * return when trying to follow tail pages.
+			 */
+			if (PageHead(page))
+				get_page(page);
+			else {
+				page = NULL;
+				goto out;
+			}
+		}
 		goto out;
 	}
 	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
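
The comment added above spells out the new contract: with FOLL_GET, follow_page_mask() pins only head pages and returns NULL for hugetlb tail pages. A hedged caller-side sketch of that contract might look like the following; follow_page(), get_page() and put_page() are existing kernel APIs, but the surrounding function is hypothetical and only illustrates the expected NULL handling.

/*
 * Illustrative kernel-style sketch, not part of this commit: how a
 * FOLL_GET user is expected to cope with the NULL return introduced
 * above for hugetlb tail pages.
 */
static int pin_one_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = follow_page(vma, addr, FOLL_GET);
	if (!page)
		return -ENOENT;	/* e.g. a hugetlb tail page: nothing pinned */

	/* ... operate on the pinned page ... */

	put_page(page);		/* drop the reference taken via FOLL_GET */
	return 0;
}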

mm/migrate.c

Lines changed: 11 additions & 2 deletions
@@ -1092,7 +1092,11 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 
 	*result = &pm->status;
 
-	return alloc_pages_exact_node(pm->node,
+	if (PageHuge(p))
+		return alloc_huge_page_node(page_hstate(compound_head(p)),
+					pm->node);
+	else
+		return alloc_pages_exact_node(pm->node,
 				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }
 
@@ -1152,6 +1156,11 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 				!migrate_all)
 			goto put_and_set;
 
+		if (PageHuge(page)) {
+			isolate_huge_page(page, &pagelist);
+			goto put_and_set;
+		}
+
 		err = isolate_lru_page(page);
 		if (!err) {
 			list_add_tail(&page->lru, &pagelist);
@@ -1174,7 +1183,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 		err = migrate_pages(&pagelist, new_page_node,
 				(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
-			putback_lru_pages(&pagelist);
+			putback_movable_pages(&pagelist);
 	}
 
 	up_read(&mm->mmap_sem);
