Skip to content

Commit 2916ecc

Browse files
Jérôme Glisse authored and torvalds committed
mm/migrate: new migrate mode MIGRATE_SYNC_NO_COPY
Introduce a new migration mode that allow to offload the copy to a device DMA engine. This changes the workflow of migration and not all address_space migratepage callback can support this. This is intended to be use by migrate_vma() which itself is use for thing like HMM (see include/linux/hmm.h). No additional per-filesystem migratepage testing is needed. I disables MIGRATE_SYNC_NO_COPY in all problematic migratepage() callback and i added comment in those to explain why (part of this patch). The commit message is unclear it should say that any callback that wish to support this new mode need to be aware of the difference in the migration flow from other mode. Some of these callbacks do extra locking while copying (aio, zsmalloc, balloon, ...) and for DMA to be effective you want to copy multiple pages in one DMA operations. But in the problematic case you can not easily hold the extra lock accross multiple call to this callback. Usual flow is: For each page { 1 - lock page 2 - call migratepage() callback 3 - (extra locking in some migratepage() callback) 4 - migrate page state (freeze refcount, update page cache, buffer head, ...) 5 - copy page 6 - (unlock any extra lock of migratepage() callback) 7 - return from migratepage() callback 8 - unlock page } The new mode MIGRATE_SYNC_NO_COPY: 1 - lock multiple pages For each page { 2 - call migratepage() callback 3 - abort in all problematic migratepage() callback 4 - migrate page state (freeze refcount, update page cache, buffer head, ...) } // finished all calls to migratepage() callback 5 - DMA copy multiple pages 6 - unlock all the pages To support MIGRATE_SYNC_NO_COPY in the problematic case we would need a new callback migratepages() (for instance) that deals with multiple pages in one transaction. Because the problematic cases are not important for current usage I did not wanted to complexify this patchset even more for no good reason. 
Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Jérôme Glisse <[email protected]> Cc: Aneesh Kumar <[email protected]> Cc: Balbir Singh <[email protected]> Cc: Benjamin Herrenschmidt <[email protected]> Cc: Dan Williams <[email protected]> Cc: David Nellans <[email protected]> Cc: Evgeny Baskakov <[email protected]> Cc: Johannes Weiner <[email protected]> Cc: John Hubbard <[email protected]> Cc: Kirill A. Shutemov <[email protected]> Cc: Mark Hairgrove <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Paul E. McKenney <[email protected]> Cc: Ross Zwisler <[email protected]> Cc: Sherry Cheung <[email protected]> Cc: Subhash Gutti <[email protected]> Cc: Vladimir Davydov <[email protected]> Cc: Bob Liu <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 858b54d commit 2916ecc

File tree

9 files changed

+86
-15
lines changed

9 files changed

+86
-15
lines changed

fs/aio.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -373,6 +373,14 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
373373
pgoff_t idx;
374374
int rc;
375375

376+
/*
377+
* We cannot support the _NO_COPY case here, because copy needs to
378+
* happen under the ctx->completion_lock. That does not work with the
379+
* migration workflow of MIGRATE_SYNC_NO_COPY.
380+
*/
381+
if (mode == MIGRATE_SYNC_NO_COPY)
382+
return -EINVAL;
383+
376384
rc = 0;
377385

378386
/* mapping->private_lock here protects against the kioctx teardown. */

fs/f2fs/data.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2253,7 +2253,10 @@ int f2fs_migrate_page(struct address_space *mapping,
22532253
SetPagePrivate(newpage);
22542254
set_page_private(newpage, page_private(page));
22552255

2256-
migrate_page_copy(newpage, page);
2256+
if (mode != MIGRATE_SYNC_NO_COPY)
2257+
migrate_page_copy(newpage, page);
2258+
else
2259+
migrate_page_states(newpage, page);
22572260

22582261
return MIGRATEPAGE_SUCCESS;
22592262
}

fs/hugetlbfs/inode.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -830,7 +830,10 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
830830
rc = migrate_huge_page_move_mapping(mapping, newpage, page);
831831
if (rc != MIGRATEPAGE_SUCCESS)
832832
return rc;
833-
migrate_page_copy(newpage, page);
833+
if (mode != MIGRATE_SYNC_NO_COPY)
834+
migrate_page_copy(newpage, page);
835+
else
836+
migrate_page_states(newpage, page);
834837

835838
return MIGRATEPAGE_SUCCESS;
836839
}

fs/ubifs/file.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1490,7 +1490,10 @@ static int ubifs_migrate_page(struct address_space *mapping,
14901490
SetPagePrivate(newpage);
14911491
}
14921492

1493-
migrate_page_copy(newpage, page);
1493+
if (mode != MIGRATE_SYNC_NO_COPY)
1494+
migrate_page_copy(newpage, page);
1495+
else
1496+
migrate_page_states(newpage, page);
14941497
return MIGRATEPAGE_SUCCESS;
14951498
}
14961499
#endif

include/linux/migrate.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,7 @@ extern void putback_movable_page(struct page *page);
7272

7373
extern int migrate_prep(void);
7474
extern int migrate_prep_local(void);
75+
extern void migrate_page_states(struct page *newpage, struct page *page);
7576
extern void migrate_page_copy(struct page *newpage, struct page *page);
7677
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
7778
struct page *newpage, struct page *page);
@@ -92,6 +93,10 @@ static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
9293
static inline int migrate_prep(void) { return -ENOSYS; }
9394
static inline int migrate_prep_local(void) { return -ENOSYS; }
9495

96+
static inline void migrate_page_states(struct page *newpage, struct page *page)
97+
{
98+
}
99+
95100
static inline void migrate_page_copy(struct page *newpage,
96101
struct page *page) {}
97102

include/linux/migrate_mode.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,16 @@
66
* on most operations but not ->writepage as the potential stall time
77
* is too significant
88
* MIGRATE_SYNC will block when migrating pages
9+
* MIGRATE_SYNC_NO_COPY will block when migrating pages but will not copy pages
10+
* with the CPU. Instead, page copy happens outside the migratepage()
11+
* callback and is likely using a DMA engine. See migrate_vma() and HMM
12+
* (mm/hmm.c) for users of this mode.
913
*/
1014
enum migrate_mode {
1115
MIGRATE_ASYNC,
1216
MIGRATE_SYNC_LIGHT,
1317
MIGRATE_SYNC,
18+
MIGRATE_SYNC_NO_COPY,
1419
};
1520

1621
#endif /* MIGRATE_MODE_H_INCLUDED */

mm/balloon_compaction.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -139,6 +139,14 @@ int balloon_page_migrate(struct address_space *mapping,
139139
{
140140
struct balloon_dev_info *balloon = balloon_page_device(page);
141141

142+
/*
143+
* We can not easily support the no copy case here so ignore it as it
144+
* is unlikely to be use with ballon pages. See include/linux/hmm.h for
145+
* user of the MIGRATE_SYNC_NO_COPY mode.
146+
*/
147+
if (mode == MIGRATE_SYNC_NO_COPY)
148+
return -EINVAL;
149+
142150
VM_BUG_ON_PAGE(!PageLocked(page), page);
143151
VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
144152

mm/migrate.c

Lines changed: 40 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -634,15 +634,10 @@ static void copy_huge_page(struct page *dst, struct page *src)
634634
/*
635635
* Copy the page to its new location
636636
*/
637-
void migrate_page_copy(struct page *newpage, struct page *page)
637+
void migrate_page_states(struct page *newpage, struct page *page)
638638
{
639639
int cpupid;
640640

641-
if (PageHuge(page) || PageTransHuge(page))
642-
copy_huge_page(newpage, page);
643-
else
644-
copy_highpage(newpage, page);
645-
646641
if (PageError(page))
647642
SetPageError(newpage);
648643
if (PageReferenced(page))
@@ -696,6 +691,17 @@ void migrate_page_copy(struct page *newpage, struct page *page)
696691

697692
mem_cgroup_migrate(page, newpage);
698693
}
694+
EXPORT_SYMBOL(migrate_page_states);
695+
696+
void migrate_page_copy(struct page *newpage, struct page *page)
697+
{
698+
if (PageHuge(page) || PageTransHuge(page))
699+
copy_huge_page(newpage, page);
700+
else
701+
copy_highpage(newpage, page);
702+
703+
migrate_page_states(newpage, page);
704+
}
699705
EXPORT_SYMBOL(migrate_page_copy);
700706

701707
/************************************************************
@@ -721,7 +727,10 @@ int migrate_page(struct address_space *mapping,
721727
if (rc != MIGRATEPAGE_SUCCESS)
722728
return rc;
723729

724-
migrate_page_copy(newpage, page);
730+
if (mode != MIGRATE_SYNC_NO_COPY)
731+
migrate_page_copy(newpage, page);
732+
else
733+
migrate_page_states(newpage, page);
725734
return MIGRATEPAGE_SUCCESS;
726735
}
727736
EXPORT_SYMBOL(migrate_page);
@@ -771,12 +780,15 @@ int buffer_migrate_page(struct address_space *mapping,
771780

772781
SetPagePrivate(newpage);
773782

774-
migrate_page_copy(newpage, page);
783+
if (mode != MIGRATE_SYNC_NO_COPY)
784+
migrate_page_copy(newpage, page);
785+
else
786+
migrate_page_states(newpage, page);
775787

776788
bh = head;
777789
do {
778790
unlock_buffer(bh);
779-
put_bh(bh);
791+
put_bh(bh);
780792
bh = bh->b_this_page;
781793

782794
} while (bh != head);
@@ -835,8 +847,13 @@ static int fallback_migrate_page(struct address_space *mapping,
835847
{
836848
if (PageDirty(page)) {
837849
/* Only writeback pages in full synchronous migration */
838-
if (mode != MIGRATE_SYNC)
850+
switch (mode) {
851+
case MIGRATE_SYNC:
852+
case MIGRATE_SYNC_NO_COPY:
853+
break;
854+
default:
839855
return -EBUSY;
856+
}
840857
return writeout(mapping, page);
841858
}
842859

@@ -973,7 +990,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
973990
* the retry loop is too short and in the sync-light case,
974991
* the overhead of stalling is too much
975992
*/
976-
if (mode != MIGRATE_SYNC) {
993+
switch (mode) {
994+
case MIGRATE_SYNC:
995+
case MIGRATE_SYNC_NO_COPY:
996+
break;
997+
default:
977998
rc = -EBUSY;
978999
goto out_unlock;
9791000
}
@@ -1243,8 +1264,15 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
12431264
return -ENOMEM;
12441265

12451266
if (!trylock_page(hpage)) {
1246-
if (!force || mode != MIGRATE_SYNC)
1267+
if (!force)
12471268
goto out;
1269+
switch (mode) {
1270+
case MIGRATE_SYNC:
1271+
case MIGRATE_SYNC_NO_COPY:
1272+
break;
1273+
default:
1274+
goto out;
1275+
}
12481276
lock_page(hpage);
12491277
}
12501278

mm/zsmalloc.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1969,6 +1969,14 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
19691969
unsigned int obj_idx;
19701970
int ret = -EAGAIN;
19711971

1972+
/*
1973+
* We cannot support the _NO_COPY case here, because copy needs to
1974+
* happen under the zs lock, which does not work with
1975+
* MIGRATE_SYNC_NO_COPY workflow.
1976+
*/
1977+
if (mode == MIGRATE_SYNC_NO_COPY)
1978+
return -EINVAL;
1979+
19721980
VM_BUG_ON_PAGE(!PageMovable(page), page);
19731981
VM_BUG_ON_PAGE(!PageIsolated(page), page);
19741982

0 commit comments

Comments
 (0)