Skip to content

Commit 87c01d5

Browse files
apopple-nvidia authored and torvalds committed
mm/hmm.c: allow VM_MIXEDMAP to work with hmm_range_fault
hmm_range_fault() can be used instead of get_user_pages() for devices which allow faulting however unlike get_user_pages() it will return an error when used on a VM_MIXEDMAP range. To make hmm_range_fault() more closely match get_user_pages() remove this restriction. This requires dealing with the !ARCH_HAS_PTE_SPECIAL case in hmm_vma_handle_pte(). Rather than replicating the logic of vm_normal_page() call it directly and do a check for the zero pfn similar to what get_user_pages() currently does. Also add a test to hmm selftest to verify functionality. Link: https://lkml.kernel.org/r/[email protected] Fixes: da4c3c7 ("mm/hmm/mirror: helper to snapshot CPU page table") Signed-off-by: Alistair Popple <[email protected]> Reviewed-by: Jason Gunthorpe <[email protected]> Cc: Jerome Glisse <[email protected]> Cc: John Hubbard <[email protected]> Cc: Zi Yan <[email protected]> Cc: Ralph Campbell <[email protected]> Cc: Felix Kuehling <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent cab0a7c commit 87c01d5

File tree

3 files changed

+69
-2
lines changed

3 files changed

+69
-2
lines changed

lib/test_hmm.c

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1086,9 +1086,33 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
10861086
return 0;
10871087
}
10881088

1089+
static int dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma)
1090+
{
1091+
unsigned long addr;
1092+
1093+
for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
1094+
struct page *page;
1095+
int ret;
1096+
1097+
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1098+
if (!page)
1099+
return -ENOMEM;
1100+
1101+
ret = vm_insert_page(vma, addr, page);
1102+
if (ret) {
1103+
__free_page(page);
1104+
return ret;
1105+
}
1106+
put_page(page);
1107+
}
1108+
1109+
return 0;
1110+
}
1111+
10891112
static const struct file_operations dmirror_fops = {
10901113
.open = dmirror_fops_open,
10911114
.release = dmirror_fops_release,
1115+
.mmap = dmirror_fops_mmap,
10921116
.unlocked_ioctl = dmirror_fops_unlocked_ioctl,
10931117
.llseek = default_llseek,
10941118
.owner = THIS_MODULE,

mm/hmm.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -300,7 +300,8 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
300300
* Since each architecture defines a struct page for the zero page, just
301301
* fall through and treat it like a normal page.
302302
*/
303-
if (pte_special(pte) && !pte_devmap(pte) &&
303+
if (!vm_normal_page(walk->vma, addr, pte) &&
304+
!pte_devmap(pte) &&
304305
!is_zero_pfn(pte_pfn(pte))) {
305306
if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
306307
pte_unmap(ptep);
@@ -518,7 +519,7 @@ static int hmm_vma_walk_test(unsigned long start, unsigned long end,
518519
struct hmm_range *range = hmm_vma_walk->range;
519520
struct vm_area_struct *vma = walk->vma;
520521

521-
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
522+
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
522523
vma->vm_flags & VM_READ)
523524
return 0;
524525

tools/testing/selftests/vm/hmm-tests.c

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1248,6 +1248,48 @@ TEST_F(hmm, anon_teardown)
12481248
}
12491249
}
12501250

1251+
/*
1252+
* Test memory snapshot without faulting in pages accessed by the device.
1253+
*/
1254+
TEST_F(hmm, mixedmap)
1255+
{
1256+
struct hmm_buffer *buffer;
1257+
unsigned long npages;
1258+
unsigned long size;
1259+
unsigned char *m;
1260+
int ret;
1261+
1262+
npages = 1;
1263+
size = npages << self->page_shift;
1264+
1265+
buffer = malloc(sizeof(*buffer));
1266+
ASSERT_NE(buffer, NULL);
1267+
1268+
buffer->fd = -1;
1269+
buffer->size = size;
1270+
buffer->mirror = malloc(npages);
1271+
ASSERT_NE(buffer->mirror, NULL);
1272+
1273+
1274+
/* Reserve a range of addresses. */
1275+
buffer->ptr = mmap(NULL, size,
1276+
PROT_READ | PROT_WRITE,
1277+
MAP_PRIVATE,
1278+
self->fd, 0);
1279+
ASSERT_NE(buffer->ptr, MAP_FAILED);
1280+
1281+
/* Simulate a device snapshotting CPU pagetables. */
1282+
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1283+
ASSERT_EQ(ret, 0);
1284+
ASSERT_EQ(buffer->cpages, npages);
1285+
1286+
/* Check what the device saw. */
1287+
m = buffer->mirror;
1288+
ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ);
1289+
1290+
hmm_buffer_free(buffer);
1291+
}
1292+
12511293
/*
12521294
* Test memory snapshot without faulting in pages accessed by the device.
12531295
*/

0 commit comments

Comments
 (0)