@@ -638,3 +638,57 @@ ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
 {
 	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0);
 }
+
+int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
+			unsigned long len, bool enable_wp, bool *mmap_changing)
+{
+	struct vm_area_struct *dst_vma;
+	pgprot_t newprot;
+	int err;
+
+	/*
+	 * Sanitize the command parameters:
+	 */
+	BUG_ON(start & ~PAGE_MASK);
+	BUG_ON(len & ~PAGE_MASK);
+
+	/* Does the address range wrap, or is the span zero-sized? */
+	BUG_ON(start + len <= start);
+
+	down_read(&dst_mm->mmap_sem);
+
+	/*
+	 * If memory mappings are changing because of non-cooperative
+	 * operation (e.g. mremap) running in parallel, bail out and
+	 * request the user to retry later
+	 */
+	err = -EAGAIN;
+	if (mmap_changing && READ_ONCE(*mmap_changing))
+		goto out_unlock;
+
+	err = -ENOENT;
+	dst_vma = find_dst_vma(dst_mm, start, len);
+	/*
+	 * Make sure the vma is not shared, that the dst range is
+	 * both valid and fully within a single existing vma.
+	 */
+	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
+		goto out_unlock;
+	if (!userfaultfd_wp(dst_vma))
+		goto out_unlock;
+	if (!vma_is_anonymous(dst_vma))
+		goto out_unlock;
+
+	if (enable_wp)
+		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
+	else
+		newprot = vm_get_page_prot(dst_vma->vm_flags);
+
+	change_protection(dst_vma, start, start + len, newprot,
+			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
+
+	err = 0;
+out_unlock:
+	up_read(&dst_mm->mmap_sem);
+	return err;
+}
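For context, a minimal caller-side sketch (not part of this commit): the wrapper name uffd_wp_toggle and the NULL mmap_changing argument are illustrative assumptions; the real invocation is wired up through the UFFDIO_WRITEPROTECT ioctl plumbing in fs/userfaultfd.c.

/*
 * Hypothetical helper, for illustration only: toggle userfaultfd
 * write-protection on a page-aligned range of an anonymous VMA that
 * was registered with UFFDIO_REGISTER_MODE_WP.
 */
static int uffd_wp_toggle(struct mm_struct *mm, unsigned long start,
			  unsigned long len, bool wp)
{
	/* Passing NULL skips the non-cooperative mmap_changing check. */
	return mwriteprotect_range(mm, start, len, wp, NULL);
}

The return value is 0 on success or a negative errno (e.g. -ENOENT when the range does not sit fully inside a single anonymous, non-shared, WP-registered VMA).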