Commit 9fee28b

Matthew Wilcox (Oracle) authored; committed by akpm00
powerpc: implement the new page table range API
Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio(). Change the
PG_arch_1 (aka PG_dcache_dirty) flag from being per-page to per-folio.

[[email protected]: re-export flush_dcache_icache_folio()]
Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Acked-by: Mike Rapoport (IBM) <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Nicholas Piggin <[email protected]>
Cc: Christophe Leroy <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent e70bbca commit 9fee28b
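
The point of the new API is that generic code hands the architecture a run of
nr consecutive pages backed by a single folio, instead of issuing nr separate
set_pte_at() calls. As a rough orientation only (the real out-of-line body
lives in arch/powerpc/mm/pgtable.c, which is not among the hunks below), a
batched set_ptes() takes this shape, reusing the __set_pte_at() helper and the
PTE_RPN_SHIFT definition that appear elsewhere in this diff:

	/* Illustrative sketch, not the literal committed code. */
	void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		      pte_t pte, unsigned int nr)
	{
		for (;;) {
			__set_pte_at(mm, addr, ptep, pte, 0);
			if (--nr == 0)
				break;
			ptep++;
			addr += PAGE_SIZE;
			/* advance the encoded PFN to the next page of the folio */
			pte = __pte(pte_val(pte) + (1UL << PTE_RPN_SHIFT));
		}
	}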

11 files changed: 89 additions & 93 deletions

arch/powerpc/include/asm/book3s/32/pgtable.h

Lines changed: 0 additions & 5 deletions
@@ -462,11 +462,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 		      pgprot_val(pgprot));
 }
 
-static inline unsigned long pte_pfn(pte_t pte)
-{
-	return pte_val(pte) >> PTE_RPN_SHIFT;
-}
-
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte)
 {

arch/powerpc/include/asm/book3s/64/pgtable.h

Lines changed: 1 addition & 5 deletions
@@ -104,6 +104,7 @@
  * and every thing below PAGE_SHIFT;
  */
 #define PTE_RPN_MASK	(((1UL << _PAGE_PA_MAX) - 1) & (PAGE_MASK))
+#define PTE_RPN_SHIFT	PAGE_SHIFT
 /*
  * set of bits not changed in pmd_modify. Even though we have hash specific bits
  * in here, on radix we expect them to be zero.
@@ -569,11 +570,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 	return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE);
 }
 
-static inline unsigned long pte_pfn(pte_t pte)
-{
-	return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT;
-}
-
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte)
 {

arch/powerpc/include/asm/book3s/pgtable.h

Lines changed: 3 additions & 8 deletions
@@ -9,13 +9,6 @@
 #endif
 
 #ifndef __ASSEMBLY__
-/* Insert a PTE, top-level function is out of line. It uses an inline
- * low level function in the respective pgtable-* files
- */
-extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
-		       pte_t pte);
-
-
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 				 pte_t *ptep, pte_t entry, int dirty);
@@ -36,7 +29,9 @@ void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t
  * corresponding HPTE into the hash table ahead of time, instead of
  * waiting for the inevitable extra hash-table miss exception.
  */
-static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long address,
+		pte_t *ptep, unsigned int nr)
 {
 	if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		return;

arch/powerpc/include/asm/cacheflush.h

Lines changed: 10 additions & 4 deletions
@@ -35,13 +35,19 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
  * It just marks the page as not i-cache clean. We do the i-cache
  * flush later when the page is given to a user process, if necessary.
  */
-static inline void flush_dcache_page(struct page *page)
+static inline void flush_dcache_folio(struct folio *folio)
 {
 	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 		return;
 	/* avoid an atomic op if possible */
-	if (test_bit(PG_dcache_clean, &page->flags))
-		clear_bit(PG_dcache_clean, &page->flags);
+	if (test_bit(PG_dcache_clean, &folio->flags))
+		clear_bit(PG_dcache_clean, &folio->flags);
+}
+#define flush_dcache_folio flush_dcache_folio
+
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
 }
 
 void flush_icache_range(unsigned long start, unsigned long stop);
@@ -51,7 +57,7 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 		unsigned long addr, int len);
 #define flush_icache_user_page flush_icache_user_page
 
-void flush_dcache_icache_page(struct page *page);
+void flush_dcache_icache_folio(struct folio *folio);
 
 /**
  * flush_dcache_range(): Write any modified data cache blocks out to memory and

arch/powerpc/include/asm/kvm_ppc.h

Lines changed: 5 additions & 5 deletions
@@ -894,7 +894,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids);
 
 static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
 {
-	struct page *page;
+	struct folio *folio;
 	/*
 	 * We can only access pages that the kernel maps
 	 * as memory. Bail out for unmapped ones.
@@ -903,10 +903,10 @@ static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
 		return;
 
 	/* Clear i-cache for new pages */
-	page = pfn_to_page(pfn);
-	if (!test_bit(PG_dcache_clean, &page->flags)) {
-		flush_dcache_icache_page(page);
-		set_bit(PG_dcache_clean, &page->flags);
+	folio = page_folio(pfn_to_page(pfn));
+	if (!test_bit(PG_dcache_clean, &folio->flags)) {
+		flush_dcache_icache_folio(folio);
+		set_bit(PG_dcache_clean, &folio->flags);
 	}
 }

arch/powerpc/include/asm/nohash/pgtable.h

Lines changed: 5 additions & 11 deletions
@@ -101,8 +101,6 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
 	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
 		     pgprot_val(pgprot)); }
-static inline unsigned long pte_pfn(pte_t pte) {
-	return pte_val(pte) >> PTE_RPN_SHIFT; }
 
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_exprotect(pte_t pte)
@@ -166,12 +164,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
 }
 
-/* Insert a PTE, top-level function is out of line. It uses an inline
- * low level function in the respective pgtable-* files
- */
-extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
-		       pte_t pte);
-
 /* This low level function performs the actual PTE insertion
  * Setting the PTE depends on the MMU type and other factors. It's
  * an horrible mess that I'm not going to try to clean up now but
@@ -282,10 +274,12 @@ static inline int pud_huge(pud_t pud)
  * for the page which has just been mapped in.
  */
 #if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE)
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr);
 #else
-static inline
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long address,
+		pte_t *ptep, unsigned int nr) {}
 #endif
 
 #endif /* __ASSEMBLY__ */

arch/powerpc/include/asm/pgtable.h

Lines changed: 12 additions & 0 deletions
@@ -41,13 +41,25 @@ struct mm_struct;
 
 #ifndef __ASSEMBLY__
 
+void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+		pte_t pte, unsigned int nr);
+#define set_ptes set_ptes
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
 #ifndef MAX_PTRS_PER_PGD
 #define MAX_PTRS_PER_PGD PTRS_PER_PGD
 #endif
 
 /* Keep these as a macros to avoid include dependency mess */
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
+}
+
 /*
  * Select all bits except the pfn
  */
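
The pte_pfn() helper added above becomes the single shared definition; the
PTE_RPN_SHIFT added to book3s/64/pgtable.h earlier in this commit exists so
that this one expression works for every sub-architecture. A caller in generic
mm code is then expected to drive the whole range API roughly like this when
mapping a multi-page folio (illustrative only; map_folio_range() is a made-up
name and the real callers live in generic mm code, outside this commit):

	static void map_folio_range(struct vm_fault *vmf, struct vm_area_struct *vma,
				    struct folio *folio, unsigned long addr,
				    pte_t *ptep, pte_t entry, unsigned int nr)
	{
		flush_dcache_folio(folio);			/* whole folio marked not i-cache clean */
		set_ptes(vma->vm_mm, addr, ptep, entry, nr);	/* one call instead of nr set_pte_at() */
		update_mmu_cache_range(vmf, vma, addr, ptep, nr);
	}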

arch/powerpc/mm/book3s64/hash_utils.c

Lines changed: 6 additions & 5 deletions
@@ -1307,18 +1307,19 @@ void hash__early_init_mmu_secondary(void)
  */
 unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
 {
-	struct page *page;
+	struct folio *folio;
 
 	if (!pfn_valid(pte_pfn(pte)))
 		return pp;
 
-	page = pte_page(pte);
+	folio = page_folio(pte_page(pte));
 
 	/* page is dirty */
-	if (!test_bit(PG_dcache_clean, &page->flags) && !PageReserved(page)) {
+	if (!test_bit(PG_dcache_clean, &folio->flags) &&
+	    !folio_test_reserved(folio)) {
 		if (trap == INTERRUPT_INST_STORAGE) {
-			flush_dcache_icache_page(page);
-			set_bit(PG_dcache_clean, &page->flags);
+			flush_dcache_icache_folio(folio);
+			set_bit(PG_dcache_clean, &folio->flags);
 		} else
 			pp |= HPTE_R_N;
 	}

arch/powerpc/mm/cacheflush.c

Lines changed: 14 additions & 27 deletions
@@ -148,44 +148,31 @@ static void __flush_dcache_icache(void *p)
 	invalidate_icache_range(addr, addr + PAGE_SIZE);
 }
 
-static void flush_dcache_icache_hugepage(struct page *page)
+void flush_dcache_icache_folio(struct folio *folio)
 {
-	int i;
-	int nr = compound_nr(page);
+	unsigned int i, nr = folio_nr_pages(folio);
 
-	if (!PageHighMem(page)) {
+	if (flush_coherent_icache())
+		return;
+
+	if (!folio_test_highmem(folio)) {
+		void *addr = folio_address(folio);
 		for (i = 0; i < nr; i++)
-			__flush_dcache_icache(lowmem_page_address(page + i));
-	} else {
+			__flush_dcache_icache(addr + i * PAGE_SIZE);
+	} else if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
 		for (i = 0; i < nr; i++) {
-			void *start = kmap_local_page(page + i);
+			void *start = kmap_local_folio(folio, i * PAGE_SIZE);
 
 			__flush_dcache_icache(start);
 			kunmap_local(start);
 		}
-	}
-}
-
-void flush_dcache_icache_page(struct page *page)
-{
-	if (flush_coherent_icache())
-		return;
-
-	if (PageCompound(page))
-		return flush_dcache_icache_hugepage(page);
-
-	if (!PageHighMem(page)) {
-		__flush_dcache_icache(lowmem_page_address(page));
-	} else if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
-		void *start = kmap_local_page(page);
-
-		__flush_dcache_icache(start);
-		kunmap_local(start);
 	} else {
-		flush_dcache_icache_phys(page_to_phys(page));
+		unsigned long pfn = folio_pfn(folio);
+		for (i = 0; i < nr; i++)
+			flush_dcache_icache_phys((pfn + i) * PAGE_SIZE);
 	}
 }
-EXPORT_SYMBOL(flush_dcache_icache_page);
+EXPORT_SYMBOL(flush_dcache_icache_folio);
 
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
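
The kvm_ppc.h and hash_utils.c hunks above both follow the same deferred
i-cache maintenance idiom, now keyed off the folio's PG_dcache_clean bit
rather than a per-page one. Condensed into a hypothetical helper (the name is
made up; the committed code open-codes this pattern at each call site):

	static void make_folio_icache_clean(struct folio *folio)
	{
		/* flush every page of the folio once, then remember it is clean */
		if (!test_bit(PG_dcache_clean, &folio->flags)) {
			flush_dcache_icache_folio(folio);
			set_bit(PG_dcache_clean, &folio->flags);
		}
	}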

arch/powerpc/mm/nohash/e500_hugetlbpage.c

Lines changed: 2 additions & 1 deletion
@@ -178,7 +178,8 @@ book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
  *
  * This must always be called with the pte lock held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr)
 {
 	if (is_vm_hugetlb_page(vma))
 		book3e_hugetlb_preload(vma, address, *ptep);
