|
46 | 46 | * |
47 | 47 | * The mmu_gather API consists of: |
48 | 48 | * |
49 | | - * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu() |
| 49 | + * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_gather_mmu_vma() / |
| 50 | + * tlb_finish_mmu() |
50 | 51 | * |
51 | 52 | * start and finish a mmu_gather |
52 | 53 | * |
@@ -364,6 +365,20 @@ struct mmu_gather { |
364 | 365 | unsigned int vma_huge : 1; |
365 | 366 | unsigned int vma_pfn : 1; |
366 | 367 |
|
| 368 | + /* |
| 369 | + * Did we unshare (unmap) any shared page tables? For now only |
| 370 | + * used for hugetlb PMD table sharing. |
| 371 | + */ |
| 372 | + unsigned int unshared_tables : 1; |
| 373 | + |
| 374 | + /* |
| 375 | + * Did we unshare any page tables such that they are now exclusive |
| 376 | + * and could get reused+modified by the new owner? When setting this |
| 377 | + * flag, "unshared_tables" will be set as well. For now only used |
| 378 | + * for hugetlb PMD table sharing. |
| 379 | + */ |
| 380 | + unsigned int fully_unshared_tables : 1; |
| 381 | + |
367 | 382 | unsigned int batch_count; |
368 | 383 |
|
369 | 384 | #ifndef CONFIG_MMU_GATHER_NO_GATHER |
@@ -400,6 +415,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) |
400 | 415 | tlb->cleared_pmds = 0; |
401 | 416 | tlb->cleared_puds = 0; |
402 | 417 | tlb->cleared_p4ds = 0; |
| 418 | + tlb->unshared_tables = 0; |
403 | 419 | /* |
404 | 420 | * Do not reset mmu_gather::vma_* fields here, we do not |
405 | 421 | * call into tlb_start_vma() again to set them if there is an |
@@ -484,7 +500,7 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) |
484 | 500 | * these bits. |
485 | 501 | */ |
486 | 502 | if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds || |
487 | | - tlb->cleared_puds || tlb->cleared_p4ds)) |
| 503 | + tlb->cleared_puds || tlb->cleared_p4ds || tlb->unshared_tables)) |
488 | 504 | return; |
489 | 505 |
|
490 | 506 | tlb_flush(tlb); |
@@ -773,6 +789,63 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd) |
773 | 789 | } |
774 | 790 | #endif |
775 | 791 |
|
| 792 | +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING |
| 793 | +static inline void tlb_unshare_pmd_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt, |
| 794 | + unsigned long addr) |
| 795 | +{ |
| 796 | + /* |
| 797 | + * The caller must make sure that concurrent unsharing + exclusive |
| 798 | + * reuse is impossible until tlb_flush_unshared_tables() has been called. |
| 799 | + */ |
| 800 | + VM_WARN_ON_ONCE(!ptdesc_pmd_is_shared(pt)); |
| 801 | + ptdesc_pmd_pts_dec(pt); |
| 802 | + |
| 803 | + /* Clearing a PUD pointing at a PMD table with PMD leaves. */ |
| 804 | + tlb_flush_pmd_range(tlb, addr & PUD_MASK, PUD_SIZE); |
| 805 | + |
| 806 | + /* |
| 807 | + * If the page table is now exclusively owned, we fully unshared |
| 808 | + * a page table. |
| 809 | + */ |
| 810 | + if (!ptdesc_pmd_is_shared(pt)) |
| 811 | + tlb->fully_unshared_tables = true; |
| 812 | + tlb->unshared_tables = true; |
| 813 | +} |
| 814 | + |
| 815 | +static inline void tlb_flush_unshared_tables(struct mmu_gather *tlb) |
| 816 | +{ |
| 817 | + /* |
| 818 | + * As soon as the caller drops locks to allow for reuse of |
| 819 | + * previously-shared tables, these tables could get modified and |
| 820 | + * even reused outside of hugetlb context, so we have to make sure that |
| 821 | + * any page table walkers (incl. TLB, GUP-fast) are aware of that |
| 822 | + * change. |
| 823 | + * |
| 824 | + * Even if we are not fully unsharing a PMD table, we must |
| 825 | + * flush the TLB for the unsharer now. |
| 826 | + */ |
| 827 | + if (tlb->unshared_tables) |
| 828 | + tlb_flush_mmu_tlbonly(tlb); |
| 829 | + |
| 830 | + /* |
| 831 | + * Similarly, we must make sure that concurrent GUP-fast will not |
| 832 | + * walk previously-shared page tables that are getting modified+reused |
| 833 | + * elsewhere. So broadcast an IPI to wait for any concurrent GUP-fast. |
| 834 | + * |
| 835 | + * We only perform this when we are the last sharer of a page table, as |
| 836 | + * the IPI reaches all CPUs: any concurrent GUP-fast will have completed. |
| 837 | + * |
| 838 | + * Note that on configs where tlb_remove_table_sync_one() is a NOP, |
| 839 | + * the expectation is that the tlb_flush_mmu_tlbonly() would have issued |
| 840 | + * required IPIs already for us. |
| 841 | + */ |
| 842 | + if (tlb->fully_unshared_tables) { |
| 843 | + tlb_remove_table_sync_one(); |
| 844 | + tlb->fully_unshared_tables = false; |
| 845 | + } |
| 846 | +} |
| 847 | +#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */ |
| 848 | + |
776 | 849 | #endif /* CONFIG_MMU */ |
777 | 850 |
|
778 | 851 | #endif /* _ASM_GENERIC__TLB_H */ |
0 commit comments