Skip to content

Commit d4fb458

Browse files
davidhildenbrand authored and akpm00 committed
mm: rename __PageMovable() to page_has_movable_ops()
Let's make it clearer that we are talking about movable_ops pages. While at it, convert a VM_BUG_ON to a VM_WARN_ON_ONCE_PAGE. Link: https://lkml.kernel.org/r/20250704102524.326966-17-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Reviewed-by: Zi Yan <ziy@nvidia.com> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Reviewed-by: Harry Yoo <harry.yoo@oracle.com> Cc: Alistair Popple <apopple@nvidia.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Brendan Jackman <jackmanb@google.com> Cc: Byungchul Park <byungchul@sk.com> Cc: Chengming Zhou <chengming.zhou@linux.dev> Cc: Christian Brauner <brauner@kernel.org> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Eugenio Pé rez <eperezma@redhat.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Gregory Price <gourry@gourry.net> Cc: "Huang, Ying" <ying.huang@linux.alibaba.com> Cc: Jan Kara <jack@suse.cz> Cc: Jason Gunthorpe <jgg@ziepe.ca> Cc: Jason Wang <jasowang@redhat.com> Cc: Jerrin Shaji George <jerrin.shaji-george@broadcom.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: John Hubbard <jhubbard@nvidia.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Joshua Hahn <joshua.hahnjy@gmail.com> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Mathew Brost <matthew.brost@intel.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: "Michael S. 
Tsirkin" <mst@redhat.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Minchan Kim <minchan@kernel.org> Cc: Naoya Horiguchi <nao.horiguchi@gmail.com> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Oscar Salvador <osalvador@suse.de> Cc: Peter Xu <peterx@redhat.com> Cc: Qi Zheng <zhengqi.arch@bytedance.com> Cc: Rakie Kim <rakie.kim@sk.com> Cc: Rik van Riel <riel@surriel.com> Cc: Sergey Senozhatsky <senozhatsky@chromium.org> Cc: Shakeel Butt <shakeel.butt@linux.dev> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com> Cc: xu xin <xu.xin16@zte.com.cn> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 22d103a commit d4fb458

File tree

8 files changed

+20
-25
lines changed

8 files changed

+20
-25
lines changed

include/linux/migrate.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ static inline void __SetPageMovable(struct page *page,
115115
static inline
116116
const struct movable_operations *page_movable_ops(struct page *page)
117117
{
118-
VM_BUG_ON(!__PageMovable(page));
118+
VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
119119

120120
return (const struct movable_operations *)
121121
((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);

include/linux/page-flags.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -750,7 +750,7 @@ static __always_inline bool __folio_test_movable(const struct folio *folio)
750750
PAGE_MAPPING_MOVABLE;
751751
}
752752

753-
static __always_inline bool __PageMovable(const struct page *page)
753+
static __always_inline bool page_has_movable_ops(const struct page *page)
754754
{
755755
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
756756
PAGE_MAPPING_MOVABLE;

mm/compaction.c

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1056,11 +1056,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
10561056
* Skip any other type of page
10571057
*/
10581058
if (!PageLRU(page)) {
1059-
/*
1060-
* __PageMovable can return false positive so we need
1061-
* to verify it under page_lock.
1062-
*/
1063-
if (unlikely(__PageMovable(page)) &&
1059+
/* Isolation code will deal with any races. */
1060+
if (unlikely(page_has_movable_ops(page)) &&
10641061
!PageIsolated(page)) {
10651062
if (locked) {
10661063
unlock_page_lruvec_irqrestore(locked, flags);

mm/memory-failure.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1388,8 +1388,8 @@ static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
13881388
if (PageSlab(page))
13891389
return false;
13901390

1391-
/* Soft offline could migrate non-LRU movable pages */
1392-
if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
1391+
/* Soft offline could migrate movable_ops pages */
1392+
if ((flags & MF_SOFT_OFFLINE) && page_has_movable_ops(page))
13931393
return true;
13941394

13951395
return PageLRU(page) || is_free_buddy_page(page);

mm/memory_hotplug.c

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1739,8 +1739,8 @@ bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
17391739

17401740
#ifdef CONFIG_MEMORY_HOTREMOVE
17411741
/*
1742-
* Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
1743-
* non-lru movable pages and hugepages). Will skip over most unmovable
1742+
* Scan pfn range [start,end) to find movable/migratable pages (LRU and
1743+
* hugetlb folio, movable_ops pages). Will skip over most unmovable
17441744
* pages (esp., pages that can be skipped when offlining), but bail out on
17451745
* definitely unmovable pages.
17461746
*
@@ -1759,13 +1759,11 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
17591759
struct folio *folio;
17601760

17611761
page = pfn_to_page(pfn);
1762-
if (PageLRU(page))
1763-
goto found;
1764-
if (__PageMovable(page))
1762+
if (PageLRU(page) || page_has_movable_ops(page))
17651763
goto found;
17661764

17671765
/*
1768-
* PageOffline() pages that are not marked __PageMovable() and
1766+
* PageOffline() pages that do not have movable_ops and
17691767
* have a reference count > 0 (after MEM_GOING_OFFLINE) are
17701768
* definitely unmovable. If their reference count would be 0,
17711769
* they could at least be skipped when offlining memory.

mm/migrate.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
9494
* Note that once a page has movable_ops, it will stay that way
9595
* until the page was freed.
9696
*/
97-
if (unlikely(!__PageMovable(page)))
97+
if (unlikely(!page_has_movable_ops(page)))
9898
goto out_putfolio;
9999

100100
/*
@@ -111,7 +111,7 @@ bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
111111
if (unlikely(!folio_trylock(folio)))
112112
goto out_putfolio;
113113

114-
VM_WARN_ON_ONCE_PAGE(!__PageMovable(page), page);
114+
VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
115115
if (PageIsolated(page))
116116
goto out_no_isolated;
117117

@@ -153,7 +153,7 @@ static void putback_movable_ops_page(struct page *page)
153153
*/
154154
struct folio *folio = page_folio(page);
155155

156-
VM_WARN_ON_ONCE_PAGE(!__PageMovable(page), page);
156+
VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
157157
VM_WARN_ON_ONCE_PAGE(!PageIsolated(page), page);
158158
folio_lock(folio);
159159
page_movable_ops(page)->putback_page(page);
@@ -194,7 +194,7 @@ static int migrate_movable_ops_page(struct page *dst, struct page *src,
194194
{
195195
int rc = MIGRATEPAGE_SUCCESS;
196196

197-
VM_WARN_ON_ONCE_PAGE(!__PageMovable(src), src);
197+
VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
198198
VM_WARN_ON_ONCE_PAGE(!PageIsolated(src), src);
199199
rc = page_movable_ops(src)->migrate_page(dst, src, mode);
200200
if (rc == MIGRATEPAGE_SUCCESS)

mm/page_alloc.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2006,7 +2006,7 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
20062006
* migration are movable. But we don't actually try
20072007
* isolating, as that would be expensive.
20082008
*/
2009-
if (PageLRU(page) || __PageMovable(page))
2009+
if (PageLRU(page) || page_has_movable_ops(page))
20102010
(*num_movable)++;
20112011
pfn++;
20122012
}

mm/page_isolation.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -21,9 +21,9 @@
2121
* consequently belong to a single zone.
2222
*
2323
* PageLRU check without isolation or lru_lock could race so that
24-
* MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
25-
* check without lock_page also may miss some movable non-lru pages at
26-
* race condition. So you can't expect this function should be exact.
24+
* MIGRATE_MOVABLE block might include unmovable pages. Similarly, pages
25+
* with movable_ops can only be identified some time after they were
26+
* allocated. So you can't expect this function should be exact.
2727
*
2828
* Returns a page without holding a reference. If the caller wants to
2929
* dereference that page (e.g., dumping), it has to make sure that it
@@ -133,7 +133,7 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
133133
if ((mode == PB_ISOLATE_MODE_MEM_OFFLINE) && PageOffline(page))
134134
continue;
135135

136-
if (__PageMovable(page) || PageLRU(page))
136+
if (PageLRU(page) || page_has_movable_ops(page))
137137
continue;
138138

139139
/*
@@ -421,7 +421,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn,
421421
* proper free and split handling for them.
422422
*/
423423
VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
424-
VM_WARN_ON_ONCE_PAGE(__PageMovable(page), page);
424+
VM_WARN_ON_ONCE_PAGE(page_has_movable_ops(page), page);
425425

426426
goto failed;
427427
}

0 commit comments

Comments
 (0)