Skip to content

Commit d8505de

Browse files
Shaohua Li authored and Linus Torvalds committed
mm: simplify code of swap.c
Clean up code and remove duplicate code. The next patch will use pagevec_lru_move_fn, introduced here, too.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent c06b1fc commit d8505de

File tree

1 file changed

+54
-47
lines changed

1 file changed

+54
-47
lines changed

mm/swap.c

Lines changed: 54 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -178,45 +178,63 @@ void put_pages_list(struct list_head *pages)
178178
}
179179
EXPORT_SYMBOL(put_pages_list);
180180

181-
/*
182-
* pagevec_move_tail() must be called with IRQ disabled.
183-
* Otherwise this may cause nasty races.
184-
*/
185-
static void pagevec_move_tail(struct pagevec *pvec)
181+
static void pagevec_lru_move_fn(struct pagevec *pvec,
182+
void (*move_fn)(struct page *page, void *arg),
183+
void *arg)
186184
{
187185
int i;
188-
int pgmoved = 0;
189186
struct zone *zone = NULL;
187+
unsigned long flags = 0;
190188

191189
for (i = 0; i < pagevec_count(pvec); i++) {
192190
struct page *page = pvec->pages[i];
193191
struct zone *pagezone = page_zone(page);
194192

195193
if (pagezone != zone) {
196194
if (zone)
197-
spin_unlock(&zone->lru_lock);
195+
spin_unlock_irqrestore(&zone->lru_lock, flags);
198196
zone = pagezone;
199-
spin_lock(&zone->lru_lock);
200-
}
201-
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
202-
int lru = page_lru_base_type(page);
203-
list_move_tail(&page->lru, &zone->lru[lru].list);
204-
pgmoved++;
197+
spin_lock_irqsave(&zone->lru_lock, flags);
205198
}
199+
200+
(*move_fn)(page, arg);
206201
}
207202
if (zone)
208-
spin_unlock(&zone->lru_lock);
209-
__count_vm_events(PGROTATED, pgmoved);
210-
release_pages(pvec->pages, pvec->nr, pvec->cold);
203+
spin_unlock_irqrestore(&zone->lru_lock, flags);
204+
release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
211205
pagevec_reinit(pvec);
212206
}
213207

208+
static void pagevec_move_tail_fn(struct page *page, void *arg)
209+
{
210+
int *pgmoved = arg;
211+
struct zone *zone = page_zone(page);
212+
213+
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
214+
int lru = page_lru_base_type(page);
215+
list_move_tail(&page->lru, &zone->lru[lru].list);
216+
(*pgmoved)++;
217+
}
218+
}
219+
220+
/*
221+
* pagevec_move_tail() must be called with IRQ disabled.
222+
* Otherwise this may cause nasty races.
223+
*/
224+
static void pagevec_move_tail(struct pagevec *pvec)
225+
{
226+
int pgmoved = 0;
227+
228+
pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
229+
__count_vm_events(PGROTATED, pgmoved);
230+
}
231+
214232
/*
215233
* Writeback is about to end against a page which has been marked for immediate
216234
* reclaim. If it still appears to be reclaimable, move it to the tail of the
217235
* inactive list.
218236
*/
219-
void rotate_reclaimable_page(struct page *page)
237+
void rotate_reclaimable_page(struct page *page)
220238
{
221239
if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
222240
!PageUnevictable(page) && PageLRU(page)) {
@@ -516,44 +534,33 @@ void lru_add_page_tail(struct zone* zone,
516534
}
517535
}
518536

537+
static void ____pagevec_lru_add_fn(struct page *page, void *arg)
538+
{
539+
enum lru_list lru = (enum lru_list)arg;
540+
struct zone *zone = page_zone(page);
541+
int file = is_file_lru(lru);
542+
int active = is_active_lru(lru);
543+
544+
VM_BUG_ON(PageActive(page));
545+
VM_BUG_ON(PageUnevictable(page));
546+
VM_BUG_ON(PageLRU(page));
547+
548+
SetPageLRU(page);
549+
if (active)
550+
SetPageActive(page);
551+
update_page_reclaim_stat(zone, page, file, active);
552+
add_page_to_lru_list(zone, page, lru);
553+
}
554+
519555
/*
520556
* Add the passed pages to the LRU, then drop the caller's refcount
521557
* on them. Reinitialises the caller's pagevec.
522558
*/
523559
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
524560
{
525-
int i;
526-
struct zone *zone = NULL;
527-
528561
VM_BUG_ON(is_unevictable_lru(lru));
529562

530-
for (i = 0; i < pagevec_count(pvec); i++) {
531-
struct page *page = pvec->pages[i];
532-
struct zone *pagezone = page_zone(page);
533-
int file;
534-
int active;
535-
536-
if (pagezone != zone) {
537-
if (zone)
538-
spin_unlock_irq(&zone->lru_lock);
539-
zone = pagezone;
540-
spin_lock_irq(&zone->lru_lock);
541-
}
542-
VM_BUG_ON(PageActive(page));
543-
VM_BUG_ON(PageUnevictable(page));
544-
VM_BUG_ON(PageLRU(page));
545-
SetPageLRU(page);
546-
active = is_active_lru(lru);
547-
file = is_file_lru(lru);
548-
if (active)
549-
SetPageActive(page);
550-
update_page_reclaim_stat(zone, page, file, active);
551-
add_page_to_lru_list(zone, page, lru);
552-
}
553-
if (zone)
554-
spin_unlock_irq(&zone->lru_lock);
555-
release_pages(pvec->pages, pvec->nr, pvec->cold);
556-
pagevec_reinit(pvec);
563+
pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
557564
}
558565

559566
EXPORT_SYMBOL(____pagevec_lru_add);

0 commit comments

Comments
 (0)