Skip to content

Commit

Permalink
Split IRQ-off and zone->lock while freeing pages from PCP list OpenNu…
Browse files Browse the repository at this point in the history
…voton#1

Split the IRQ-off section, which accesses the PCP list, from the
zone->lock section while freeing pages.
Introduce isolate_pcp_pages() which separates the pages from the PCP
list onto a temporary list and then free the temporary list via
free_pcppages_bulk().

Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
  • Loading branch information
Peter Zijlstra authored and jserv committed Nov 27, 2024
1 parent 8296b6a commit 5214c03
Showing 1 changed file with 51 additions and 30 deletions.
81 changes: 51 additions & 30 deletions mm/page_alloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -1331,7 +1331,7 @@ static inline void prefetch_buddy(struct page *page)
}

/*
* Frees a number of pages from the PCP lists
* Frees a number of pages which have been collected from the pcp lists.
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
Expand All @@ -1342,14 +1342,40 @@ static inline void prefetch_buddy(struct page *page)
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
struct per_cpu_pages *pcp)
struct list_head *head)
{
bool isolated_pageblocks;
struct page *page, *tmp;
unsigned long flags;

spin_lock_irqsave(&zone->lock, flags);
isolated_pageblocks = has_isolate_pageblock(zone);

/*
* Use safe version since after __free_one_page(),
* page->lru.next will not point to original list.
*/
list_for_each_entry_safe(page, tmp, head, lru) {
int mt = get_pcppage_migratetype(page);
/* MIGRATE_ISOLATE page should not go to pcplists */
VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
/* Pageblock could have been isolated meanwhile */
if (unlikely(isolated_pageblocks))
mt = get_pageblock_migratetype(page);

__free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
trace_mm_page_pcpu_drain(page, 0, mt);
}
spin_unlock_irqrestore(&zone->lock, flags);
}

static void isolate_pcp_pages(int count, struct per_cpu_pages *pcp,
struct list_head *dst)
{
int migratetype = 0;
int batch_free = 0;
int prefetch_nr = 0;
bool isolated_pageblocks;
struct page *page, *tmp;
LIST_HEAD(head);
struct page *page;

/*
* Ensure proper count is passed which otherwise would stuck in the
Expand Down Expand Up @@ -1386,7 +1412,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
if (bulkfree_pcp_prepare(page))
continue;

list_add_tail(&page->lru, &head);
list_add_tail(&page->lru, dst);

/*
* We are going to put the page back to the global
Expand All @@ -1401,26 +1427,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
prefetch_buddy(page);
} while (--count && --batch_free && !list_empty(list));
}

spin_lock(&zone->lock);
isolated_pageblocks = has_isolate_pageblock(zone);

/*
* Use safe version since after __free_one_page(),
* page->lru.next will not point to original list.
*/
list_for_each_entry_safe(page, tmp, &head, lru) {
int mt = get_pcppage_migratetype(page);
/* MIGRATE_ISOLATE page should not go to pcplists */
VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
/* Pageblock could have been isolated meanwhile */
if (unlikely(isolated_pageblocks))
mt = get_pageblock_migratetype(page);

__free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
trace_mm_page_pcpu_drain(page, 0, mt);
}
spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
Expand Down Expand Up @@ -2938,13 +2944,18 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
int to_drain, batch;
LIST_HEAD(dst);

local_irq_save(flags);
batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
if (to_drain > 0)
free_pcppages_bulk(zone, to_drain, pcp);
isolate_pcp_pages(to_drain, pcp, &dst);

local_irq_restore(flags);

if (to_drain > 0)
free_pcppages_bulk(zone, to_drain, &dst);
}
#endif

Expand All @@ -2960,14 +2971,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
LIST_HEAD(dst);
int count;

local_irq_save(flags);
pset = per_cpu_ptr(zone->pageset, cpu);

pcp = &pset->pcp;
if (pcp->count)
free_pcppages_bulk(zone, pcp->count, pcp);
count = pcp->count;
if (count)
isolate_pcp_pages(count, pcp, &dst);

local_irq_restore(flags);

if (count)
free_pcppages_bulk(zone, count, &dst);
}

/*
Expand Down Expand Up @@ -3196,7 +3214,10 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
free_pcppages_bulk(zone, batch, pcp);
LIST_HEAD(dst);

isolate_pcp_pages(batch, pcp, &dst);
free_pcppages_bulk(zone, batch, &dst);
}
}

Expand Down

0 comments on commit 5214c03

Please sign in to comment.