mm/migrate: migrate_vma() unmap page from vma while collecting pages
The common case for migration of a virtual address range is that pages are mapped only once, inside the vma in which the migration is taking place.  Because we already walk the CPU page table for that range, we can do the unmap directly there and set up the special migration swap entry.
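
In outline, the fast path this adds to migrate_vma_collect_pmd() looks like the following (a condensed reading of the hunk further down; the pmd/hole handling, the trylock_page() failure case and the per-pte error paths are left out):

	if (trylock_page(page)) {
		/* we hold the page lock, so install the migration entry now */
		mpfn |= MIGRATE_PFN_LOCKED;
		ptep_get_and_clear(mm, addr, ptep);

		entry = make_migration_entry(page, pte_write(pte));
		swp_pte = swp_entry_to_pte(entry);
		if (pte_soft_dirty(pte))
			swp_pte = pte_swp_mksoft_dirty(swp_pte);
		set_pte_at(mm, addr, ptep, swp_pte);

		/* drop rmap and refcount; the reference taken earlier keeps the page alive */
		page_remove_rmap(page, false);
		put_page(page);
		unmapped++;
	}

	/* ... after the pte walk: a single TLB flush for the whole range ... */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

If trylock_page() fails, the entry is still collected but stays mapped, and migrate_vma_prepare()/migrate_vma_unmap() fall back to the existing lock-and-unmap path.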

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Jérôme Glisse <[email protected]>
Signed-off-by: Evgeny Baskakov <[email protected]>
Signed-off-by: John Hubbard <[email protected]>
Signed-off-by: Mark Hairgrove <[email protected]>
Signed-off-by: Sherry Cheung <[email protected]>
Signed-off-by: Subhash Gutti <[email protected]>
Cc: Aneesh Kumar <[email protected]>
Cc: Balbir Singh <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: David Nellans <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Ross Zwisler <[email protected]>
Cc: Vladimir Davydov <[email protected]>
Cc: Bob Liu <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Jérôme Glisse authored and torvalds committed Sep 9, 2017
1 parent 8763cb4 commit 8c3328f
Showing 1 changed file with 112 additions and 29 deletions.
mm/migrate.c — 141 changes: 112 additions & 29 deletions
@@ -2149,7 +2149,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 	struct migrate_vma *migrate = walk->private;
 	struct vm_area_struct *vma = walk->vma;
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long addr = start;
+	unsigned long addr = start, unmapped = 0;
 	spinlock_t *ptl;
 	pte_t *ptep;
 
@@ -2194,9 +2194,12 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 		return migrate_vma_collect_hole(start, end, walk);
 
 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+	arch_enter_lazy_mmu_mode();
+
 	for (; addr < end; addr += PAGE_SIZE, ptep++) {
 		unsigned long mpfn, pfn;
 		struct page *page;
+		swp_entry_t entry;
 		pte_t pte;
 
 		pte = *ptep;
@@ -2228,11 +2231,44 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 		mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
 		mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
 
+		/*
+		 * Optimize for the common case where page is only mapped once
+		 * in one process. If we can lock the page, then we can safely
+		 * set up a special migration page table entry now.
+		 */
+		if (trylock_page(page)) {
+			pte_t swp_pte;
+
+			mpfn |= MIGRATE_PFN_LOCKED;
+			ptep_get_and_clear(mm, addr, ptep);
+
+			/* Setup special migration page table entry */
+			entry = make_migration_entry(page, pte_write(pte));
+			swp_pte = swp_entry_to_pte(entry);
+			if (pte_soft_dirty(pte))
+				swp_pte = pte_swp_mksoft_dirty(swp_pte);
+			set_pte_at(mm, addr, ptep, swp_pte);
+
+			/*
+			 * This is like regular unmap: we remove the rmap and
+			 * drop page refcount. Page won't be freed, as we took
+			 * a reference just above.
+			 */
+			page_remove_rmap(page, false);
+			put_page(page);
+			unmapped++;
+		}
+
 next:
 		migrate->src[migrate->npages++] = mpfn;
 	}
+	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(ptep - 1, ptl);
 
+	/* Only flush the TLB if we actually modified any entries */
+	if (unmapped)
+		flush_tlb_range(walk->vma, start, end);
+
 	return 0;
 }
 
@@ -2257,7 +2293,13 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
 	mm_walk.mm = migrate->vma->vm_mm;
 	mm_walk.private = migrate;
 
+	mmu_notifier_invalidate_range_start(mm_walk.mm,
+					    migrate->start,
+					    migrate->end);
 	walk_page_range(migrate->start, migrate->end, &mm_walk);
+	mmu_notifier_invalidate_range_end(mm_walk.mm,
+					  migrate->start,
+					  migrate->end);
 
 	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
 }
@@ -2305,32 +2347,37 @@ static bool migrate_vma_check_page(struct page *page)
 static void migrate_vma_prepare(struct migrate_vma *migrate)
 {
 	const unsigned long npages = migrate->npages;
+	const unsigned long start = migrate->start;
+	unsigned long addr, i, restore = 0;
 	bool allow_drain = true;
-	unsigned long i;
 
 	lru_add_drain();
 
 	for (i = 0; (i < npages) && migrate->cpages; i++) {
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
+		bool remap = true;
 
 		if (!page)
 			continue;
 
-		/*
-		 * Because we are migrating several pages there can be
-		 * a deadlock between 2 concurrent migration where each
-		 * are waiting on each other page lock.
-		 *
-		 * Make migrate_vma() a best effort thing and backoff
-		 * for any page we can not lock right away.
-		 */
-		if (!trylock_page(page)) {
-			migrate->src[i] = 0;
-			migrate->cpages--;
-			put_page(page);
-			continue;
+		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
+			/*
+			 * Because we are migrating several pages there can be
+			 * a deadlock between 2 concurrent migration where each
+			 * are waiting on each other page lock.
+			 *
+			 * Make migrate_vma() a best effort thing and backoff
+			 * for any page we can not lock right away.
+			 */
+			if (!trylock_page(page)) {
+				migrate->src[i] = 0;
+				migrate->cpages--;
+				put_page(page);
+				continue;
+			}
+			remap = false;
+			migrate->src[i] |= MIGRATE_PFN_LOCKED;
 		}
-		migrate->src[i] |= MIGRATE_PFN_LOCKED;
 
 		if (!PageLRU(page) && allow_drain) {
 			/* Drain CPU's pagevec */
@@ -2339,21 +2386,50 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
 		}
 
 		if (isolate_lru_page(page)) {
-			migrate->src[i] = 0;
-			unlock_page(page);
-			migrate->cpages--;
-			put_page(page);
+			if (remap) {
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+				migrate->cpages--;
+				restore++;
+			} else {
+				migrate->src[i] = 0;
+				unlock_page(page);
+				migrate->cpages--;
+				put_page(page);
+			}
 			continue;
 		}
 
 		if (!migrate_vma_check_page(page)) {
-			migrate->src[i] = 0;
-			unlock_page(page);
-			migrate->cpages--;
+			if (remap) {
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+				migrate->cpages--;
+				restore++;
 
-			putback_lru_page(page);
+				get_page(page);
+				putback_lru_page(page);
+			} else {
+				migrate->src[i] = 0;
+				unlock_page(page);
+				migrate->cpages--;
+
+				putback_lru_page(page);
+			}
 		}
 	}
+
+	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
+		struct page *page = migrate_pfn_to_page(migrate->src[i]);
+
+		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
+			continue;
+
+		remove_migration_pte(page, migrate->vma, addr, page);
+
+		migrate->src[i] = 0;
+		unlock_page(page);
+		put_page(page);
+		restore--;
+	}
 }
 
 /*
@@ -2380,12 +2456,19 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
 		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
 			continue;
 
-		try_to_unmap(page, flags);
-		if (page_mapped(page) || !migrate_vma_check_page(page)) {
-			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-			migrate->cpages--;
-			restore++;
+		if (page_mapped(page)) {
+			try_to_unmap(page, flags);
+			if (page_mapped(page))
+				goto restore;
 		}
+
+		if (migrate_vma_check_page(page))
+			continue;
+
+restore:
+		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+		migrate->cpages--;
+		restore++;
 	}
 
 	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
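
One consequence of unmapping while collecting is that a page which later turns out to be non-migratable (it fails LRU isolation or migrate_vma_check_page()) must get its CPU mapping back. That is what the restore loop added at the end of migrate_vma_prepare() does for entries whose MIGRATE_PFN_MIGRATE bit was cleared while remap was still set; roughly:

	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		/* entries still marked for migration need no undo */
		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/* replace the special migration entry with a regular pte */
		remove_migration_pte(page, migrate->vma, addr, page);

		migrate->src[i] = 0;
		unlock_page(page);
		put_page(page);
		restore--;
	}

migrate_vma_unmap() retains its own restore loop (its for statement is the trailing context above); the change there is that try_to_unmap() is now skipped for pages that were already unmapped during collection.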
