anon: Handle page faults under the page table lock
Signed-off-by: Pedro Falcato <[email protected]>
heatd committed Nov 29, 2024
1 parent a635188 commit d115671
Showing 1 changed file with 20 additions and 41 deletions.
kernel/kernel/mm/anon.cpp: 61 changes (20 additions & 41 deletions)
@@ -20,74 +20,53 @@ const struct vm_operations anon_vmops = {.fault = vm_anon_fault};
 
 int vm_anon_fault(struct vm_pf_context *ctx)
 {
-    struct vm_area_struct *region = ctx->entry;
+    struct vm_area_struct *vma = ctx->entry;
     struct fault_info *info = ctx->info;
-    struct page *page = nullptr, *oldp = nullptr;
-    bool needs_invd = false;
+    struct page *page = nullptr;
+    pte_t *ptep;
+    struct spinlock *lock;
 
+    /* pte_present is done in do_wp_page, not here. */
+    CHECK(!pte_present(ctx->oldpte));
     /* Permission checks have already been handled before .fault() */
     if (!info->write)
     {
         /* Don't copy, just fault the zero page */
         page = vm_get_zero_page();
         /* Write protect the page and don't bother flushing the TLB */
         ctx->page_rwx &= ~VM_WRITE;
         ctx->page_rwx |= VM_NOFLUSH;
         goto map;
     }
-    else
-    {
-        bool copy_old = false;
-        if (pte_present(ctx->oldpte))
-        {
-            oldp = phys_to_page(pte_addr(ctx->oldpte));
-            DCHECK(info->write && !pte_write(ctx->oldpte));
-            if (oldp != vm_get_zero_page())
-                copy_old = true;
-            needs_invd = true;
-
-            if (copy_old && 0 && page_flag_set(oldp, PAGE_FLAG_ANON) && page_mapcount(oldp) == 1)
-            {
-                /* If this is an anon page *and* mapcount = 1, avoid allocating a new page. Since
-                 * mapcount = 1 (AND *ANON*), no one else can grab a ref. */
-                /* TODO: We might be able to explore this - we may avoid the TLB shootdown and just
-                 * change prots, but it would require significant code refactoring as-is. */
-                /* TODO: checking mapcount = 1 probably isn't this easy once we get swapping,
-                 * because refs may come and go. Will we need the page lock? */
-                page = oldp;
-                page_ref(page);
-                goto map;
-            }
-
-            /* oldp's mapcount will be decremented in vm_map_page */
-        }
 
     struct anon_vma *anon = anon_vma_prepare(ctx->entry);
     if (!anon)
         return -ENOMEM;
 
-    /* Allocate a brand-new (possibly zero-filled) page */
-    page = alloc_page((copy_old ? PAGE_ALLOC_NO_ZERO : 0) | GFP_KERNEL);
+    /* Allocate a brand-new, zero-filled page */
+    page = alloc_page(GFP_KERNEL);
     if (!page)
         goto enomem;
     page_set_anon(page);
     page->owner = (struct vm_object *) anon;
     page->pageoff = ctx->vpage;
     page_add_lru(page);
+    page_set_dirty(page);
 
-    if (copy_old)
-        copy_page_to_page(page_to_phys(page), page_to_phys(oldp));
-    goto map;
-    }
 
 map:
-    if (!vm_map_page(region->vm_mm, ctx->vpage, (u64) page_to_phys(page), ctx->page_rwx,
-                     ctx->entry))
+    if (pgtable_prealloc(vma->vm_mm, ctx->vpage) < 0)
         goto enomem;
-    if (needs_invd)
-        vm_invalidate_range(ctx->vpage, 1);
 
+    ptep = ptep_get_locked(vma->vm_mm, ctx->vpage, &lock);
+    if (ptep->pte != ctx->oldpte.pte)
+        goto out;
+
+    increment_vm_stat(vma->vm_mm, resident_set_size, PAGE_SIZE);
+    page_add_mapcount(page);
+    set_pte(ptep, pte_mkpte((u64) page_to_phys(page),
+                            calc_pgprot((u64) page_to_phys(page), ctx->page_rwx)));
+
+out:
+    spin_unlock(lock);
     /* The mapcount holds the only reference we need for anon pages... */
     if (info->write)
         page_unref(page);
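The shape of this change is worth spelling out: the fault handler samples the PTE (ctx->oldpte), does all of its allocation work with no locks held, then takes the page table lock, re-checks that the PTE still holds the sampled value, and only then installs the new mapping. If the PTE changed, another thread already resolved the same fault, and the freshly allocated page is dropped when the final page_unref() releases its only reference. Below is a minimal userspace model of that lock-and-recheck idiom; it is a sketch only, and the struct, function, and variable names are hypothetical, not Onyx APIs (std::mutex stands in for the spinlock that ptep_get_locked() returns).

// Standalone model of the "recheck the PTE under the lock" idiom.
// All names here are illustrative; none of this is kernel code.
#include <cstdint>
#include <cstdio>
#include <mutex>

struct pte_slot
{
    uint64_t pte = 0; // current PTE value (0 = not present)
    std::mutex lock;  // stand-in for the page table lock
};

// Install `newpte` only if the slot still holds `oldpte`, i.e. no other
// thread resolved the same fault while we were allocating. Returns true
// on success; on false, the caller must free the page it prepared.
static bool install_pte_if_unchanged(pte_slot &slot, uint64_t oldpte, uint64_t newpte)
{
    std::lock_guard<std::mutex> g(slot.lock);
    if (slot.pte != oldpte)
        return false; // lost the race; a mapping is already in place
    slot.pte = newpte;
    return true;
}

int main()
{
    pte_slot slot;
    // The first faulting thread installs its page...
    std::printf("first: %d\n", install_pte_if_unchanged(slot, 0, 0x1000 | 1));
    // ...a second thread that sampled the same old PTE backs off cleanly.
    std::printf("second: %d\n", install_pte_if_unchanged(slot, 0, 0x2000 | 1));
    return 0;
}

The diff follows the same discipline: pgtable_prealloc() runs before ptep_get_locked(), presumably so that any page-table allocation (which may sleep or fail) happens before the spinlock is taken, leaving only the compare-and-install under the lock.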
