x86/tlb: enable tlb flush range support for x86
Not every tlb_flush really needs to evacuate all TLB entries; in munmap,
for example, a few 'invlpg' instructions are better for overall process
performance, since they leave most of the TLB entries intact for later
accesses.
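
The decision at the heart of the change, as a minimal sketch (condensed from
the flush_tlb_mm_range added below; preemption handling, the large-page check
and the cross-CPU flush are left out):

	/* Flush by per-page 'invlpg' only while that is cheaper than
	 * refilling the whole TLB; otherwise flush everything.
	 */
	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
		local_flush_tlb();			/* full flush */
	else
		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_tlb_single(addr);	/* one 'invlpg' per page */

Here act_entries is the number of TLB entries this mm can plausibly occupy
and tlb_flushall_shift is the per-CPU balance point; both are set up in the
patch.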

This patch also rewrites flush_tlb_range for 2 purposes:
1, split it out to get a flush_tlb_mm_range function; the old entry points
   become thin wrappers around it (shown below).
2, clean it up to reduce line breaking, thanks to Borislav's input.
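
With the split, flush_tlb_mm() and flush_tlb_range() are reduced to wrappers
around the new function, as the tlbflush.h hunk below shows:

	#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

	#define flush_tlb_range(vma, start, end)	\
			flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)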

My 'munmap' micro benchmark (http://lkml.org/lkml/2012/5/17/59) shows that
random memory access on the other CPUs speeds up by 0~50% on a
2P * 4 cores * HT NHM EP machine while 'munmap' is running.

Thanks to Yongjie for testing this patch:
-------------
I used Linux 3.4-RC6 with and without his patches as the Xen dom0 and guest
kernel.
After running two benchmarks in a Xen HVM guest, I found his patches brought
about a 1%~3% performance gain in 'kernel build' and 'netperf' testing, though
the gain was not very stable in the 'kernel build' testing.

Some detailed testing results are below.

Testing Environment:
	Hardware: Romley-EP platform
	Xen version: latest upstream
	Linux kernel: 3.4-RC6
	Guest vCPU number: 8
	NIC: Intel 82599 (10Gb bandwidth)

In 'kernel build' testing in guest:
	Command line  |  performance gain
	make -j 4     |      3.81%
	make -j 8     |      0.37%
	make -j 16    |     -0.52%

In 'netperf' testing, we tested TCP_STREAM with the default socket size,
using 16384-byte sends as the large-packet case and 64-byte sends as the
small-packet case.
I used several clients to apply networking pressure, and the 'netperf' server
automatically spawned several threads to respond to them.
	Packet size  |  Thread number | performance gain
	16384 bytes  |      4       |   0.02%
	16384 bytes  |      8       |   2.21%
	16384 bytes  |      16      |   2.04%
	64 bytes     |      4       |   1.07%
	64 bytes     |      8       |   3.31%
	64 bytes     |      16      |   0.71%

Signed-off-by: Alex Shi <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Tested-by: Ren, Yongjie <[email protected]>
Signed-off-by: H. Peter Anvin <[email protected]>
Alex Shi authored and H. Peter Anvin committed Jun 28, 2012
1 parent 597e1c3 commit 611ae8e
Showing 3 changed files with 68 additions and 70 deletions.
9 changes: 8 additions & 1 deletion arch/x86/include/asm/tlb.h
@@ -4,7 +4,14 @@
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#define tlb_flush(tlb)							\
+{									\
+	if (tlb->fullmm == 0)						\
+		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL);	\
+	else								\
+		flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);	\
+}
 
 #include <asm-generic/tlb.h>
 
17 changes: 14 additions & 3 deletions arch/x86/include/asm/tlbflush.h
@@ -105,6 +105,13 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	__flush_tlb();
 }
 
+static inline void flush_tlb_mm_range(struct vm_area_struct *vma,
+	   unsigned long start, unsigned long end, unsigned long vmflag)
+{
+	if (vma->vm_mm == current->active_mm)
+		__flush_tlb();
+}
+
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
 					   unsigned long start,
@@ -122,12 +129,16 @@ static inline void reset_lazy_tlbstate(void)

 #define local_flush_tlb() __flush_tlb()
 
+#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+
+#define flush_tlb_range(vma, start, end)				\
+		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+
 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-extern void flush_tlb_range(struct vm_area_struct *vma,
-			   unsigned long start, unsigned long end);
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+				unsigned long end, unsigned long vmflag);
 
 #define flush_tlb()	flush_tlb_current_task()

112 changes: 46 additions & 66 deletions arch/x86/mm/tlb.c
@@ -301,23 +301,10 @@ void flush_tlb_current_task(void)
	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);

	preempt_enable();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * It can find out the THP large page, or
 * HUGETLB page in tlb_flush when THP disabled
 */
static inline unsigned long has_large_page(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
@@ -339,68 +326,61 @@ static inline unsigned long has_large_page(struct mm_struct *mm,
	}
	return 0;
}
#else
static inline unsigned long has_large_page(struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	return 0;
}
#endif
void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_struct *mm;

	if (vma->vm_flags & VM_HUGETLB || tlb_flushall_shift == -1) {
flush_all:
		flush_tlb_mm(vma->vm_mm);
		return;
	}
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	unsigned act_entries, tlb_entries = 0;

	preempt_disable();
	mm = vma->vm_mm;
	if (current->active_mm == mm) {
		if (current->mm) {
			unsigned long addr, vmflag = vma->vm_flags;
			unsigned act_entries, tlb_entries = 0;
	if (current->active_mm != mm)
		goto flush_all;

			if (vmflag & VM_EXEC)
				tlb_entries = tlb_lli_4k[ENTRIES];
			else
				tlb_entries = tlb_lld_4k[ENTRIES];

			act_entries = tlb_entries > mm->total_vm ?
					mm->total_vm : tlb_entries;
	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto flush_all;
	}

			if ((end - start) >> PAGE_SHIFT >
					act_entries >> tlb_flushall_shift)
				local_flush_tlb();
			else {
				if (has_large_page(mm, start, end)) {
					preempt_enable();
					goto flush_all;
				}
				for (addr = start; addr < end;
						addr += PAGE_SIZE)
					__flush_tlb_single(addr);
	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
					|| vmflag == VM_HUGETLB) {
		local_flush_tlb();
		goto flush_all;
	}

				if (cpumask_any_but(mm_cpumask(mm),
					smp_processor_id()) < nr_cpu_ids)
					flush_tlb_others(mm_cpumask(mm), mm,
						start, end);
				preempt_enable();
				return;
			}
		} else {
			leave_mm(smp_processor_id());
	/* In modern CPU, last level tlb used for both data/ins */
	if (vmflag & VM_EXEC)
		tlb_entries = tlb_lli_4k[ENTRIES];
	else
		tlb_entries = tlb_lld_4k[ENTRIES];
	/* Assume all of TLB entries was occupied by this task */
	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;

	/* tlb_flushall_shift is on balance point, details in commit log */
	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
		local_flush_tlb();
	else {
		if (has_large_page(mm, start, end)) {
			local_flush_tlb();
			goto flush_all;
		}
		/* flush range by one by one 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_tlb_single(addr);

		if (cpumask_any_but(mm_cpumask(mm),
				smp_processor_id()) < nr_cpu_ids)
			flush_tlb_others(mm_cpumask(mm), mm, start, end);
		preempt_enable();
		return;
	}

flush_all:
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}


void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;
