From bd001aae516223acbf64cfb941af6da62cf67158 Mon Sep 17 00:00:00 2001
From: AltArch Kernel <[email protected]>
Date: Wed, 23 Sep 2020 16:59:35 +0800
Subject: [PATCH 24/25] KVM: arm64: Enable PV qspinlock
euleros inclusion
category: feature
bugzilla: NA
DTS: #231
CVE: NA
--------------------------------
Linux kernel builds were run in a KVM guest on a HiSilicon Kunpeng 920 system.
VM guests were set up with 32, 48 and 64 vCPUs on the 32 physical CPUs.
The kernel build (make -j<n>, where <n> is the number of available vCPUs)
was run three times in a VM with unpinned vCPUs and the best time was
selected. The build times of the original Linux 4.19.87 and of pvqspinlock
with various numbers of vCPUs are as follows:
Kernel       32 vCPUs   48 vCPUs   64 vCPUs
-----------  --------   --------   --------
4.19.87      342.336s   602.048s   950.340s
pvqspinlock  341.366s   376.135s   437.037s
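Note that the PV qspinlock path added below is off by default: pv_qspinlock_init()
only wires up the paravirt slowpath when the guest kernel is booted with the
arm_pvspin early parameter and the guest has more than one possible vCPU. An
illustrative guest kernel command line (the parameters other than arm_pvspin are
placeholders, not part of this patch):
  console=ttyAMA0 root=/dev/vda rw arm_pvspin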
---
arch/arm64/include/asm/paravirt.h | 5 ++++
arch/arm64/kernel/paravirt.c | 60 +++++++++++++++++++++++++++++++++++++++
2 files changed, 65 insertions(+)
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index 256e3f9..10ec061 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -42,6 +42,7 @@ static inline bool pv_vcpu_is_preempted(int cpu)
}
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+void __init pv_qspinlock_init(void);
bool pv_is_native_spin_unlock(void);
static inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
@@ -62,6 +63,10 @@ static inline void pv_kick(int cpu)
{
return pv_ops.sched.kick(cpu);
}
+#else
+
+#define pv_qspinlock_init() do {} while (0)
+
#endif /* SMP && PARAVIRT_SPINLOCKS */
#else
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 3edbcf8..f2376b5 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <asm/paravirt.h>
#include <asm/pvsched-abi.h>
+#include <asm/qspinlock_paravirt.h>
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
@@ -120,6 +121,63 @@ static bool has_kvm_pvsched(void)
return (res.a0 == SMCCC_RET_SUCCESS);
}
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+static bool arm_pvspin = false;
+
+/* Kick a cpu by its cpuid. Used to wake up a halted vcpu */
+static void kvm_kick_cpu(int cpu)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_SCHED_KICK_CPU, cpu, &res);
+}
+
+static void kvm_wait(u8 *ptr, u8 val)
+{
+ unsigned long flags;
+
+ if (in_nmi())
+ return;
+
+ local_irq_save(flags);
+
+ if (READ_ONCE(*ptr) != val)
+ goto out;
+
+ dsb(sy);
+ wfi();
+
+out:
+ local_irq_restore(flags);
+}
+
+void __init pv_qspinlock_init(void)
+{
+ /* Don't use the PV qspinlock code if there is only 1 vCPU. */
+ if (num_possible_cpus() == 1)
+ arm_pvspin = false;
+
+ if (!arm_pvspin) {
+ pr_info("PV qspinlocks disabled\n");
+ return;
+ }
+ pr_info("PV qspinlocks enabled\n");
+
+ __pv_init_lock_hash();
+ pv_ops.sched.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+ pv_ops.sched.queued_spin_unlock = __pv_queued_spin_unlock;
+ pv_ops.sched.wait = kvm_wait;
+ pv_ops.sched.kick = kvm_kick_cpu;
+}
+
+static __init int arm_parse_pvspin(char *arg)
+{
+ arm_pvspin = true;
+ return 0;
+}
+early_param("arm_pvspin", arm_parse_pvspin);
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
int __init pv_sched_init(void)
{
int ret;
@@ -139,5 +197,7 @@ int __init pv_sched_init(void)
pv_ops.sched.vcpu_is_preempted = kvm_vcpu_is_preempted;
pr_info("using PV sched preempted\n");
+ pv_qspinlock_init();
+
return 0;
}
--
1.8.3.1