0021-KVM-arm64-Add-interface-to-support-PV-qspinlock.patch
From e0e185e36ea8a9641dd4e5e410b49c423b0b93a0 Mon Sep 17 00:00:00 2001
From: AltArch Kernel <[email protected]>
Date: Wed, 23 Sep 2020 16:56:12 +0800
Subject: [PATCH 21/25] KVM: arm64: Add interface to support PV qspinlock
euleros inclusion
category: feature
bugzilla: NA
DTS: #231
CVE: NA
--------------------------------
The kernel already uses this interface, so let's support it.
---
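Note (after the separator, so not part of the commit message): the hooks added below are consumed by the generic PV qspinlock slowpath in kernel/locking/qspinlock_paravirt.h. The waiter/waker pair that follows is only an illustrative sketch of those semantics, assuming the SPIN_THRESHOLD, pv_wait() and pv_kick() definitions introduced by this patch; it is not code from this series.

/*
 * Illustrative sketch: a waiter spins for a bounded time, then asks the
 * hypervisor to block its vCPU; the waker flips the byte and kicks it.
 */
static void example_waiter(u8 *granted)
{
	int loop;

	for (;;) {
		/* Spin for a while before involving the hypervisor. */
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (READ_ONCE(*granted))
				return;
			cpu_relax();
		}
		/* Block this vCPU if *granted is still 0; a kick (or a
		 * spurious wakeup) resumes the outer loop, which rechecks. */
		pv_wait(granted, 0);
	}
}

static void example_waker(u8 *granted, int waiter_cpu)
{
	/* Publish the hand-over, then wake the vCPU blocked in pv_wait(). */
	smp_store_release(granted, 1);
	pv_kick(waiter_cpu);
}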
arch/arm64/Kconfig | 14 ++++++++++++++
arch/arm64/include/asm/paravirt.h | 29 +++++++++++++++++++++++++++++
arch/arm64/include/asm/qspinlock.h | 14 +++++++++++---
arch/arm64/include/asm/qspinlock_paravirt.h | 12 ++++++++++++
arch/arm64/include/asm/spinlock.h | 3 +++
arch/arm64/kernel/Makefile | 1 +
arch/arm64/kernel/alternative.c | 1 +
arch/arm64/kernel/paravirt-spinlocks.c | 5 +++++
arch/arm64/kernel/paravirt.c | 4 ++++
9 files changed, 80 insertions(+), 3 deletions(-)
create mode 100644 arch/arm64/include/asm/qspinlock_paravirt.h
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e2b6212..5810e1c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -698,6 +698,7 @@ config NODES_SHIFT
config NUMA_AWARE_SPINLOCKS
bool "Numa-aware spinlocks"
depends on NUMA && QUEUED_SPINLOCKS
+ depends on PARAVIRT_SPINLOCKS
default n
help
Introduce NUMA (Non Uniform Memory Access) awareness into
@@ -782,6 +783,19 @@ config PARAVIRT
under a hypervisor, potentially improving performance significantly
over full virtualization.
+config PARAVIRT_SPINLOCKS
+ bool "Paravirtualization layer for spinlocks"
+ depends on PARAVIRT && SMP
+ help
+ Paravirtualized spinlocks allow a pvops backend to replace the
+ spinlock implementation with something virtualization-friendly
+ (for example, block the virtual CPU rather than spinning).
+
+ It has a minimal impact on native kernels and gives a nice performance
+ benefit on paravirtualized KVM kernels.
+
+ If you are unsure how to answer this question, answer Y.
+
config PARAVIRT_TIME_ACCOUNTING
bool "Paravirtual steal time accounting"
select PARAVIRT
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index 62e9ba7..256e3f9 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -12,6 +12,12 @@ struct pv_time_ops {
};
struct pv_sched_ops {
+ void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+ void (*queued_spin_unlock)(struct qspinlock *lock);
+
+ void (*wait)(u8 *ptr, u8 val);
+ void (*kick)(int cpu);
+
bool (*vcpu_is_preempted)(int cpu);
};
@@ -35,6 +41,29 @@ static inline bool pv_vcpu_is_preempted(int cpu)
return pv_ops.sched.vcpu_is_preempted(cpu);
}
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+bool pv_is_native_spin_unlock(void);
+static inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+ return pv_ops.sched.queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+ return pv_ops.sched.queued_spin_unlock(lock);
+}
+
+static inline void pv_wait(u8 *ptr, u8 val)
+{
+ return pv_ops.sched.wait(ptr, val);
+}
+
+static inline void pv_kick(int cpu)
+{
+ return pv_ops.sched.kick(cpu);
+}
+#endif /* SMP && PARAVIRT_SPINLOCKS */
+
#else
#define pv_sched_init() do {} while (0)
diff --git a/arch/arm64/include/asm/qspinlock.h b/arch/arm64/include/asm/qspinlock.h
index 1d35bc7..23aff07 100644
--- a/arch/arm64/include/asm/qspinlock.h
+++ b/arch/arm64/include/asm/qspinlock.h
@@ -2,11 +2,19 @@
#ifndef _ASM_ARM64_QSPINLOCK_H
#define _ASM_ARM64_QSPINLOCK_H
-#ifdef CONFIG_NUMA_AWARE_SPINLOCKS
#include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
+
+#define _Q_PENDING_LOOPS (1 << 9)
+#ifdef CONFIG_NUMA_AWARE_SPINLOCKS
extern void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#endif
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
#define queued_spin_unlock queued_spin_unlock
/**
@@ -22,12 +30,12 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
- __cna_queued_spin_lock_slowpath(lock, val);
+ pv_queued_spin_lock_slowpath(lock, val);
}
static inline void queued_spin_unlock(struct qspinlock *lock)
{
- native_queued_spin_unlock(lock);
+ pv_queued_spin_unlock(lock);
}
#endif
diff --git a/arch/arm64/include/asm/qspinlock_paravirt.h b/arch/arm64/include/asm/qspinlock_paravirt.h
new file mode 100644
index 0000000..eba4be2
--- /dev/null
+++ b/arch/arm64/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2019 Huawei Technologies Co., Ltd
+ * Author: Zengruan Ye <[email protected]>
+ */
+
+#ifndef __ASM_QSPINLOCK_PARAVIRT_H
+#define __ASM_QSPINLOCK_PARAVIRT_H
+
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+#endif
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 6e8db57..4fb3eda 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -27,6 +27,9 @@
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD (1 << 15)
+
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index e1a562a..661b070 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -47,6 +47,7 @@ arm64-obj-$(CONFIG_ACPI) += acpi.o
arm64-obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o
arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt-spinlocks.o
+arm64-obj-$(CONFIG_PARAVIRT_SPINLOCKS) += paravirt.o paravirt-spinlocks.o
arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 5c4bce4..d2a019b 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -26,6 +26,7 @@
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/sections.h>
+#include <asm/paravirt.h>
#include <linux/stop_machine.h>
#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f)
diff --git a/arch/arm64/kernel/paravirt-spinlocks.c b/arch/arm64/kernel/paravirt-spinlocks.c
index fd733eb..3cb43f9 100644
--- a/arch/arm64/kernel/paravirt-spinlocks.c
+++ b/arch/arm64/kernel/paravirt-spinlocks.c
@@ -11,3 +11,8 @@ __visible bool __native_vcpu_is_preempted(int cpu)
{
return false;
}
+
+bool pv_is_native_spin_unlock(void)
+{
+ return false;
+}
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 2370908..3edbcf8 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -30,6 +30,10 @@
struct static_key paravirt_steal_rq_enabled;
struct paravirt_patch_template pv_ops = {
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+ .sched.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+ .sched.queued_spin_unlock = native_queued_spin_unlock,
+#endif
.sched.vcpu_is_preempted = __native_vcpu_is_preempted,
};
EXPORT_SYMBOL_GPL(pv_ops);
--
1.8.3.1
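Note (appended for review, not part of the patch): this patch only exposes the pv_ops.sched hooks; later patches in the series are expected to supply the KVM guest backend that fills them in. The sketch below shows what such a backend could look like. The hypercall ID (PV_SCHED_KICK_CPU_ID_PLACEHOLDER) and the example_* names are placeholders introduced here for illustration, not definitions from this series.

/* Hypothetical guest-side wiring of the interface added above. */
#include <linux/arm-smccc.h>
#include <linux/irqflags.h>
#include <asm/paravirt.h>
#include <asm/qspinlock.h>
#include <asm/qspinlock_paravirt.h>

static void example_kvm_kick_cpu(int cpu)
{
	struct arm_smccc_res res;

	/* Ask the host to wake the target vCPU; the function ID is a placeholder. */
	arm_smccc_1_1_invoke(PV_SCHED_KICK_CPU_ID_PLACEHOLDER, cpu, &res);
}

static void example_kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Only sleep if the byte still holds the value we were told to wait on. */
	if (READ_ONCE(*ptr) == val)
		wfi();	/* a pending interrupt (the kick) wakes WFI even when masked */
	local_irq_restore(flags);
}

static void example_pv_qspinlock_init(void)
{
	__pv_init_lock_hash();
	pv_ops.sched.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.sched.queued_spin_unlock = __pv_queued_spin_unlock;
	pv_ops.sched.wait = example_kvm_wait;
	pv_ops.sched.kick = example_kvm_kick_cpu;
}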