author     Greg Kroah-Hartman <gregkh@google.com>    2018-10-20 16:32:57 +0200
committer  Greg Kroah-Hartman <gregkh@google.com>    2018-10-20 16:32:57 +0200
commit     3eb8e735195577476e7de568bd11c2832b47d1ad (patch)
tree       4fafa380fd8c09d18e02e2d99a762d7beeab47d8 /arch
parent     34e65b671bc7bd047847dd7e67392dc55585d5cf (diff)
parent     24c2342b8e51ab3185e68470709904150a1e3ee0 (diff)
Merge 4.4.162 into android-4.4
Changes in 4.4.162
    ASoC: wm8804: Add ACPI support
    ASoC: sigmadsp: safeload should not have lower byte limit
    selftests/efivarfs: add required kernel configs
    mfd: omap-usb-host: Fix dts probe of children
    sound: enable interrupt after dma buffer initialization
    stmmac: fix valid numbers of unicast filter entries
    net: macb: disable scatter-gather for macb on sama5d3
    ARM: dts: at91: add new compatibility string for macb on sama5d3
    drm/amdgpu: Fix SDMA HQD destroy error on gfx_v7
    ext4: add corruption check in ext4_xattr_set_entry()
    mm/vmstat.c: fix outdated vmstat_text
    mach64: detect the dot clock divider correctly on sparc
    perf script python: Fix export-to-postgresql.py occasional failure
    i2c: i2c-scmi: fix for i2c_smbus_write_block_data
    xhci: Don't print a warning when setting link state for disabled ports
    jffs2: return -ERANGE when xattr buffer is too small
    bnxt_en: Fix TX timeout during netpoll.
    bonding: avoid possible dead-lock
    ip6_tunnel: be careful when accessing the inner header
    ip_tunnel: be careful when accessing the inner header
    ipv4: fix use-after-free in ip_cmsg_recv_dstaddr()
    net: ipv4: update fnhe_pmtu when first hop's MTU changes
    net/ipv6: Display all addresses in output of /proc/net/if_inet6
    netlabel: check for IPV4MASK in addrinfo_get
    net/usb: cancel pending work when unbinding smsc75xx
    qlcnic: fix Tx descriptor corruption on 82xx devices
    team: Forbid enslaving team device to itself
    net: mvpp2: Extract the correct ethtype from the skb for tx csum offload
    net: systemport: Fix wake-up interrupt race during resume
    rtnl: limit IFLA_NUM_TX_QUEUES and IFLA_NUM_RX_QUEUES to 4096
    KVM: x86: remove eager_fpu field of struct kvm_vcpu_arch
    x86/fpu: Remove use_eager_fpu()
    x86/fpu: Remove struct fpu::counter
    x86/fpu: Finish excising 'eagerfpu'
    media: af9035: prevent buffer overflow on write
    clocksource/drivers/ti-32k: Add CLOCK_SOURCE_SUSPEND_NONSTOP flag for non-am43 SoCs
    Input: atakbd - fix Atari keymap
    Input: atakbd - fix Atari CapsLock behaviour
    net/mlx4: Use cpumask_available for eq->affinity_mask
    powerpc/tm: Fix userspace r13 corruption
    powerpc/tm: Avoid possible userspace r1 corruption on reclaim
    ARC: build: Get rid of toolchain check
    usb: gadget: serial: fix oops when data rx'd after close
    Drivers: hv: utils: Invoke the poll function after handshake
    Drivers: hv: util: Pass the channel information during the init call
    Drivers: hv: kvp: fix IP Failover
    HV: properly delay KVP packets when negotiation is in progress
    Linux 4.4.162

Change-Id: Ib44f3b764a6005a2891b28315b3dbfa3f6cedcb5
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/arc/Makefile                     14
-rw-r--r--  arch/arm/boot/dts/sama5d3_emac.dtsi    2
-rw-r--r--  arch/powerpc/kernel/tm.S              20
-rw-r--r--  arch/x86/crypto/crc32c-intel_glue.c   17
-rw-r--r--  arch/x86/include/asm/cpufeatures.h     1
-rw-r--r--  arch/x86/include/asm/fpu/internal.h   37
-rw-r--r--  arch/x86/include/asm/fpu/types.h      34
-rw-r--r--  arch/x86/include/asm/kvm_host.h        1
-rw-r--r--  arch/x86/kernel/fpu/core.c            41
-rw-r--r--  arch/x86/kernel/fpu/signal.c           8
-rw-r--r--  arch/x86/kvm/cpuid.c                   5
-rw-r--r--  arch/x86/kvm/x86.c                    10
12 files changed, 32 insertions, 158 deletions
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index b9f7306412e5..9d64eacdd2aa 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -18,20 +18,6 @@ cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
- $(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
- $(error Toolchain not configured for ARCv2 builds)
-endif
-endif
-
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register defintion, make sure it gets passed to every file
# We had a customer reported bug where some code built in kernel was NOT using
diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
index 7cb235ef0fb6..6e9e1c2f9def 100644
--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
@@ -41,7 +41,7 @@
};
macb1: ethernet@f802c000 {
- compatible = "cdns,at91sam9260-macb", "cdns,macb";
+ compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index b7019b559ddb..2d2860711e07 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -199,13 +199,27 @@ dont_backup_fp:
std r1, PACATMSCRATCH(r13)
ld r1, PACAR1(r13)
- /* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+ /*
+ * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+ * clobbered by an exception once we turn on MSR_RI below.
+ */
+ ld r11, PACATMSCRATCH(r13)
+ std r11, GPR1(r1)
+
+ /*
+ * Store r13 away so we can free up the scratch SPR for the SLB fault
+ * handler (needed once we start accessing the thread_struct).
+ */
+ GET_SCRATCH0(r11)
+ std r11, GPR13(r1)
+
/* Reset MSR RI so we can take SLB faults again */
li r11, MSR_RI
mtmsrd r11, 1
+ /* Store the PPR in r11 and reset to decent value */
mfspr r11, SPRN_PPR
HMT_MEDIUM
@@ -230,11 +244,11 @@ dont_backup_fp:
SAVE_GPR(8, r7) /* user r8 */
SAVE_GPR(9, r7) /* user r9 */
SAVE_GPR(10, r7) /* user r10 */
- ld r3, PACATMSCRATCH(r13) /* user r1 */
+ ld r3, GPR1(r1) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
ld r5, GPR11(r1) /* user r11 */
ld r6, GPR12(r1) /* user r12 */
- GET_SCRATCH0(8) /* user r13 */
+ ld r8, GPR13(r1) /* user r13 */
std r3, GPR1(r7)
std r4, GPR7(r7)
std r5, GPR11(r7)
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 715399b14ed7..c194d5717ae5 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -48,21 +48,13 @@
#ifdef CONFIG_X86_64
/*
* use carryless multiply version of crc32c when buffer
- * size is >= 512 (when eager fpu is enabled) or
- * >= 1024 (when eager fpu is disabled) to account
+ * size is >= 512 to account
* for fpu state save/restore overhead.
*/
-#define CRC32C_PCL_BREAKEVEN_EAGERFPU 512
-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU 1024
+#define CRC32C_PCL_BREAKEVEN 512
asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
unsigned int crc_init);
-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
-#define set_pcl_breakeven_point() \
-do { \
- if (!use_eager_fpu()) \
- crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
-} while (0)
#endif /* CONFIG_X86_64 */
static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
@@ -185,7 +177,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
* use faster PCL version if datasize is large enough to
* overcome kernel fpu state save/restore overhead
*/
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*crcp = crc_pcl(data, len, *crcp);
kernel_fpu_end();
@@ -197,7 +189,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
kernel_fpu_end();
@@ -256,7 +248,6 @@ static int __init crc32c_intel_mod_init(void)
alg.update = crc32c_pcl_intel_update;
alg.finup = crc32c_pcl_intel_finup;
alg.digest = crc32c_pcl_intel_digest;
- set_pcl_breakeven_point();
}
#endif
return crypto_register_shash(&alg);
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index dd2269dcbc47..a5fa3195a230 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -104,7 +104,6 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index ec2aedb6f92a..16825dda18dc 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -57,11 +57,6 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
/*
* FPU related CPU feature flag helper routines:
*/
-static __always_inline __pure bool use_eager_fpu(void)
-{
- return true;
-}
-
static __always_inline __pure bool use_xsaveopt(void)
{
return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -498,24 +493,6 @@ static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
}
-/*
- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
- */
-
-static inline void __fpregs_activate_hw(void)
-{
- if (!use_eager_fpu())
- clts();
-}
-
-static inline void __fpregs_deactivate_hw(void)
-{
- if (!use_eager_fpu())
- stts();
-}
-
-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
WARN_ON_FPU(!fpu->fpregs_active);
@@ -524,7 +501,6 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
WARN_ON_FPU(fpu->fpregs_active);
@@ -549,22 +525,17 @@ static inline int fpregs_active(void)
}
/*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
* These generally need preemption protection to work,
* do try to avoid using these on their own.
*/
static inline void fpregs_activate(struct fpu *fpu)
{
- __fpregs_activate_hw();
__fpregs_activate(fpu);
}
static inline void fpregs_deactivate(struct fpu *fpu)
{
__fpregs_deactivate(fpu);
- __fpregs_deactivate_hw();
}
/*
@@ -591,8 +562,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
* or if the past 5 consecutive context-switches used math.
*/
fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->fpstate_active &&
- (use_eager_fpu() || new_fpu->counter > 5);
+ new_fpu->fpstate_active;
if (old_fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(old_fpu))
@@ -605,17 +575,12 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
/* Don't change CR0.TS if we just switch! */
if (fpu.preload) {
- new_fpu->counter++;
__fpregs_activate(new_fpu);
prefetch(&new_fpu->state);
- } else {
- __fpregs_deactivate_hw();
}
} else {
- old_fpu->counter = 0;
old_fpu->last_cpu = -1;
if (fpu.preload) {
- new_fpu->counter++;
if (fpu_want_lazy_restore(new_fpu, cpu))
fpu.preload = 0;
else
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 1c6f6ac52ad0..0d81c7d6fe96 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -303,17 +303,6 @@ struct fpu {
unsigned char fpregs_active;
/*
- * @counter:
- *
- * This counter contains the number of consecutive context switches
- * during which the FPU stays used. If this is over a threshold, the
- * lazy FPU restore logic becomes eager, to save the trap overhead.
- * This is an unsigned char so that after 256 iterations the counter
- * wraps and the context switch behavior turns lazy again; this is to
- * deal with bursty apps that only use the FPU for a short time:
- */
- unsigned char counter;
- /*
* @state:
*
* In-memory copy of all FPU registers that we save/restore
@@ -321,29 +310,6 @@ struct fpu {
* the registers in the FPU are more recent than this state
* copy. If the task context-switches away then they get
* saved here and represent the FPU state.
- *
- * After context switches there may be a (short) time period
- * during which the in-FPU hardware registers are unchanged
- * and still perfectly match this state, if the tasks
- * scheduled afterwards are not using the FPU.
- *
- * This is the 'lazy restore' window of optimization, which
- * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
- *
- * We detect whether a subsequent task uses the FPU via setting
- * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
- *
- * During this window, if the task gets scheduled again, we
- * might be able to skip having to do a restore from this
- * memory buffer to the hardware registers - at the cost of
- * incurring the overhead of #NM fault traps.
- *
- * Note that on modern CPUs that support the XSAVEOPT (or other
- * optimized XSAVE instructions), we don't use #NM traps anymore,
- * as the hardware can track whether FPU registers need saving
- * or not. On such CPUs we activate the non-lazy ('eagerfpu')
- * logic, which unconditionally saves/restores all FPU state
- * across context switches. (if FPU state exists.)
*/
union fpregs_state state;
/*
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 74fda1a453bd..3a37cdbdfbaa 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -439,7 +439,6 @@ struct kvm_vcpu_arch {
struct kvm_mmu_memory_cache mmu_page_header_cache;
struct fpu guest_fpu;
- bool eager_fpu;
u64 xcr0;
u64 guest_supported_xcr0;
u32 guest_xstate_size;
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 6aa0b519c851..b322325424bc 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -53,27 +53,9 @@ static bool kernel_fpu_disabled(void)
return this_cpu_read(in_kernel_fpu);
}
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
static bool interrupted_kernel_fpu_idle(void)
{
- if (kernel_fpu_disabled())
- return false;
-
- if (use_eager_fpu())
- return true;
-
- return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+ return !kernel_fpu_disabled();
}
/*
@@ -121,7 +103,6 @@ void __kernel_fpu_begin(void)
copy_fpregs_to_fpstate(fpu);
} else {
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
- __fpregs_activate_hw();
}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -132,8 +113,6 @@ void __kernel_fpu_end(void)
if (fpu->fpregs_active)
copy_kernel_to_fpregs(&fpu->state);
- else
- __fpregs_deactivate_hw();
kernel_fpu_enable();
}
@@ -194,10 +173,7 @@ void fpu__save(struct fpu *fpu)
preempt_disable();
if (fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(fpu)) {
- if (use_eager_fpu())
- copy_kernel_to_fpregs(&fpu->state);
- else
- fpregs_deactivate(fpu);
+ copy_kernel_to_fpregs(&fpu->state);
}
}
preempt_enable();
@@ -245,8 +221,7 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
* Don't let 'init optimized' areas of the XSAVE area
* leak into the child task:
*/
- if (use_eager_fpu())
- memset(&dst_fpu->state.xsave, 0, xstate_size);
+ memset(&dst_fpu->state.xsave, 0, xstate_size);
/*
* Save current FPU registers directly into the child
@@ -268,17 +243,13 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
if (!copy_fpregs_to_fpstate(dst_fpu)) {
memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
- if (use_eager_fpu())
- copy_kernel_to_fpregs(&src_fpu->state);
- else
- fpregs_deactivate(src_fpu);
+ copy_kernel_to_fpregs(&src_fpu->state);
}
preempt_enable();
}
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- dst_fpu->counter = 0;
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
@@ -381,7 +352,6 @@ void fpu__restore(struct fpu *fpu)
kernel_fpu_disable();
fpregs_activate(fpu);
copy_kernel_to_fpregs(&fpu->state);
- fpu->counter++;
kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
@@ -398,7 +368,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
void fpu__drop(struct fpu *fpu)
{
preempt_disable();
- fpu->counter = 0;
if (fpu->fpregs_active) {
/* Ignore delayed exceptions from user space */
@@ -437,7 +406,7 @@ void fpu__clear(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
- if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
+ if (!static_cpu_has(X86_FEATURE_FPU)) {
/* FPU state will be reallocated lazily at the first use. */
fpu__drop(fpu);
} else {
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 3de077116218..9be3e79eb629 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -319,11 +319,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
}
fpu->fpstate_active = 1;
- if (use_eager_fpu()) {
- preempt_disable();
- fpu__restore(fpu);
- preempt_enable();
- }
+ preempt_disable();
+ fpu__restore(fpu);
+ preempt_enable();
return err;
} else {
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 83d6369c45f5..338d13d4fd2f 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
-#include <asm/fpu/internal.h> /* For use_eager_fpu. Ugh! */
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
@@ -104,9 +103,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- vcpu->arch.eager_fpu = use_eager_fpu();
- if (vcpu->arch.eager_fpu)
- kvm_x86_ops->fpu_activate(vcpu);
+ kvm_x86_ops->fpu_activate(vcpu);
/*
* The existing code assumes virtual address is 48-bit in the canonical
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 53d43d22a84b..e6ab034f0bc7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7319,16 +7319,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
__kernel_fpu_end();
++vcpu->stat.fpu_reload;
- /*
- * If using eager FPU mode, or if the guest is a frequent user
- * of the FPU, just leave the FPU active for next time.
- * Every 255 times fpu_counter rolls over to 0; a guest that uses
- * the FPU in bursts will revert to loading it on demand.
- */
- if (!vcpu->arch.eager_fpu) {
- if (++vcpu->fpu_counter < 5)
- kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
- }
trace_kvm_fpu(0);
}