Merge 4.14.97 into android-4.14-p
Changes in 4.14.97
	amd-xgbe: Fix mdio access for non-zero ports and clause 45 PHYs
	net: bridge: Fix ethernet header pointer before check skb forwardable
	net: Fix usage of pskb_trim_rcsum
	net: phy: mdio_bus: add missing device_del() in mdiobus_register() error handling
	net_sched: refetch skb protocol for each filter
	openvswitch: Avoid OOB read when parsing flow nlattrs
	vhost: log dirty page correctly
	net: ipv4: Fix memory leak in network namespace dismantle
	tcp: allow MSG_ZEROCOPY transmission also in CLOSE_WAIT state
	ipfrag: really prevent allocation on netns exit
	mmc: Kconfig: Enable CONFIG_MMC_SDHCI_IO_ACCESSORS
	mei: me: add denverton innovation engine device IDs
	USB: serial: simple: add Motorola Tetra TPG2200 device id
	USB: serial: pl2303: add new PID to support PL2303TB
	ASoC: atom: fix a missing check of snd_pcm_lib_malloc_pages
	ASoC: rt5514-spi: Fix potential NULL pointer dereference
	ALSA: hda - Add mute LED support for HP ProBook 470 G5
	ARCv2: lib: memeset: fix doing prefetchw outside of buffer
	ARC: adjust memblock_reserve of kernel memory
	ARC: perf: map generic branches to correct hardware condition
	s390/early: improve machine detection
	s390/smp: fix CPU hotplug deadlock with CPU rescan
	char/mwave: fix potential Spectre v1 vulnerability
	staging: rtl8188eu: Add device code for D-Link DWA-121 rev B1
	tty: Handle problem if line discipline does not have receive_buf
	uart: Fix crash in uart_write and uart_put_char
	tty/n_hdlc: fix __might_sleep warning
	hv_balloon: avoid touching uninitialized struct page during tail onlining
	Drivers: hv: vmbus: Check for ring when getting debug info
	CIFS: Fix possible hang during async MTU reads and writes
	CIFS: Fix credits calculations for reads with errors
	CIFS: Fix credit calculation for encrypted reads with errors
	CIFS: Do not reconnect TCP session in add_credits()
	Input: xpad - add support for SteelSeries Stratus Duo
	compiler.h: enable builtin overflow checkers and add fallback code
	Input: uinput - fix undefined behavior in uinput_validate_absinfo()
	acpi/nfit: Block function zero DSMs
	acpi/nfit: Fix command-supported detection
	dm thin: fix passdown_double_checking_shared_status()
	dm crypt: fix parsing of extended IV arguments
	KVM: x86: Fix single-step debugging
	x86/pkeys: Properly copy pkey state at fork()
	x86/selftests/pkeys: Fork() to check for state being preserved
	x86/kaslr: Fix incorrect i8254 outb() parameters
	posix-cpu-timers: Unbreak timer rearming
	irqchip/gic-v3-its: Align PCI Multi-MSI allocation on their size
	can: dev: __can_get_echo_skb(): fix bogous check for non-existing skb by removing it
	can: bcm: check timer values before ktime conversion
	vt: invoke notifier on screen size change
	perf unwind: Unwind with libdw doesn't take symfs into account
	perf unwind: Take pgoff into account when reporting elf to libdwfl
	Revert "seccomp: add a selftest for get_metadata"
	net: stmmac: Use correct values in TQS/RQS fields
	KVM: x86: Fix a 4.14 backport regression related to userspace/guest FPU
	s390/smp: Fix calling smp_call_ipl_cpu() from ipl CPU
	nvmet-rdma: Add unlikely for response allocated check
	nvmet-rdma: fix null dereference under heavy load
	usb: dwc3: gadget: Clear req->needs_extra_trb flag on cleanup
	xhci: Fix leaking USB3 shared_hcd at xhci removal
	ptp_kvm: probe for kvm guest availability
	x86/pvclock: add setter for pvclock_pvti_cpu0_va
	x86/xen/time: set pvclock flags on xen_time_init()
	x86/xen/time: setup vcpu 0 time info page
	x86/xen/time: Output xen sched_clock time from 0
	xen: Fix x86 sched_clock() interface for xen
	f2fs: read page index before freeing
	btrfs: fix error handling in btrfs_dev_replace_start
	btrfs: dev-replace: go back to suspended state if target device is missing
	Linux 4.14.97

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
gregkh committed Feb 1, 2019
2 parents 3e57d23 + e1e364b commit 0053642
Showing 92 changed files with 1,072 additions and 327 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 96
+SUBLEVEL = 97
 EXTRAVERSION =
 NAME = Petit Gorille

3 changes: 2 additions & 1 deletion arch/arc/include/asm/perf_event.h
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {

 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+	/* All jump instructions that are taken */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
 	[PERF_COUNT_ARC_BPOK] = "bpok",	/* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
40 changes: 32 additions & 8 deletions arch/arc/lib/memset-archs.S
@@ -7,11 +7,39 @@
  */

 #include <linux/linkage.h>
+#include <asm/cache.h>

-#undef PREALLOC_NOT_AVAIL
+/*
+ * The memset implementation below is optimized to use prefetchw and prealloc
+ * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
+ * If you want to implement optimized memset for other possible L1 data cache
+ * line lengths (32B and 128B) you should rewrite code carefully checking
+ * we don't call any prefetchw/prealloc instruction for L1 cache lines which
+ * don't belongs to memset area.
+ */
+
+#if L1_CACHE_SHIFT == 6
+
+.macro PREALLOC_INSTR	reg, off
+	prealloc	[\reg, \off]
+.endm
+
+.macro PREFETCHW_INSTR	reg, off
+	prefetchw	[\reg, \off]
+.endm
+
+#else
+
+.macro PREALLOC_INSTR
+.endm
+
+.macro PREFETCHW_INSTR
+.endm
+
+#endif

 ENTRY_CFI(memset)
-	prefetchw [r0]		; Prefetch the write location
+	PREFETCHW_INSTR	r0, 0	; Prefetch the first write location
 	mov.f	0, r2
 ;;; if size is zero
 	jz.d	[blink]
@@ -48,11 +76,8 @@ ENTRY_CFI(memset)

 	lpnz	@.Lset64bytes
 	;; LOOP START
-#ifdef PREALLOC_NOT_AVAIL
-	prefetchw [r3, 64]	;Prefetch the next write location
-#else
-	prealloc  [r3, 64]
-#endif
+	PREALLOC_INSTR	r3, 64	; alloc next line w/o fetching
+
 #ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
 	lsr.f	lp_count, r2, 5 ;Last remaining max 124 bytes
 	lpnz	.Lset32bytes
 	;; LOOP START
-	prefetchw [r3, 32]	;Prefetch the next write location
 #ifdef CONFIG_ARC_HAS_LL64
 	std.ab	r4, [r3, 8]
 	std.ab	r4, [r3, 8]
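For context on the hunks above: the old loop issued a prefetchw/prealloc for the next 64-byte line on every pass, including the last one, so it could touch a cache line beyond the end of the destination buffer; prealloc is especially harmful because it allocates a line without fetching its previous contents. A small user-space C model (purely illustrative, not kernel code; assumes a 64-byte line) of how the per-iteration "prefetch the next line" pattern lands one line past the region:

#include <stdio.h>

#define LINE 64UL	/* modelled L1 data cache line size */

/* Model of the old loop: every 64-byte pass also "prefetches" p + 64. */
static void report_overshoot(unsigned long dst, unsigned long n)
{
	unsigned long last_line = (dst + n - 1) / LINE;
	unsigned long p;

	for (p = dst; p + LINE <= dst + n; p += LINE) {
		unsigned long prefetched_line = (p + LINE) / LINE;

		if (prefetched_line > last_line)
			printf("pass at 0x%lx prefetches line %lu, past the buffer end\n",
			       p, prefetched_line);
	}
}

int main(void)
{
	report_overshoot(0x1000, 256);	/* 4 full lines: the final pass reaches into a 5th */
	return 0;
}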
3 changes: 2 additions & 1 deletion arch/arc/mm/init.c
@@ -138,7 +138,8 @@ void __init setup_arch_memory(void)
 	 */

 	memblock_add_node(low_mem_start, low_mem_sz, 0);
-	memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
+	memblock_reserve(CONFIG_LINUX_LINK_BASE,
+			 __pa(_end) - CONFIG_LINUX_LINK_BASE);

 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start)
4 changes: 2 additions & 2 deletions arch/s390/kernel/early.c
@@ -226,10 +226,10 @@ static noinline __init void detect_machine_type(void)
 	if (stsi(vmms, 3, 2, 2) || !vmms->count)
 		return;

-	/* Running under KVM? If not we assume z/VM */
+	/* Detect known hypervisors */
 	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
-	else
+	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
 }

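The byte strings compared above are EBCDIC, since the stsi control-program-identification (CPI) field is EBCDIC-encoded: "\xd2\xe5\xd4" spells "KVM" and "\xa9\x61\xe5\xd4" spells "z/VM". A throwaway sketch (illustrative only, with a deliberately partial translation table covering just these bytes) that decodes the two prefixes:

#include <stdio.h>

/* Partial EBCDIC->ASCII map covering only the bytes used in the hunk above. */
static char ebcdic_char(unsigned char c)
{
	switch (c) {
	case 0xd2: return 'K';
	case 0xe5: return 'V';
	case 0xd4: return 'M';
	case 0xa9: return 'z';
	case 0x61: return '/';
	default:   return '?';
	}
}

int main(void)
{
	const unsigned char kvm[] = "\xd2\xe5\xd4";
	const unsigned char zvm[] = "\xa9\x61\xe5\xd4";
	size_t i;

	for (i = 0; i < sizeof(kvm) - 1; i++)
		putchar(ebcdic_char(kvm[i]));	/* prints "KVM" */
	putchar('\n');
	for (i = 0; i < sizeof(zvm) - 1; i++)
		putchar(ebcdic_char(zvm[i]));	/* prints "z/VM" */
	putchar('\n');
	return 0;
}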
2 changes: 2 additions & 0 deletions arch/s390/kernel/setup.c
@@ -884,6 +884,8 @@ void __init setup_arch(char **cmdline_p)
 		pr_info("Linux is running under KVM in 64-bit mode\n");
 	else if (MACHINE_IS_LPAR)
 		pr_info("Linux is running natively in 64-bit mode\n");
+	else
+		pr_info("Linux is running as a guest in 64-bit mode\n");

 	/* Have one command line that is parsed and saved in /proc/cmdline */
 	/* boot_command_line has been already set up in early.c */
12 changes: 10 additions & 2 deletions arch/s390/kernel/smp.c
@@ -387,9 +387,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
  */
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
+	struct lowcore *lc = pcpu_devices->lowcore;
+
+	if (pcpu_devices[0].address == stap())
+		lc = &S390_lowcore;
+
 	pcpu_delegate(&pcpu_devices[0], func, data,
-		      pcpu_devices->lowcore->panic_stack -
-		      PANIC_FRAME_OFFSET + PAGE_SIZE);
+		      lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
 }

 int smp_find_processor_id(u16 address)
@@ -1168,7 +1172,11 @@ static ssize_t __ref rescan_store(struct device *dev,
 {
 	int rc;

+	rc = lock_device_hotplug_sysfs();
+	if (rc)
+		return rc;
 	rc = smp_rescan_cpus();
+	unlock_device_hotplug();
 	return rc ? rc : count;
 }
 static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
2 changes: 1 addition & 1 deletion arch/x86/entry/vdso/vma.c
@@ -112,7 +112,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
 			__pa_symbol(&__vvar_page) >> PAGE_SHIFT);
 	} else if (sym_offset == image->sym_pvclock_page) {
 		struct pvclock_vsyscall_time_info *pvti =
-			pvclock_pvti_cpu0_va();
+			pvclock_get_pvti_cpu0_va();
 		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
 			ret = vm_insert_pfn(
 				vma,
18 changes: 18 additions & 0 deletions arch/x86/include/asm/mmu_context.h
@@ -182,6 +182,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)

 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

+/*
+ * Init a new mm. Used on mm copies, like at fork()
+ * and on mm's that are brand-new, like at execve().
+ */
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
@@ -232,8 +236,22 @@ do { \
 } while (0)
 #endif

+static inline void arch_dup_pkeys(struct mm_struct *oldmm,
+				  struct mm_struct *mm)
+{
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+		return;
+
+	/* Duplicate the oldmm pkey state in mm: */
+	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
+	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
+#endif
+}
+
 static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
+	arch_dup_pkeys(oldmm, mm);
 	paravirt_arch_dup_mmap(oldmm, mm);
 	return ldt_dup_context(oldmm, mm);
 }
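Some background on why arch_dup_pkeys() matters: protection-key bookkeeping lives in the mm, and init_new_context() (run for the fresh mm created at fork, per the comment added above) resets the per-mm pkey state to its defaults, so without the copy a child could be handed a "free" pkey that still tags mappings inherited from its parent. Below is a minimal user-space sketch of the observable symptom, a hypothetical illustration in the spirit of the selftest mentioned in the changelog, not the actual test; it needs an MPK-capable CPU, kernel, and headers:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

static long pkey_alloc_raw(void)
{
	/* flags = 0, initial access rights = 0 (allow all) */
	return syscall(SYS_pkey_alloc, 0UL, 0UL);
}

int main(void)
{
	long parent_key = pkey_alloc_raw();
	void *page;

	if (parent_key < 0) {
		perror("pkey_alloc (no protection-key support?)");
		return 0;
	}

	/* Tag an inheritable mapping with the parent's key. */
	page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	syscall(SYS_pkey_mprotect, page, 4096UL, PROT_READ | PROT_WRITE, parent_key);

	if (fork() == 0) {
		/*
		 * With arch_dup_pkeys() the child inherits the allocation map,
		 * so a fresh pkey_alloc() must not return parent_key, which
		 * still tags the inherited mapping. Before the fix it could.
		 */
		long child_key = pkey_alloc_raw();

		printf("parent key %ld, child's new key %ld%s\n",
		       parent_key, child_key,
		       child_key == parent_key ? " (pkey state lost at fork!)" : "");
		_exit(0);
	}
	wait(NULL);
	return 0;
}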
19 changes: 10 additions & 9 deletions arch/x86/include/asm/pvclock.h
@@ -5,15 +5,6 @@
 #include <linux/clocksource.h>
 #include <asm/pvclock-abi.h>

-#ifdef CONFIG_KVM_GUEST
-extern struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void);
-#else
-static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
-{
-	return NULL;
-}
-#endif
-
 /* some helper functions for xen and kvm pv clock sources */
 u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
@@ -102,4 +93,14 @@ struct pvclock_vsyscall_time_info {

 #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)

+#ifdef CONFIG_PARAVIRT_CLOCK
+void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti);
+struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void);
+#else
+static inline struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
+{
+	return NULL;
+}
+#endif
+
 #endif /* _ASM_X86_PVCLOCK_H */
7 changes: 1 addition & 6 deletions arch/x86/kernel/kvmclock.c
@@ -47,12 +47,6 @@ early_param("no-kvmclock", parse_no_kvmclock);
 static struct pvclock_vsyscall_time_info *hv_clock;
 static struct pvclock_wall_clock wall_clock;

-struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
-{
-	return hv_clock;
-}
-EXPORT_SYMBOL_GPL(pvclock_pvti_cpu0_va);
-
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
  * have elapsed since the hypervisor wrote the data. So we try to account for
@@ -335,6 +329,7 @@ int __init kvm_setup_vsyscall_timeinfo(void)
 		return 1;
 	}

+	pvclock_set_pvti_cpu0_va(hv_clock);
 	put_cpu();

 	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
14 changes: 14 additions & 0 deletions arch/x86/kernel/pvclock.c
@@ -25,8 +25,10 @@

 #include <asm/fixmap.h>
 #include <asm/pvclock.h>
+#include <asm/vgtod.h>

 static u8 valid_flags __read_mostly = 0;
+static struct pvclock_vsyscall_time_info *pvti_cpu0_va __read_mostly;

 void pvclock_set_flags(u8 flags)
 {
@@ -144,3 +146,15 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,

 	set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
 }
+
+void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti)
+{
+	WARN_ON(vclock_was_used(VCLOCK_PVCLOCK));
+	pvti_cpu0_va = pvti;
+}
+
+struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
+{
+	return pvti_cpu0_va;
+}
+EXPORT_SYMBOL_GPL(pvclock_get_pvti_cpu0_va);
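Seen together, the pvclock hunks in this merge replace the KVM-only pvclock_pvti_cpu0_va() hook with a neutral setter/getter pair in pvclock.c, so that kvmclock here (and, per the changelog, the Xen time patches later in this series) can publish a vcpu-0 time page which the vDSO then maps. The following stand-alone C model shows only that registration pattern; the struct and values are made up for illustration, the real layout lives in pvclock-abi.h:

#include <stdio.h>

struct pvclock_vsyscall_time_info { unsigned long long tsc_timestamp; };

static struct pvclock_vsyscall_time_info *pvti_cpu0_va;	/* shared pointer */

static void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti)
{
	pvti_cpu0_va = pvti;			/* one-time registration by the clock driver */
}

static struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
{
	return pvti_cpu0_va;			/* NULL if no paravirt clock registered */
}

/* Stand-in for the "kvmclock" (or Xen) time page being published: */
static struct pvclock_vsyscall_time_info hv_clock = { .tsc_timestamp = 12345 };

int main(void)
{
	pvclock_set_pvti_cpu0_va(&hv_clock);

	/* Stand-in for the vDSO side: map/read whatever was registered, if anything. */
	struct pvclock_vsyscall_time_info *pvti = pvclock_get_pvti_cpu0_va();

	if (pvti)
		printf("vDSO would map the time page (tsc_timestamp=%llu)\n",
		       pvti->tsc_timestamp);
	return 0;
}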
9 changes: 2 additions & 7 deletions arch/x86/kvm/x86.c
@@ -5923,8 +5923,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE &&
-		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+		if (r == EMULATE_DONE && ctxt->tf)
 			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -7423,14 +7422,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		}
 	}

-	kvm_load_guest_fpu(vcpu);
-
 	if (unlikely(vcpu->arch.complete_userspace_io)) {
 		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
 		vcpu->arch.complete_userspace_io = NULL;
 		r = cui(vcpu);
 		if (r <= 0)
-			goto out_fpu;
+			goto out;
 	} else
 		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);

@@ -7439,8 +7436,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	else
 		r = vcpu_run(vcpu);

-out_fpu:
-	kvm_put_guest_fpu(vcpu);
 out:
 	kvm_put_guest_fpu(vcpu);
 	post_kvm_run_save(vcpu);
4 changes: 2 additions & 2 deletions arch/x86/lib/kaslr.c
@@ -36,8 +36,8 @@ static inline u16 i8254(void)
 	u16 status, timer;

 	do {
-		outb(I8254_PORT_CONTROL,
-		     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+		outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+		     I8254_PORT_CONTROL);
 		status = inb(I8254_PORT_COUNTER0);
 		timer  = inb(I8254_PORT_COUNTER0);
 		timer |= inb(I8254_PORT_COUNTER0) << 8;
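The bug fixed above is a plain argument-order mix-up: unlike the outb(port, value) ordering some other code bases use, the Linux port-I/O helpers take the value first and the port second, so the old code wrote the port number out as data and the PIT readback command never reached the control port. A user-space illustration of the corrected latch-and-read sequence follows (x86 Linux only, needs root for ioperm(); the constants are the standard 8254 port/command values spelled out locally for the example, not taken from a kernel header):

#include <stdio.h>
#include <sys/io.h>

#define I8254_PORT_CONTROL	0x43	/* 8254 PIT control port */
#define I8254_PORT_COUNTER0	0x40	/* counter 0 data port */
#define I8254_CMD_READBACK	0xC0	/* read-back command */
#define I8254_SELECT_COUNTER0	0x02	/* select counter 0 in the read-back */

int main(void)
{
	unsigned char status;
	unsigned short timer;

	if (ioperm(I8254_PORT_COUNTER0, 4, 1)) {
		perror("ioperm (run as root on x86)");
		return 1;
	}

	/* Value first, port second -- exactly the swap the patch above performs. */
	outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, I8254_PORT_CONTROL);
	status = inb(I8254_PORT_COUNTER0);
	timer  = inb(I8254_PORT_COUNTER0);
	timer |= inb(I8254_PORT_COUNTER0) << 8;

	printf("PIT counter0 status=0x%02x latched=0x%04x\n", status, timer);
	return 0;
}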
4 changes: 4 additions & 0 deletions arch/x86/xen/suspend.c
@@ -22,6 +22,8 @@ static DEFINE_PER_CPU(u64, spec_ctrl);

 void xen_arch_pre_suspend(void)
 {
+	xen_save_time_memory_area();
+
 	if (xen_pv_domain())
 		xen_pv_pre_suspend();
 }
@@ -32,6 +34,8 @@ void xen_arch_post_suspend(int cancelled)
 		xen_pv_post_suspend(cancelled);
 	else
 		xen_hvm_post_suspend(cancelled);
+
+	xen_restore_time_memory_area();
 }

 static void xen_vcpu_notify_restore(void *data)