Commit e85d907
Merge tag 'v4.14.76' into 4.14-main
This is the 4.14.76 stable release
frank-w committed Oct 15, 2018
2 parents 708a643 + 0b46ce3 commit e85d907
Showing 56 changed files with 660 additions and 158 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 75
+SUBLEVEL = 76
 EXTRAVERSION =
 NAME = Petit Gorille

20 changes: 20 additions & 0 deletions arch/arc/kernel/process.c
@@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags,
 			task_thread_info(current)->thr_ptr;
 	}
 
+
+	/*
+	 * setup usermode thread pointer #1:
+	 * when child is picked by scheduler, __switch_to() uses @c_callee to
+	 * populate usermode callee regs: this works (despite being in a kernel
+	 * function) since special return path for child @ret_from_fork()
+	 * ensures those regs are not clobbered all the way to RTIE to usermode
+	 */
+	c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	/*
+	 * setup usermode thread pointer #2:
+	 * however for this special use of r25 in kernel, __switch_to() sets
+	 * r25 for kernel needs and only in the final return path is usermode
+	 * r25 setup, from pt_regs->user_r25. So set that up as well
+	 */
+	c_regs->user_r25 = c_callee->r25;
+#endif
+
 	return 0;
 }

1 change: 1 addition & 0 deletions arch/powerpc/include/asm/setup.h
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
 
 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);

45 changes: 28 additions & 17 deletions arch/powerpc/lib/code-patching.c
@@ -22,20 +22,28 @@
 #include <asm/page.h>
 #include <asm/code-patching.h>
 #include <asm/setup.h>
+#include <asm/sections.h>
 
-static int __patch_instruction(unsigned int *addr, unsigned int instr)
+static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
+			       unsigned int *patch_addr)
 {
 	int err;
 
-	__put_user_size(instr, addr, 4, err);
+	__put_user_size(instr, patch_addr, 4, err);
 	if (err)
 		return err;
 
-	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" :: "r" (addr));
+	asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
+							    "r" (exec_addr));
 
 	return 0;
 }
 
+static int raw_patch_instruction(unsigned int *addr, unsigned int instr)
+{
+	return __patch_instruction(addr, instr, addr);
+}
+
 #ifdef CONFIG_STRICT_KERNEL_RWX
 static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
@@ -135,10 +143,10 @@ static inline int unmap_patch_area(unsigned long addr)
 	return 0;
 }
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
 	int err;
-	unsigned int *dest = NULL;
+	unsigned int *patch_addr = NULL;
 	unsigned long flags;
 	unsigned long text_poke_addr;
 	unsigned long kaddr = (unsigned long)addr;
@@ -149,7 +157,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
 	 * to allow patching. We just do the plain old patching
 	 */
 	if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
-		return __patch_instruction(addr, instr);
+		return raw_patch_instruction(addr, instr);
 
 	local_irq_save(flags);
@@ -159,17 +167,10 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
 		goto out;
 	}
 
-	dest = (unsigned int *)(text_poke_addr) +
+	patch_addr = (unsigned int *)(text_poke_addr) +
 			((kaddr & ~PAGE_MASK) / sizeof(unsigned int));
 
-	/*
-	 * We use __put_user_size so that we can handle faults while
-	 * writing to dest and return err to handle faults gracefully
-	 */
-	__put_user_size(instr, dest, 4, err);
-	if (!err)
-		asm ("dcbst 0, %0; sync; icbi 0,%0; icbi 0,%1; sync; isync"
-			::"r" (dest), "r"(addr));
+	__patch_instruction(addr, instr, patch_addr);
 
 	err = unmap_patch_area(text_poke_addr);
 	if (err)
@@ -182,12 +183,22 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
 }
 #else /* !CONFIG_STRICT_KERNEL_RWX */
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
-	return __patch_instruction(addr, instr);
+	return raw_patch_instruction(addr, instr);
 }
 
 #endif /* CONFIG_STRICT_KERNEL_RWX */
 
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+	/* Make sure we aren't patching a freed init section */
+	if (init_mem_is_free && init_section_contains(addr, 4)) {
+		pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+		return 0;
+	}
+	return do_patch_instruction(addr, instr);
+}
 NOKPROBE_SYMBOL(patch_instruction);
 
 int patch_branch(unsigned int *addr, unsigned long target, int flags)
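
For illustration only (not part of this commit): the new patch_instruction() above is a thin wrapper that refuses to patch addresses inside the already-freed init section and otherwise delegates to the real implementation. A minimal user-space sketch of that pattern, where init_begin/init_end and init_section_contains_sketch() are stand-ins for the kernel's linker symbols and init_section_contains():

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool init_mem_is_free;          /* set once init memory is freed */
static uintptr_t init_begin, init_end; /* stand-ins for linker-provided bounds */

/* sketch of a range check like the kernel's init_section_contains() */
static bool init_section_contains_sketch(const void *addr, size_t size)
{
	uintptr_t a = (uintptr_t)addr;

	return a >= init_begin && a + size <= init_end;
}

static int do_patch(uint32_t *addr, uint32_t instr)
{
	*addr = instr;  /* the kernel additionally flushes d/i-caches here */
	return 0;
}

int patch_instruction_sketch(uint32_t *addr, uint32_t instr)
{
	/* never touch an init section once it has been freed */
	if (init_mem_is_free && init_section_contains_sketch(addr, 4))
		return 0;
	return do_patch(addr, instr);
}

int main(void)
{
	uint32_t insn = 0;

	/* init memory still present: the patch is applied */
	return patch_instruction_sketch(&insn, 0x60000000 /* ppc "nop" */);
}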
2 changes: 2 additions & 0 deletions arch/powerpc/mm/mem.c
@@ -63,6 +63,7 @@
 #endif
 
 unsigned long long memory_limit;
+bool init_mem_is_free;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -405,6 +406,7 @@ void free_initmem(void)
 {
 	ppc_md.progress = ppc_printk_progress;
 	mark_initmem_nx();
+	init_mem_is_free = true;
 	free_initmem_default(POISON_FREE_INITMEM);
 }
16 changes: 14 additions & 2 deletions arch/x86/entry/vdso/Makefile
@@ -74,7 +74,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  CFL += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
 
 $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)

@@ -153,7 +159,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
 
 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(obj)/vdso32.so.dbg: FORCE \
26 changes: 14 additions & 12 deletions arch/x86/entry/vdso/vclock_gettime.c
@@ -43,17 +43,19 @@ extern u8 hvclock_page
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*ts) :
+	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+	     "memory", "rcx", "r11");
 	return ret;
 }
 
 notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
 
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+	     "memory", "rcx", "r11");
 	return ret;
 }
@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
 
-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[clock], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+		: "=a" (ret), "=m" (*ts)
+		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
 		: "memory", "edx");
 	return ret;
 }
@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
 
-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[tv], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+		: "=a" (ret), "=m" (*tv), "=m" (*tz)
+		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
 		: "memory", "edx");
 	return ret;
 }
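
For illustration only (not part of this commit): the fix above encodes two facts about the x86-64 syscall ABI. The syscall instruction clobbers rcx and r11 (the CPU saves the return RIP and RFLAGS in them), and the kernel writes its result through the pointer argument, so *ts must be listed as an output ("=m") rather than hidden behind the pointer input. A standalone user-space sketch, assuming an x86-64 Linux host and syscall number 228 for clock_gettime:

#include <stdio.h>
#include <time.h>

#define NR_clock_gettime 228  /* x86-64 syscall number */

static long my_clock_gettime(long clock, struct timespec *ts)
{
	long ret;

	/* outputs: rax (return value) and *ts (written by the kernel);
	 * clobbers: memory, plus rcx/r11 trashed by the syscall insn */
	asm volatile ("syscall"
		      : "=a" (ret), "=m" (*ts)
		      : "0" (NR_clock_gettime), "D" (clock), "S" (ts)
		      : "memory", "rcx", "r11");
	return ret;
}

int main(void)
{
	struct timespec ts;

	if (my_clock_gettime(0 /* CLOCK_REALTIME */, &ts) == 0)
		printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}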
24 changes: 20 additions & 4 deletions arch/x86/kvm/mmu.c
@@ -231,6 +231,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  */
 static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
 
+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts. This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static void mmu_free_roots(struct kvm_vcpu *vcpu);
@@ -338,9 +349,7 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
-		   shadow_nonpresent_or_rsvd_mask;
-	u64 gpa = spte & ~mask;
+	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
 	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
 	       & shadow_nonpresent_or_rsvd_mask;
@@ -404,6 +413,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
 static void kvm_mmu_reset_all_pte_masks(void)
 {
+	u8 low_phys_bits;
+
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
 	shadow_dirty_mask = 0;
@@ -418,12 +429,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
 	 * assumed that the CPU is not vulnerable to L1TF.
 	 */
+	low_phys_bits = boot_cpu_data.x86_phys_bits;
 	if (boot_cpu_data.x86_phys_bits <
-	    52 - shadow_nonpresent_or_rsvd_mask_len)
+	    52 - shadow_nonpresent_or_rsvd_mask_len) {
 		shadow_nonpresent_or_rsvd_mask =
 			rsvd_bits(boot_cpu_data.x86_phys_bits -
 				  shadow_nonpresent_or_rsvd_mask_len,
 				  boot_cpu_data.x86_phys_bits - 1);
+		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+	}
+	shadow_nonpresent_or_rsvd_lower_gfn_mask =
+		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
 }
 
 static int is_cpuid_PSE36(void)
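
For illustration only (not part of this commit): a standalone sketch of the GFN split described in the comment above, assuming an L1TF-affected CPU with x86_phys_bits = 36 and 4 KiB pages (PAGE_SHIFT = 12). With the five-bit mask length, the reserved mask covers physical-address bits 31-35 and the new lower-GFN mask covers bits 12-30; the encode step loosely mirrors mark_mmio_spte(), the decode step mirrors get_mmio_spte_gfn(), and the round trip preserves the GFN:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
#define RSVD_BITS(s, e)   (((1ULL << ((e) - (s) + 1)) - 1) << (s))

static const int mask_len = 5;    /* shadow_nonpresent_or_rsvd_mask_len */
static const int phys_bits = 36;  /* example CPU vulnerable to L1TF */

int main(void)
{
	uint64_t rsvd_mask = RSVD_BITS(phys_bits - mask_len, phys_bits - 1);
	uint64_t lower_gfn_mask = GENMASK_ULL(phys_bits - mask_len - 1, PAGE_SHIFT);

	uint64_t gfn = 0xabcdef;         /* sample guest frame number */
	uint64_t gpa = gfn << PAGE_SHIFT;

	/* encode: force the reserved bits high, stash the overlapping GFN
	 * bits mask_len positions further up in the SPTE */
	uint64_t spte = gpa | rsvd_mask | ((gpa & rsvd_mask) << mask_len);

	/* decode, as in get_mmio_spte_gfn() */
	uint64_t out = spte & lower_gfn_mask;
	out |= (spte >> mask_len) & rsvd_mask;

	assert((out >> PAGE_SHIFT) == gfn);
	printf("gfn 0x%llx round-trips\n", (unsigned long long)gfn);
	return 0;
}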
4 changes: 2 additions & 2 deletions block/blk-mq.c
@@ -1512,7 +1512,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		BUG_ON(!rq->q);
 		if (rq->mq_ctx != this_ctx) {
 			if (this_ctx) {
-				trace_block_unplug(this_q, depth, from_schedule);
+				trace_block_unplug(this_q, depth, !from_schedule);
 				blk_mq_sched_insert_requests(this_q, this_ctx,
 							     &ctx_list,
 							     from_schedule);
@@ -1532,7 +1532,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	 * on 'ctx_list'. Do those.
 	 */
 	if (this_ctx) {
-		trace_block_unplug(this_q, depth, from_schedule);
+		trace_block_unplug(this_q, depth, !from_schedule);
 		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
 					     from_schedule);
 	}
5 changes: 4 additions & 1 deletion drivers/base/power/main.c
@@ -1462,8 +1462,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
 	dpm_wait_for_subordinate(dev, async);
 
-	if (async_error)
+	if (async_error) {
+		dev->power.direct_complete = false;
 		goto Complete;
+	}
 
 	/*
 	 * If a device configured to wake up the system from sleep states
@@ -1475,6 +1477,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		pm_wakeup_event(dev, 0);
 
 	if (pm_wakeup_pending()) {
+		dev->power.direct_complete = false;
 		async_error = -EBUSY;
 		goto Complete;
 	}
