
Merge tag 'v5.4.244'
This is the 5.4.244 stable release

Change-Id: I226881da5320a453c930b2338fae7c681c41c86c
Signed-off-by: Vaisakh Murali <[email protected]>
mvaisakh committed May 30, 2023
2 parents 0c763fa + 51d0ac4 commit 7c4afb9
Showing 198 changed files with 1,658 additions and 833 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 243
SUBLEVEL = 244
EXTRAVERSION =
NAME = Kleptomaniac Octopus

5 changes: 4 additions & 1 deletion arch/arm/mach-sa1100/jornada720_ssp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/**
/*
* arch/arm/mac-sa1100/jornada720_ssp.c
*
* Copyright (C) 2006/2007 Kristoffer Ericson <[email protected]>
@@ -26,6 +26,7 @@ static unsigned long jornada_ssp_flags;

/**
* jornada_ssp_reverse - reverses input byte
* @byte: input byte to reverse
*
* we need to reverse all data we receive from the mcu due to its physical location
* returns : 01110111 -> 11101110
@@ -46,6 +47,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse);

/**
* jornada_ssp_byte - waits for ready ssp bus and sends byte
* @byte: input byte to transmit
*
* waits for fifo buffer to clear and then transmits, if it doesn't then we will
* timeout after <timeout> rounds. Needs mcu running before its called.
@@ -77,6 +79,7 @@ EXPORT_SYMBOL(jornada_ssp_byte);

/**
* jornada_ssp_inout - decide if input is command or trading byte
* @byte: input byte to send (may be %TXDUMMY)
*
* returns : (jornada_ssp_byte(byte)) on success
* : %-ETIMEDOUT on timeout failure
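
Note: the jornada720_ssp.c hunks are kernel-doc cleanups: the file header drops its stray /** opener (plain comments use /*), and each exported helper gains a @byte: line so the parameter is documented. A minimal sketch of the comment shape kernel-doc expects, using a hypothetical helper that is not part of this commit:

/**
 * example_reverse_byte - reverse the bit order of a byte
 * @byte: input byte to reverse
 *
 * One-line summary, one "@name:" line per parameter, then a blank
 * separator before the longer description; this is the layout the
 * patch restores for jornada_ssp_reverse() and friends.
 */
static unsigned char example_reverse_byte(unsigned char byte)
{
        unsigned char out = 0;
        int i;

        for (i = 0; i < 8; i++)
                if (byte & (1 << i))
                        out |= 1 << (7 - i);
        return out;
}
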
14 changes: 10 additions & 4 deletions arch/m68k/kernel/signal.c
@@ -883,11 +883,17 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
}

static inline void __user *
get_sigframe(struct ksignal *ksig, size_t frame_size)
get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size)
{
unsigned long usp = sigsp(rdusp(), ksig);
unsigned long gap = 0;

return (void __user *)((usp - frame_size) & -8UL);
if (CPU_IS_020_OR_030 && tregs->format == 0xb) {
/* USP is unreliable so use worst-case value */
gap = 256;
}

return (void __user *)((usp - gap - frame_size) & -8UL);
}

static int setup_frame(struct ksignal *ksig, sigset_t *set,
@@ -905,7 +911,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
}

frame = get_sigframe(ksig, sizeof(*frame) + fsize);
frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize);

if (fsize)
err |= copy_to_user (frame + 1, regs + 1, fsize);
@@ -976,7 +982,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
}

frame = get_sigframe(ksig, sizeof(*frame));
frame = get_sigframe(ksig, tregs, sizeof(*frame));

if (fsize)
err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
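
Note: the m68k change passes the trap registers into get_sigframe() so that, on 68020/030 CPUs whose exception frame format is 0xb, a worst-case 256-byte gap is left below the (unreliable) user stack pointer before the signal frame is placed. A small stand-alone illustration of the resulting address arithmetic, with invented numbers:

#include <stdint.h>
#include <stdio.h>

/* Toy version of the new placement: subtract the worst-case gap and the
 * frame size, then round down to an 8-byte boundary (the & -8UL in the
 * patch). The values used below are made up for illustration. */
static uintptr_t place_sigframe(uintptr_t usp, size_t frame_size, int format_is_0xb)
{
        uintptr_t gap = format_is_0xb ? 256 : 0;

        return (usp - gap - frame_size) & ~(uintptr_t)7;
}

int main(void)
{
        /* 0x8000 - 256 - 92 = 0x7ea4, rounded down to 0x7ea0 */
        printf("0x%lx\n", (unsigned long)place_sigframe(0x8000, 92, 1));
        return 0;
}
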
5 changes: 5 additions & 0 deletions arch/parisc/include/asm/cacheflush.h
@@ -57,6 +57,11 @@ extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
#define flush_dcache_mmap_lock_irqsave(mapping, flags) \
xa_lock_irqsave(&mapping->i_pages, flags)
#define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \
xa_unlock_irqrestore(&mapping->i_pages, flags)


#define flush_icache_page(vma,page) do { \
flush_kernel_dcache_page(page); \
5 changes: 3 additions & 2 deletions arch/parisc/kernel/cache.c
@@ -328,6 +328,7 @@ void flush_dcache_page(struct page *page)
struct vm_area_struct *mpnt;
unsigned long offset;
unsigned long addr, old_addr = 0;
unsigned long flags;
pgoff_t pgoff;

if (mapping && !mapping_mapped(mapping)) {
@@ -347,7 +348,7 @@ void flush_dcache_page(struct page *page)
* declared as MAP_PRIVATE or MAP_SHARED), so we only need
* to flush one address here for them all to become coherent */

flush_dcache_mmap_lock(mapping);
flush_dcache_mmap_lock_irqsave(mapping, flags);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
@@ -370,7 +371,7 @@ void flush_dcache_page(struct page *page)
old_addr = addr;
}
}
flush_dcache_mmap_unlock(mapping);
flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_page);
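
Note: the parisc pair of hunks adds _irqsave/_irqrestore variants of the dcache mmap lock (thin wrappers around xa_lock_irqsave()/xa_unlock_irqrestore() on mapping->i_pages) and switches flush_dcache_page() over to them, so the caller's interrupt state is saved and restored around the critical section. For readers unfamiliar with the convention, a generic sketch with a hypothetical lock and counter, not taken from this commit:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_events;

/* Safe whether the caller runs with interrupts enabled or disabled:
 * 'flags' captures the current interrupt state on lock and restores
 * exactly that state on unlock, instead of unconditionally re-enabling
 * interrupts. */
static void example_record_event(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        example_events++;
        spin_unlock_irqrestore(&example_lock, flags);
}
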

11 changes: 8 additions & 3 deletions arch/parisc/kernel/process.c
@@ -124,13 +124,18 @@ void machine_power_off(void)
/* It seems we have no way to power the system off via
* software. The user has to press the button himself. */

printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
printk("Power off or press RETURN to reboot.\n");

/* prevent soft lockup/stalled CPU messages for endless loop. */
rcu_sysrq_start();
lockup_detector_soft_poweroff();
for (;;);
while (1) {
/* reboot if user presses RETURN key */
if (pdc_iodc_getc() == 13) {
printk("Rebooting...\n");
machine_restart(NULL);
}
}
}

void (*pm_power_off)(void);
4 changes: 2 additions & 2 deletions arch/parisc/kernel/traps.c
@@ -305,8 +305,8 @@ static void handle_break(struct pt_regs *regs)
#endif

#ifdef CONFIG_KGDB
if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
iir == PARISC_KGDB_BREAK_INSN)) {
if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
kgdb_handle_exception(9, SIGTRAP, 0, regs);
return;
}
4 changes: 2 additions & 2 deletions arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1018,8 +1018,8 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
pte_t entry, unsigned long address, int psize)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
_PAGE_RW | _PAGE_EXEC);
unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
_PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

unsigned long change = pte_val(entry) ^ pte_val(*ptep);
/*
5 changes: 5 additions & 0 deletions arch/x86/include/asm/intel-family.h
@@ -96,6 +96,11 @@
#define INTEL_FAM6_LAKEFIELD 0x8A
#define INTEL_FAM6_ALDERLAKE 0x97
#define INTEL_FAM6_ALDERLAKE_L 0x9A
#define INTEL_FAM6_ALDERLAKE_N 0xBE

#define INTEL_FAM6_RAPTORLAKE 0xB7
#define INTEL_FAM6_RAPTORLAKE_P 0xBA
#define INTEL_FAM6_RAPTORLAKE_S 0xBF

/* "Small Core" Processors (Atom) */

3 changes: 3 additions & 0 deletions arch/x86/include/asm/kvm_host.h
@@ -563,6 +563,7 @@ struct kvm_vcpu_arch {
u64 ia32_misc_enable_msr;
u64 smbase;
u64 smi_count;
bool at_instruction_boundary;
bool tpr_access_reporting;
u64 ia32_xss;
u64 microcode_version;
@@ -981,6 +982,8 @@ struct kvm_vcpu_stat {
u64 irq_injections;
u64 nmi_injections;
u64 req_event;
u64 preemption_reported;
u64 preemption_other;
};

struct x86_instruction_info;
5 changes: 3 additions & 2 deletions arch/x86/kernel/cpu/topology.c
@@ -79,7 +79,7 @@ int detect_extended_topology_early(struct cpuinfo_x86 *c)
* initial apic id, which also represents 32-bit extended x2apic id.
*/
c->initial_apicid = edx;
smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
#endif
return 0;
}
@@ -107,7 +107,8 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
*/
cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
c->initial_apicid = edx;
core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
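
Note: both topology paths now take the maximum of the previously recorded smp_num_siblings and the value reported by the current CPUID leaf, so the largest sibling count seen so far is kept instead of letting the most recent enumeration win. A user-space toy with a simplified max_t stand-in (the real kernel macro also type-checks its operands):

#include <stdio.h>

#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        int smp_num_siblings = 2;       /* established by an earlier CPU */
        int level_siblings = 1;         /* what this CPU's leaf reports */

        smp_num_siblings = max_t(int, smp_num_siblings, level_siblings);
        printf("%d\n", smp_num_siblings);       /* still 2 */
        return 0;
}
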
7 changes: 5 additions & 2 deletions arch/x86/kernel/dumpstack.c
@@ -171,7 +171,6 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
printk("%sCall Trace:\n", log_lvl);

unwind_start(&state, task, regs, stack);
stack = stack ? : get_stack_pointer(task, regs);
regs = unwind_get_entry_regs(&state, &partial);

/*
@@ -190,9 +189,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* - hardirq stack
* - entry stack
*/
for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
for (stack = stack ?: get_stack_pointer(task, regs);
stack;
stack = stack_info.next_sp) {
const char *stack_name;

stack = PTR_ALIGN(stack, sizeof(long));

if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
/*
* We weren't on a valid stack. It's possible that
3 changes: 2 additions & 1 deletion arch/x86/kvm/svm.c
@@ -6246,7 +6246,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,

static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{

if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
vcpu->arch.at_instruction_boundary = true;
}

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
1 change: 1 addition & 0 deletions arch/x86/kvm/vmx/vmx.c
@@ -6358,6 +6358,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
);

kvm_after_interrupt(vcpu);
vcpu->arch.at_instruction_boundary = true;
}
STACK_FRAME_NON_STANDARD(handle_external_interrupt_irqoff);

22 changes: 22 additions & 0 deletions arch/x86/kvm/x86.c
@@ -207,6 +207,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "nmi_injections", VCPU_STAT(nmi_injections) },
{ "req_event", VCPU_STAT(req_event) },
{ "l1d_flush", VCPU_STAT(l1d_flush) },
{ "preemption_reported", VCPU_STAT(preemption_reported) },
{ "preemption_other", VCPU_STAT(preemption_other) },
{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
@@ -3562,6 +3564,19 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
struct kvm_host_map map;
struct kvm_steal_time *st;

/*
* The vCPU can be marked preempted if and only if the VM-Exit was on
* an instruction boundary and will not trigger guest emulation of any
* kind (see vcpu_run). Vendor specific code controls (conservatively)
* when this is true, for example allowing the vCPU to be marked
* preempted if and only if the VM-Exit was due to a host interrupt.
*/
if (!vcpu->arch.at_instruction_boundary) {
vcpu->stat.preemption_other++;
return;
}

vcpu->stat.preemption_reported++;
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;

@@ -8446,6 +8461,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.l1tf_flush_l1d = true;

for (;;) {
/*
* If another guest vCPU requests a PV TLB flush in the middle
* of instruction emulation, the rest of the emulation could
* use a stale page translation. Assume that any code after
* this point can start executing an instruction.
*/
vcpu->arch.at_instruction_boundary = false;
if (kvm_vcpu_running(vcpu)) {
r = vcpu_enter_guest(vcpu);
} else {
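
Note: taken together, the kvm_host.h, svm.c, vmx.c and x86.c hunks add a per-vCPU at_instruction_boundary flag. It is cleared at the top of every vcpu_run() iteration, set by the vendor exit-irqoff handlers only when the VM-Exit was caused by a host interrupt, and consulted by kvm_steal_time_set_preempted() before telling the guest it was preempted; the two new debugfs counters record each outcome. A compilable toy model of that flow (the struct and driver code below are invented for illustration, only the field and counter names mirror the patch):

#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
        bool at_instruction_boundary;
        unsigned long preemption_reported;
        unsigned long preemption_other;
};

/* Vendor exit handler: only an exit caused by a host interrupt proves
 * the guest was stopped on an instruction boundary. */
static void toy_handle_exit_irqoff(struct toy_vcpu *v, bool host_interrupt_exit)
{
        if (host_interrupt_exit)
                v->at_instruction_boundary = true;
}

/* Called when the host scheduler preempts the vCPU thread. */
static void toy_set_preempted(struct toy_vcpu *v)
{
        if (!v->at_instruction_boundary) {
                v->preemption_other++;          /* not reported to the guest */
                return;
        }
        v->preemption_reported++;               /* safe to mark PREEMPTED */
}

int main(void)
{
        struct toy_vcpu v = { 0 };

        v.at_instruction_boundary = false;      /* top of the vcpu_run() loop */
        toy_handle_exit_irqoff(&v, true);       /* VM-Exit due to a host IRQ */
        toy_set_preempted(&v);
        printf("reported=%lu other=%lu\n", v.preemption_reported, v.preemption_other);
        return 0;
}
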
25 changes: 25 additions & 0 deletions arch/x86/mm/init.c
@@ -9,6 +9,7 @@
#include <linux/sched/task.h>

#include <asm/set_memory.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
@@ -208,6 +209,24 @@ static void __init probe_page_size_mask(void)
}
}

#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \
.family = 6, \
.model = _model, \
}
/*
* INVLPG may not properly flush Global entries
* on these CPUs when PCIDs are enabled.
*/
static const struct x86_cpu_id invlpg_miss_ids[] = {
INTEL_MATCH(INTEL_FAM6_ALDERLAKE ),
INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
{}
};

static void setup_pcid(void)
{
if (!IS_ENABLED(CONFIG_X86_64))
@@ -216,6 +235,12 @@ static void setup_pcid(void)
if (!boot_cpu_has(X86_FEATURE_PCID))
return;

if (x86_match_cpu(invlpg_miss_ids)) {
pr_info("Incomplete global flushes, disabling PCID");
setup_clear_cpu_cap(X86_FEATURE_PCID);
return;
}

if (boot_cpu_has(X86_FEATURE_PGE)) {
/*
* This can't be cr4_set_bits_and_update_boot() -- the
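
Note: the new invlpg_miss_ids[] table lists the family-6 models added in the intel-family.h hunk above, and setup_pcid() now refuses to enable PCID on them because INVLPG may skip global TLB entries there. The matching itself is a plain linear scan; a user-space toy of the same idea, reusing the model numbers from the diff:

#include <stdio.h>

struct toy_cpu_id { int family; int model; };

static const struct toy_cpu_id invlpg_miss_ids[] = {
        { 6, 0x97 },    /* ALDERLAKE */
        { 6, 0x9A },    /* ALDERLAKE_L */
        { 6, 0xBE },    /* ALDERLAKE_N */
        { 6, 0xB7 },    /* RAPTORLAKE */
        { 6, 0xBA },    /* RAPTORLAKE_P */
        { 6, 0xBF },    /* RAPTORLAKE_S */
        { 0, 0 },       /* terminator */
};

static int toy_match_cpu(const struct toy_cpu_id *ids, int family, int model)
{
        for (; ids->family; ids++)
                if (ids->family == family && ids->model == model)
                        return 1;
        return 0;
}

int main(void)
{
        int family = 6, model = 0x97;   /* pretend we booted on Alder Lake */

        puts(toy_match_cpu(invlpg_miss_ids, family, model) ?
             "Incomplete global flushes, disabling PCID" : "PCID stays enabled");
        return 0;
}
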
3 changes: 3 additions & 0 deletions drivers/acpi/acpica/dbnames.c
@@ -571,6 +571,9 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg)
object_info =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_object_info));

if (!object_info)
return (AE_NO_MEMORY);

/* Walk the namespace from the root */

(void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
11 changes: 8 additions & 3 deletions drivers/acpi/acpica/dswstate.c
@@ -576,9 +576,14 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE(ds_init_aml_walk);

walk_state->parser_state.aml =
walk_state->parser_state.aml_start = aml_start;
walk_state->parser_state.aml_end =
walk_state->parser_state.pkg_end = aml_start + aml_length;
walk_state->parser_state.aml_start =
walk_state->parser_state.aml_end =
walk_state->parser_state.pkg_end = aml_start;
/* Avoid undefined behavior: applying zero offset to null pointer */
if (aml_length != 0) {
walk_state->parser_state.aml_end += aml_length;
walk_state->parser_state.pkg_end += aml_length;
}

/* The next_op of the next_walk will be the beginning of the method */
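
Note: the dswstate.c rewrite exists because aml_start can be NULL when aml_length is 0, and C leaves even a zero offset applied to a null pointer undefined (UBSAN reports it as "applying zero offset to null pointer"). The end pointers are therefore initialised to aml_start and only advanced when the length is non-zero. A stand-alone rendering of the guarded pattern, with a hypothetical helper:

#include <stddef.h>

/* Returns one past the last byte of the buffer without ever computing
 * NULL + 0: when len is 0 the start pointer (possibly NULL) is returned
 * unchanged instead of being offset. */
static const unsigned char *buffer_end(const unsigned char *start, size_t len)
{
        const unsigned char *end = start;

        if (len != 0)
                end += len;
        return end;
}

int main(void)
{
        return buffer_end(NULL, 0) == NULL ? 0 : 1;
}
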

1 change: 1 addition & 0 deletions drivers/acpi/ec.c
@@ -1118,6 +1118,7 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
acpi_ec_remove_query_handlers(ec, false, query_bit);
flush_workqueue(ec_query_wq);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
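
Note: acpi_ec_remove_query_handler() now flushes ec_query_wq after dropping the handler, so any query work already queued for that handler finishes before the caller continues (typically before the caller frees the data the handler touches). The general teardown ordering, sketched with a hypothetical device and workqueue that are not part of this diff:

#include <linux/workqueue.h>
#include <linux/slab.h>

/* Hypothetical device, invented for illustration only. */
struct example_dev {
        struct workqueue_struct *wq;
        /* ... handler bookkeeping ... */
};

static void example_remove_handler(struct example_dev *dev) { /* unhook */ }

static void example_teardown(struct example_dev *dev)
{
        example_remove_handler(dev);    /* no new work can find the handler */
        flush_workqueue(dev->wq);       /* drain work already queued */
        kfree(dev);                     /* only now is it safe to free */
}
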

7 changes: 7 additions & 0 deletions drivers/base/core.c
@@ -4172,6 +4172,13 @@ void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);

void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
{
dev->fwnode = fwnode;
dev->of_node = to_of_node(fwnode);
}
EXPORT_SYMBOL_GPL(device_set_node);

int device_match_name(struct device *dev, const void *name)
{
return sysfs_streq(dev_name(dev), name);
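
Note: device_set_node() is added and exported as a small helper that keeps dev->fwnode and dev->of_node consistent by deriving the latter with to_of_node(). A usage sketch for a hypothetical bus driver creating a child platform device; only device_set_node() itself comes from this diff:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static int example_add_child(struct fwnode_handle *child_fwnode)
{
        struct platform_device *pdev;
        int ret;

        pdev = platform_device_alloc("example-child", PLATFORM_DEVID_AUTO);
        if (!pdev)
                return -ENOMEM;

        /* One call sets both dev->fwnode and, when the fwnode wraps a
         * device-tree node, dev->of_node. */
        device_set_node(&pdev->dev, child_fwnode);

        ret = platform_device_add(pdev);
        if (ret)
                platform_device_put(pdev);
        return ret;
}
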
