This is the 6.13.11 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmf3vL0ACgkQONu9yGCS
 aT4vmhAAgAbX1Y0bizH2DDaAvzIYED4OAFnO+S6F6/wu2zBBY7ICqnFxgc+hq/MM
 2+QCkFJLnhrPzYhFb+ZCTBwlYLXq4612W1869e47TcvFcny1Vuso1iUWQx8CjO/7
 ahDX9sAl/vpVaHun+A+ELUPDEhhBt6Jn14wm8xn4ppp6kd/28IYEyO2f7n+ix0/X
 R/ePl2v/WgfKiQMKOisIUayQgjNfnoltrmSPRJAMKbjHGYQYU7TIMIm0ImuBEk7G
 3crDsSwPnWHuLVaDLORKkbU8Z8kXc1h7sPRoPPxqb31skiDdrUBZ+pn9cGcI3PHX
 BDecw6PkXwWF0NX/9XvL7wU/IkkaxjROhNT1/Clm1MCLiPzYPsfin/ZmgYyVmEJm
 R31fp0OA1B/6VhZNCtlSPzL8YLo/dTSAWaqzFWvEixk1Mboq7khGlBTNrdMMf9yy
 Y9sZyKzqXjJcXQj/pY86aYGmZmBJzi/0ZXYf+J8F4gIPGFhP13fkGbGR8YUM2pfD
 n3aAye0/qN0Sjd+t6nEzJSdpgqSlr/zUl7uf1NKW46G3TGiFQ0/vEmlM7cezdyvI
 0vvBiSRF4jkf6oz/WZrGnu3Fi7MmJQebHxaBU3JQXyzLkoL+2h6Oie1FF+yBnh3/
 fdaW9z0qdxG0joqmVCMBKnY/6IOG/u6Xgn0yZAl3ze+wq5WcS+M=
 =eTs4
 -----END PGP SIGNATURE-----

Merge v6.13.11

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit b56d937da3
Greg Kroah-Hartman 2025-04-10 14:42:07 +02:00
505 changed files with 5297 additions and 3186 deletions


@ -589,6 +589,8 @@ patternProperties:
description: GlobalTop Technology, Inc.
"^gmt,.*":
description: Global Mixed-mode Technology, Inc.
"^gocontroll,.*":
description: GOcontroll Modular Embedded Electronics B.V.
"^goldelico,.*":
description: Golden Delicious Computers GmbH & Co. KG
"^goodix,.*":


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 13
SUBLEVEL = 10
SUBLEVEL = 11
EXTRAVERSION =
NAME = Baby Opossum Posse


@ -118,7 +118,7 @@ config ARM
select HAVE_KERNEL_XZ
select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if HAVE_KPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_IS_LLD)
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_CAN_USE_KEEP_IN_OVERLAY)
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPTPROBES if !THUMB2_KERNEL


@ -34,6 +34,12 @@
#define NOCROSSREFS
#endif
#ifdef CONFIG_LD_CAN_USE_KEEP_IN_OVERLAY
#define OVERLAY_KEEP(x) KEEP(x)
#else
#define OVERLAY_KEEP(x) x
#endif
/* Set start/end symbol names to the LMA for the section */
#define ARM_LMA(sym, section) \
sym##_start = LOADADDR(section); \
@ -125,13 +131,13 @@
__vectors_lma = .; \
OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \
.vectors { \
*(.vectors) \
OVERLAY_KEEP(*(.vectors)) \
} \
.vectors.bhb.loop8 { \
*(.vectors.bhb.loop8) \
OVERLAY_KEEP(*(.vectors.bhb.loop8)) \
} \
.vectors.bhb.bpiall { \
*(.vectors.bhb.bpiall) \
OVERLAY_KEEP(*(.vectors.bhb.bpiall)) \
} \
} \
ARM_LMA(__vectors, .vectors); \


@ -21,4 +21,15 @@ static inline bool force_dma_unencrypted(struct device *dev)
return is_realm_world();
}
/*
* For Arm CCA guests, canonical addresses are "encrypted", so no changes
* required for dma_addr_encrypted().
* The unencrypted DMA buffers must be accessed via the unprotected IPA,
* "top IPA bit" set.
*/
#define dma_addr_unencrypted(x) ((x) | PROT_NS_SHARED)
/* Clear the "top" IPA bit while converting back */
#define dma_addr_canonical(x) ((x) & ~PROT_NS_SHARED)
#endif /* __ASM_MEM_ENCRYPT_H */


@ -368,6 +368,8 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
return 1;
}
if (!handler)
return 1;
type = handler(addr, instr, regs);
if (type == TYPE_ERROR || type == TYPE_FAULT)


@ -378,8 +378,8 @@ config CMDLINE_BOOTLOADER
config CMDLINE_EXTEND
bool "Use built-in to extend bootloader kernel arguments"
help
The command-line arguments provided during boot will be
appended to the built-in command line. This is useful in
The built-in command line will be appended to the command-
line arguments provided during boot. This is useful in
cases where the provided arguments are insufficient and
you don't want to or cannot modify them.


@ -8,6 +8,8 @@
#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define ARCH_DMA_MINALIGN (16)
#define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_CACHE_H */


@ -53,7 +53,7 @@ void spurious_interrupt(void);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
#define MAX_IO_PICS 2
#define MAX_IO_PICS 8
#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {


@ -8,6 +8,7 @@
#include <asm/asm.h>
#include <asm/ptrace.h>
#include <asm/loongarch.h>
#include <asm/unwind_hints.h>
#include <linux/stringify.h>
enum stack_type {
@ -43,6 +44,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i
static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
__asm__ __volatile__(
UNWIND_HINT_SAVE
/* Save $ra */
STORE_ONE_REG(1)
/* Use $ra to save PC */
@ -80,6 +82,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs)
STORE_ONE_REG(29)
STORE_ONE_REG(30)
STORE_ONE_REG(31)
UNWIND_HINT_RESTORE
: "=m" (regs->csr_era)
: "r" (regs->regs)
: "memory");


@ -23,6 +23,14 @@
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
.endm
#endif /* __ASSEMBLY__ */
#else /* !__ASSEMBLY__ */
#define UNWIND_HINT_SAVE \
UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0)
#define UNWIND_HINT_RESTORE \
UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */


@ -68,6 +68,8 @@ static int __init fdt_cpu_clk_init(void)
return -ENODEV;
clk = of_clk_get(np, 0);
of_node_put(np);
if (IS_ERR(clk))
return -ENODEV;


@ -8,6 +8,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/objtool.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->csr_era = pc;
}
void arch_kgdb_breakpoint(void)
noinline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ( \
".globl kgdb_breakinst\n\t" \
"nop\n" \
"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
}
STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,


@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx)
*/
if (seen_tail_call(ctx) && seen_call(ctx))
move_reg(ctx, TCC_SAVED, REG_TCC);
else
emit_insn(ctx, nop);
ctx->stack_size = stack_adjust;
}
@ -905,7 +907,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
move_addr(ctx, t1, func_addr);
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
if (insn->src_reg != BPF_PSEUDO_CALL)
move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
break;
/* tail call */
@ -930,7 +935,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
{
const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
move_imm(ctx, dst, imm64, is32);
if (bpf_pseudo_func(insn))
move_addr(ctx, dst, imm64);
else
move_imm(ctx, dst, imm64, is32);
return 1;
}


@ -27,6 +27,11 @@ struct jit_data {
struct jit_ctx ctx;
};
static inline void emit_nop(union loongarch_instruction *insn)
{
insn->word = INSN_NOP;
}
#define emit_insn(ctx, func, ...) \
do { \
if (ctx->image != NULL) { \


@ -78,4 +78,4 @@ CONFIG_DEBUG_VM_PGTABLE=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_BDI_SWITCH=y
CONFIG_PPC_EARLY_DEBUG=y
CONFIG_GENERIC_PTDUMP=y
CONFIG_PTDUMP_DEBUGFS=y


@ -56,3 +56,4 @@ $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y
OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y


@ -348,16 +348,13 @@ write_utlb:
rlwinm r10, r24, 0, 22, 27
cmpwi r10, PPC47x_TLB0_4K
bne 0f
li r10, 0x1000 /* r10 = 4k */
ANNOTATE_INTRA_FUNCTION_CALL
bl 1f
beq 0f
0:
/* Defaults to 256M */
lis r10, 0x1000
bcl 20,31,$+4
0: bcl 20,31,$+4
1: mflr r4
addi r4, r4, (2f-1b) /* virtual address of 2f */


@ -156,6 +156,7 @@ static void vpa_pmu_del(struct perf_event *event, int flags)
}
static struct pmu vpa_pmu = {
.module = THIS_MODULE,
.task_ctx_nr = perf_sw_context,
.name = "vpa_pmu",
.event_init = vpa_pmu_event_init,


@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void)
mutex_init(&gang->aff_mutex);
INIT_LIST_HEAD(&gang->list);
INIT_LIST_HEAD(&gang->aff_list_head);
gang->alive = 1;
out:
return gang;


@ -192,13 +192,32 @@ static int spufs_fill_dir(struct dentry *dir,
return -ENOMEM;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
if (ret)
if (ret) {
dput(dentry);
return ret;
}
files++;
}
return 0;
}
static void unuse_gang(struct dentry *dir)
{
struct inode *inode = dir->d_inode;
struct spu_gang *gang = SPUFS_I(inode)->i_gang;
if (gang) {
bool dead;
inode_lock(inode); // exclusion with spufs_create_context()
dead = !--gang->alive;
inode_unlock(inode);
if (dead)
simple_recursive_removal(dir, NULL);
}
}
static int spufs_dir_close(struct inode *inode, struct file *file)
{
struct inode *parent;
@ -213,6 +232,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
inode_unlock(parent);
WARN_ON(ret);
unuse_gang(dir->d_parent);
return dcache_dir_close(inode, file);
}
@ -405,7 +425,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
{
int ret;
int affinity;
struct spu_gang *gang;
struct spu_gang *gang = SPUFS_I(inode)->i_gang;
struct spu_context *neighbor;
struct path path = {.mnt = mnt, .dentry = dentry};
@ -420,11 +440,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
return -ENODEV;
gang = NULL;
if (gang) {
if (!gang->alive)
return -ENOENT;
gang->alive++;
}
neighbor = NULL;
affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
if (affinity) {
gang = SPUFS_I(inode)->i_gang;
if (!gang)
return -EINVAL;
mutex_lock(&gang->aff_mutex);
@ -436,8 +460,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
}
ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
if (ret)
if (ret) {
if (neighbor)
put_spu_context(neighbor);
goto out_aff_unlock;
}
if (affinity) {
spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
@ -453,6 +480,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
out_aff_unlock:
if (affinity)
mutex_unlock(&gang->aff_mutex);
if (ret && gang)
gang->alive--; // can't reach 0
return ret;
}
@ -482,6 +511,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode);
dget(dentry);
inc_nlink(dir);
inc_nlink(d_inode(dentry));
return ret;
@ -492,6 +522,21 @@ out:
return ret;
}
static int spufs_gang_close(struct inode *inode, struct file *file)
{
unuse_gang(file->f_path.dentry);
return dcache_dir_close(inode, file);
}
static const struct file_operations spufs_gang_fops = {
.open = dcache_dir_open,
.release = spufs_gang_close,
.llseek = dcache_dir_lseek,
.read = generic_read_dir,
.iterate_shared = dcache_readdir,
.fsync = noop_fsync,
};
static int spufs_gang_open(const struct path *path)
{
int ret;
@ -511,7 +556,7 @@ static int spufs_gang_open(const struct path *path)
return PTR_ERR(filp);
}
filp->f_op = &simple_dir_operations;
filp->f_op = &spufs_gang_fops;
fd_install(ret, filp);
return ret;
}
@ -526,10 +571,8 @@ static int spufs_create_gang(struct inode *inode,
ret = spufs_mkgang(inode, dentry, mode & 0777);
if (!ret) {
ret = spufs_gang_open(&path);
if (ret < 0) {
int err = simple_rmdir(inode, dentry);
WARN_ON(err);
}
if (ret < 0)
unuse_gang(dentry);
}
return ret;
}


@ -151,6 +151,8 @@ struct spu_gang {
int aff_flags;
struct spu *aff_ref_spu;
atomic_t aff_sched_count;
int alive;
};
/* Flag bits for spu_gang aff_flags */


@ -1,5 +1,9 @@
ifdef CONFIG_RELOCATABLE
KBUILD_CFLAGS += -fno-pie
# We can't use PIC/PIE when handling early-boot errata parsing, as the kernel
# doesn't have a GOT setup at that point. So instead just use medany: it's
# usually position-independent, so it should be good enough for the errata
# handling.
KBUILD_CFLAGS += -fno-pie -mcmodel=medany
endif
ifdef CONFIG_RISCV_ALTERNATIVE_EARLY


@ -61,7 +61,7 @@ void __init riscv_user_isa_enable(void);
#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \
_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
bool check_unaligned_access_emulated_all_cpus(void);
bool __init check_unaligned_access_emulated_all_cpus(void);
#if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
void check_unaligned_access_emulated(struct work_struct *work __always_unused);
void unaligned_emulation_finish(void);
@ -74,7 +74,7 @@ static inline bool unaligned_ctl_available(void)
}
#endif
bool check_vector_unaligned_access_emulated_all_cpus(void);
bool __init check_vector_unaligned_access_emulated_all_cpus(void);
#if defined(CONFIG_RISCV_VECTOR_MISALIGNED)
void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused);
DECLARE_PER_CPU(long, vector_misaligned_access);


@ -92,7 +92,7 @@ struct dyn_arch_ftrace {
#define make_call_t0(caller, callee, call) \
do { \
unsigned int offset = \
(unsigned long) callee - (unsigned long) caller; \
(unsigned long) (callee) - (unsigned long) (caller); \
call[0] = to_auipc_t0(offset); \
call[1] = to_jalr_t0(offset); \
} while (0)
@ -108,7 +108,7 @@ do { \
#define make_call_ra(caller, callee, call) \
do { \
unsigned int offset = \
(unsigned long) callee - (unsigned long) caller; \
(unsigned long) (callee) - (unsigned long) (caller); \
call[0] = to_auipc_ra(offset); \
call[1] = to_jalr_ra(offset); \
} while (0)


@ -468,6 +468,9 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
case R_RISCV_ALIGN:
case R_RISCV_RELAX:
break;
case R_RISCV_64:
*(u64 *)loc = val;
break;
default:
pr_err("Unknown rela relocation: %d\n", r_type);
return -ENOEXEC;


@ -605,16 +605,10 @@ void check_vector_unaligned_access_emulated(struct work_struct *work __always_un
kernel_vector_end();
}
bool check_vector_unaligned_access_emulated_all_cpus(void)
bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
int cpu;
if (!has_vector()) {
for_each_online_cpu(cpu)
per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
return false;
}
schedule_on_each_cpu(check_vector_unaligned_access_emulated);
for_each_online_cpu(cpu)
@ -625,7 +619,7 @@ bool check_vector_unaligned_access_emulated_all_cpus(void)
return true;
}
#else
bool check_vector_unaligned_access_emulated_all_cpus(void)
bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
return false;
}
@ -659,7 +653,7 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
}
}
bool check_unaligned_access_emulated_all_cpus(void)
bool __init check_unaligned_access_emulated_all_cpus(void)
{
int cpu;
@ -684,7 +678,7 @@ bool unaligned_ctl_available(void)
return unaligned_ctl;
}
#else
bool check_unaligned_access_emulated_all_cpus(void)
bool __init check_unaligned_access_emulated_all_cpus(void)
{
return false;
}


@ -121,7 +121,7 @@ static int check_unaligned_access(void *param)
return 0;
}
static void check_unaligned_access_nonboot_cpu(void *param)
static void __init check_unaligned_access_nonboot_cpu(void *param)
{
unsigned int cpu = smp_processor_id();
struct page **pages = param;
@ -175,7 +175,7 @@ static void set_unaligned_access_static_branches(void)
modify_unaligned_access_branches(&fast_and_online, num_online_cpus());
}
static int lock_and_set_unaligned_access_static_branch(void)
static int __init lock_and_set_unaligned_access_static_branch(void)
{
cpus_read_lock();
set_unaligned_access_static_branches();
@ -218,7 +218,7 @@ static int riscv_offline_cpu(unsigned int cpu)
}
/* Measure unaligned access speed on all CPUs present at boot in parallel. */
static int check_unaligned_access_speed_all_cpus(void)
static int __init check_unaligned_access_speed_all_cpus(void)
{
unsigned int cpu;
unsigned int cpu_count = num_possible_cpus();
@ -264,7 +264,7 @@ out:
return 0;
}
#else /* CONFIG_RISCV_PROBE_UNALIGNED_ACCESS */
static int check_unaligned_access_speed_all_cpus(void)
static int __init check_unaligned_access_speed_all_cpus(void)
{
return 0;
}
@ -349,7 +349,7 @@ static void check_vector_unaligned_access(struct work_struct *work __always_unus
pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned vector access speed\n",
cpu);
return;
goto free;
}
if (word_cycles < byte_cycles)
@ -363,14 +363,19 @@ static void check_vector_unaligned_access(struct work_struct *work __always_unus
(speed == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST) ? "fast" : "slow");
per_cpu(vector_misaligned_access, cpu) = speed;
free:
__free_pages(page, MISALIGNED_BUFFER_ORDER);
}
static int riscv_online_cpu_vec(unsigned int cpu)
{
if (!has_vector())
if (!has_vector()) {
per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
return 0;
}
if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED)
if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
return 0;
check_vector_unaligned_access_emulated(NULL);
@ -379,7 +384,7 @@ static int riscv_online_cpu_vec(unsigned int cpu)
}
/* Measure unaligned access speed on all CPUs present at boot in parallel. */
static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
{
schedule_on_each_cpu(check_vector_unaligned_access);
@ -393,21 +398,24 @@ static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unuse
return 0;
}
#else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */
static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
{
return 0;
}
#endif
static int check_unaligned_access_all_cpus(void)
static int __init check_unaligned_access_all_cpus(void)
{
bool all_cpus_emulated, all_cpus_vec_unsupported;
bool all_cpus_emulated;
int cpu;
all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus();
if (!all_cpus_vec_unsupported &&
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
if (!has_vector()) {
for_each_online_cpu(cpu)
per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
} else if (!check_vector_unaligned_access_emulated_all_cpus() &&
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
kthread_run(vec_check_unaligned_access_speed_all_cpus,
NULL, "vec_check_unaligned_access_speed_all_cpus");
}


@ -11,7 +11,7 @@
#define WORD_SEW CONCATENATE(e, WORD_EEW)
#define VEC_L CONCATENATE(vle, WORD_EEW).v
#define VEC_S CONCATENATE(vle, WORD_EEW).v
#define VEC_S CONCATENATE(vse, WORD_EEW).v
/* void __riscv_copy_vec_words_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using word loads and stores. */


@ -172,8 +172,8 @@ module_init(riscv_kvm_init);
static void __exit riscv_kvm_exit(void)
{
kvm_riscv_teardown();
kvm_exit();
kvm_riscv_teardown();
}
module_exit(riscv_kvm_exit);


@ -666,6 +666,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
.type = etype,
.size = sizeof(struct perf_event_attr),
.pinned = true,
.disabled = true,
/*
* It should never reach here if the platform doesn't support the sscofpmf
* extension as mode filtering won't work without it.


@ -148,22 +148,25 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
unsigned long pte_num)
unsigned long ncontig)
{
pte_t orig_pte = ptep_get(ptep);
unsigned long i;
pte_t pte, tmp_pte;
bool present;
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) {
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
pte = ptep_get_and_clear(mm, addr, ptep);
present = pte_present(pte);
while (--ncontig) {
ptep++;
addr += PAGE_SIZE;
tmp_pte = ptep_get_and_clear(mm, addr, ptep);
if (present) {
if (pte_dirty(tmp_pte))
pte = pte_mkdirty(pte);
if (pte_young(tmp_pte))
pte = pte_mkyoung(pte);
}
}
return orig_pte;
return pte;
}
static pte_t get_clear_contig_flush(struct mm_struct *mm,
@ -212,6 +215,26 @@ static void clear_flush(struct mm_struct *mm,
flush_tlb_range(&vma, saddr, addr);
}
static int num_contig_ptes_from_size(unsigned long sz, size_t *pgsize)
{
unsigned long hugepage_shift;
if (sz >= PGDIR_SIZE)
hugepage_shift = PGDIR_SHIFT;
else if (sz >= P4D_SIZE)
hugepage_shift = P4D_SHIFT;
else if (sz >= PUD_SIZE)
hugepage_shift = PUD_SHIFT;
else if (sz >= PMD_SIZE)
hugepage_shift = PMD_SHIFT;
else
hugepage_shift = PAGE_SHIFT;
*pgsize = 1 << hugepage_shift;
return sz >> hugepage_shift;
}
/*
* When dealing with NAPOT mappings, the privileged specification indicates that
* "if an update needs to be made, the OS generally should first mark all of the
@ -226,22 +249,10 @@ void set_huge_pte_at(struct mm_struct *mm,
pte_t pte,
unsigned long sz)
{
unsigned long hugepage_shift, pgsize;
size_t pgsize;
int i, pte_num;
if (sz >= PGDIR_SIZE)
hugepage_shift = PGDIR_SHIFT;
else if (sz >= P4D_SIZE)
hugepage_shift = P4D_SHIFT;
else if (sz >= PUD_SIZE)
hugepage_shift = PUD_SHIFT;
else if (sz >= PMD_SIZE)
hugepage_shift = PMD_SHIFT;
else
hugepage_shift = PAGE_SHIFT;
pte_num = sz >> hugepage_shift;
pgsize = 1 << hugepage_shift;
pte_num = num_contig_ptes_from_size(sz, &pgsize);
if (!pte_present(pte)) {
for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
@ -295,13 +306,14 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, unsigned long sz)
{
size_t pgsize;
pte_t orig_pte = ptep_get(ptep);
int pte_num;
if (!pte_napot(orig_pte))
return ptep_get_and_clear(mm, addr, ptep);
pte_num = napot_pte_num(napot_cont_order(orig_pte));
pte_num = num_contig_ptes_from_size(sz, &pgsize);
return get_clear_contig(mm, addr, ptep, pte_num);
}
@ -351,6 +363,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_t *ptep,
unsigned long sz)
{
size_t pgsize;
pte_t pte = ptep_get(ptep);
int i, pte_num;
@ -359,8 +372,9 @@ void huge_pte_clear(struct mm_struct *mm,
return;
}
pte_num = napot_pte_num(napot_cont_order(pte));
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
pte_num = num_contig_ptes_from_size(sz, &pgsize);
for (i = 0; i < pte_num; i++, addr += pgsize, ptep++)
pte_clear(mm, addr, ptep);
}


@ -12,6 +12,7 @@
.text
.align 2
SYM_CODE_START(purgatory_start)
lla sp, .Lstack


@ -34,8 +34,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define ioremap_wc(addr, size) \
ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL)))
#define ioremap_wt(addr, size) \
ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL)))
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{


@ -1391,9 +1391,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
#define pgprot_writecombine pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);
#define pgprot_writethrough pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);
#define PFN_PTE_SHIFT PAGE_SHIFT
/*


@ -481,7 +481,7 @@ SYM_CODE_START(mcck_int_handler)
clgrjl %r9,%r14, 4f
larl %r14,.Lsie_leave
clgrjhe %r9,%r14, 4f
lg %r10,__LC_PCPU
lg %r10,__LC_PCPU(%r13)
oi __PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
SIEEXIT __SF_SIE_CONTROL(%r15),%r13

View File

@ -34,16 +34,6 @@ pgprot_t pgprot_writecombine(pgprot_t prot)
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
pgprot_t pgprot_writethrough(pgprot_t prot)
{
/*
* mio_wb_bit_mask may be set on a different CPU, but it is only set
* once at init and only read afterwards.
*/
return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, int nodat)
{


@ -213,7 +213,6 @@ extern int os_protect_memory(void *addr, unsigned long len,
extern int os_unmap_memory(void *addr, int len);
extern int os_drop_memory(void *addr, int length);
extern int can_drop_memory(void);
extern int os_mincore(void *addr, unsigned long len);
void os_set_pdeathsig(void);


@ -17,7 +17,7 @@ extra-y := vmlinux.lds
obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
physmem.o process.o ptrace.o reboot.o sigio.o \
signal.o sysrq.o time.o tlb.o trap.o \
um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/
um_arch.o umid.o kmsg_dump.o capflags.o skas/
obj-y += load_file.o
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o


@ -1,19 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Richard Weinberger <richrd@nod.at>
*/
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <os.h>
bool copy_from_kernel_nofault_allowed(const void *src, size_t size)
{
void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
if ((unsigned long)src < PAGE_SIZE || size <= 0)
return false;
if (os_mincore(psrc, size + src - psrc) <= 0)
return false;
return true;
}


@ -142,57 +142,6 @@ out:
return ok;
}
static int os_page_mincore(void *addr)
{
char vec[2];
int ret;
ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
if (ret < 0) {
if (errno == ENOMEM || errno == EINVAL)
return 0;
else
return -errno;
}
return vec[0] & 1;
}
int os_mincore(void *addr, unsigned long len)
{
char *vec;
int ret, i;
if (len <= UM_KERN_PAGE_SIZE)
return os_page_mincore(addr);
vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
if (!vec)
return -ENOMEM;
ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
if (ret < 0) {
if (errno == ENOMEM || errno == EINVAL)
ret = 0;
else
ret = -errno;
goto out;
}
for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
if (!(vec[i] & 1)) {
ret = 0;
goto out;
}
}
ret = 1;
out:
free(vec);
return ret;
}
void init_new_thread_signals(void)
{
set_handler(SIGSEGV);


@ -229,7 +229,7 @@ config X86
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EISA
select HAVE_EISA if X86_32
select HAVE_EXIT_THREAD
select HAVE_GUP_FAST
select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE
@ -897,6 +897,7 @@ config INTEL_TDX_GUEST
depends on X86_64 && CPU_SUP_INTEL
depends on X86_X2APIC
depends on EFI_STUB
depends on PARAVIRT
select ARCH_HAS_CC_PLATFORM
select X86_MEM_ENCRYPT
select X86_MCE


@ -368,7 +368,7 @@ config X86_HAVE_PAE
config X86_CMPXCHG64
def_bool y
depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7
depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7 || MGEODEGX1 || MGEODE_LX
# this should be set for all -march=.. options where the compiler
# generates cmov.


@ -7,12 +7,13 @@ core-y += arch/x86/crypto/
# GCC versions < 11. See:
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
#
ifeq ($(CONFIG_CC_IS_CLANG),y)
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
ifeq ($(call gcc-min-version, 110000)$(CONFIG_CC_IS_CLANG),y)
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
endif
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
ifeq ($(CONFIG_X86_32),y)
START := 0x8048000


@ -14,6 +14,7 @@
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/paravirt_types.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/traps.h>
@ -386,7 +387,7 @@ static int handle_halt(struct ve_info *ve)
return ve_instr_len(ve);
}
void __cpuidle tdx_safe_halt(void)
void __cpuidle tdx_halt(void)
{
const bool irq_disabled = false;
@ -397,6 +398,16 @@ void __cpuidle tdx_safe_halt(void)
WARN_ONCE(1, "HLT instruction emulation failed\n");
}
static void __cpuidle tdx_safe_halt(void)
{
tdx_halt();
/*
* "__cpuidle" section doesn't support instrumentation, so stick
* with raw_* variant that avoids tracing hooks.
*/
raw_local_irq_enable();
}
static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
struct tdx_module_args args = {
@ -1083,6 +1094,19 @@ void __init tdx_early_init(void)
x86_platform.guest.enc_kexec_begin = tdx_kexec_begin;
x86_platform.guest.enc_kexec_finish = tdx_kexec_finish;
/*
* Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that
* will enable interrupts before HLT TDCALL invocation if executed
* in STI-shadow, possibly resulting in missed wakeup events.
*
* Modify all possible HLT execution paths to use TDX specific routines
* that directly execute TDCALL and toggle the interrupt state as
* needed after TDCALL completion. This also reduces HLT related #VEs
* in addition to having a reliable halt logic execution.
*/
pv_ops.irq.safe_halt = tdx_safe_halt;
pv_ops.irq.halt = tdx_halt;
/*
* TDX intercepts the RDMSR to read the X2APIC ID in the parallel
* bringup low level code. That raises #VE which cannot be handled


@ -70,6 +70,8 @@ For 32-bit we have the following conventions - kernel is built with
pushq %rsi /* pt_regs->si */
movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */
/* We just clobbered the return address - use the IRET frame for unwinding: */
UNWIND_HINT_IRET_REGS offset=3*8
.else
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */


@ -142,7 +142,7 @@ static __always_inline int syscall_32_enter(struct pt_regs *regs)
#ifdef CONFIG_IA32_EMULATION
bool __ia32_enabled __ro_after_init = !IS_ENABLED(CONFIG_IA32_EMULATION_DEFAULT_DISABLED);
static int ia32_emulation_override_cmdline(char *arg)
static int __init ia32_emulation_override_cmdline(char *arg)
{
return kstrtobool(arg, &__ia32_enabled);
}


@ -24,7 +24,7 @@ SECTIONS
timens_page = vvar_start + PAGE_SIZE;
vclock_pages = vvar_start + VDSO_NR_VCLOCK_PAGES * PAGE_SIZE;
vclock_pages = VDSO_VCLOCK_PAGES_START(vvar_start);
pvclock_page = vclock_pages + VDSO_PAGE_PVCLOCK_OFFSET * PAGE_SIZE;
hvclock_page = vclock_pages + VDSO_PAGE_HVCLOCK_OFFSET * PAGE_SIZE;


@ -290,7 +290,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
}
vma = _install_special_mapping(mm,
addr + (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE,
VDSO_VCLOCK_PAGES_START(addr),
VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
VM_PFNMAP,


@ -2779,28 +2779,33 @@ static u64 icl_update_topdown_event(struct perf_event *event)
DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
static void intel_pmu_read_topdown_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
/* Only need to call update_topdown_event() once for group read. */
if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
!is_slots_event(event))
return;
perf_pmu_disable(event->pmu);
static_call(intel_pmu_update_topdown_event)(event);
perf_pmu_enable(event->pmu);
}
static void intel_pmu_read_event(struct perf_event *event)
{
if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
intel_pmu_auto_reload_read(event);
else if (is_topdown_count(event))
intel_pmu_read_topdown_event(event);
else
x86_perf_event_update(event);
if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) {
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
bool pmu_enabled = cpuc->enabled;
/* Only need to call update_topdown_event() once for group read. */
if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
return;
cpuc->enabled = 0;
if (pmu_enabled)
intel_pmu_disable_all();
if (is_topdown_event(event))
static_call(intel_pmu_update_topdown_event)(event);
else
intel_pmu_drain_pebs_buffer();
cpuc->enabled = pmu_enabled;
if (pmu_enabled)
intel_pmu_enable_all(0);
return;
}
x86_perf_event_update(event);
}
static void intel_pmu_enable_fixed(struct perf_event *event)
@ -3067,7 +3072,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
handled++;
x86_pmu_handle_guest_pebs(regs, &data);
x86_pmu.drain_pebs(regs, &data);
static_call(x86_pmu_drain_pebs)(regs, &data);
status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
/*


@ -953,11 +953,11 @@ unlock:
return 1;
}
static inline void intel_pmu_drain_pebs_buffer(void)
void intel_pmu_drain_pebs_buffer(void)
{
struct perf_sample_data data;
x86_pmu.drain_pebs(NULL, &data);
static_call(x86_pmu_drain_pebs)(NULL, &data);
}
/*
@ -2100,15 +2100,6 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
return NULL;
}
void intel_pmu_auto_reload_read(struct perf_event *event)
{
WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
perf_pmu_disable(event->pmu);
intel_pmu_drain_pebs_buffer();
perf_pmu_enable(event->pmu);
}
/*
* Special variant of intel_pmu_save_and_restart() for auto-reload.
*/


@ -1106,6 +1106,7 @@ extern struct x86_pmu x86_pmu __read_mostly;
DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update);
DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
{
@ -1642,7 +1643,7 @@ void intel_pmu_pebs_disable_all(void);
void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
void intel_pmu_auto_reload_read(struct perf_event *event);
void intel_pmu_drain_pebs_buffer(void);
void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);


@ -30,6 +30,7 @@ void __init hv_vtl_init_platform(void)
x86_platform.realmode_init = x86_init_noop;
x86_init.irqs.pre_vector_init = x86_init_noop;
x86_init.timers.timer_init = x86_init_noop;
x86_init.resources.probe_roms = x86_init_noop;
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_mptable = x86_init_noop;


@ -339,7 +339,7 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
vmsa->sev_features = sev_status >> 2;
ret = snp_set_vmsa(vmsa, true);
if (!ret) {
if (ret) {
pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
free_page((u64)vmsa);
return ret;
@ -465,7 +465,6 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
enum hv_mem_host_visibility visibility)
{
struct hv_gpa_range_for_visibility *input;
u16 pages_processed;
u64 hv_status;
unsigned long flags;
@ -494,7 +493,7 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
hv_status = hv_do_rep_hypercall(
HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
0, input, &pages_processed);
0, input, NULL);
local_irq_restore(flags);
if (hv_result_success(hv_status))


@ -58,7 +58,7 @@ void tdx_get_ve_info(struct ve_info *ve);
bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);
void tdx_safe_halt(void);
void tdx_halt(void);
bool tdx_early_handle_ve(struct pt_regs *regs);
@ -69,7 +69,7 @@ u64 tdx_hcall_get_quote(u8 *buf, size_t size);
#else
static inline void tdx_early_init(void) { };
static inline void tdx_safe_halt(void) { };
static inline void tdx_halt(void) { };
static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }


@ -242,7 +242,7 @@ void flush_tlb_multi(const struct cpumask *cpumask,
flush_tlb_mm_range((vma)->vm_mm, start, end, \
((vma)->vm_flags & VM_HUGETLB) \
? huge_page_shift(hstate_vma(vma)) \
: PAGE_SHIFT, false)
: PAGE_SHIFT, true)
extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,


@ -6,6 +6,7 @@
#define __VVAR_PAGES 4
#define VDSO_NR_VCLOCK_PAGES 2
#define VDSO_VCLOCK_PAGES_START(_b) ((_b) + (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE)
#define VDSO_PAGE_PVCLOCK_OFFSET 0
#define VDSO_PAGE_HVCLOCK_OFFSET 1


@ -192,7 +192,13 @@ static void __split_lock_reenable(struct work_struct *work)
{
sld_update_msr(true);
}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);
/*
* In order for each CPU to schedule its delayed work independently of the
* others, delayed work struct must be per-CPU. This is not required when
* sysctl_sld_mitigate is enabled because of the semaphore that limits
* the number of simultaneously scheduled delayed works to 1.
*/
static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
/*
* If a CPU goes offline with pending delayed work to re-enable split lock
@ -213,7 +219,7 @@ static int splitlock_cpu_offline(unsigned int cpu)
static void split_lock_warn(unsigned long ip)
{
struct delayed_work *work;
struct delayed_work *work = NULL;
int cpu;
if (!current->reported_split_lock)
@ -235,11 +241,17 @@ static void split_lock_warn(unsigned long ip)
if (down_interruptible(&buslock_sem) == -EINTR)
return;
work = &sl_reenable_unlock;
} else {
work = &sl_reenable;
}
cpu = get_cpu();
if (!work) {
work = this_cpu_ptr(&sl_reenable);
/* Deferred initialization of per-CPU struct */
if (!work->work.func)
INIT_DELAYED_WORK(work, __split_lock_reenable);
}
schedule_delayed_work_on(cpu, work, 2);
/* Disable split lock detection on this CPU to make progress */


@ -300,13 +300,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs)
copy_user = is_copy_from_user(regs);
instrumentation_end();
switch (fixup_type) {
case EX_TYPE_UACCESS:
if (!copy_user)
return IN_KERNEL;
m->kflags |= MCE_IN_KERNEL_COPYIN;
fallthrough;
if (copy_user) {
m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV;
return IN_KERNEL_RECOV;
}
switch (fixup_type) {
case EX_TYPE_FAULT_MCE_SAFE:
case EX_TYPE_DEFAULT_MCE_SAFE:
m->kflags |= MCE_IN_KERNEL_RECOV;


@ -600,7 +600,7 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
return -1;
return false;
native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);


@ -148,7 +148,8 @@ static int closid_alloc(void)
lockdep_assert_held(&rdtgroup_mutex);
if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) &&
is_llc_occupancy_enabled()) {
cleanest_closid = resctrl_find_cleanest_closid();
if (cleanest_closid < 0)
return cleanest_closid;


@ -150,13 +150,15 @@ int __init sgx_drv_init(void)
u64 xfrm_mask;
int ret;
if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
pr_info("SGX disabled: SGX launch control CPU feature is not available, /dev/sgx_enclave disabled.\n");
return -ENODEV;
}
cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);
if (!(eax & 1)) {
pr_err("SGX disabled: SGX1 instruction support not available.\n");
pr_info("SGX disabled: SGX1 instruction support not available, /dev/sgx_enclave disabled.\n");
return -ENODEV;
}
@ -173,8 +175,10 @@ int __init sgx_drv_init(void)
}
ret = misc_register(&sgx_dev_enclave);
if (ret)
if (ret) {
pr_info("SGX disabled: Unable to register the /dev/sgx_enclave driver (%d).\n", ret);
return ret;
}
return 0;
}


@ -195,6 +195,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
printk("%sCall Trace:\n", log_lvl);
unwind_start(&state, task, regs, stack);
stack = stack ?: get_stack_pointer(task, regs);
regs = unwind_get_entry_regs(&state, &partial);
/*
@ -213,9 +214,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* - hardirq stack
* - entry stack
*/
for (stack = stack ?: get_stack_pointer(task, regs);
stack;
stack = stack_info.next_sp) {
for (; stack; stack = stack_info.next_sp) {
const char *stack_name;
stack = PTR_ALIGN(stack, sizeof(long));


@ -220,7 +220,7 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
struct fpstate *fpstate;
unsigned int size;
size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
size = fpu_kernel_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
fpstate = vzalloc(size);
if (!fpstate)
return false;
@ -232,8 +232,8 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
fpstate->is_guest = true;
gfpu->fpstate = fpstate;
gfpu->xfeatures = fpu_user_cfg.default_features;
gfpu->perm = fpu_user_cfg.default_features;
gfpu->xfeatures = fpu_kernel_cfg.default_features;
gfpu->perm = fpu_kernel_cfg.default_features;
/*
* KVM sets the FP+SSE bits in the XSAVE header when copying FPU state


@ -92,7 +92,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
memcpy(dst, src, arch_task_struct_size);
/* init_task is not dynamically sized (incomplete FPU state) */
if (unlikely(src == &init_task))
memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(init_task), 0);
else
memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
dst->thread.vm86 = NULL;
#endif
@ -933,7 +938,7 @@ void __init select_idle_routine(void)
static_call_update(x86_idle, mwait_idle);
} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
pr_info("using TDX aware idle routine\n");
static_call_update(x86_idle, tdx_safe_halt);
static_call_update(x86_idle, tdx_halt);
} else {
static_call_update(x86_idle, default_idle);
}


@ -379,6 +379,21 @@ __visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
}
#endif
/*
* Prevent the compiler and/or objtool from marking the !CONFIG_X86_ESPFIX64
* version of exc_double_fault() as noreturn. Otherwise the noreturn mismatch
* between configs triggers objtool warnings.
*
* This is a temporary hack until we have compiler or plugin support for
* annotating noreturns.
*/
#ifdef CONFIG_X86_ESPFIX64
#define always_true() true
#else
bool always_true(void);
bool __weak always_true(void) { return true; }
#endif
/*
* Runs on an IST stack for x86_64 and on a special task stack for x86_32.
*
@ -514,7 +529,8 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
die("double fault", regs, error_code);
panic("Machine halted.");
if (always_true())
panic("Machine halted.");
instrumentation_end();
}


@ -957,7 +957,7 @@ static unsigned long long cyc2ns_suspend;
void tsc_save_sched_clock_state(void)
{
if (!sched_clock_stable())
if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
return;
cyc2ns_suspend = sched_clock();
@ -977,7 +977,7 @@ void tsc_restore_sched_clock_state(void)
unsigned long flags;
int cpu;
if (!sched_clock_stable())
if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
return;
local_irq_save(flags);


@ -357,19 +357,23 @@ void *arch_uprobe_trampoline(unsigned long *psize)
return &insn;
}
static unsigned long trampoline_check_ip(void)
static unsigned long trampoline_check_ip(unsigned long tramp)
{
unsigned long tramp = uprobe_get_trampoline_vaddr();
return tramp + (uretprobe_syscall_check - uretprobe_trampoline_entry);
}
SYSCALL_DEFINE0(uretprobe)
{
struct pt_regs *regs = task_pt_regs(current);
unsigned long err, ip, sp, r11_cx_ax[3];
unsigned long err, ip, sp, r11_cx_ax[3], tramp;
if (regs->ip != trampoline_check_ip())
/* If there's no trampoline, we are called from wrong place. */
tramp = uprobe_get_trampoline_vaddr();
if (unlikely(tramp == UPROBE_NO_TRAMPOLINE_VADDR))
goto sigill;
/* Make sure the ip matches the only allowed sys_uretprobe caller. */
if (unlikely(regs->ip != trampoline_check_ip(tramp)))
goto sigill;
err = copy_from_user(r11_cx_ax, (void __user *)regs->sp, sizeof(r11_cx_ax));


@ -3945,16 +3945,12 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
/*
* The target vCPU is valid, so the vCPU will be kicked unless the
* request is for CREATE_ON_INIT. For any errors at this stage, the
* kick will place the vCPU in an non-runnable state.
* request is for CREATE_ON_INIT.
*/
kick = true;
mutex_lock(&target_svm->sev_es.snp_vmsa_mutex);
target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
target_svm->sev_es.snp_ap_waiting_for_reset = true;
/* Interrupt injection mode shouldn't change for AP creation */
if (request < SVM_VMGEXIT_AP_DESTROY) {
u64 sev_features;
@ -4000,20 +3996,23 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2;
break;
case SVM_VMGEXIT_AP_DESTROY:
target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
break;
default:
vcpu_unimpl(vcpu, "vmgexit: invalid AP creation request [%#x] from guest\n",
request);
ret = -EINVAL;
break;
goto out;
}
out:
target_svm->sev_es.snp_ap_waiting_for_reset = true;
if (kick) {
kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
kvm_vcpu_kick(target_vcpu);
}
out:
mutex_unlock(&target_svm->sev_es.snp_vmsa_mutex);
return ret;


@ -4573,6 +4573,11 @@ static bool kvm_is_vm_type_supported(unsigned long type)
return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
}
static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
{
return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r = 0;
@ -4681,7 +4686,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
#endif
case KVM_CAP_SYNC_REGS:
r = KVM_SYNC_X86_VALID_FIELDS;
r = kvm_sync_valid_fields(kvm);
break;
case KVM_CAP_ADJUST_CLOCK:
r = KVM_CLOCK_VALID_FLAGS;
@ -11466,6 +11471,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
struct kvm_queued_exception *ex = &vcpu->arch.exception;
struct kvm_run *kvm_run = vcpu->run;
u32 sync_valid_fields;
int r;
r = kvm_mmu_post_init_vm(vcpu->kvm);
@ -11511,8 +11517,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out;
}
if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
(kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm);
if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) ||
(kvm_run->kvm_dirty_regs & ~sync_valid_fields)) {
r = -EINVAL;
goto out;
}
@ -11570,7 +11577,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
out:
kvm_put_guest_fpu(vcpu);
if (kvm_run->kvm_valid_regs)
if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected))
store_regs(vcpu);
post_kvm_run_save(vcpu);
kvm_vcpu_srcu_read_unlock(vcpu);


@ -74,6 +74,24 @@ SYM_FUNC_START(rep_movs_alternative)
_ASM_EXTABLE_UA( 0b, 1b)
.Llarge_movsq:
/* Do the first possibly unaligned word */
0: movq (%rsi),%rax
1: movq %rax,(%rdi)
_ASM_EXTABLE_UA( 0b, .Lcopy_user_tail)
_ASM_EXTABLE_UA( 1b, .Lcopy_user_tail)
/* What would be the offset to the aligned destination? */
leaq 8(%rdi),%rax
andq $-8,%rax
subq %rdi,%rax
/* .. and update pointers and count to match */
addq %rax,%rdi
addq %rax,%rsi
subq %rax,%rcx
/* make %rcx contain the number of words, %rax the remainder */
movq %rcx,%rax
shrq $3,%rcx
andl $7,%eax


@ -565,7 +565,7 @@ void __head sme_enable(struct boot_params *bp)
}
RIP_REL_REF(sme_me_mask) = me_mask;
physical_mask &= ~me_mask;
cc_vendor = CC_VENDOR_AMD;
RIP_REL_REF(physical_mask) &= ~me_mask;
RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD;
cc_set_mask(me_mask);
}


@ -183,7 +183,7 @@ static int pageattr_test(void)
break;
case 1:
err = change_page_attr_set(addrs, len[1], PAGE_CPA_TEST, 1);
err = change_page_attr_set(addrs, len[i], PAGE_CPA_TEST, 1);
break;
case 2:


@ -984,29 +984,42 @@ static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
return -EINVAL;
}
/*
* track_pfn_copy is called when vma that is covering the pfnmap gets
* copied through copy_page_range().
*
* If the vma has a linear pfn mapping for the entire range, we get the prot
* from pte and reserve the entire vma range with single reserve_pfn_range call.
*/
int track_pfn_copy(struct vm_area_struct *vma)
int track_pfn_copy(struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma, unsigned long *pfn)
{
const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start;
resource_size_t paddr;
unsigned long vma_size = vma->vm_end - vma->vm_start;
pgprot_t pgprot;
int rc;
if (vma->vm_flags & VM_PAT) {
if (get_pat_info(vma, &paddr, &pgprot))
return -EINVAL;
/* reserve the whole chunk covered by vma. */
return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
}
if (!(src_vma->vm_flags & VM_PAT))
return 0;
/*
* Duplicate the PAT information for the dst VMA based on the src
* VMA.
*/
if (get_pat_info(src_vma, &paddr, &pgprot))
return -EINVAL;
rc = reserve_pfn_range(paddr, vma_size, &pgprot, 1);
if (rc)
return rc;
/* Reservation for the destination VMA succeeded. */
vm_flags_set(dst_vma, VM_PAT);
*pfn = PHYS_PFN(paddr);
return 0;
}
void untrack_pfn_copy(struct vm_area_struct *dst_vma, unsigned long pfn)
{
untrack_pfn(dst_vma, pfn, dst_vma->vm_end - dst_vma->vm_start, true);
/*
* Reservation was freed, any copied page tables will get cleaned
* up later, but without getting PAT involved again.
*/
}
/*
* prot is passed in as a parameter for the new mapping. If the vma has
* a linear pfn mapping for the entire range, or no vma is provided,
@ -1095,15 +1108,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
}
}
/*
* untrack_pfn_clear is called if the following situation fits:
*
* 1) while mremapping a pfnmap for a new region, with the old vma after
* its pfnmap page table has been removed. The new vma has a new pfnmap
* to the same pfn & cache type with VM_PAT set.
* 2) while duplicating vm area, the new vma fails to copy the pgtable from
* old vma.
*/
void untrack_pfn_clear(struct vm_area_struct *vma)
{
vm_flags_clear(vma, VM_PAT);


@ -36,7 +36,8 @@ EXPORT_SYMBOL_GPL(crypto_chain);
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
#endif
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
u32 type, u32 mask);
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
u32 mask);
@ -145,7 +146,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
if (alg != &larval->alg) {
kfree(larval);
if (crypto_is_larval(alg))
alg = crypto_larval_wait(alg);
alg = crypto_larval_wait(alg, type, mask);
}
return alg;
@ -197,7 +198,8 @@ static void crypto_start_test(struct crypto_larval *larval)
crypto_schedule_test(larval);
}
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
u32 type, u32 mask)
{
struct crypto_larval *larval;
long time_left;
@ -219,12 +221,7 @@ again:
crypto_larval_kill(larval);
alg = ERR_PTR(-ETIMEDOUT);
} else if (!alg) {
u32 type;
u32 mask;
alg = &larval->alg;
type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
mask = larval->mask;
alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
ERR_PTR(-EAGAIN);
} else if (IS_ERR(alg))
@ -304,7 +301,7 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
}
if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
alg = crypto_larval_wait(alg);
alg = crypto_larval_wait(alg, type, mask);
else if (alg)
;
else if (!(mask & CRYPTO_ALG_TESTED))
@ -352,7 +349,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);
if (ok == NOTIFY_STOP)
alg = crypto_larval_wait(larval);
alg = crypto_larval_wait(larval, type, mask);
else {
crypto_mod_put(larval);
alg = ERR_PTR(-ENOENT);

View File

@ -80,3 +80,4 @@ static void __exit bpf_crypto_skcipher_exit(void)
module_init(bpf_crypto_skcipher_init);
module_exit(bpf_crypto_skcipher_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher support for BPF");

View File

@ -648,6 +648,13 @@ acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length
obj = buffer.pointer;
/*
* Some buggy implementations incorrectly return the EDID buffer in an ACPI package.
* In this case, extract the buffer from the package.
*/
if (obj && obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 1)
obj = &obj->package.elements[0];
if (obj && obj->type == ACPI_TYPE_BUFFER) {
*edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
ret = *edid ? obj->buffer.length : -ENOMEM;
@ -657,7 +664,7 @@ acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length
ret = -EFAULT;
}
kfree(obj);
kfree(buffer.pointer);
return ret;
}

View File

@ -485,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_mask = nd_desc->cmd_mask;
if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
family = call_pkg->nd_family;
if (family > NVDIMM_BUS_FAMILY_MAX ||
if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX ||
!test_bit(family, &nd_desc->bus_family_mask))
return -EINVAL;
family = array_index_nospec(family,

View File

@ -268,6 +268,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
pr->power.states[ACPI_STATE_C3].address);
if (!pr->power.states[ACPI_STATE_C2].address &&
!pr->power.states[ACPI_STATE_C3].address)
return -ENODEV;
return 0;
}


@ -440,6 +440,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
},
},
{
/* Asus Vivobook X1404VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"),
},
},
{
/* Asus Vivobook X1504VAP */
.matches = {


@ -374,7 +374,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Medion Lifetab S10346 */


@ -503,6 +503,7 @@ config HT16K33
config MAX6959
tristate "Maxim MAX6958/6959 7-segment LED controller"
depends on I2C
select BITREVERSE
select REGMAP_I2C
select LINEDISP
help

View File

@ -1664,7 +1664,7 @@ err_lcd_unreg:
if (lcd.enabled)
charlcd_unregister(lcd.charlcd);
err_unreg_device:
kfree(lcd.charlcd);
charlcd_free(lcd.charlcd);
lcd.charlcd = NULL;
parport_unregister_device(pprt);
pprt = NULL;
@ -1692,7 +1692,7 @@ static void panel_detach(struct parport *port)
charlcd_unregister(lcd.charlcd);
lcd.initialized = false;
kfree(lcd.charlcd->drvdata);
kfree(lcd.charlcd);
charlcd_free(lcd.charlcd);
lcd.charlcd = NULL;
}


@ -913,6 +913,9 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
if (!dev->power.is_suspended)
goto Complete;
if (dev->power.direct_complete) {
/* Match the pm_runtime_disable() in __device_suspend(). */
pm_runtime_enable(dev);
@ -931,9 +934,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
*/
dev->power.is_prepared = false;
if (!dev->power.is_suspended)
goto Unlock;
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@ -973,7 +973,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
error = dpm_run_callback(callback, dev, state, info);
dev->power.is_suspended = false;
Unlock:
device_unlock(dev);
dpm_watchdog_clear(&wd);
@ -1254,14 +1253,13 @@ Skip:
dev->power.is_noirq_suspended = true;
/*
* Skipping the resume of devices that were in use right before the
* system suspend (as indicated by their PM-runtime usage counters)
* would be suboptimal. Also resume them if doing that is not allowed
* to be skipped.
* Devices must be resumed unless they are explicitly allowed to be left
* in suspend, but even in that case skipping the resume of devices that
* were in use right before the system suspend (as indicated by their
* runtime PM usage counters and child counters) would be suboptimal.
*/
if (atomic_read(&dev->power.usage_count) > 1 ||
!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
dev->power.may_skip_resume))
if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
dev->power.must_resume = true;
if (dev->power.must_resume)
@ -1628,6 +1626,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev)) {
pm_dev_dbg(dev, state, "direct-complete ");
dev->power.is_suspended = true;
goto Complete;
}


@ -1874,7 +1874,7 @@ void pm_runtime_drop_link(struct device_link *link)
pm_request_idle(link->supplier);
}
static bool pm_runtime_need_not_resume(struct device *dev)
bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&
(atomic_read(&dev->power.child_count) == 0 ||

View File

@ -1452,18 +1452,28 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
}
}
static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
/* Must be called when queue is frozen */
static bool ublk_mark_queue_canceling(struct ublk_queue *ubq)
{
struct gendisk *disk;
bool canceled;
spin_lock(&ubq->cancel_lock);
if (ubq->canceling) {
spin_unlock(&ubq->cancel_lock);
return false;
}
ubq->canceling = true;
canceled = ubq->canceling;
if (!canceled)
ubq->canceling = true;
spin_unlock(&ubq->cancel_lock);
return canceled;
}
static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
{
bool was_canceled = ubq->canceling;
struct gendisk *disk;
if (was_canceled)
return false;
spin_lock(&ub->lock);
disk = ub->ub_disk;
if (disk)
@ -1474,14 +1484,23 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
if (!disk)
return false;
/* Now we are serialized with ublk_queue_rq() */
/*
* Now we are serialized with ublk_queue_rq()
*
* Make sure that ubq->canceling is set when queue is frozen,
	 * because ublk_queue_rq() has to rely on this flag to avoid
	 * touching a completed uring_cmd
*/
blk_mq_quiesce_queue(disk->queue);
/* abort queue is for making forward progress */
ublk_abort_queue(ub, ubq);
was_canceled = ublk_mark_queue_canceling(ubq);
if (!was_canceled) {
/* abort queue is for making forward progress */
ublk_abort_queue(ub, ubq);
}
blk_mq_unquiesce_queue(disk->queue);
put_device(disk_to_dev(disk));
return true;
return !was_canceled;
}
static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
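
ublk_mark_queue_canceling() above is a check-and-set under ubq->cancel_lock: it returns the previous value of ubq->canceling so that only one caller goes on to abort the queue while it is quiesced. A generic sketch of that pattern, not ublk code; the struct and names are illustrative:

#include <linux/spinlock.h>
#include <linux/types.h>

struct cancel_state {
	spinlock_t lock;	/* initialised with spin_lock_init() at setup */
	bool canceling;
};

/* Returns the previous value; only the caller that saw 'false' tears down. */
static bool mark_canceling(struct cancel_state *s)
{
	bool was_canceling;

	spin_lock(&s->lock);
	was_canceling = s->canceling;
	if (!was_canceling)
		s->canceling = true;
	spin_unlock(&s->lock);

	return was_canceling;
}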

View File

@ -180,14 +180,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
CLK_GATE("asrc", ASRC_IPG),
CLK_GATE("pdm", PDM_IPG),
CLK_GATE("earc", EARC_IPG),
CLK_GATE("ocrama", OCRAMA_IPG),
CLK_GATE_PARENT("ocrama", OCRAMA_IPG, "axi"),
CLK_GATE("aud2htx", AUD2HTX_IPG),
CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"),
CLK_GATE("sdma2", SDMA2_ROOT),
CLK_GATE("sdma3", SDMA3_ROOT),
CLK_GATE("spba2", SPBA2_ROOT),
CLK_GATE("dsp", DSP_ROOT),
CLK_GATE("dspdbg", DSPDBG_ROOT),
CLK_GATE_PARENT("dsp", DSP_ROOT, "axi"),
CLK_GATE_PARENT("dspdbg", DSPDBG_ROOT, "axi"),
CLK_GATE("edma", EDMA_ROOT),
CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"),
CLK_GATE("mu2", MU2_ROOT),

View File

@ -1137,8 +1137,18 @@ static struct clk_regmap g12a_cpu_clk_div16_en = {
.hw.init = &(struct clk_init_data) {
.name = "cpu_clk_div16_en",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
&g12a_cpu_clk.hw
.parent_data = &(const struct clk_parent_data) {
/*
* Note:
* G12A and G12B have different cpu clocks (with
* different struct clk_hw). We fallback to the global
* naming string mechanism so this clock picks
* up the appropriate one. Same goes for the other
* clock using cpu cluster A clock output and present
				 * on both G12 variants.
*/
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
/*
@ -1203,7 +1213,10 @@ static struct clk_regmap g12a_cpu_clk_apb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_apb_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.parent_data = &(const struct clk_parent_data) {
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
},
};
@ -1237,7 +1250,10 @@ static struct clk_regmap g12a_cpu_clk_atb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_atb_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.parent_data = &(const struct clk_parent_data) {
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
},
};
@ -1271,7 +1287,10 @@ static struct clk_regmap g12a_cpu_clk_axi_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_axi_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.parent_data = &(const struct clk_parent_data) {
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
},
};
@ -1306,13 +1325,6 @@ static struct clk_regmap g12a_cpu_clk_trace_div = {
.name = "cpu_clk_trace_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_data = &(const struct clk_parent_data) {
/*
* Note:
* G12A and G12B have different cpu_clks (with
* different struct clk_hw). We fallback to the global
* naming string mechanism so cpu_clk_trace_div picks
* up the appropriate one.
*/
.name = "cpu_clk",
.index = -1,
},
@ -4311,7 +4323,7 @@ static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 4);
static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24);
static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);
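
The parent_hws to parent_data conversions above lean on the clk framework's name-based parent lookup, so one clock definition works on both G12A and G12B even though their CPU clocks are distinct clk_hw objects. A minimal sketch of such an entry; the field semantics follow include/linux/clk-provider.h:

#include <linux/clk-provider.h>

/*
 * .index = -1 means "do not resolve this parent through the DT 'clocks'
 * property"; with .fw_name and .hw also unset, the core falls back to a
 * global lookup of the string in .name, so whichever "cpu_clk" the SoC
 * variant registered is picked up at runtime.
 */
static const struct clk_parent_data cpu_clk_parent = {
	.name = "cpu_clk",
	.index = -1,
};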

View File

@ -1266,14 +1266,13 @@ static struct clk_regmap gxbb_cts_i958 = {
},
};
/*
 * This table skips a clock named 'cts_slow_oscin' in the documentation.
* This clock does not exist yet in this controller or the AO one
*/
static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 };
static const struct clk_parent_data gxbb_32k_clk_parent_data[] = {
{ .fw_name = "xtal", },
/*
* FIXME: This clock is provided by the ao clock controller but the
* clock is not yet part of the binding of this controller, so string
* name must be use to set this parent.
*/
{ .name = "cts_slow_oscin", .index = -1 },
{ .hw = &gxbb_fclk_div3.hw },
{ .hw = &gxbb_fclk_div5.hw },
};
@ -1283,6 +1282,7 @@ static struct clk_regmap gxbb_32k_clk_sel = {
.offset = HHI_32K_CLK_CNTL,
.mask = 0x3,
.shift = 16,
.table = gxbb_32k_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "32k_clk_sel",
@ -1306,7 +1306,7 @@ static struct clk_regmap gxbb_32k_clk_div = {
&gxbb_32k_clk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
.flags = CLK_SET_RATE_PARENT,
},
};

View File

@ -87,8 +87,8 @@ static int pxa1908_apmu_probe(struct platform_device *pdev)
struct pxa1908_clk_unit *pxa_unit;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
if (IS_ERR(pxa_unit))
return PTR_ERR(pxa_unit);
if (!pxa_unit)
return -ENOMEM;
pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pxa_unit->base))
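
The hunk above fixes a mismatched error check: devm_kzalloc() signals failure by returning NULL, whereas devm_platform_ioremap_resource() returns an ERR_PTR() value, so each needs its own test. A minimal probe sketch with both checks the right way around; foo_priv and foo_probe are made up for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	/* devm_kzalloc() returns NULL on failure, never an ERR_PTR(). */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* devm_platform_ioremap_resource() returns an ERR_PTR() on failure. */
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	return 0;
}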

View File

@ -572,13 +572,19 @@ static struct clk_rcg2 gcc_qupv3_spi1_clk_src = {
};
static const struct freq_tbl ftbl_gcc_qupv3_uart0_clk_src[] = {
F(960000, P_XO, 10, 2, 5),
F(4800000, P_XO, 5, 0, 0),
F(9600000, P_XO, 2, 4, 5),
F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
F(3686400, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 144, 15625),
F(7372800, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 288, 15625),
F(14745600, P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 1, 576, 15625),
F(24000000, P_XO, 1, 0, 0),
F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
F(32000000, P_GPLL0_OUT_MAIN, 1, 1, 25),
F(40000000, P_GPLL0_OUT_MAIN, 1, 1, 20),
F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 500),
F(48000000, P_GPLL0_OUT_MAIN, 1, 3, 50),
F(51200000, P_GPLL0_OUT_MAIN, 1, 8, 125),
F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 100),
F(58982400, P_GPLL0_OUT_MAIN, 1, 1152, 15625),
F(60000000, P_GPLL0_OUT_MAIN, 1, 3, 40),
F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
{ }
};
@ -614,11 +620,11 @@ static struct clk_rcg2 gcc_qupv3_uart1_clk_src = {
static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
F(144000, P_XO, 16, 12, 125),
F(400000, P_XO, 12, 1, 5),
F(24000000, P_XO, 1, 0, 0),
F(48000000, P_GPLL2_OUT_MAIN, 12, 1, 2),
F(96000000, P_GPLL2_OUT_MAIN, 6, 1, 2),
F(24000000, P_GPLL2_OUT_MAIN, 12, 1, 2),
F(48000000, P_GPLL2_OUT_MAIN, 12, 0, 0),
F(96000000, P_GPLL2_OUT_MAIN, 6, 0, 0),
F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
F(192000000, P_GPLL2_OUT_MAIN, 6, 0, 0),
F(192000000, P_GPLL2_OUT_MAIN, 3, 0, 0),
F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
{ }
};
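
In these tables, F(rate, src, pre_div, m, n) encodes rate = src_rate / pre_div * m / n. Assuming GPLL0 runs at 800 MHz and its _OUT_MAIN_DIV branch at 400 MHz (the usual setup on these SoCs, an assumption here rather than something stated in the diff), the new M/N UART entries come out exact; a throwaway userspace check:

#include <stdio.h>

int main(void)
{
	/* rate = parent / pre_div * m / n, with pre_div = 1 for these entries */
	printf("%llu\n", 400000000ULL * 144 / 15625);	/* 3686400  */
	printf("%llu\n", 400000000ULL * 288 / 15625);	/* 7372800  */
	printf("%llu\n", 400000000ULL * 576 / 15625);	/* 14745600 */
	printf("%llu\n", 800000000ULL * 1152 / 15625);	/* 58982400 */
	return 0;
}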

View File

@ -3770,7 +3770,7 @@ static struct clk_branch gcc_venus0_axi_clk = {
static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
.halt_reg = 0x4c02c,
.halt_check = BRANCH_HALT,
.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x4c02c,
.enable_mask = BIT(0),

View File

@ -3497,7 +3497,7 @@ static struct gdsc usb30_prim_gdsc = {
.pd = {
.name = "usb30_prim_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
.pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@ -3506,7 +3506,7 @@ static struct gdsc usb3_phy_gdsc = {
.pd = {
.name = "usb3_phy_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
.pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};

View File

@ -2564,19 +2564,6 @@ static struct clk_branch gcc_disp_hf_axi_clk = {
},
};
static struct clk_branch gcc_disp_xo_clk = {
.halt_reg = 0x27018,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x27018,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_disp_xo_clk",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gcc_gp1_clk = {
.halt_reg = 0x64000,
.halt_check = BRANCH_HALT,
@ -2631,21 +2618,6 @@ static struct clk_branch gcc_gp3_clk = {
},
};
static struct clk_branch gcc_gpu_cfg_ahb_clk = {
.halt_reg = 0x71004,
.halt_check = BRANCH_HALT_VOTED,
.hwcg_reg = 0x71004,
.hwcg_bit = 1,
.clkr = {
.enable_reg = 0x71004,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_gpu_cfg_ahb_clk",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gcc_gpu_gpll0_cph_clk_src = {
.halt_check = BRANCH_HALT_DELAY,
.clkr = {
@ -6268,7 +6240,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_CNOC_PCIE_TUNNEL_CLK] = &gcc_cnoc_pcie_tunnel_clk.clkr,
[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
[GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
@ -6281,7 +6252,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_GPLL7] = &gcc_gpll7.clkr,
[GCC_GPLL8] = &gcc_gpll8.clkr,
[GCC_GPLL9] = &gcc_gpll9.clkr,
[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
[GCC_GPU_GPLL0_CPH_CLK_SRC] = &gcc_gpu_gpll0_cph_clk_src.clkr,
[GCC_GPU_GPLL0_DIV_CPH_CLK_SRC] = &gcc_gpu_gpll0_div_cph_clk_src.clkr,
[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,

View File

@ -2544,7 +2544,7 @@ static struct clk_branch video_core_clk = {
static struct clk_branch video_subcore0_clk = {
.halt_reg = 0x1048,
.halt_check = BRANCH_HALT,
.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x1048,
.enable_mask = BIT(0),

View File

@ -51,7 +51,7 @@
#define G3S_SEL_SDHI2 SEL_PLL_PACK(G3S_CPG_SDHI_DSEL, 8, 2)
/* PLL 1/4/6 configuration registers macro. */
#define G3S_PLL146_CONF(clk1, clk2) ((clk1) << 22 | (clk2) << 12)
#define G3S_PLL146_CONF(clk1, clk2, setting) ((clk1) << 22 | (clk2) << 12 | (setting))
#define DEF_G3S_MUX(_name, _id, _conf, _parent_names, _mux_flags, _clk_flags) \
DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = (_conf), \
@ -134,7 +134,8 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
/* Internal Core Clocks */
DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000),
DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8)),
DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8, 0x100),
1100000000UL),
DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3),
DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3),
DEF_FIXED(".pll4", CLK_PLL4, CLK_EXTAL, 100, 3),

View File

@ -51,6 +51,7 @@
#define RZG3S_DIV_M GENMASK(25, 22)
#define RZG3S_DIV_NI GENMASK(21, 13)
#define RZG3S_DIV_NF GENMASK(12, 1)
#define RZG3S_SEL_PLL BIT(0)
#define CLK_ON_R(reg) (reg)
#define CLK_MON_R(reg) (0x180 + (reg))
@ -60,6 +61,7 @@
#define GET_REG_OFFSET(val) ((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff)
#define GET_REG_SAMPLL_SETTING(val) ((val) & 0xfff)
#define CPG_WEN_BIT BIT(16)
@ -943,6 +945,7 @@ rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
struct pll_clk {
struct clk_hw hw;
unsigned long default_rate;
unsigned int conf;
unsigned int type;
void __iomem *base;
@ -980,12 +983,19 @@ static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
{
struct pll_clk *pll_clk = to_pll(hw);
struct rzg2l_cpg_priv *priv = pll_clk->priv;
u32 nir, nfr, mr, pr, val;
u32 nir, nfr, mr, pr, val, setting;
u64 rate;
if (pll_clk->type != CLK_TYPE_G3S_PLL)
return parent_rate;
setting = GET_REG_SAMPLL_SETTING(pll_clk->conf);
if (setting) {
val = readl(priv->base + setting);
if (val & RZG3S_SEL_PLL)
return pll_clk->default_rate;
}
val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
@ -1038,6 +1048,7 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
pll_clk->base = priv->base;
pll_clk->priv = priv;
pll_clk->type = core->type;
pll_clk->default_rate = core->default_rate;
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret)

View File

@ -102,7 +102,10 @@ struct cpg_core_clk {
const struct clk_div_table *dtable;
const u32 *mtable;
const unsigned long invalid_rate;
const unsigned long max_rate;
union {
const unsigned long max_rate;
const unsigned long default_rate;
};
const char * const *parent_names;
notifier_fn_t notifier;
u32 flag;
@ -144,8 +147,9 @@ enum clk_types {
DEF_TYPE(_name, _id, _type, .parent = _parent)
#define DEF_SAMPLL(_name, _id, _parent, _conf) \
DEF_TYPE(_name, _id, CLK_TYPE_SAM_PLL, .parent = _parent, .conf = _conf)
#define DEF_G3S_PLL(_name, _id, _parent, _conf) \
DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf)
#define DEF_G3S_PLL(_name, _id, _parent, _conf, _default_rate) \
DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf, \
.default_rate = _default_rate)
#define DEF_INPUT(_name, _id) \
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \

View File

@ -201,7 +201,7 @@ PNAME(mux_aclk_peri_pre_p) = { "cpll_peri",
"gpll_peri",
"hdmiphy_peri" };
PNAME(mux_ref_usb3otg_src_p) = { "xin24m",
"clk_usb3otg_ref" };
"clk_ref_usb3otg_src" };
PNAME(mux_xin24m_32k_p) = { "xin24m",
"clk_rtc32k" };
PNAME(mux_mac2io_src_p) = { "clk_mac2io_src",

View File

@ -74,12 +74,12 @@ struct samsung_clk_provider * __init samsung_clk_init(struct device *dev,
if (!ctx)
panic("could not allocate clock provider context.\n");
ctx->clk_data.num = nr_clks;
for (i = 0; i < nr_clks; ++i)
ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
ctx->dev = dev;
ctx->reg_base = base;
ctx->clk_data.num = nr_clks;
spin_lock_init(&ctx->lock);
return ctx;

Some files were not shown because too many files have changed in this diff.