Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ (synced 2025-04-19 20:58:31 +09:00)
This is the 6.12.23 stable release
Merge v6.12.23

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in: commit 08c38f86ab
@ -581,6 +581,8 @@ patternProperties:
description: GlobalTop Technology, Inc.
"^gmt,.*":
description: Global Mixed-mode Technology, Inc.
"^gocontroll,.*":
description: GOcontroll Modular Embedded Electronics B.V.
"^goldelico,.*":
description: Golden Delicious Computers GmbH & Co. KG
"^goodix,.*":
Makefile (2 changed lines)
@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 12
SUBLEVEL = 22
SUBLEVEL = 23
EXTRAVERSION =
NAME = Baby Opossum Posse
@ -118,7 +118,7 @@ config ARM
select HAVE_KERNEL_XZ
select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if HAVE_KPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_IS_LLD)
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_CAN_USE_KEEP_IN_OVERLAY)
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPTPROBES if !THUMB2_KERNEL
@ -34,6 +34,12 @@
#define NOCROSSREFS
#endif

#ifdef CONFIG_LD_CAN_USE_KEEP_IN_OVERLAY
#define OVERLAY_KEEP(x) KEEP(x)
#else
#define OVERLAY_KEEP(x) x
#endif

/* Set start/end symbol names to the LMA for the section */
#define ARM_LMA(sym, section) \
sym##_start = LOADADDR(section); \
@ -125,13 +131,13 @@
__vectors_lma = .; \
OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \
.vectors { \
*(.vectors) \
OVERLAY_KEEP(*(.vectors)) \
} \
.vectors.bhb.loop8 { \
*(.vectors.bhb.loop8) \
OVERLAY_KEEP(*(.vectors.bhb.loop8)) \
} \
.vectors.bhb.bpiall { \
*(.vectors.bhb.bpiall) \
OVERLAY_KEEP(*(.vectors.bhb.bpiall)) \
} \
} \
ARM_LMA(__vectors, .vectors); \
@ -368,6 +368,8 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
return 1;
}

if (!handler)
return 1;
type = handler(addr, instr, regs);

if (type == TYPE_ERROR || type == TYPE_FAULT)
@ -375,8 +375,8 @@ config CMDLINE_BOOTLOADER
config CMDLINE_EXTEND
bool "Use built-in to extend bootloader kernel arguments"
help
The command-line arguments provided during boot will be
appended to the built-in command line. This is useful in
The built-in command line will be appended to the command-
line arguments provided during boot. This is useful in
cases where the provided arguments are insufficient and
you don't want to or cannot modify them.
@ -8,6 +8,8 @@
#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

#define ARCH_DMA_MINALIGN (16)

#define __read_mostly __section(".data..read_mostly")

#endif /* _ASM_CACHE_H */
@ -53,7 +53,7 @@ void spurious_interrupt(void);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);

#define MAX_IO_PICS 2
#define MAX_IO_PICS 8
#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))

struct acpi_vector_group {
@ -8,6 +8,7 @@
#include <asm/asm.h>
#include <asm/ptrace.h>
#include <asm/loongarch.h>
#include <asm/unwind_hints.h>
#include <linux/stringify.h>

enum stack_type {
@ -43,6 +44,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i
static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
__asm__ __volatile__(
UNWIND_HINT_SAVE
/* Save $ra */
STORE_ONE_REG(1)
/* Use $ra to save PC */
@ -80,6 +82,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs)
STORE_ONE_REG(29)
STORE_ONE_REG(30)
STORE_ONE_REG(31)
UNWIND_HINT_RESTORE
: "=m" (regs->csr_era)
: "r" (regs->regs)
: "memory");
@ -23,6 +23,14 @@
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
.endm

#endif /* __ASSEMBLY__ */
#else /* !__ASSEMBLY__ */

#define UNWIND_HINT_SAVE \
UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0)

#define UNWIND_HINT_RESTORE \
UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */
@ -68,6 +68,8 @@ static int __init fdt_cpu_clk_init(void)
return -ENODEV;

clk = of_clk_get(np, 0);
of_node_put(np);

if (IS_ERR(clk))
return -ENODEV;
@ -8,6 +8,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/objtool.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->csr_era = pc;
}

void arch_kgdb_breakpoint(void)
noinline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ( \
".globl kgdb_breakinst\n\t" \
"nop\n" \
"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
}
STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);

/*
 * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx)
 */
if (seen_tail_call(ctx) && seen_call(ctx))
move_reg(ctx, TCC_SAVED, REG_TCC);
else
emit_insn(ctx, nop);

ctx->stack_size = stack_adjust;
}
@ -905,7 +907,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext

move_addr(ctx, t1, func_addr);
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);

if (insn->src_reg != BPF_PSEUDO_CALL)
move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);

break;

/* tail call */
@ -930,7 +935,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
{
const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;

move_imm(ctx, dst, imm64, is32);
if (bpf_pseudo_func(insn))
move_addr(ctx, dst, imm64);
else
move_imm(ctx, dst, imm64, is32);
return 1;
}
@ -27,6 +27,11 @@ struct jit_data {
struct jit_ctx ctx;
};

static inline void emit_nop(union loongarch_instruction *insn)
{
insn->word = INSN_NOP;
}

#define emit_insn(ctx, func, ...) \
do { \
if (ctx->image != NULL) { \
@ -78,4 +78,4 @@ CONFIG_DEBUG_VM_PGTABLE=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_BDI_SWITCH=y
CONFIG_PPC_EARLY_DEBUG=y
CONFIG_GENERIC_PTDUMP=y
CONFIG_PTDUMP_DEBUGFS=y
@ -56,3 +56,4 @@ $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y
OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y
@ -348,16 +348,13 @@ write_utlb:
rlwinm r10, r24, 0, 22, 27

cmpwi r10, PPC47x_TLB0_4K
bne 0f
li r10, 0x1000 /* r10 = 4k */
ANNOTATE_INTRA_FUNCTION_CALL
bl 1f
beq 0f

0:
/* Defaults to 256M */
lis r10, 0x1000

bcl 20,31,$+4
0: bcl 20,31,$+4
1: mflr r4
addi r4, r4, (2f-1b) /* virtual address of 2f */
@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void)
mutex_init(&gang->aff_mutex);
INIT_LIST_HEAD(&gang->list);
INIT_LIST_HEAD(&gang->aff_list_head);
gang->alive = 1;

out:
return gang;
@ -192,13 +192,32 @@ static int spufs_fill_dir(struct dentry *dir,
return -ENOMEM;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
if (ret)
if (ret) {
dput(dentry);
return ret;
}
files++;
}
return 0;
}

static void unuse_gang(struct dentry *dir)
{
struct inode *inode = dir->d_inode;
struct spu_gang *gang = SPUFS_I(inode)->i_gang;

if (gang) {
bool dead;

inode_lock(inode); // exclusion with spufs_create_context()
dead = !--gang->alive;
inode_unlock(inode);

if (dead)
simple_recursive_removal(dir, NULL);
}
}

static int spufs_dir_close(struct inode *inode, struct file *file)
{
struct inode *parent;
@ -213,6 +232,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
inode_unlock(parent);
WARN_ON(ret);

unuse_gang(dir->d_parent);
return dcache_dir_close(inode, file);
}

@ -405,7 +425,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
{
int ret;
int affinity;
struct spu_gang *gang;
struct spu_gang *gang = SPUFS_I(inode)->i_gang;
struct spu_context *neighbor;
struct path path = {.mnt = mnt, .dentry = dentry};

@ -420,11 +440,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
return -ENODEV;

gang = NULL;
if (gang) {
if (!gang->alive)
return -ENOENT;
gang->alive++;
}

neighbor = NULL;
affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
if (affinity) {
gang = SPUFS_I(inode)->i_gang;
if (!gang)
return -EINVAL;
mutex_lock(&gang->aff_mutex);
@ -436,8 +460,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
}

ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
if (ret)
if (ret) {
if (neighbor)
put_spu_context(neighbor);
goto out_aff_unlock;
}

if (affinity) {
spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
@ -453,6 +480,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
out_aff_unlock:
if (affinity)
mutex_unlock(&gang->aff_mutex);
if (ret && gang)
gang->alive--; // can't reach 0
return ret;
}

@ -482,6 +511,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_fop = &simple_dir_operations;

d_instantiate(dentry, inode);
dget(dentry);
inc_nlink(dir);
inc_nlink(d_inode(dentry));
return ret;
@ -492,6 +522,21 @@ out:
return ret;
}

static int spufs_gang_close(struct inode *inode, struct file *file)
{
unuse_gang(file->f_path.dentry);
return dcache_dir_close(inode, file);
}

static const struct file_operations spufs_gang_fops = {
.open = dcache_dir_open,
.release = spufs_gang_close,
.llseek = dcache_dir_lseek,
.read = generic_read_dir,
.iterate_shared = dcache_readdir,
.fsync = noop_fsync,
};

static int spufs_gang_open(const struct path *path)
{
int ret;
@ -511,7 +556,7 @@ static int spufs_gang_open(const struct path *path)
return PTR_ERR(filp);
}

filp->f_op = &simple_dir_operations;
filp->f_op = &spufs_gang_fops;
fd_install(ret, filp);
return ret;
}
@ -526,10 +571,8 @@ static int spufs_create_gang(struct inode *inode,
ret = spufs_mkgang(inode, dentry, mode & 0777);
if (!ret) {
ret = spufs_gang_open(&path);
if (ret < 0) {
int err = simple_rmdir(inode, dentry);
WARN_ON(err);
}
if (ret < 0)
unuse_gang(dentry);
}
return ret;
}
@ -151,6 +151,8 @@ struct spu_gang {
int aff_flags;
struct spu *aff_ref_spu;
atomic_t aff_sched_count;

int alive;
};

/* Flag bits for spu_gang aff_flags */
@ -1,5 +1,9 @@
|
||||
ifdef CONFIG_RELOCATABLE
|
||||
KBUILD_CFLAGS += -fno-pie
|
||||
# We can't use PIC/PIE when handling early-boot errata parsing, as the kernel
|
||||
# doesn't have a GOT setup at that point. So instead just use medany: it's
|
||||
# usually position-independent, so it should be good enough for the errata
|
||||
# handling.
|
||||
KBUILD_CFLAGS += -fno-pie -mcmodel=medany
|
||||
endif
|
||||
|
||||
ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
|
||||
|
@ -92,7 +92,7 @@ struct dyn_arch_ftrace {
|
||||
#define make_call_t0(caller, callee, call) \
|
||||
do { \
|
||||
unsigned int offset = \
|
||||
(unsigned long) callee - (unsigned long) caller; \
|
||||
(unsigned long) (callee) - (unsigned long) (caller); \
|
||||
call[0] = to_auipc_t0(offset); \
|
||||
call[1] = to_jalr_t0(offset); \
|
||||
} while (0)
|
||||
@ -108,7 +108,7 @@ do { \
|
||||
#define make_call_ra(caller, callee, call) \
|
||||
do { \
|
||||
unsigned int offset = \
|
||||
(unsigned long) callee - (unsigned long) caller; \
|
||||
(unsigned long) (callee) - (unsigned long) (caller); \
|
||||
call[0] = to_auipc_ra(offset); \
|
||||
call[1] = to_jalr_ra(offset); \
|
||||
} while (0)
|
||||
|
@ -468,6 +468,9 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
|
||||
case R_RISCV_ALIGN:
|
||||
case R_RISCV_RELAX:
|
||||
break;
|
||||
case R_RISCV_64:
|
||||
*(u64 *)loc = val;
|
||||
break;
|
||||
default:
|
||||
pr_err("Unknown rela relocation: %d\n", r_type);
|
||||
return -ENOEXEC;
|
||||
|
@ -666,6 +666,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
|
||||
.type = etype,
|
||||
.size = sizeof(struct perf_event_attr),
|
||||
.pinned = true,
|
||||
.disabled = true,
|
||||
/*
|
||||
* It should never reach here if the platform doesn't support the sscofpmf
|
||||
* extension as mode filtering won't work without it.
|
||||
|
@ -148,22 +148,25 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
|
||||
static pte_t get_clear_contig(struct mm_struct *mm,
|
||||
unsigned long addr,
|
||||
pte_t *ptep,
|
||||
unsigned long pte_num)
|
||||
unsigned long ncontig)
|
||||
{
|
||||
pte_t orig_pte = ptep_get(ptep);
|
||||
unsigned long i;
|
||||
pte_t pte, tmp_pte;
|
||||
bool present;
|
||||
|
||||
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) {
|
||||
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
|
||||
|
||||
if (pte_dirty(pte))
|
||||
orig_pte = pte_mkdirty(orig_pte);
|
||||
|
||||
if (pte_young(pte))
|
||||
orig_pte = pte_mkyoung(orig_pte);
|
||||
pte = ptep_get_and_clear(mm, addr, ptep);
|
||||
present = pte_present(pte);
|
||||
while (--ncontig) {
|
||||
ptep++;
|
||||
addr += PAGE_SIZE;
|
||||
tmp_pte = ptep_get_and_clear(mm, addr, ptep);
|
||||
if (present) {
|
||||
if (pte_dirty(tmp_pte))
|
||||
pte = pte_mkdirty(pte);
|
||||
if (pte_young(tmp_pte))
|
||||
pte = pte_mkyoung(pte);
|
||||
}
|
||||
}
|
||||
|
||||
return orig_pte;
|
||||
return pte;
|
||||
}
|
||||
|
||||
static pte_t get_clear_contig_flush(struct mm_struct *mm,
|
||||
@ -212,6 +215,26 @@ static void clear_flush(struct mm_struct *mm,
|
||||
flush_tlb_range(&vma, saddr, addr);
|
||||
}
|
||||
|
||||
static int num_contig_ptes_from_size(unsigned long sz, size_t *pgsize)
|
||||
{
|
||||
unsigned long hugepage_shift;
|
||||
|
||||
if (sz >= PGDIR_SIZE)
|
||||
hugepage_shift = PGDIR_SHIFT;
|
||||
else if (sz >= P4D_SIZE)
|
||||
hugepage_shift = P4D_SHIFT;
|
||||
else if (sz >= PUD_SIZE)
|
||||
hugepage_shift = PUD_SHIFT;
|
||||
else if (sz >= PMD_SIZE)
|
||||
hugepage_shift = PMD_SHIFT;
|
||||
else
|
||||
hugepage_shift = PAGE_SHIFT;
|
||||
|
||||
*pgsize = 1 << hugepage_shift;
|
||||
|
||||
return sz >> hugepage_shift;
|
||||
}
|
||||
|
||||
/*
|
||||
* When dealing with NAPOT mappings, the privileged specification indicates that
|
||||
* "if an update needs to be made, the OS generally should first mark all of the
|
||||
@ -226,22 +249,10 @@ void set_huge_pte_at(struct mm_struct *mm,
|
||||
pte_t pte,
|
||||
unsigned long sz)
|
||||
{
|
||||
unsigned long hugepage_shift, pgsize;
|
||||
size_t pgsize;
|
||||
int i, pte_num;
|
||||
|
||||
if (sz >= PGDIR_SIZE)
|
||||
hugepage_shift = PGDIR_SHIFT;
|
||||
else if (sz >= P4D_SIZE)
|
||||
hugepage_shift = P4D_SHIFT;
|
||||
else if (sz >= PUD_SIZE)
|
||||
hugepage_shift = PUD_SHIFT;
|
||||
else if (sz >= PMD_SIZE)
|
||||
hugepage_shift = PMD_SHIFT;
|
||||
else
|
||||
hugepage_shift = PAGE_SHIFT;
|
||||
|
||||
pte_num = sz >> hugepage_shift;
|
||||
pgsize = 1 << hugepage_shift;
|
||||
pte_num = num_contig_ptes_from_size(sz, &pgsize);
|
||||
|
||||
if (!pte_present(pte)) {
|
||||
for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
|
||||
@ -295,13 +306,14 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr,
|
||||
pte_t *ptep, unsigned long sz)
|
||||
{
|
||||
size_t pgsize;
|
||||
pte_t orig_pte = ptep_get(ptep);
|
||||
int pte_num;
|
||||
|
||||
if (!pte_napot(orig_pte))
|
||||
return ptep_get_and_clear(mm, addr, ptep);
|
||||
|
||||
pte_num = napot_pte_num(napot_cont_order(orig_pte));
|
||||
pte_num = num_contig_ptes_from_size(sz, &pgsize);
|
||||
|
||||
return get_clear_contig(mm, addr, ptep, pte_num);
|
||||
}
|
||||
@ -351,6 +363,7 @@ void huge_pte_clear(struct mm_struct *mm,
|
||||
pte_t *ptep,
|
||||
unsigned long sz)
|
||||
{
|
||||
size_t pgsize;
|
||||
pte_t pte = ptep_get(ptep);
|
||||
int i, pte_num;
|
||||
|
||||
@ -359,8 +372,9 @@ void huge_pte_clear(struct mm_struct *mm,
|
||||
return;
|
||||
}
|
||||
|
||||
pte_num = napot_pte_num(napot_cont_order(pte));
|
||||
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
|
||||
pte_num = num_contig_ptes_from_size(sz, &pgsize);
|
||||
|
||||
for (i = 0; i < pte_num; i++, addr += pgsize, ptep++)
|
||||
pte_clear(mm, addr, ptep);
|
||||
}
|
||||
|
||||
|
@ -12,6 +12,7 @@
|
||||
|
||||
.text
|
||||
|
||||
.align 2
|
||||
SYM_CODE_START(purgatory_start)
|
||||
|
||||
lla sp, .Lstack
|
||||
|
@ -34,8 +34,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
|
||||
|
||||
#define ioremap_wc(addr, size) \
|
||||
ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL)))
|
||||
#define ioremap_wt(addr, size) \
|
||||
ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL)))
|
||||
|
||||
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
|
||||
{
|
||||
|
@ -1365,9 +1365,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
|
||||
#define pgprot_writecombine pgprot_writecombine
|
||||
pgprot_t pgprot_writecombine(pgprot_t prot);
|
||||
|
||||
#define pgprot_writethrough pgprot_writethrough
|
||||
pgprot_t pgprot_writethrough(pgprot_t prot);
|
||||
|
||||
#define PFN_PTE_SHIFT PAGE_SHIFT
|
||||
|
||||
/*
|
||||
|
@ -501,7 +501,7 @@ SYM_CODE_START(mcck_int_handler)
|
||||
clgrjl %r9,%r14, 4f
|
||||
larl %r14,.Lsie_leave
|
||||
clgrjhe %r9,%r14, 4f
|
||||
lg %r10,__LC_PCPU
|
||||
lg %r10,__LC_PCPU(%r13)
|
||||
oi __PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
|
||||
4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
|
||||
SIEEXIT __SF_SIE_CONTROL(%r15),%r13
|
||||
|
@ -34,16 +34,6 @@ pgprot_t pgprot_writecombine(pgprot_t prot)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pgprot_writecombine);
|
||||
|
||||
pgprot_t pgprot_writethrough(pgprot_t prot)
|
||||
{
|
||||
/*
|
||||
* mio_wb_bit_mask may be set on a different CPU, but it is only set
|
||||
* once at init and only read afterwards.
|
||||
*/
|
||||
return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pgprot_writethrough);
|
||||
|
||||
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, int nodat)
|
||||
{
|
||||
|
@ -218,7 +218,6 @@ extern int os_protect_memory(void *addr, unsigned long len,
|
||||
extern int os_unmap_memory(void *addr, int len);
|
||||
extern int os_drop_memory(void *addr, int length);
|
||||
extern int can_drop_memory(void);
|
||||
extern int os_mincore(void *addr, unsigned long len);
|
||||
|
||||
/* execvp.c */
|
||||
extern int execvp_noalloc(char *buf, const char *file, char *const argv[]);
|
||||
|
@ -17,7 +17,7 @@ extra-y := vmlinux.lds
|
||||
obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
|
||||
physmem.o process.o ptrace.o reboot.o sigio.o \
|
||||
signal.o sysrq.o time.o tlb.o trap.o \
|
||||
um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/
|
||||
um_arch.o umid.o kmsg_dump.o capflags.o skas/
|
||||
obj-y += load_file.o
|
||||
|
||||
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
|
||||
|
@ -1,19 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2013 Richard Weinberger <richrd@nod.at>
|
||||
*/
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <os.h>
|
||||
|
||||
bool copy_from_kernel_nofault_allowed(const void *src, size_t size)
|
||||
{
|
||||
void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
|
||||
|
||||
if ((unsigned long)src < PAGE_SIZE || size <= 0)
|
||||
return false;
|
||||
if (os_mincore(psrc, size + src - psrc) <= 0)
|
||||
return false;
|
||||
return true;
|
||||
}
|
@ -223,57 +223,6 @@ out:
|
||||
return ok;
|
||||
}
|
||||
|
||||
static int os_page_mincore(void *addr)
|
||||
{
|
||||
char vec[2];
|
||||
int ret;
|
||||
|
||||
ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
|
||||
if (ret < 0) {
|
||||
if (errno == ENOMEM || errno == EINVAL)
|
||||
return 0;
|
||||
else
|
||||
return -errno;
|
||||
}
|
||||
|
||||
return vec[0] & 1;
|
||||
}
|
||||
|
||||
int os_mincore(void *addr, unsigned long len)
|
||||
{
|
||||
char *vec;
|
||||
int ret, i;
|
||||
|
||||
if (len <= UM_KERN_PAGE_SIZE)
|
||||
return os_page_mincore(addr);
|
||||
|
||||
vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
|
||||
if (!vec)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
|
||||
if (ret < 0) {
|
||||
if (errno == ENOMEM || errno == EINVAL)
|
||||
ret = 0;
|
||||
else
|
||||
ret = -errno;
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
|
||||
if (!(vec[i] & 1)) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
ret = 1;
|
||||
out:
|
||||
free(vec);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void init_new_thread_signals(void)
|
||||
{
|
||||
set_handler(SIGSEGV);
|
||||
|
@ -226,7 +226,7 @@ config X86
|
||||
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64
|
||||
select HAVE_EBPF_JIT
|
||||
select HAVE_EFFICIENT_UNALIGNED_ACCESS
|
||||
select HAVE_EISA
|
||||
select HAVE_EISA if X86_32
|
||||
select HAVE_EXIT_THREAD
|
||||
select HAVE_GUP_FAST
|
||||
select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE
|
||||
@ -894,6 +894,7 @@ config INTEL_TDX_GUEST
|
||||
depends on X86_64 && CPU_SUP_INTEL
|
||||
depends on X86_X2APIC
|
||||
depends on EFI_STUB
|
||||
depends on PARAVIRT
|
||||
select ARCH_HAS_CC_PLATFORM
|
||||
select X86_MEM_ENCRYPT
|
||||
select X86_MCE
|
||||
|
@ -368,7 +368,7 @@ config X86_HAVE_PAE
|
||||
|
||||
config X86_CMPXCHG64
|
||||
def_bool y
|
||||
depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7
|
||||
depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7 || MGEODEGX1 || MGEODE_LX
|
||||
|
||||
# this should be set for all -march=.. options where the compiler
|
||||
# generates cmov.
|
||||
|
@ -7,12 +7,13 @@ core-y += arch/x86/crypto/
|
||||
# GCC versions < 11. See:
|
||||
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
|
||||
#
|
||||
ifeq ($(CONFIG_CC_IS_CLANG),y)
|
||||
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
|
||||
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
|
||||
ifeq ($(call gcc-min-version, 110000)$(CONFIG_CC_IS_CLANG),y)
|
||||
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
|
||||
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
|
||||
endif
|
||||
|
||||
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
|
||||
|
||||
ifeq ($(CONFIG_X86_32),y)
|
||||
START := 0x8048000
|
||||
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <asm/ia32.h>
|
||||
#include <asm/insn.h>
|
||||
#include <asm/insn-eval.h>
|
||||
#include <asm/paravirt_types.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/set_memory.h>
|
||||
#include <asm/traps.h>
|
||||
@ -359,7 +360,7 @@ static int handle_halt(struct ve_info *ve)
|
||||
return ve_instr_len(ve);
|
||||
}
|
||||
|
||||
void __cpuidle tdx_safe_halt(void)
|
||||
void __cpuidle tdx_halt(void)
|
||||
{
|
||||
const bool irq_disabled = false;
|
||||
|
||||
@ -370,6 +371,16 @@ void __cpuidle tdx_safe_halt(void)
|
||||
WARN_ONCE(1, "HLT instruction emulation failed\n");
|
||||
}
|
||||
|
||||
static void __cpuidle tdx_safe_halt(void)
|
||||
{
|
||||
tdx_halt();
|
||||
/*
|
||||
* "__cpuidle" section doesn't support instrumentation, so stick
|
||||
* with raw_* variant that avoids tracing hooks.
|
||||
*/
|
||||
raw_local_irq_enable();
|
||||
}
|
||||
|
||||
static int read_msr(struct pt_regs *regs, struct ve_info *ve)
|
||||
{
|
||||
struct tdx_module_args args = {
|
||||
@ -1056,6 +1067,19 @@ void __init tdx_early_init(void)
|
||||
x86_platform.guest.enc_kexec_begin = tdx_kexec_begin;
|
||||
x86_platform.guest.enc_kexec_finish = tdx_kexec_finish;
|
||||
|
||||
/*
|
||||
* Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that
|
||||
* will enable interrupts before HLT TDCALL invocation if executed
|
||||
* in STI-shadow, possibly resulting in missed wakeup events.
|
||||
*
|
||||
* Modify all possible HLT execution paths to use TDX specific routines
|
||||
* that directly execute TDCALL and toggle the interrupt state as
|
||||
* needed after TDCALL completion. This also reduces HLT related #VEs
|
||||
* in addition to having a reliable halt logic execution.
|
||||
*/
|
||||
pv_ops.irq.safe_halt = tdx_safe_halt;
|
||||
pv_ops.irq.halt = tdx_halt;
|
||||
|
||||
/*
|
||||
* TDX intercepts the RDMSR to read the X2APIC ID in the parallel
|
||||
* bringup low level code. That raises #VE which cannot be handled
|
||||
|
@ -70,6 +70,8 @@ For 32-bit we have the following conventions - kernel is built with
|
||||
pushq %rsi /* pt_regs->si */
|
||||
movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
|
||||
movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */
|
||||
/* We just clobbered the return address - use the IRET frame for unwinding: */
|
||||
UNWIND_HINT_IRET_REGS offset=3*8
|
||||
.else
|
||||
pushq %rdi /* pt_regs->di */
|
||||
pushq %rsi /* pt_regs->si */
|
||||
|
@ -142,7 +142,7 @@ static __always_inline int syscall_32_enter(struct pt_regs *regs)
|
||||
#ifdef CONFIG_IA32_EMULATION
|
||||
bool __ia32_enabled __ro_after_init = !IS_ENABLED(CONFIG_IA32_EMULATION_DEFAULT_DISABLED);
|
||||
|
||||
static int ia32_emulation_override_cmdline(char *arg)
|
||||
static int __init ia32_emulation_override_cmdline(char *arg)
|
||||
{
|
||||
return kstrtobool(arg, &__ia32_enabled);
|
||||
}
|
||||
|
@ -2779,28 +2779,33 @@ static u64 icl_update_topdown_event(struct perf_event *event)
|
||||
|
||||
DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
|
||||
|
||||
static void intel_pmu_read_topdown_event(struct perf_event *event)
|
||||
{
|
||||
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
|
||||
/* Only need to call update_topdown_event() once for group read. */
|
||||
if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
|
||||
!is_slots_event(event))
|
||||
return;
|
||||
|
||||
perf_pmu_disable(event->pmu);
|
||||
static_call(intel_pmu_update_topdown_event)(event);
|
||||
perf_pmu_enable(event->pmu);
|
||||
}
|
||||
|
||||
static void intel_pmu_read_event(struct perf_event *event)
|
||||
{
|
||||
if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
|
||||
intel_pmu_auto_reload_read(event);
|
||||
else if (is_topdown_count(event))
|
||||
intel_pmu_read_topdown_event(event);
|
||||
else
|
||||
x86_perf_event_update(event);
|
||||
if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) {
|
||||
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||
bool pmu_enabled = cpuc->enabled;
|
||||
|
||||
/* Only need to call update_topdown_event() once for group read. */
|
||||
if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
|
||||
return;
|
||||
|
||||
cpuc->enabled = 0;
|
||||
if (pmu_enabled)
|
||||
intel_pmu_disable_all();
|
||||
|
||||
if (is_topdown_event(event))
|
||||
static_call(intel_pmu_update_topdown_event)(event);
|
||||
else
|
||||
intel_pmu_drain_pebs_buffer();
|
||||
|
||||
cpuc->enabled = pmu_enabled;
|
||||
if (pmu_enabled)
|
||||
intel_pmu_enable_all(0);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
x86_perf_event_update(event);
|
||||
}
|
||||
|
||||
static void intel_pmu_enable_fixed(struct perf_event *event)
|
||||
@ -3067,7 +3072,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
|
||||
|
||||
handled++;
|
||||
x86_pmu_handle_guest_pebs(regs, &data);
|
||||
x86_pmu.drain_pebs(regs, &data);
|
||||
static_call(x86_pmu_drain_pebs)(regs, &data);
|
||||
status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
|
||||
|
||||
/*
|
||||
|
@ -932,11 +932,11 @@ unlock:
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline void intel_pmu_drain_pebs_buffer(void)
|
||||
void intel_pmu_drain_pebs_buffer(void)
|
||||
{
|
||||
struct perf_sample_data data;
|
||||
|
||||
x86_pmu.drain_pebs(NULL, &data);
|
||||
static_call(x86_pmu_drain_pebs)(NULL, &data);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2079,15 +2079,6 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void intel_pmu_auto_reload_read(struct perf_event *event)
|
||||
{
|
||||
WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
|
||||
|
||||
perf_pmu_disable(event->pmu);
|
||||
intel_pmu_drain_pebs_buffer();
|
||||
perf_pmu_enable(event->pmu);
|
||||
}
|
||||
|
||||
/*
|
||||
* Special variant of intel_pmu_save_and_restart() for auto-reload.
|
||||
*/
|
||||
|
@ -1092,6 +1092,7 @@ extern struct x86_pmu x86_pmu __read_mostly;
|
||||
|
||||
DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
|
||||
DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update);
|
||||
DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
|
||||
|
||||
static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
|
||||
{
|
||||
@ -1626,7 +1627,7 @@ void intel_pmu_pebs_disable_all(void);
|
||||
|
||||
void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
|
||||
|
||||
void intel_pmu_auto_reload_read(struct perf_event *event);
|
||||
void intel_pmu_drain_pebs_buffer(void);
|
||||
|
||||
void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
|
||||
|
||||
|
@ -30,6 +30,7 @@ void __init hv_vtl_init_platform(void)
|
||||
x86_platform.realmode_init = x86_init_noop;
|
||||
x86_init.irqs.pre_vector_init = x86_init_noop;
|
||||
x86_init.timers.timer_init = x86_init_noop;
|
||||
x86_init.resources.probe_roms = x86_init_noop;
|
||||
|
||||
/* Avoid searching for BIOS MP tables */
|
||||
x86_init.mpparse.find_mptable = x86_init_noop;
|
||||
|
@ -339,7 +339,7 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
|
||||
vmsa->sev_features = sev_status >> 2;
|
||||
|
||||
ret = snp_set_vmsa(vmsa, true);
|
||||
if (!ret) {
|
||||
if (ret) {
|
||||
pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
|
||||
free_page((u64)vmsa);
|
||||
return ret;
|
||||
@ -465,7 +465,6 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
|
||||
enum hv_mem_host_visibility visibility)
|
||||
{
|
||||
struct hv_gpa_range_for_visibility *input;
|
||||
u16 pages_processed;
|
||||
u64 hv_status;
|
||||
unsigned long flags;
|
||||
|
||||
@ -494,7 +493,7 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
|
||||
memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
|
||||
hv_status = hv_do_rep_hypercall(
|
||||
HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
|
||||
0, input, &pages_processed);
|
||||
0, input, NULL);
|
||||
local_irq_restore(flags);
|
||||
|
||||
if (hv_result_success(hv_status))
|
||||
|
@ -58,7 +58,7 @@ void tdx_get_ve_info(struct ve_info *ve);
|
||||
|
||||
bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);
|
||||
|
||||
void tdx_safe_halt(void);
|
||||
void tdx_halt(void);
|
||||
|
||||
bool tdx_early_handle_ve(struct pt_regs *regs);
|
||||
|
||||
@ -69,7 +69,7 @@ u64 tdx_hcall_get_quote(u8 *buf, size_t size);
|
||||
#else
|
||||
|
||||
static inline void tdx_early_init(void) { };
|
||||
static inline void tdx_safe_halt(void) { };
|
||||
static inline void tdx_halt(void) { };
|
||||
|
||||
static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }
|
||||
|
||||
|
@ -242,7 +242,7 @@ void flush_tlb_multi(const struct cpumask *cpumask,
|
||||
flush_tlb_mm_range((vma)->vm_mm, start, end, \
|
||||
((vma)->vm_flags & VM_HUGETLB) \
|
||||
? huge_page_shift(hstate_vma(vma)) \
|
||||
: PAGE_SHIFT, false)
|
||||
: PAGE_SHIFT, true)
|
||||
|
||||
extern void flush_tlb_all(void);
|
||||
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
|
||||
|
@ -300,13 +300,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs)
|
||||
copy_user = is_copy_from_user(regs);
|
||||
instrumentation_end();
|
||||
|
||||
switch (fixup_type) {
|
||||
case EX_TYPE_UACCESS:
|
||||
if (!copy_user)
|
||||
return IN_KERNEL;
|
||||
m->kflags |= MCE_IN_KERNEL_COPYIN;
|
||||
fallthrough;
|
||||
if (copy_user) {
|
||||
m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV;
|
||||
return IN_KERNEL_RECOV;
|
||||
}
|
||||
|
||||
switch (fixup_type) {
|
||||
case EX_TYPE_FAULT_MCE_SAFE:
|
||||
case EX_TYPE_DEFAULT_MCE_SAFE:
|
||||
m->kflags |= MCE_IN_KERNEL_RECOV;
|
||||
|
@ -600,7 +600,7 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
|
||||
unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
|
||||
|
||||
if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
|
||||
return -1;
|
||||
return false;
|
||||
|
||||
native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
|
||||
|
||||
|
@ -148,7 +148,8 @@ static int closid_alloc(void)
|
||||
|
||||
lockdep_assert_held(&rdtgroup_mutex);
|
||||
|
||||
if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
|
||||
if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) &&
|
||||
is_llc_occupancy_enabled()) {
|
||||
cleanest_closid = resctrl_find_cleanest_closid();
|
||||
if (cleanest_closid < 0)
|
||||
return cleanest_closid;
|
||||
|
@ -150,13 +150,15 @@ int __init sgx_drv_init(void)
|
||||
u64 xfrm_mask;
|
||||
int ret;
|
||||
|
||||
if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
|
||||
if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
|
||||
pr_info("SGX disabled: SGX launch control CPU feature is not available, /dev/sgx_enclave disabled.\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);
|
||||
|
||||
if (!(eax & 1)) {
|
||||
pr_err("SGX disabled: SGX1 instruction support not available.\n");
|
||||
pr_info("SGX disabled: SGX1 instruction support not available, /dev/sgx_enclave disabled.\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
@ -173,8 +175,10 @@ int __init sgx_drv_init(void)
|
||||
}
|
||||
|
||||
ret = misc_register(&sgx_dev_enclave);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
pr_info("SGX disabled: Unable to register the /dev/sgx_enclave driver (%d).\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -195,6 +195,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
|
||||
printk("%sCall Trace:\n", log_lvl);
|
||||
|
||||
unwind_start(&state, task, regs, stack);
|
||||
stack = stack ?: get_stack_pointer(task, regs);
|
||||
regs = unwind_get_entry_regs(&state, &partial);
|
||||
|
||||
/*
|
||||
@ -213,9 +214,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
|
||||
* - hardirq stack
|
||||
* - entry stack
|
||||
*/
|
||||
for (stack = stack ?: get_stack_pointer(task, regs);
|
||||
stack;
|
||||
stack = stack_info.next_sp) {
|
||||
for (; stack; stack = stack_info.next_sp) {
|
||||
const char *stack_name;
|
||||
|
||||
stack = PTR_ALIGN(stack, sizeof(long));
|
||||
|
@ -220,7 +220,7 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
|
||||
struct fpstate *fpstate;
|
||||
unsigned int size;
|
||||
|
||||
size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
|
||||
size = fpu_kernel_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
|
||||
fpstate = vzalloc(size);
|
||||
if (!fpstate)
|
||||
return false;
|
||||
@ -232,8 +232,8 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
|
||||
fpstate->is_guest = true;
|
||||
|
||||
gfpu->fpstate = fpstate;
|
||||
gfpu->xfeatures = fpu_user_cfg.default_features;
|
||||
gfpu->perm = fpu_user_cfg.default_features;
|
||||
gfpu->xfeatures = fpu_kernel_cfg.default_features;
|
||||
gfpu->perm = fpu_kernel_cfg.default_features;
|
||||
|
||||
/*
|
||||
* KVM sets the FP+SSE bits in the XSAVE header when copying FPU state
|
||||
|
@ -92,7 +92,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
|
||||
*/
|
||||
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
|
||||
{
|
||||
memcpy(dst, src, arch_task_struct_size);
|
||||
/* init_task is not dynamically sized (incomplete FPU state) */
|
||||
if (unlikely(src == &init_task))
|
||||
memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(init_task), 0);
|
||||
else
|
||||
memcpy(dst, src, arch_task_struct_size);
|
||||
|
||||
#ifdef CONFIG_VM86
|
||||
dst->thread.vm86 = NULL;
|
||||
#endif
|
||||
@ -933,7 +938,7 @@ void __init select_idle_routine(void)
|
||||
static_call_update(x86_idle, mwait_idle);
|
||||
} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
|
||||
pr_info("using TDX aware idle routine\n");
|
||||
static_call_update(x86_idle, tdx_safe_halt);
|
||||
static_call_update(x86_idle, tdx_halt);
|
||||
} else {
|
||||
static_call_update(x86_idle, default_idle);
|
||||
}
|
||||
|
@ -379,6 +379,21 @@ __visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Prevent the compiler and/or objtool from marking the !CONFIG_X86_ESPFIX64
|
||||
* version of exc_double_fault() as noreturn. Otherwise the noreturn mismatch
|
||||
* between configs triggers objtool warnings.
|
||||
*
|
||||
* This is a temporary hack until we have compiler or plugin support for
|
||||
* annotating noreturns.
|
||||
*/
|
||||
#ifdef CONFIG_X86_ESPFIX64
|
||||
#define always_true() true
|
||||
#else
|
||||
bool always_true(void);
|
||||
bool __weak always_true(void) { return true; }
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Runs on an IST stack for x86_64 and on a special task stack for x86_32.
|
||||
*
|
||||
@ -514,7 +529,8 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
|
||||
|
||||
pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
|
||||
die("double fault", regs, error_code);
|
||||
panic("Machine halted.");
|
||||
if (always_true())
|
||||
panic("Machine halted.");
|
||||
instrumentation_end();
|
||||
}
|
||||
|
||||
|
@ -956,7 +956,7 @@ static unsigned long long cyc2ns_suspend;
|
||||
|
||||
void tsc_save_sched_clock_state(void)
|
||||
{
|
||||
if (!sched_clock_stable())
|
||||
if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
|
||||
return;
|
||||
|
||||
cyc2ns_suspend = sched_clock();
|
||||
@ -976,7 +976,7 @@ void tsc_restore_sched_clock_state(void)
|
||||
unsigned long flags;
|
||||
int cpu;
|
||||
|
||||
if (!sched_clock_stable())
|
||||
if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
|
||||
return;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
@ -357,19 +357,23 @@ void *arch_uprobe_trampoline(unsigned long *psize)
|
||||
return &insn;
|
||||
}
|
||||
|
||||
static unsigned long trampoline_check_ip(void)
|
||||
static unsigned long trampoline_check_ip(unsigned long tramp)
|
||||
{
|
||||
unsigned long tramp = uprobe_get_trampoline_vaddr();
|
||||
|
||||
return tramp + (uretprobe_syscall_check - uretprobe_trampoline_entry);
|
||||
}
|
||||
|
||||
SYSCALL_DEFINE0(uretprobe)
|
||||
{
|
||||
struct pt_regs *regs = task_pt_regs(current);
|
||||
unsigned long err, ip, sp, r11_cx_ax[3];
|
||||
unsigned long err, ip, sp, r11_cx_ax[3], tramp;
|
||||
|
||||
if (regs->ip != trampoline_check_ip())
|
||||
/* If there's no trampoline, we are called from wrong place. */
|
||||
tramp = uprobe_get_trampoline_vaddr();
|
||||
if (unlikely(tramp == UPROBE_NO_TRAMPOLINE_VADDR))
|
||||
goto sigill;
|
||||
|
||||
/* Make sure the ip matches the only allowed sys_uretprobe caller. */
|
||||
if (unlikely(regs->ip != trampoline_check_ip(tramp)))
|
||||
goto sigill;
|
||||
|
||||
err = copy_from_user(r11_cx_ax, (void __user *)regs->sp, sizeof(r11_cx_ax));
|
||||
|
@ -3957,16 +3957,12 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
|
||||
|
||||
/*
|
||||
* The target vCPU is valid, so the vCPU will be kicked unless the
|
||||
* request is for CREATE_ON_INIT. For any errors at this stage, the
|
||||
* kick will place the vCPU in an non-runnable state.
|
||||
* request is for CREATE_ON_INIT.
|
||||
*/
|
||||
kick = true;
|
||||
|
||||
mutex_lock(&target_svm->sev_es.snp_vmsa_mutex);
|
||||
|
||||
target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
|
||||
target_svm->sev_es.snp_ap_waiting_for_reset = true;
|
||||
|
||||
/* Interrupt injection mode shouldn't change for AP creation */
|
||||
if (request < SVM_VMGEXIT_AP_DESTROY) {
|
||||
u64 sev_features;
|
||||
@ -4012,20 +4008,23 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
|
||||
target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2;
|
||||
break;
|
||||
case SVM_VMGEXIT_AP_DESTROY:
|
||||
target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
|
||||
break;
|
||||
default:
|
||||
vcpu_unimpl(vcpu, "vmgexit: invalid AP creation request [%#x] from guest\n",
|
||||
request);
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
target_svm->sev_es.snp_ap_waiting_for_reset = true;
|
||||
|
||||
if (kick) {
|
||||
kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
|
||||
kvm_vcpu_kick(target_vcpu);
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&target_svm->sev_es.snp_vmsa_mutex);
|
||||
|
||||
return ret;
|
||||
|
@ -4590,6 +4590,11 @@ static bool kvm_is_vm_type_supported(unsigned long type)
|
||||
return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
|
||||
}
|
||||
|
||||
static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
|
||||
{
|
||||
return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
|
||||
}
|
||||
|
||||
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
{
|
||||
int r = 0;
|
||||
@ -4698,7 +4703,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
break;
|
||||
#endif
|
||||
case KVM_CAP_SYNC_REGS:
|
||||
r = KVM_SYNC_X86_VALID_FIELDS;
|
||||
r = kvm_sync_valid_fields(kvm);
|
||||
break;
|
||||
case KVM_CAP_ADJUST_CLOCK:
|
||||
r = KVM_CLOCK_VALID_FLAGS;
|
||||
@ -11470,6 +11475,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_queued_exception *ex = &vcpu->arch.exception;
|
||||
struct kvm_run *kvm_run = vcpu->run;
|
||||
u32 sync_valid_fields;
|
||||
int r;
|
||||
|
||||
r = kvm_mmu_post_init_vm(vcpu->kvm);
|
||||
@ -11515,8 +11521,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
|
||||
(kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
|
||||
sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm);
|
||||
if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) ||
|
||||
(kvm_run->kvm_dirty_regs & ~sync_valid_fields)) {
|
||||
r = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
@ -11574,7 +11581,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
|
||||
out:
|
||||
kvm_put_guest_fpu(vcpu);
|
||||
if (kvm_run->kvm_valid_regs)
|
||||
if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected))
|
||||
store_regs(vcpu);
|
||||
post_kvm_run_save(vcpu);
|
||||
kvm_vcpu_srcu_read_unlock(vcpu);
|
||||
|
@ -74,6 +74,24 @@ SYM_FUNC_START(rep_movs_alternative)
|
||||
_ASM_EXTABLE_UA( 0b, 1b)
|
||||
|
||||
.Llarge_movsq:
|
||||
/* Do the first possibly unaligned word */
|
||||
0: movq (%rsi),%rax
|
||||
1: movq %rax,(%rdi)
|
||||
|
||||
_ASM_EXTABLE_UA( 0b, .Lcopy_user_tail)
|
||||
_ASM_EXTABLE_UA( 1b, .Lcopy_user_tail)
|
||||
|
||||
/* What would be the offset to the aligned destination? */
|
||||
leaq 8(%rdi),%rax
|
||||
andq $-8,%rax
|
||||
subq %rdi,%rax
|
||||
|
||||
/* .. and update pointers and count to match */
|
||||
addq %rax,%rdi
|
||||
addq %rax,%rsi
|
||||
subq %rax,%rcx
|
||||
|
||||
/* make %rcx contain the number of words, %rax the remainder */
|
||||
movq %rcx,%rax
|
||||
shrq $3,%rcx
|
||||
andl $7,%eax
|
||||
|
@ -562,7 +562,7 @@ void __head sme_enable(struct boot_params *bp)
|
||||
}
|
||||
|
||||
RIP_REL_REF(sme_me_mask) = me_mask;
|
||||
physical_mask &= ~me_mask;
|
||||
cc_vendor = CC_VENDOR_AMD;
|
||||
RIP_REL_REF(physical_mask) &= ~me_mask;
|
||||
RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD;
|
||||
cc_set_mask(me_mask);
|
||||
}
|
||||
|
@ -183,7 +183,7 @@ static int pageattr_test(void)
|
||||
break;
|
||||
|
||||
case 1:
|
||||
err = change_page_attr_set(addrs, len[1], PAGE_CPA_TEST, 1);
|
||||
err = change_page_attr_set(addrs, len[i], PAGE_CPA_TEST, 1);
|
||||
break;
|
||||
|
||||
case 2:
|
||||
|
@ -984,29 +984,42 @@ static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* track_pfn_copy is called when vma that is covering the pfnmap gets
|
||||
* copied through copy_page_range().
|
||||
*
|
||||
* If the vma has a linear pfn mapping for the entire range, we get the prot
|
||||
* from pte and reserve the entire vma range with single reserve_pfn_range call.
|
||||
*/
|
||||
int track_pfn_copy(struct vm_area_struct *vma)
|
||||
int track_pfn_copy(struct vm_area_struct *dst_vma,
|
||||
struct vm_area_struct *src_vma, unsigned long *pfn)
|
||||
{
|
||||
const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start;
|
||||
resource_size_t paddr;
|
||||
unsigned long vma_size = vma->vm_end - vma->vm_start;
|
||||
pgprot_t pgprot;
|
||||
int rc;
|
||||
|
||||
if (vma->vm_flags & VM_PAT) {
|
||||
if (get_pat_info(vma, &paddr, &pgprot))
|
||||
return -EINVAL;
|
||||
/* reserve the whole chunk covered by vma. */
|
||||
return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
|
||||
}
|
||||
if (!(src_vma->vm_flags & VM_PAT))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Duplicate the PAT information for the dst VMA based on the src
|
||||
* VMA.
|
||||
*/
|
||||
if (get_pat_info(src_vma, &paddr, &pgprot))
|
||||
return -EINVAL;
|
||||
rc = reserve_pfn_range(paddr, vma_size, &pgprot, 1);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* Reservation for the destination VMA succeeded. */
|
||||
vm_flags_set(dst_vma, VM_PAT);
|
||||
*pfn = PHYS_PFN(paddr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void untrack_pfn_copy(struct vm_area_struct *dst_vma, unsigned long pfn)
|
||||
{
|
||||
untrack_pfn(dst_vma, pfn, dst_vma->vm_end - dst_vma->vm_start, true);
|
||||
/*
|
||||
* Reservation was freed, any copied page tables will get cleaned
|
||||
* up later, but without getting PAT involved again.
|
||||
*/
|
||||
}
|
||||
|
||||
/*
|
||||
* prot is passed in as a parameter for the new mapping. If the vma has
|
||||
* a linear pfn mapping for the entire range, or no vma is provided,
|
||||
@ -1095,15 +1108,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* untrack_pfn_clear is called if the following situation fits:
|
||||
*
|
||||
* 1) while mremapping a pfnmap for a new region, with the old vma after
|
||||
* its pfnmap page table has been removed. The new vma has a new pfnmap
|
||||
* to the same pfn & cache type with VM_PAT set.
|
||||
* 2) while duplicating vm area, the new vma fails to copy the pgtable from
|
||||
* old vma.
|
||||
*/
|
||||
void untrack_pfn_clear(struct vm_area_struct *vma)
|
||||
{
|
||||
vm_flags_clear(vma, VM_PAT);
|
||||
|
crypto/api.c (17 changed lines)
@ -36,7 +36,8 @@ EXPORT_SYMBOL_GPL(crypto_chain);
|
||||
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
|
||||
#endif
|
||||
|
||||
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
|
||||
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
|
||||
u32 type, u32 mask);
|
||||
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
|
||||
u32 mask);
|
||||
|
||||
@ -145,7 +146,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
|
||||
if (alg != &larval->alg) {
|
||||
kfree(larval);
|
||||
if (crypto_is_larval(alg))
|
||||
alg = crypto_larval_wait(alg);
|
||||
alg = crypto_larval_wait(alg, type, mask);
|
||||
}
|
||||
|
||||
return alg;
|
||||
@ -197,7 +198,8 @@ static void crypto_start_test(struct crypto_larval *larval)
|
||||
crypto_schedule_test(larval);
|
||||
}
|
||||
|
||||
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
|
||||
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_larval *larval;
|
||||
long time_left;
|
||||
@ -219,12 +221,7 @@ again:
|
||||
crypto_larval_kill(larval);
|
||||
alg = ERR_PTR(-ETIMEDOUT);
|
||||
} else if (!alg) {
|
||||
u32 type;
|
||||
u32 mask;
|
||||
|
||||
alg = &larval->alg;
|
||||
type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
|
||||
mask = larval->mask;
|
||||
alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
|
||||
ERR_PTR(-EAGAIN);
|
||||
} else if (IS_ERR(alg))
|
||||
@ -304,7 +301,7 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
|
||||
}
|
||||
|
||||
if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
|
||||
alg = crypto_larval_wait(alg);
|
||||
alg = crypto_larval_wait(alg, type, mask);
|
||||
else if (alg)
|
||||
;
|
||||
else if (!(mask & CRYPTO_ALG_TESTED))
|
||||
@ -352,7 +349,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
|
||||
ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);
|
||||
|
||||
if (ok == NOTIFY_STOP)
|
||||
alg = crypto_larval_wait(larval);
|
||||
alg = crypto_larval_wait(larval, type, mask);
|
||||
else {
|
||||
crypto_mod_put(larval);
|
||||
alg = ERR_PTR(-ENOENT);
|
||||
|
@ -80,3 +80,4 @@ static void __exit bpf_crypto_skcipher_exit(void)
|
||||
module_init(bpf_crypto_skcipher_init);
|
||||
module_exit(bpf_crypto_skcipher_exit);
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Symmetric key cipher support for BPF");
|
||||
|
@ -485,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
||||
cmd_mask = nd_desc->cmd_mask;
|
||||
if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
|
||||
family = call_pkg->nd_family;
|
||||
if (family > NVDIMM_BUS_FAMILY_MAX ||
|
||||
if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX ||
|
||||
!test_bit(family, &nd_desc->bus_family_mask))
|
||||
return -EINVAL;
|
||||
family = array_index_nospec(family,
|
||||
|
@ -268,6 +268,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
|
||||
ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
|
||||
pr->power.states[ACPI_STATE_C3].address);
|
||||
|
||||
if (!pr->power.states[ACPI_STATE_C2].address &&
|
||||
!pr->power.states[ACPI_STATE_C3].address)
|
||||
return -ENODEV;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -440,6 +440,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
|
||||
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Asus Vivobook X1404VAP */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Asus Vivobook X1504VAP */
|
||||
.matches = {
|
||||
|
@ -374,7 +374,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
|
||||
},
|
||||
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
|
||||
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
|
||||
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
|
||||
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
|
||||
},
|
||||
{
|
||||
/* Medion Lifetab S10346 */
|
||||
|
@ -503,6 +503,7 @@ config HT16K33
|
||||
config MAX6959
|
||||
tristate "Maxim MAX6958/6959 7-segment LED controller"
|
||||
depends on I2C
|
||||
select BITREVERSE
|
||||
select REGMAP_I2C
|
||||
select LINEDISP
|
||||
help
|
||||
|
@ -1664,7 +1664,7 @@ err_lcd_unreg:
|
||||
if (lcd.enabled)
|
||||
charlcd_unregister(lcd.charlcd);
|
||||
err_unreg_device:
|
||||
kfree(lcd.charlcd);
|
||||
charlcd_free(lcd.charlcd);
|
||||
lcd.charlcd = NULL;
|
||||
parport_unregister_device(pprt);
|
||||
pprt = NULL;
|
||||
@ -1692,7 +1692,7 @@ static void panel_detach(struct parport *port)
|
||||
charlcd_unregister(lcd.charlcd);
|
||||
lcd.initialized = false;
|
||||
kfree(lcd.charlcd->drvdata);
|
||||
kfree(lcd.charlcd);
|
||||
charlcd_free(lcd.charlcd);
|
||||
lcd.charlcd = NULL;
|
||||
}
|
||||
|
||||
|
@@ -913,6 +913,9 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
if (!dev->power.is_suspended)
goto Complete;
if (dev->power.direct_complete) {
/* Match the pm_runtime_disable() in __device_suspend(). */
pm_runtime_enable(dev);

@@ -931,9 +934,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
*/
dev->power.is_prepared = false;
if (!dev->power.is_suspended)
goto Unlock;
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);

@@ -973,7 +973,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
error = dpm_run_callback(callback, dev, state, info);
dev->power.is_suspended = false;
Unlock:
device_unlock(dev);
dpm_watchdog_clear(&wd);

@@ -1254,14 +1253,13 @@ Skip:
dev->power.is_noirq_suspended = true;
/*
* Skipping the resume of devices that were in use right before the
* system suspend (as indicated by their PM-runtime usage counters)
* would be suboptimal. Also resume them if doing that is not allowed
* to be skipped.
* Devices must be resumed unless they are explicitly allowed to be left
* in suspend, but even in that case skipping the resume of devices that
* were in use right before the system suspend (as indicated by their
* runtime PM usage counters and child counters) would be suboptimal.
*/
if (atomic_read(&dev->power.usage_count) > 1 ||
!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
dev->power.may_skip_resume))
if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
dev->power.must_resume = true;
if (dev->power.must_resume)

@@ -1628,6 +1626,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev)) {
pm_dev_dbg(dev, state, "direct-complete ");
dev->power.is_suspended = true;
goto Complete;
}

@@ -1874,7 +1874,7 @@ void pm_runtime_drop_link(struct device_link *link)
pm_request_idle(link->supplier);
}
static bool pm_runtime_need_not_resume(struct device *dev)
bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&
(atomic_read(&dev->power.child_count) == 0 ||
@@ -1416,18 +1416,28 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
}
}
static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
/* Must be called when queue is frozen */
static bool ublk_mark_queue_canceling(struct ublk_queue *ubq)
{
struct gendisk *disk;
bool canceled;
spin_lock(&ubq->cancel_lock);
if (ubq->canceling) {
spin_unlock(&ubq->cancel_lock);
return false;
}
ubq->canceling = true;
canceled = ubq->canceling;
if (!canceled)
ubq->canceling = true;
spin_unlock(&ubq->cancel_lock);
return canceled;
}
static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
{
bool was_canceled = ubq->canceling;
struct gendisk *disk;
if (was_canceled)
return false;
spin_lock(&ub->lock);
disk = ub->ub_disk;
if (disk)

@@ -1438,14 +1448,23 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
if (!disk)
return false;
/* Now we are serialized with ublk_queue_rq() */
/*
* Now we are serialized with ublk_queue_rq()
*
* Make sure that ubq->canceling is set when queue is frozen,
* because ublk_queue_rq() has to rely on this flag for avoiding to
* touch completed uring_cmd
*/
blk_mq_quiesce_queue(disk->queue);
/* abort queue is for making forward progress */
ublk_abort_queue(ub, ubq);
was_canceled = ublk_mark_queue_canceling(ubq);
if (!was_canceled) {
/* abort queue is for making forward progress */
ublk_abort_queue(ub, ubq);
}
blk_mq_unquiesce_queue(disk->queue);
put_device(disk_to_dev(disk));
return true;
return !was_canceled;
}
static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
@@ -180,14 +180,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = {
CLK_GATE("asrc", ASRC_IPG),
CLK_GATE("pdm", PDM_IPG),
CLK_GATE("earc", EARC_IPG),
CLK_GATE("ocrama", OCRAMA_IPG),
CLK_GATE_PARENT("ocrama", OCRAMA_IPG, "axi"),
CLK_GATE("aud2htx", AUD2HTX_IPG),
CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"),
CLK_GATE("sdma2", SDMA2_ROOT),
CLK_GATE("sdma3", SDMA3_ROOT),
CLK_GATE("spba2", SPBA2_ROOT),
CLK_GATE("dsp", DSP_ROOT),
CLK_GATE("dspdbg", DSPDBG_ROOT),
CLK_GATE_PARENT("dsp", DSP_ROOT, "axi"),
CLK_GATE_PARENT("dspdbg", DSPDBG_ROOT, "axi"),
CLK_GATE("edma", EDMA_ROOT),
CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"),
CLK_GATE("mu2", MU2_ROOT),

@@ -1139,8 +1139,18 @@ static struct clk_regmap g12a_cpu_clk_div16_en = {
.hw.init = &(struct clk_init_data) {
.name = "cpu_clk_div16_en",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
&g12a_cpu_clk.hw
.parent_data = &(const struct clk_parent_data) {
/*
* Note:
* G12A and G12B have different cpu clocks (with
* different struct clk_hw). We fallback to the global
* naming string mechanism so this clock picks
* up the appropriate one. Same goes for the other
* clock using cpu cluster A clock output and present
* on both G12 variant.
*/
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
/*
@@ -1205,7 +1215,10 @@ static struct clk_regmap g12a_cpu_clk_apb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_apb_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.parent_data = &(const struct clk_parent_data) {
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
},
};

@@ -1239,7 +1252,10 @@ static struct clk_regmap g12a_cpu_clk_atb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_atb_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.parent_data = &(const struct clk_parent_data) {
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
},
};

@@ -1273,7 +1289,10 @@ static struct clk_regmap g12a_cpu_clk_axi_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_axi_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.parent_data = &(const struct clk_parent_data) {
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
},
};

@@ -1308,13 +1327,6 @@ static struct clk_regmap g12a_cpu_clk_trace_div = {
.name = "cpu_clk_trace_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_data = &(const struct clk_parent_data) {
/*
* Note:
* G12A and G12B have different cpu_clks (with
* different struct clk_hw). We fallback to the global
* naming string mechanism so cpu_clk_trace_div picks
* up the appropriate one.
*/
.name = "cpu_clk",
.index = -1,
},
@@ -4317,7 +4329,7 @@ static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 4);
static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24);
static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);

@@ -1272,14 +1272,13 @@ static struct clk_regmap gxbb_cts_i958 = {
},
};
/*
* This table skips a clock named 'cts_slow_oscin' in the documentation
* This clock does not exist yet in this controller or the AO one
*/
static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 };
static const struct clk_parent_data gxbb_32k_clk_parent_data[] = {
{ .fw_name = "xtal", },
/*
* FIXME: This clock is provided by the ao clock controller but the
* clock is not yet part of the binding of this controller, so string
* name must be use to set this parent.
*/
{ .name = "cts_slow_oscin", .index = -1 },
{ .hw = &gxbb_fclk_div3.hw },
{ .hw = &gxbb_fclk_div5.hw },
};

@@ -1289,6 +1288,7 @@ static struct clk_regmap gxbb_32k_clk_sel = {
.offset = HHI_32K_CLK_CNTL,
.mask = 0x3,
.shift = 16,
.table = gxbb_32k_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "32k_clk_sel",

@@ -1312,7 +1312,7 @@ static struct clk_regmap gxbb_32k_clk_div = {
&gxbb_32k_clk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -3770,7 +3770,7 @@ static struct clk_branch gcc_venus0_axi_clk = {
static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
.halt_reg = 0x4c02c,
.halt_check = BRANCH_HALT,
.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x4c02c,
.enable_mask = BIT(0),

@@ -3497,7 +3497,7 @@ static struct gdsc usb30_prim_gdsc = {
.pd = {
.name = "usb30_prim_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
.pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};

@@ -3506,7 +3506,7 @@ static struct gdsc usb3_phy_gdsc = {
.pd = {
.name = "usb3_phy_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
.pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2564,19 +2564,6 @@ static struct clk_branch gcc_disp_hf_axi_clk = {
},
};
static struct clk_branch gcc_disp_xo_clk = {
.halt_reg = 0x27018,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x27018,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_disp_xo_clk",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gcc_gp1_clk = {
.halt_reg = 0x64000,
.halt_check = BRANCH_HALT,

@@ -2631,21 +2618,6 @@ static struct clk_branch gcc_gp3_clk = {
},
};
static struct clk_branch gcc_gpu_cfg_ahb_clk = {
.halt_reg = 0x71004,
.halt_check = BRANCH_HALT_VOTED,
.hwcg_reg = 0x71004,
.hwcg_bit = 1,
.clkr = {
.enable_reg = 0x71004,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_gpu_cfg_ahb_clk",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gcc_gpu_gpll0_cph_clk_src = {
.halt_check = BRANCH_HALT_DELAY,
.clkr = {

@@ -6268,7 +6240,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_CNOC_PCIE_TUNNEL_CLK] = &gcc_cnoc_pcie_tunnel_clk.clkr,
[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
[GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,

@@ -6281,7 +6252,6 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_GPLL7] = &gcc_gpll7.clkr,
[GCC_GPLL8] = &gcc_gpll8.clkr,
[GCC_GPLL9] = &gcc_gpll9.clkr,
[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
[GCC_GPU_GPLL0_CPH_CLK_SRC] = &gcc_gpu_gpll0_cph_clk_src.clkr,
[GCC_GPU_GPLL0_DIV_CPH_CLK_SRC] = &gcc_gpu_gpll0_div_cph_clk_src.clkr,
[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
@@ -2544,7 +2544,7 @@ static struct clk_branch video_core_clk = {
static struct clk_branch video_subcore0_clk = {
.halt_reg = 0x1048,
.halt_check = BRANCH_HALT,
.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x1048,
.enable_mask = BIT(0),

@@ -50,7 +50,7 @@
#define G3S_SEL_SDHI2 SEL_PLL_PACK(G3S_CPG_SDHI_DSEL, 8, 2)
/* PLL 1/4/6 configuration registers macro. */
#define G3S_PLL146_CONF(clk1, clk2) ((clk1) << 22 | (clk2) << 12)
#define G3S_PLL146_CONF(clk1, clk2, setting) ((clk1) << 22 | (clk2) << 12 | (setting))
#define DEF_G3S_MUX(_name, _id, _conf, _parent_names, _mux_flags, _clk_flags) \
DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = (_conf), \

@@ -133,7 +133,8 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
/* Internal Core Clocks */
DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000),
DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8)),
DEF_G3S_PLL(".pll1", CLK_PLL1, CLK_EXTAL, G3S_PLL146_CONF(0x4, 0x8, 0x100),
1100000000UL),
DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3),
DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3),
DEF_FIXED(".pll4", CLK_PLL4, CLK_EXTAL, 100, 3),
@@ -51,6 +51,7 @@
#define RZG3S_DIV_M GENMASK(25, 22)
#define RZG3S_DIV_NI GENMASK(21, 13)
#define RZG3S_DIV_NF GENMASK(12, 1)
#define RZG3S_SEL_PLL BIT(0)
#define CLK_ON_R(reg) (reg)
#define CLK_MON_R(reg) (0x180 + (reg))

@@ -60,6 +61,7 @@
#define GET_REG_OFFSET(val) ((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff)
#define GET_REG_SAMPLL_SETTING(val) ((val) & 0xfff)
#define CPG_WEN_BIT BIT(16)

@@ -943,6 +945,7 @@ rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
struct pll_clk {
struct clk_hw hw;
unsigned long default_rate;
unsigned int conf;
unsigned int type;
void __iomem *base;

@@ -980,12 +983,19 @@ static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
{
struct pll_clk *pll_clk = to_pll(hw);
struct rzg2l_cpg_priv *priv = pll_clk->priv;
u32 nir, nfr, mr, pr, val;
u32 nir, nfr, mr, pr, val, setting;
u64 rate;
if (pll_clk->type != CLK_TYPE_G3S_PLL)
return parent_rate;
setting = GET_REG_SAMPLL_SETTING(pll_clk->conf);
if (setting) {
val = readl(priv->base + setting);
if (val & RZG3S_SEL_PLL)
return pll_clk->default_rate;
}
val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
pr = 1 << FIELD_GET(RZG3S_DIV_P, val);

@@ -1038,6 +1048,7 @@ rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
pll_clk->base = priv->base;
pll_clk->priv = priv;
pll_clk->type = core->type;
pll_clk->default_rate = core->default_rate;
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret)
@@ -102,7 +102,10 @@ struct cpg_core_clk {
const struct clk_div_table *dtable;
const u32 *mtable;
const unsigned long invalid_rate;
const unsigned long max_rate;
union {
const unsigned long max_rate;
const unsigned long default_rate;
};
const char * const *parent_names;
notifier_fn_t notifier;
u32 flag;

@@ -144,8 +147,9 @@ enum clk_types {
DEF_TYPE(_name, _id, _type, .parent = _parent)
#define DEF_SAMPLL(_name, _id, _parent, _conf) \
DEF_TYPE(_name, _id, CLK_TYPE_SAM_PLL, .parent = _parent, .conf = _conf)
#define DEF_G3S_PLL(_name, _id, _parent, _conf) \
DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf)
#define DEF_G3S_PLL(_name, _id, _parent, _conf, _default_rate) \
DEF_TYPE(_name, _id, CLK_TYPE_G3S_PLL, .parent = _parent, .conf = _conf, \
.default_rate = _default_rate)
#define DEF_INPUT(_name, _id) \
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
@@ -201,7 +201,7 @@ PNAME(mux_aclk_peri_pre_p) = { "cpll_peri",
"gpll_peri",
"hdmiphy_peri" };
PNAME(mux_ref_usb3otg_src_p) = { "xin24m",
"clk_usb3otg_ref" };
"clk_ref_usb3otg_src" };
PNAME(mux_xin24m_32k_p) = { "xin24m",
"clk_rtc32k" };
PNAME(mux_mac2io_src_p) = { "clk_mac2io_src",

@@ -74,12 +74,12 @@ struct samsung_clk_provider * __init samsung_clk_init(struct device *dev,
if (!ctx)
panic("could not allocate clock provider context.\n");
ctx->clk_data.num = nr_clks;
for (i = 0; i < nr_clks; ++i)
ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
ctx->dev = dev;
ctx->reg_base = base;
ctx->clk_data.num = nr_clks;
spin_lock_init(&ctx->lock);
return ctx;

@@ -245,7 +245,7 @@ config ARM_TEGRA186_CPUFREQ
config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
depends on ARCH_TEGRA_194_SOC || (64BIT && COMPILE_TEST)
depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
depends on TEGRA_BPMP
default y
help
@@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
time_elapsed = update_time - j_cdbs->prev_update_time;
j_cdbs->prev_update_time = update_time;
idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
/*
* cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
* it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
* off, where idle_time is calculated by the difference between
* time elapsed in jiffies and "busy time" obtained from CPU
* statistics. If a CPU is 100% busy, the time elapsed and busy
* time should grow with the same amount in two consecutive
* samples, but in practice there could be a tiny difference,
* making the accumulated idle time decrease sometimes. Hence,
* in this case, idle_time should be regarded as 0 in order to
* make the further process correct.
*/
if (cur_idle_time > j_cdbs->prev_cpu_idle)
idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
else
idle_time = 0;
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {

@@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
* calls, so the previous load value can be used then.
*/
load = j_cdbs->prev_load;
} else if (unlikely((int)idle_time > 2 * sampling_rate &&
} else if (unlikely(idle_time > 2 * sampling_rate &&
j_cdbs->prev_load)) {
/*
* If the CPU had gone completely idle and a task has

@@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
load = j_cdbs->prev_load;
j_cdbs->prev_load = 0;
} else {
if (time_elapsed >= idle_time) {
if (time_elapsed > idle_time)
load = 100 * (time_elapsed - idle_time) / time_elapsed;
} else {
/*
* That can happen if idle_time is returned by
* get_cpu_idle_time_jiffy(). In that case
* idle_time is roughly equal to the difference
* between time_elapsed and "busy time" obtained
* from CPU statistics. Then, the "busy time"
* can end up being greater than time_elapsed
* (for example, if jiffies_64 and the CPU
* statistics are updated by different CPUs),
* so idle_time may in fact be negative. That
* means, though, that the CPU was busy all
* the time (on the rough average) during the
* last sampling interval and 100 can be
* returned as the load.
*/
load = (int)idle_time < 0 ? 100 : 0;
}
else
load = 0;
j_cdbs->prev_load = load;
}
if (unlikely((int)idle_time > 2 * sampling_rate)) {
if (unlikely(idle_time > 2 * sampling_rate)) {
unsigned int periods = idle_time / sampling_rate;
if (periods < idle_periods)
@@ -39,8 +39,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
static int
scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
u64 rate = policy->freq_table[index].frequency * 1000;
unsigned long freq_khz = policy->freq_table[index].frequency;
struct scpi_data *priv = policy->driver_data;
unsigned long rate = freq_khz * 1000;
int ret;
ret = clk_set_rate(priv->clk, rate);

@@ -48,7 +49,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
if (ret)
return ret;
if (clk_get_rate(priv->clk) != rate)
if (clk_get_rate(priv->clk) / 1000 != freq_khz)
return -EIO;
return 0;
@@ -37,7 +37,6 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
bool fallback;
};
/* SEC request of Crypto */

@@ -57,7 +57,6 @@
#define SEC_TYPE_MASK 0x0F
#define SEC_DONE_MASK 0x0001
#define SEC_ICV_MASK 0x000E
#define SEC_SQE_LEN_RATE_MASK 0x3
#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR 128

@@ -80,16 +79,16 @@
#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
SEC_PBUF_LEFT_SZ(depth))
#define SEC_SQE_LEN_RATE 4
#define SEC_SQE_CFLAG 2
#define SEC_SQE_AEAD_FLAG 3
#define SEC_SQE_DONE 0x1
#define SEC_ICV_ERR 0x2
#define MIN_MAC_LEN 4
#define MAC_LEN_MASK 0x1U
#define MAX_INPUT_DATA_LEN 0xFFFE00
#define BITS_MASK 0xFF
#define WORD_MASK 0x3
#define BYTE_BITS 0x8
#define BYTES_TO_WORDS(bcount) ((bcount) >> 2)
#define SEC_XTS_NAME_SZ 0x3
#define IV_CM_CAL_NUM 2
#define IV_CL_MASK 0x7

@@ -691,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
c_ctx->fallback = false;
/* Currently, only XTS mode need fallback tfm when using 192bit key */
if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
return 0;
c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(c_ctx->fbtfm)) {
pr_err("failed to alloc xts mode fallback tfm!\n");
pr_err("failed to alloc fallback tfm for %s!\n", alg);
return PTR_ERR(c_ctx->fbtfm);
}
@@ -858,7 +853,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
if (c_ctx->fallback && c_ctx->fbtfm) {
if (c_ctx->fbtfm) {
ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
if (ret) {
dev_err(dev, "failed to set fallback skcipher key!\n");

@@ -1090,11 +1085,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
struct crypto_shash *hash_tfm = ctx->hash_tfm;
int blocksize, digestsize, ret;
if (!keys->authkeylen) {
pr_err("hisi_sec2: aead auth key error!\n");
return -EINVAL;
}
blocksize = crypto_shash_blocksize(hash_tfm);
digestsize = crypto_shash_digestsize(hash_tfm);
if (keys->authkeylen > blocksize) {

@@ -1106,7 +1096,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
}
ctx->a_key_len = digestsize;
} else {
memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
if (keys->authkeylen)
memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
ctx->a_key_len = keys->authkeylen;
}

@@ -1160,8 +1151,10 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
}
ret = crypto_authenc_extractkeys(&keys, key, keylen);
if (ret)
if (ret) {
dev_err(dev, "sec extract aead keys err!\n");
goto bad_key;
}
ret = sec_aead_aes_set_key(c_ctx, &keys);
if (ret) {

@@ -1175,12 +1168,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
ret = -EINVAL;
dev_err(dev, "AUTH key length error!\n");
goto bad_key;
}
ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
if (ret) {
dev_err(dev, "set sec fallback key err!\n");
@@ -1583,11 +1570,10 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)((ctx->a_key_len) /
SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

@@ -1639,12 +1625,10 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |=
cpu_to_le32((u32)(authsize /
SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |=
cpu_to_le32((u32)(ctx->a_key_len /
SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);
sqe3->auth_mac_key |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);

@@ -2003,8 +1987,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
struct sec_req *sreq)
static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
u32 cryptlen = sreq->c_req.sk_req->cryptlen;
struct device *dev = ctx->dev;

@@ -2026,10 +2009,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
}
break;
case SEC_CMODE_CTR:
if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
dev_err(dev, "skcipher HW version error!\n");
ret = -EINVAL;
}
break;
default:
ret = -EINVAL;

@@ -2038,17 +2017,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
return ret;
}
static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
static int sec_skcipher_param_check(struct sec_ctx *ctx,
struct sec_req *sreq, bool *need_fallback)
{
struct skcipher_request *sk_req = sreq->c_req.sk_req;
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
if (unlikely(!sk_req->src || !sk_req->dst ||
sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
if (unlikely(!sk_req->src || !sk_req->dst)) {
dev_err(dev, "skcipher input param error!\n");
return -EINVAL;
}
if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
*need_fallback = true;
sreq->c_req.c_len = sk_req->cryptlen;
if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)

@@ -2106,6 +2089,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
struct sec_req *req = skcipher_request_ctx(sk_req);
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
bool need_fallback = false;
int ret;
if (!sk_req->cryptlen) {

@@ -2119,11 +2103,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
req->c_req.encrypt = encrypt;
req->ctx = ctx;
ret = sec_skcipher_param_check(ctx, req);
ret = sec_skcipher_param_check(ctx, req, &need_fallback);
if (unlikely(ret))
return -EINVAL;
if (unlikely(ctx->c_ctx.fallback))
if (unlikely(ctx->c_ctx.fallback || need_fallback))
return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
return ctx->req_op->process(ctx, req);
@@ -2231,52 +2215,35 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
size_t sz = crypto_aead_authsize(tfm);
u8 c_mode = ctx->c_ctx.c_mode;
struct device *dev = ctx->dev;
int ret;
/* Hardware does not handle cases where authsize is less than 4 bytes */
if (unlikely(sz < MIN_MAC_LEN)) {
sreq->aead_req.fallback = true;
if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
return -EINVAL;
}
if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
req->assoclen > SEC_MAX_AAD_LEN)) {
dev_err(dev, "aead input spec error!\n");
req->assoclen > SEC_MAX_AAD_LEN))
return -EINVAL;
}
if (c_mode == SEC_CMODE_CCM) {
if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
return -EINVAL;
}
ret = aead_iv_demension_check(req);
if (ret) {
dev_err(dev, "aead input iv param error!\n");
return ret;
}
}
if (sreq->c_req.encrypt)
sreq->c_req.c_len = req->cryptlen;
else
sreq->c_req.c_len = req->cryptlen - sz;
if (c_mode == SEC_CMODE_CBC) {
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "aead crypto length error!\n");
ret = aead_iv_demension_check(req);
if (unlikely(ret))
return -EINVAL;
} else if (c_mode == SEC_CMODE_CBC) {
if (unlikely(sz & WORD_MASK))
return -EINVAL;
if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
return -EINVAL;
}
}
return 0;
}
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
{
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
size_t authsize = crypto_aead_authsize(tfm);
struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;

@@ -2285,12 +2252,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
if (ctx->sec->qm.ver == QM_HW_V2) {
if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
req->cryptlen <= authsize))) {
sreq->aead_req.fallback = true;
return -EINVAL;
}
if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "aead cbc mode input data length error!\n");
return -EINVAL;
}
/* Support AES or SM4 */

@@ -2299,8 +2264,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
}
if (unlikely(sec_aead_spec_check(ctx, sreq)))
if (unlikely(sec_aead_spec_check(ctx, sreq))) {
*need_fallback = true;
return -EINVAL;
}
if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
SEC_PBUF_SZ)

@@ -2344,17 +2311,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
struct sec_req *req = aead_request_ctx(a_req);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
size_t sz = crypto_aead_authsize(tfm);
bool need_fallback = false;
int ret;
req->flag = a_req->base.flags;
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
req->aead_req.fallback = false;
req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
ret = sec_aead_param_check(ctx, req);
ret = sec_aead_param_check(ctx, req, &need_fallback);
if (unlikely(ret)) {
if (req->aead_req.fallback)
if (need_fallback)
return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
}
@@ -1527,7 +1527,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
iaa_wq = idxd_wq_get_private(wq);
if (!req->dst) {
gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
/* incompressible data will always be < 2 * slen */
req->dlen = 2 * req->slen;

@@ -1609,7 +1609,7 @@ out:
static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
{
gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
struct crypto_tfm *tfm = req->base.tfm;
dma_addr_t src_addr, dst_addr;
@@ -420,6 +420,7 @@ static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK;
dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK;
dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK;
dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK;
dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK;
}

@@ -695,7 +695,7 @@ static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev,
if (err_mask->parerr_wat_wcp_mask)
adf_poll_slicehang_csr(accel_dev, csr,
ADF_GEN4_SLICEHANGSTATUS_WAT_WCP,
"ath_cph");
"wat_wcp");
return false;
}
||||
@ -1043,63 +1043,16 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev,
|
||||
return reset_required;
|
||||
}
|
||||
|
||||
static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
|
||||
static void adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
|
||||
void __iomem *csr, u32 iastatssm)
|
||||
{
|
||||
struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
|
||||
u32 reg;
|
||||
|
||||
if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT))
|
||||
return false;
|
||||
|
||||
reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC);
|
||||
reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT;
|
||||
if (reg) {
|
||||
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
|
||||
ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg);
|
||||
}
|
||||
|
||||
reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH);
|
||||
reg &= err_mask->parerr_ath_cph_mask;
|
||||
if (reg) {
|
||||
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
|
||||
ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg);
|
||||
}
|
||||
|
||||
reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT);
|
||||
reg &= err_mask->parerr_cpr_xlt_mask;
|
||||
if (reg) {
|
||||
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
|
||||
ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg);
|
||||
}
|
||||
|
||||
reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS);
|
||||
reg &= err_mask->parerr_dcpr_ucs_mask;
|
||||
if (reg) {
|
||||
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
|
||||
ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg);
|
||||
}
|
||||
|
||||
reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE);
|
||||
reg &= err_mask->parerr_pke_mask;
|
||||
if (reg) {
|
||||
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
|
||||
ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg);
|
||||
}
|
||||
|
||||
if (err_mask->parerr_wat_wcp_mask) {
|
||||
reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP);
|
||||
reg &= err_mask->parerr_wat_wcp_mask;
|
||||
if (reg) {
|
||||
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
|
||||
ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP,
|
||||
reg);
|
||||
}
|
||||
}
|
||||
return;
|
||||
|
||||
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
|
||||
dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported");
|
||||
|
||||
return false;
|
||||
return;
|
||||
}
|
||||
|
||||
static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev,
|
||||
@ -1171,8 +1124,8 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev,
|
||||
reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm);
|
||||
reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm);
|
||||
reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm);
|
||||
reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
|
||||
reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm);
|
||||
adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
|
||||
|
||||
ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm);
|
||||
|
||||
|
@ -1144,6 +1144,7 @@ static void __init nxcop_get_capabilities(void)
|
||||
{
|
||||
struct hv_vas_all_caps *hv_caps;
|
||||
struct hv_nx_cop_caps *hv_nxc;
|
||||
u64 feat;
|
||||
int rc;
|
||||
|
||||
hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
|
||||
@ -1154,27 +1155,26 @@ static void __init nxcop_get_capabilities(void)
|
||||
*/
|
||||
rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0,
|
||||
(u64)virt_to_phys(hv_caps));
|
||||
if (!rc)
|
||||
feat = be64_to_cpu(hv_caps->feat_type);
|
||||
kfree(hv_caps);
|
||||
if (rc)
|
||||
goto out;
|
||||
return;
|
||||
if (!(feat & VAS_NX_GZIP_FEAT_BIT))
|
||||
return;
|
||||
|
||||
caps_feat = be64_to_cpu(hv_caps->feat_type);
|
||||
/*
|
||||
* NX-GZIP feature available
|
||||
*/
|
||||
if (caps_feat & VAS_NX_GZIP_FEAT_BIT) {
|
||||
hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
|
||||
if (!hv_nxc)
|
||||
goto out;
|
||||
/*
|
||||
* Get capabilities for NX-GZIP feature
|
||||
*/
|
||||
rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
|
||||
VAS_NX_GZIP_FEAT,
|
||||
(u64)virt_to_phys(hv_nxc));
|
||||
} else {
|
||||
pr_err("NX-GZIP feature is not available\n");
|
||||
rc = -EINVAL;
|
||||
}
|
||||
hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
|
||||
if (!hv_nxc)
|
||||
return;
|
||||
/*
|
||||
* Get capabilities for NX-GZIP feature
|
||||
*/
|
||||
rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
|
||||
VAS_NX_GZIP_FEAT,
|
||||
(u64)virt_to_phys(hv_nxc));
|
||||
|
||||
if (!rc) {
|
||||
nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor);
|
||||
@ -1184,13 +1184,10 @@ static void __init nxcop_get_capabilities(void)
|
||||
be64_to_cpu(hv_nxc->min_compress_len);
|
||||
nx_cop_caps.min_decompress_len =
|
||||
be64_to_cpu(hv_nxc->min_decompress_len);
|
||||
} else {
|
||||
caps_feat = 0;
|
||||
caps_feat = feat;
|
||||
}
|
||||
|
||||
kfree(hv_nxc);
|
||||
out:
|
||||
kfree(hv_caps);
|
||||
}
|
||||
|
||||
static const struct vio_device_id nx842_vio_driver_ids[] = {
|
||||
|
@ -282,7 +282,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
|
||||
|
||||
/* Prepare the command and submit for execution */
|
||||
cmdlen = tegra_aes_prep_cmd(ctx, rctx);
|
||||
ret = tegra_se_host1x_submit(se, cmdlen);
|
||||
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
|
||||
|
||||
/* Copy the result */
|
||||
tegra_aes_update_iv(req, ctx);
|
||||
@ -443,6 +443,9 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
|
||||
if (!req->cryptlen)
|
||||
return 0;
|
||||
|
||||
if (ctx->alg == SE_ALG_ECB)
|
||||
req->iv = NULL;
|
||||
|
||||
rctx->encrypt = encrypt;
|
||||
rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
|
||||
rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
|
||||
@ -719,7 +722,7 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
|
||||
|
||||
cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
|
||||
|
||||
return tegra_se_host1x_submit(se, cmdlen);
|
||||
return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
|
||||
}
|
||||
|
||||
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
|
||||
@ -736,7 +739,7 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
|
||||
|
||||
/* Prepare command and submit */
|
||||
cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
|
||||
ret = tegra_se_host1x_submit(se, cmdlen);
|
||||
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -759,7 +762,7 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
|
||||
|
||||
/* Prepare command and submit */
|
||||
cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
|
||||
ret = tegra_se_host1x_submit(se, cmdlen);
|
||||
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -891,7 +894,7 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req
|
||||
/* Prepare command and submit */
|
||||
cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
|
||||
|
||||
return tegra_se_host1x_submit(se, cmdlen);
|
||||
return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
|
||||
}
|
||||
|
||||
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
|
||||
@ -1098,7 +1101,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
|
||||
|
||||
/* Prepare command and submit */
|
||||
cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
|
||||
ret = tegra_se_host1x_submit(se, cmdlen);
|
||||
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -1513,23 +1516,16 @@ static int tegra_cmac_do_update(struct ahash_request *req)
|
||||
rctx->residue.size = nresidue;
|
||||
|
||||
/*
|
||||
* If this is not the first 'update' call, paste the previous copied
|
||||
* If this is not the first task, paste the previous copied
|
||||
* intermediate results to the registers so that it gets picked up.
|
||||
* This is to support the import/export functionality.
|
||||
*/
|
||||
if (!(rctx->task & SHA_FIRST))
|
||||
tegra_cmac_paste_result(ctx->se, rctx);
|
||||
|
||||
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
|
||||
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
|
||||
|
||||
ret = tegra_se_host1x_submit(se, cmdlen);
|
||||
/*
|
||||
* If this is not the final update, copy the intermediate results
|
||||
* from the registers so that it can be used in the next 'update'
|
||||
* call. This is to support the import/export functionality.
|
||||
*/
|
||||
if (!(rctx->task & SHA_FINAL))
|
||||
tegra_cmac_copy_result(ctx->se, rctx);
|
||||
tegra_cmac_copy_result(ctx->se, rctx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -1553,9 +1549,16 @@ static int tegra_cmac_do_final(struct ahash_request *req)
|
||||
rctx->total_len += rctx->residue.size;
|
||||
rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
|
||||
|
||||
/*
|
||||
* If this is not the first task, paste the previous copied
|
||||
* intermediate results to the registers so that it gets picked up.
|
||||
*/
|
||||
if (!(rctx->task & SHA_FIRST))
|
||||
tegra_cmac_paste_result(ctx->se, rctx);
|
||||
|
||||
/* Prepare command and submit */
|
||||
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
|
||||
ret = tegra_se_host1x_submit(se, cmdlen);
|
||||
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@ -1581,18 +1584,24 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
|
||||
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
||||
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
|
||||
struct tegra_se *se = ctx->se;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
if (rctx->task & SHA_UPDATE) {
|
||||
ret = tegra_cmac_do_update(req);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
rctx->task &= ~SHA_UPDATE;
|
||||
}
|
||||
|
||||
if (rctx->task & SHA_FINAL) {
|
||||
ret = tegra_cmac_do_final(req);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
rctx->task &= ~SHA_FINAL;
|
||||
}
|
||||
|
||||
out:
|
||||
crypto_finalize_hash_request(se->engine, req, ret);
|
||||
|
||||
return 0;
|
||||
|
@ -300,8 +300,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
|
||||
{
|
||||
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
|
||||
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
|
||||
struct tegra_se *se = ctx->se;
|
||||
unsigned int nblks, nresidue, size, ret;
|
||||
u32 *cpuvaddr = ctx->se->cmdbuf->addr;
|
||||
u32 *cpuvaddr = se->cmdbuf->addr;
|
||||
|
||||
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
|
||||
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
|
||||
@ -353,11 +354,11 @@ static int tegra_sha_do_update(struct ahash_request *req)
|
||||
* This is to support the import/export functionality.
|
||||
*/
|
||||
if (!(rctx->task & SHA_FIRST))
|
||||
tegra_sha_paste_hash_result(ctx->se, rctx);
|
||||
tegra_sha_paste_hash_result(se, rctx);
|
||||
|
||||
size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
|
||||
size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
|
||||
|
||||
ret = tegra_se_host1x_submit(ctx->se, size);
|
||||
ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
|
||||
|
||||
/*
|
||||
* If this is not the final update, copy the intermediate results
|
||||
@ -365,7 +366,7 @@ static int tegra_sha_do_update(struct ahash_request *req)
|
||||
* call. This is to support the import/export functionality.
|
||||
*/
|
||||
if (!(rctx->task & SHA_FINAL))
|
||||
tegra_sha_copy_hash_result(ctx->se, rctx);
|
||||
tegra_sha_copy_hash_result(se, rctx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -388,7 +389,7 @@ static int tegra_sha_do_final(struct ahash_request *req)
|
||||
|
||||
size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
|
||||
|
||||
ret = tegra_se_host1x_submit(se, size);
|
||||
ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@ -416,14 +417,21 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
|
||||
|
||||
if (rctx->task & SHA_UPDATE) {
|
||||
ret = tegra_sha_do_update(req);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
rctx->task &= ~SHA_UPDATE;
|
||||
}
|
||||
|
||||
if (rctx->task & SHA_FINAL) {
|
||||
ret = tegra_sha_do_final(req);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
rctx->task &= ~SHA_FINAL;
|
||||
}
|
||||
|
||||
out:
|
||||
crypto_finalize_hash_request(se->engine, req, ret);
|
||||
|
||||
return 0;
|
||||
@ -559,13 +567,18 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
|
||||
int ret;
|
||||
|
||||
if (aes_check_keylen(keylen))
|
||||
return tegra_hmac_fallback_setkey(ctx, key, keylen);
|
||||
|
||||
ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
|
||||
if (ret)
|
||||
return tegra_hmac_fallback_setkey(ctx, key, keylen);
|
||||
|
||||
ctx->fallback = false;
|
||||
|
||||
return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tegra_sha_update(struct ahash_request *req)
|
||||
|
@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
|
||||
u32 keylen, u16 slot, u32 alg)
|
||||
{
|
||||
const u32 *keyval = (u32 *)key;
|
||||
u32 *addr = se->cmdbuf->addr, size;
|
||||
u32 *addr = se->keybuf->addr, size;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&kslt_lock);
|
||||
|
||||
size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
|
||||
ret = tegra_se_host1x_submit(se, se->keybuf, size);
|
||||
|
||||
return tegra_se_host1x_submit(se, size);
|
||||
mutex_unlock(&kslt_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
|
||||
|
@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi
|
||||
return cmdbuf;
|
||||
}
|
||||
|
||||
int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
|
||||
int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
|
||||
{
|
||||
struct host1x_job *job;
|
||||
int ret;
|
||||
@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
|
||||
job->engine_fallback_streamid = se->stream_id;
|
||||
job->engine_streamid_offset = SE_STREAM_ID;
|
||||
|
||||
se->cmdbuf->words = size;
|
||||
cmdbuf->words = size;
|
||||
|
||||
host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
|
||||
host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
|
||||
|
||||
ret = host1x_job_pin(job, se->dev);
|
||||
if (ret) {
|
||||
@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client)
|
||||
goto syncpt_put;
|
||||
}
|
||||
|
||||
se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
|
||||
if (!se->keybuf) {
|
||||
ret = -ENOMEM;
|
||||
goto cmdbuf_put;
|
||||
}
|
||||
|
||||
ret = se->hw->init_alg(se);
|
||||
if (ret) {
|
||||
dev_err(se->dev, "failed to register algorithms\n");
|
||||
goto cmdbuf_put;
|
||||
goto keybuf_put;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
keybuf_put:
|
||||
tegra_se_cmdbuf_put(&se->keybuf->bo);
|
||||
cmdbuf_put:
|
||||
tegra_se_cmdbuf_put(&se->cmdbuf->bo);
|
||||
syncpt_put:
|
||||
|
@ -420,6 +420,7 @@ struct tegra_se {
|
||||
struct host1x_client client;
|
||||
struct host1x_channel *channel;
|
||||
struct tegra_se_cmdbuf *cmdbuf;
|
||||
struct tegra_se_cmdbuf *keybuf;
|
||||
struct crypto_engine *engine;
|
||||
struct host1x_syncpt *syncpt;
|
||||
struct device *dev;
|
||||
@ -502,7 +503,7 @@ void tegra_deinit_hash(struct tegra_se *se);
|
||||
int tegra_key_submit(struct tegra_se *se, const u8 *key,
|
||||
u32 keylen, u32 alg, u32 *keyid);
|
||||
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
|
||||
int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
|
||||
int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
|
||||
|
||||
/* HOST1x OPCODES */
|
||||
static inline u32 host1x_opcode_setpayload(unsigned int payload)
|
||||
|