Merge pull request 'linux-rolling-lts' (#42) from dummy/linux-stable-mirror:linux-rolling-lts into follow-tmp

Reviewed-on: #42
haru 2024-10-07 09:16:41 +09:00
commit e5ee93b834
882 changed files with 10683 additions and 5936 deletions

.gitignore

@@ -136,7 +136,6 @@ GTAGS
# id-utils files
ID
*.orig
*~
\#*#

@@ -3,7 +3,7 @@ KernelVersion:
Contact: linux-iio@vger.kernel.org
Description:
Reading this returns the valid values that can be written to the
on_altvoltage0_mode attribute:
filter_mode attribute:
- auto -> Adjust bandpass filter to track changes in input clock rate.
- manual -> disable/unregister the clock rate notifier / input clock tracking.

@@ -54,6 +54,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Ampere | AmpereOne | AC03_CPU_38 | AMPERE_ERRATUM_AC03_CPU_38 |
+----------------+-----------------+-----------------+-----------------------------+
| Ampere | AmpereOne AC04 | AC04_CPU_10 | AMPERE_ERRATUM_AC03_CPU_38 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 |
+----------------+-----------------+-----------------+-----------------------------+

@@ -23,7 +23,6 @@ properties:
- ak8963
- ak09911
- ak09912
- ak09916
deprecated: true
reg:

@@ -15,12 +15,19 @@ allOf:
properties:
compatible:
enum:
- nxp,imx8dxl-fspi
- nxp,imx8mm-fspi
- nxp,imx8mp-fspi
- nxp,imx8qxp-fspi
- nxp,lx2160a-fspi
oneOf:
- enum:
- nxp,imx8dxl-fspi
- nxp,imx8mm-fspi
- nxp,imx8mp-fspi
- nxp,imx8qxp-fspi
- nxp,imx8ulp-fspi
- nxp,lx2160a-fspi
- items:
- enum:
- nxp,imx93-fspi
- nxp,imx95-fspi
- const: nxp,imx8mm-fspi
reg:
items:

@@ -540,7 +540,7 @@ at module load time (for a module) with::
alerts_broken
The addresses are normal I2C addresses. The adapter is the string
name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
name of the adapter, as shown in /sys/bus/i2c/devices/i2c-<n>/name.
It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring
spaces, so if the name is "This is an I2C chip" you can say
adapter_name=ThisisanI2cchip. This is because it's hard to pass in
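As an aside, the space-ignoring match described above can be sketched in plain C as follows. This is an illustrative, hypothetical helper, not the kernel's actual implementation:

static int adapter_name_matches(const char *name, const char *pattern)
{
	/* Compare the adapter name against the module parameter value,
	 * skipping any spaces found in the adapter name. */
	while (*name || *pattern) {
		if (*name == ' ') {
			name++;
			continue;
		}
		if (*name != *pattern)
			return 0;
		name++;
		pattern++;
	}
	return 1;
}

/* adapter_name_matches("This is an I2C chip", "ThisisanI2Cchip") returns 1 */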

@@ -9,7 +9,7 @@ KVM Lock Overview
The acquisition orders for mutexes are as follows:
- cpus_read_lock() is taken outside kvm_lock
- cpus_read_lock() is taken outside kvm_lock and kvm_usage_lock
- kvm->lock is taken outside vcpu->mutex
@@ -24,6 +24,13 @@ The acquisition orders for mutexes are as follows:
are taken on the waiting side when modifying memslots, so MMU notifiers
must not take either kvm->slots_lock or kvm->slots_arch_lock.
cpus_read_lock() vs kvm_lock:
- Taking cpus_read_lock() outside of kvm_lock is problematic, despite that
being the official ordering, as it is quite easy to unknowingly trigger
cpus_read_lock() while holding kvm_lock. Use caution when walking vm_list,
e.g. avoid complex operations when possible.
For SRCU:
- ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections
@@ -228,10 +235,17 @@ time it will be set using the Dirty tracking mechanism described above.
:Type: mutex
:Arch: any
:Protects: - vm_list
- kvm_usage_count
``kvm_usage_lock``
^^^^^^^^^^^^^^^^^^
:Type: mutex
:Arch: any
:Protects: - kvm_usage_count
- hardware virtualization enable/disable
:Comment: KVM also disables CPU hotplug via cpus_read_lock() during
enable/disable.
:Comment: Exists because using kvm_lock leads to deadlock (see earlier comment
on cpus_read_lock() vs kvm_lock). Note, KVM also disables CPU hotplug via
cpus_read_lock() when enabling/disabling virtualization.
``kvm->mn_invalidate_lock``
^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -291,11 +305,12 @@ time it will be set using the Dirty tracking mechanism described above.
wakeup.
``vendor_module_lock``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^
:Type: mutex
:Arch: x86
:Protects: loading a vendor module (kvm_amd or kvm_intel)
:Comment: Exists because using kvm_lock leads to deadlock. cpu_hotplug_lock is
taken outside of kvm_lock, e.g. in KVM's CPU online/offline callbacks, and
many operations need to take cpu_hotplug_lock when loading a vendor module,
e.g. updating static calls.
:Comment: Exists because using kvm_lock leads to deadlock. kvm_lock is taken
in notifiers, e.g. __kvmclock_cpufreq_notifier(), that may be invoked while
cpu_hotplug_lock is held, e.g. from cpufreq_boost_trigger_state(), and many
operations need to take cpu_hotplug_lock when loading a vendor module, e.g.
updating static calls.
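To make the documented ordering concrete, here is a minimal sketch of a vm_list walker honoring the official order. Illustrative only, not actual KVM code; it assumes the kvm_lock and vm_list declarations from include/linux/kvm_host.h:

#include <linux/cpu.h>
#include <linux/kvm_host.h>

static void walk_vm_list_sketch(void)
{
	struct kvm *kvm;

	cpus_read_lock();	/* official order: taken outside kvm_lock */
	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		/* Keep the body simple: nothing here may take
		 * cpus_read_lock() again, per the caution above. */
	}
	mutex_unlock(&kvm_lock);
	cpus_read_unlock();
}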

@@ -13702,7 +13702,7 @@ M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
M: "Paul E. McKenney" <paulmck@kernel.org>
L: linux-kernel@vger.kernel.org
S: Supported
F: arch/powerpc/include/asm/membarrier.h
F: arch/*/include/asm/membarrier.h
F: include/uapi/linux/membarrier.h
F: kernel/sched/membarrier.c

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 50
SUBLEVEL = 54
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

@@ -1312,7 +1312,7 @@
compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
reg = <0xfffffe20 0x20>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
clocks = <&clk32k 0>;
clocks = <&clk32k 1>;
};
pit: timer@fffffe40 {
@@ -1338,7 +1338,7 @@
compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc";
reg = <0xfffffea8 0x100>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
clocks = <&clk32k 0>;
clocks = <&clk32k 1>;
};
watchdog: watchdog@ffffff80 {

@@ -272,7 +272,7 @@
compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt";
reg = <0xe001d020 0x30>;
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk32k 0>;
clocks = <&clk32k 1>;
};
clk32k: clock-controller@e001d050 {

@@ -366,7 +366,7 @@
};
pinctrl_tsc: tscgrp {
fsl,pin = <
fsl,pins = <
MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0
MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0xb0
MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0

@@ -350,7 +350,7 @@
&iomuxc_lpsr {
pinctrl_enet1_phy_interrupt: enet1phyinterruptgrp {
fsl,phy = <
fsl,pins = <
MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08
>;
};

@@ -151,6 +151,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#define pgdp_get(pgpd) READ_ONCE(*pgdp)
#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))

@@ -359,7 +359,7 @@ static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
u32 val = __raw_readl(psc->reg);
u8 index = (val & psc->mask) >> psc->shift;
if (index > psc->num_div)
if (index >= psc->num_div)
return 0;
return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]);

@@ -66,6 +66,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
return;
}
map = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(map)) {
pr_err("PLATSMP: No syscon regmap\n");
return;

@@ -64,33 +64,37 @@
#ifdef CONFIG_AS_VFP_VMRS_FPINST
#define fmrx(_vfp_) ({ \
u32 __v; \
asm(".fpu vfpv2\n" \
"vmrs %0, " #_vfp_ \
: "=r" (__v) : : "cc"); \
__v; \
})
#define fmrx(_vfp_) ({ \
u32 __v; \
asm volatile (".fpu vfpv2\n" \
"vmrs %0, " #_vfp_ \
: "=r" (__v) : : "cc"); \
__v; \
})
#define fmxr(_vfp_,_var_) \
asm(".fpu vfpv2\n" \
"vmsr " #_vfp_ ", %0" \
: : "r" (_var_) : "cc")
#define fmxr(_vfp_, _var_) ({ \
asm volatile (".fpu vfpv2\n" \
"vmsr " #_vfp_ ", %0" \
: : "r" (_var_) : "cc"); \
})
#else
#define vfpreg(_vfp_) #_vfp_
#define fmrx(_vfp_) ({ \
u32 __v; \
asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \
: "=r" (__v) : : "cc"); \
__v; \
})
#define fmrx(_vfp_) ({ \
u32 __v; \
asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) "," \
"cr0, 0 @ fmrx %0, " #_vfp_ \
: "=r" (__v) : : "cc"); \
__v; \
})
#define fmxr(_vfp_,_var_) \
asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \
: : "r" (_var_) : "cc")
#define fmxr(_vfp_, _var_) ({ \
asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) "," \
"cr0, 0 @ fmxr " #_vfp_ ", %0" \
: : "r" (_var_) : "cc"); \
})
#endif

@@ -420,7 +420,7 @@ config AMPERE_ERRATUM_AC03_CPU_38
default y
help
This option adds an alternative code sequence to work around Ampere
erratum AC03_CPU_38 on AmpereOne.
errata AC03_CPU_38 and AC04_CPU_10 on AmpereOne.
The affected design reports FEAT_HAFDBS as not implemented in
ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0

@@ -32,7 +32,7 @@
device_type = "memory";
reg = <0x0 0x80000000 0x3da00000>,
<0x0 0xc0000000 0x40000000>,
<0x8 0x80000000 0x40000000>;
<0x8 0x80000000 0x80000000>;
};
gpio-keys {

@@ -731,7 +731,7 @@
opp-900000000-3 {
opp-hz = /bits/ 64 <900000000>;
opp-microvolt = <850000>;
opp-supported-hw = <0x8>;
opp-supported-hw = <0xcf>;
};
opp-900000000-4 {
@@ -743,13 +743,13 @@
opp-900000000-5 {
opp-hz = /bits/ 64 <900000000>;
opp-microvolt = <825000>;
opp-supported-hw = <0x30>;
opp-supported-hw = <0x20>;
};
opp-950000000-3 {
opp-hz = /bits/ 64 <950000000>;
opp-microvolt = <900000>;
opp-supported-hw = <0x8>;
opp-supported-hw = <0xcf>;
};
opp-950000000-4 {
@@ -761,13 +761,13 @@
opp-950000000-5 {
opp-hz = /bits/ 64 <950000000>;
opp-microvolt = <850000>;
opp-supported-hw = <0x30>;
opp-supported-hw = <0x20>;
};
opp-1000000000-3 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <950000>;
opp-supported-hw = <0x8>;
opp-supported-hw = <0xcf>;
};
opp-1000000000-4 {
@@ -779,7 +779,7 @@
opp-1000000000-5 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <875000>;
opp-supported-hw = <0x30>;
opp-supported-hw = <0x20>;
};
};

@@ -1312,6 +1312,7 @@
usb2-lpm-disable;
vusb33-supply = <&mt6359_vusb_ldo_reg>;
vbus-supply = <&usb_vbus>;
mediatek,u3p-dis-msk = <1>;
};
#include <arm/cros-ec-keyboard.dtsi>

@@ -2766,10 +2766,10 @@
compatible = "mediatek,mt8195-dp-intf";
reg = <0 0x1c015000 0 0x1000>;
interrupts = <GIC_SPI 657 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&vdosys0 CLK_VDO0_DP_INTF0>,
<&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>,
clocks = <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>,
<&vdosys0 CLK_VDO0_DP_INTF0>,
<&apmixedsys CLK_APMIXED_TVDPLL1>;
clock-names = "engine", "pixel", "pll";
clock-names = "pixel", "engine", "pll";
status = "disabled";
};
@@ -3036,10 +3036,10 @@
reg = <0 0x1c113000 0 0x1000>;
interrupts = <GIC_SPI 513 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
clocks = <&vdosys1 CLK_VDO1_DP_INTF0_MM>,
<&vdosys1 CLK_VDO1_DPINTF>,
clocks = <&vdosys1 CLK_VDO1_DPINTF>,
<&vdosys1 CLK_VDO1_DP_INTF0_MM>,
<&apmixedsys CLK_APMIXED_TVDPLL2>;
clock-names = "engine", "pixel", "pll";
clock-names = "pixel", "engine", "pll";
status = "disabled";
};

@@ -1951,6 +1951,7 @@
reg = <0x0 0x15000000 0x0 0x100000>;
#iommu-cells = <2>;
#global-interrupts = <2>;
dma-coherent;
interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
@@ -2089,6 +2090,7 @@
reg = <0x0 0x15200000 0x0 0x80000>;
#iommu-cells = <2>;
#global-interrupts = <2>;
dma-coherent;
interrupts = <GIC_SPI 920 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 921 IRQ_TYPE_LEVEL_HIGH>,

@@ -145,8 +145,8 @@
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
reg = <0x0 0x11900000 0 0x40000>,
<0x0 0x11940000 0 0x60000>;
reg = <0x0 0x11900000 0 0x20000>,
<0x0 0x11940000 0 0x40000>;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
};
};

@@ -997,8 +997,8 @@
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
reg = <0x0 0x11900000 0 0x40000>,
<0x0 0x11940000 0 0x60000>;
reg = <0x0 0x11900000 0 0x20000>,
<0x0 0x11940000 0 0x40000>;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
};

@@ -1004,8 +1004,8 @@
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
reg = <0x0 0x11900000 0 0x40000>,
<0x0 0x11940000 0 0x60000>;
reg = <0x0 0x11900000 0 0x20000>,
<0x0 0x11940000 0 0x40000>;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
};

@@ -332,7 +332,7 @@
pmic {
pmic_int_l: pmic-int-l {
rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};

@@ -32,12 +32,12 @@
backlight: edp-backlight {
compatible = "pwm-backlight";
power-supply = <&vcc_12v>;
pwms = <&pwm0 0 740740 0>;
pwms = <&pwm0 0 125000 0>;
};
bat: battery {
compatible = "simple-battery";
charge-full-design-microamp-hours = <9800000>;
charge-full-design-microamp-hours = <10000000>;
voltage-max-design-microvolt = <4350000>;
voltage-min-design-microvolt = <3000000>;
};

@@ -119,6 +119,22 @@
drive-impedance-ohm = <33>;
};
&gpio3 {
/*
* The Qseven BIOS_DISABLE signal on the RK3399-Q7 keeps the on-module
* eMMC and SPI flash powered-down initially (in fact it keeps the
* reset signal asserted). BIOS_DISABLE_OVERRIDE pin allows to override
* that signal so that eMMC and SPI can be used regardless of the state
* of the signal.
*/
bios-disable-override-hog {
gpios = <RK_PD5 GPIO_ACTIVE_LOW>;
gpio-hog;
line-name = "bios_disable_override";
output-high;
};
};
&gmac {
assigned-clocks = <&cru SCLK_RMII_SRC>;
assigned-clock-parents = <&clkin_gmac>;
@@ -374,6 +390,7 @@
&i2s0 {
pinctrl-0 = <&i2s0_2ch_bus>;
pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
rockchip,playback-channels = <2>;
rockchip,capture-channels = <2>;
status = "okay";
@@ -382,8 +399,8 @@
/*
* As Q7 does not specify neither a global nor a RX clock for I2S these
* signals are not used. Furthermore I2S0_LRCK_RX is used as GPIO.
* Therefore we have to redefine the i2s0_2ch_bus definition to prevent
* conflicts.
* Therefore we have to redefine the i2s0_2ch_bus and i2s0_2ch_bus_bclk_off
* definitions to prevent conflicts.
*/
&i2s0_2ch_bus {
rockchip,pins =
@@ -393,6 +410,14 @@
<3 RK_PD7 1 &pcfg_pull_none>;
};
&i2s0_2ch_bus_bclk_off {
rockchip,pins =
<3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
<3 RK_PD2 1 &pcfg_pull_none>,
<3 RK_PD3 1 &pcfg_pull_none>,
<3 RK_PD7 1 &pcfg_pull_none>;
};
&io_domains {
status = "okay";
bt656-supply = <&vcc_1v8>;
@@ -408,9 +433,14 @@
&pinctrl {
pinctrl-names = "default";
pinctrl-0 = <&q7_thermal_pin>;
pinctrl-0 = <&q7_thermal_pin &bios_disable_override_hog_pin>;
gpios {
bios_disable_override_hog_pin: bios-disable-override-hog-pin {
rockchip,pins =
<3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
};
q7_thermal_pin: q7-thermal-pin {
rockchip,pins =
<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;

@@ -13,7 +13,7 @@
/ {
model = "Hardkernel ODROID-M1";
compatible = "rockchip,rk3568-odroid-m1", "rockchip,rk3568";
compatible = "hardkernel,odroid-m1", "rockchip,rk3568";
aliases {
ethernet0 = &gmac0;

@@ -123,7 +123,7 @@
no-map;
};
c66_1_dma_memory_region: c66-dma-memory@a6000000 {
c66_0_dma_memory_region: c66-dma-memory@a6000000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
@@ -135,7 +135,7 @@
no-map;
};
c66_0_dma_memory_region: c66-dma-memory@a7000000 {
c66_1_dma_memory_region: c66-dma-memory@a7000000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;

@@ -119,7 +119,7 @@
no-map;
};
c66_1_dma_memory_region: c66-dma-memory@a6000000 {
c66_0_dma_memory_region: c66-dma-memory@a6000000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa6000000 0x00 0x100000>;
no-map;
@@ -131,7 +131,7 @@
no-map;
};
c66_0_dma_memory_region: c66-dma-memory@a7000000 {
c66_1_dma_memory_region: c66-dma-memory@a7000000 {
compatible = "shared-dma-pool";
reg = <0x00 0xa7000000 0x00 0x100000>;
no-map;

@@ -119,6 +119,18 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
return acpi_cpu_get_madt_gicc(cpu)->uid;
}
static inline int get_cpu_for_acpi_id(u32 uid)
{
int cpu;
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
if (acpi_cpu_get_madt_gicc(cpu) &&
uid == get_acpi_id_for_cpu(cpu))
return cpu;
return -EINVAL;
}
static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
int apei_claim_sea(struct pt_regs *regs);

@@ -143,6 +143,7 @@
#define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039
#define AMPERE_CPU_PART_AMPERE1 0xAC3
#define AMPERE_CPU_PART_AMPERE1A 0xAC4
#define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
@@ -212,6 +213,7 @@
#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
#define MIDR_AMPERE1A MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1A)
#define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */

@@ -10,63 +10,63 @@
#include <asm/memory.h>
#include <asm/sysreg.h>
#define ESR_ELx_EC_UNKNOWN (0x00)
#define ESR_ELx_EC_WFx (0x01)
#define ESR_ELx_EC_UNKNOWN UL(0x00)
#define ESR_ELx_EC_WFx UL(0x01)
/* Unallocated EC: 0x02 */
#define ESR_ELx_EC_CP15_32 (0x03)
#define ESR_ELx_EC_CP15_64 (0x04)
#define ESR_ELx_EC_CP14_MR (0x05)
#define ESR_ELx_EC_CP14_LS (0x06)
#define ESR_ELx_EC_FP_ASIMD (0x07)
#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
#define ESR_ELx_EC_CP15_32 UL(0x03)
#define ESR_ELx_EC_CP15_64 UL(0x04)
#define ESR_ELx_EC_CP14_MR UL(0x05)
#define ESR_ELx_EC_CP14_LS UL(0x06)
#define ESR_ELx_EC_FP_ASIMD UL(0x07)
#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */
#define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */
/* Unallocated EC: 0x0A - 0x0B */
#define ESR_ELx_EC_CP14_64 (0x0C)
#define ESR_ELx_EC_BTI (0x0D)
#define ESR_ELx_EC_ILL (0x0E)
#define ESR_ELx_EC_CP14_64 UL(0x0C)
#define ESR_ELx_EC_BTI UL(0x0D)
#define ESR_ELx_EC_ILL UL(0x0E)
/* Unallocated EC: 0x0F - 0x10 */
#define ESR_ELx_EC_SVC32 (0x11)
#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
#define ESR_ELx_EC_SVC32 UL(0x11)
#define ESR_ELx_EC_HVC32 UL(0x12) /* EL2 only */
#define ESR_ELx_EC_SMC32 UL(0x13) /* EL2 and above */
/* Unallocated EC: 0x14 */
#define ESR_ELx_EC_SVC64 (0x15)
#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
#define ESR_ELx_EC_SYS64 (0x18)
#define ESR_ELx_EC_SVE (0x19)
#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
#define ESR_ELx_EC_SVC64 UL(0x15)
#define ESR_ELx_EC_HVC64 UL(0x16) /* EL2 and above */
#define ESR_ELx_EC_SMC64 UL(0x17) /* EL2 and above */
#define ESR_ELx_EC_SYS64 UL(0x18)
#define ESR_ELx_EC_SVE UL(0x19)
#define ESR_ELx_EC_ERET UL(0x1a) /* EL2 only */
/* Unallocated EC: 0x1B */
#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
#define ESR_ELx_EC_SME (0x1D)
#define ESR_ELx_EC_FPAC UL(0x1C) /* EL1 and above */
#define ESR_ELx_EC_SME UL(0x1D)
/* Unallocated EC: 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
#define ESR_ELx_EC_PC_ALIGN (0x22)
#define ESR_ELx_EC_IMP_DEF UL(0x1f) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW UL(0x20)
#define ESR_ELx_EC_IABT_CUR UL(0x21)
#define ESR_ELx_EC_PC_ALIGN UL(0x22)
/* Unallocated EC: 0x23 */
#define ESR_ELx_EC_DABT_LOW (0x24)
#define ESR_ELx_EC_DABT_CUR (0x25)
#define ESR_ELx_EC_SP_ALIGN (0x26)
#define ESR_ELx_EC_MOPS (0x27)
#define ESR_ELx_EC_FP_EXC32 (0x28)
#define ESR_ELx_EC_DABT_LOW UL(0x24)
#define ESR_ELx_EC_DABT_CUR UL(0x25)
#define ESR_ELx_EC_SP_ALIGN UL(0x26)
#define ESR_ELx_EC_MOPS UL(0x27)
#define ESR_ELx_EC_FP_EXC32 UL(0x28)
/* Unallocated EC: 0x29 - 0x2B */
#define ESR_ELx_EC_FP_EXC64 (0x2C)
#define ESR_ELx_EC_FP_EXC64 UL(0x2C)
/* Unallocated EC: 0x2D - 0x2E */
#define ESR_ELx_EC_SERROR (0x2F)
#define ESR_ELx_EC_BREAKPT_LOW (0x30)
#define ESR_ELx_EC_BREAKPT_CUR (0x31)
#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
#define ESR_ELx_EC_WATCHPT_LOW (0x34)
#define ESR_ELx_EC_WATCHPT_CUR (0x35)
#define ESR_ELx_EC_SERROR UL(0x2F)
#define ESR_ELx_EC_BREAKPT_LOW UL(0x30)
#define ESR_ELx_EC_BREAKPT_CUR UL(0x31)
#define ESR_ELx_EC_SOFTSTP_LOW UL(0x32)
#define ESR_ELx_EC_SOFTSTP_CUR UL(0x33)
#define ESR_ELx_EC_WATCHPT_LOW UL(0x34)
#define ESR_ELx_EC_WATCHPT_CUR UL(0x35)
/* Unallocated EC: 0x36 - 0x37 */
#define ESR_ELx_EC_BKPT32 (0x38)
#define ESR_ELx_EC_BKPT32 UL(0x38)
/* Unallocated EC: 0x39 */
#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
#define ESR_ELx_EC_VECTOR32 UL(0x3A) /* EL2 only */
/* Unallocated EC: 0x3B */
#define ESR_ELx_EC_BRK64 (0x3C)
#define ESR_ELx_EC_BRK64 UL(0x3C)
/* Unallocated EC: 0x3D - 0x3F */
#define ESR_ELx_EC_MAX (0x3F)
#define ESR_ELx_EC_MAX UL(0x3F)
#define ESR_ELx_EC_SHIFT (26)
#define ESR_ELx_EC_WIDTH (6)

@@ -312,10 +312,10 @@ struct zt_context {
((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \
/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES))
#define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
(SVE_SIG_ZREG_SIZE(vq) * n))
(SVE_SIG_ZREG_SIZE(vq) * (n)))
#define ZA_SIG_CONTEXT_SIZE(vq) \
(ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
@@ -326,7 +326,7 @@ struct zt_context {
#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)
#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n)
#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n))
#define ZT_SIG_CONTEXT_SIZE(n) \
(sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))

@@ -34,17 +34,6 @@ int __init acpi_numa_get_nid(unsigned int cpu)
return acpi_early_node_map[cpu];
}
static inline int get_cpu_for_acpi_id(u32 uid)
{
int cpu;
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
if (uid == get_acpi_id_for_cpu(cpu))
return cpu;
return -EINVAL;
}
static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
const unsigned long end)
{

@@ -472,6 +472,14 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
};
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
static const struct midr_range erratum_ac03_cpu_38_list[] = {
MIDR_ALL_VERSIONS(MIDR_AMPERE1),
MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
{},
};
#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
@@ -789,7 +797,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.desc = "AmpereOne erratum AC03_CPU_38",
.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
},
#endif
{

@@ -415,9 +415,9 @@ out:
return;
}
static __always_inline void do_ffa_mem_xfer(const u64 func_id,
struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
static void __do_ffa_mem_xfer(const u64 func_id,
struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
DECLARE_REG(u32, len, ctxt, 1);
DECLARE_REG(u32, fraglen, ctxt, 2);
@@ -428,9 +428,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
u32 offset, nr_ranges;
int ret = 0;
BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
func_id != FFA_FN64_MEM_LEND);
if (addr_mbz || npages_mbz || fraglen > len ||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
ret = FFA_RET_INVALID_PARAMETERS;
@@ -449,6 +446,11 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
goto out_unlock;
}
if (len > ffa_desc_buf.len) {
ret = FFA_RET_NO_MEMORY;
goto out_unlock;
}
buf = hyp_buffers.tx;
memcpy(buf, host_buffers.tx, fraglen);
@@ -498,6 +500,13 @@ err_unshare:
goto out_unlock;
}
#define do_ffa_mem_xfer(fid, res, ctxt) \
do { \
BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE && \
(fid) != FFA_FN64_MEM_LEND); \
__do_ffa_mem_xfer((fid), (res), (ctxt)); \
} while (0);
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{

@@ -9,6 +9,8 @@
extern atomic_t irq_err_count;
#define ARCH_IRQ_INIT_FLAGS IRQ_NOPROBE
/*
* interrupt-retrigger: NOP for now. This may not be appropriate for all
* machines, we'll see ...

@@ -122,9 +122,6 @@ void __init init_IRQ(void)
panic("IPI IRQ request failed\n");
#endif
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
for_each_possible_cpu(i) {
page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);

@@ -13,6 +13,7 @@
#include <asm/bootinfo.h>
#include <asm/early_ioremap.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -170,7 +171,7 @@ unsigned long __init relocate_kernel(void)
unsigned long kernel_length;
unsigned long random_offset = 0;
void *location_new = _text; /* Default to original kernel start */
char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
@@ -182,6 +183,7 @@ unsigned long __init relocate_kernel(void)
random_offset = (unsigned long)location_new - (unsigned long)(_text);
#endif
reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
early_memunmap(cmdline, COMMAND_LINE_SIZE);
if (random_offset) {
kernel_length = (long)(_end) - (long)(_text);

@@ -116,7 +116,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
{
/* regs will be equal to current_pt_regs() */
struct kernel_clone_args args = {
.flags = regs->d1 & ~CSIGNAL,
.flags = (u32)(regs->d1) & ~CSIGNAL,
.pidfd = (int __user *)regs->d3,
.child_tid = (int __user *)regs->d4,
.parent_tid = (int __user *)regs->d3,

@@ -193,11 +193,6 @@ asmlinkage void __init mmu_init(void)
{
unsigned int kstart, ksize;
if (!memblock.reserved.cnt) {
pr_emerg("Error memory count\n");
machine_restart(NULL);
}
if ((u32) memblock.memory.regions[0].size < 0x400000) {
pr_emerg("Memory must be greater than 4MB\n");
machine_restart(NULL);

@@ -303,13 +303,6 @@ int r4k_clockevent_init(void)
if (!c0_compare_int_usable())
return -ENXIO;
/*
* With vectored interrupts things are getting platform specific.
* get_c0_compare_int is a hook to allow a platform to return the
* interrupt number of its liking.
*/
irq = get_c0_compare_int();
cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS";
@@ -320,7 +313,6 @@
min_delta = calculate_min_delta();
cd->rating = 300;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = mips_next_event;
cd->event_handler = mips_event_handler;
@@ -332,6 +324,13 @@
cp0_timer_irq_installed = 1;
/*
* With vectored interrupts things are getting platform specific.
* get_c0_compare_int is a hook to allow a platform to return the
* interrupt number of its liking.
*/
irq = get_c0_compare_int();
if (request_irq(irq, c0_compare_interrupt, flags, "timer",
c0_compare_interrupt))
pr_err("Failed to request irq %d (timer)\n", irq);

@@ -96,6 +96,7 @@ config CRYPTO_AES_PPC_SPE
config CRYPTO_AES_GCM_P10
tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
depends on BROKEN
depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
select CRYPTO_LIB_AES
select CRYPTO_ALGAPI

@@ -39,6 +39,12 @@
#define STDX_BE stringify_in_c(stdbrx)
#endif
#ifdef CONFIG_CC_IS_CLANG
#define DS_FORM_CONSTRAINT "Z<>"
#else
#define DS_FORM_CONSTRAINT "YZ<>"
#endif
#else /* 32-bit */
/* operations for longs and pointers */

@@ -11,6 +11,7 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>
#include <asm/asm-compat.h>
/*
* Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
@@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
else
__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
return t;
}
@@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
else
__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
__asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \

@@ -303,8 +303,7 @@ extern unsigned long linear_map_top;
extern int book3e_htw_mode;
#define PPC_HTW_NONE 0
#define PPC_HTW_IBM 1
#define PPC_HTW_E6500 2
#define PPC_HTW_E6500 1
/*
* 64-bit booke platforms don't load the tlb in the tlb miss handler code.

@@ -6,6 +6,7 @@
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>
#include <asm/asm-compat.h>
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
@@ -92,12 +93,6 @@ __pu_failed: \
: label)
#endif
#ifdef CONFIG_CC_IS_CLANG
#define DS_FORM_CONSTRAINT "Z<>"
#else
#define DS_FORM_CONSTRAINT "YZ<>"
#endif
#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \

@@ -41,12 +41,12 @@
#include "head_32.h"
.macro compare_to_kernel_boundary scratch, addr
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
#if CONFIG_TASK_SIZE <= 0x80000000 && MODULES_VADDR >= 0x80000000
/* By simply checking Address >= 0x80000000, we know if its a kernel address */
not. \scratch, \addr
#else
rlwinm \scratch, \addr, 16, 0xfff8
cmpli cr0, \scratch, PAGE_OFFSET@h
cmpli cr0, \scratch, TASK_SIZE@h
#endif
.endm
@@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */
mfspr r10, SPRN_SRR0
mtspr SPRN_MD_EPN, r10
rlwinm r11, r10, 16, 0xfff8
cmpli cr1, r11, PAGE_OFFSET@h
cmpli cr1, r11, TASK_SIZE@h
mfspr r11, SPRN_M_TWB /* Get level 1 table */
blt+ cr1, 3f

@@ -950,6 +950,7 @@ void __init setup_arch(char **cmdline_p)
mem_topology_setup();
/* Set max_mapnr before paging_init() */
set_max_mapnr(max_pfn);
high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
/*
* Release secondary cpus out of their spinloops at 0x60 now that

@@ -38,11 +38,7 @@
.else
addi r4, r5, VDSO_DATA_OFFSET
.endif
#ifdef __powerpc64__
bl CFUNC(DOTSYM(\funct))
#else
bl \funct
#endif
PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)

@@ -74,6 +74,8 @@ SECTIONS
.got : { *(.got) } :text
.plt : { *(.plt) }
.rela.dyn : { *(.rela .rela*) }
_end = .;
__end = .;
PROVIDE(end = .);
@@ -87,7 +89,7 @@ SECTIONS
*(.branch_lt)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
*(.got1 .glink .iplt .rela*)
*(.got1 .glink .iplt)
}
}

@@ -69,7 +69,7 @@ SECTIONS
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.gcc_except_table : { *(.gcc_except_table) }
.rela.dyn ALIGN(8) : { *(.rela.dyn) }
.rela.dyn ALIGN(8) : { *(.rela .rela*) }
.got ALIGN(8) : { *(.got .toc) }
@@ -86,7 +86,7 @@ SECTIONS
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
*(.opd)
*(.glink .iplt .plt .rela*)
*(.glink .iplt .plt)
}
}

@@ -715,7 +715,15 @@ again:
}
release:
qnodesp->count--; /* release the node */
/*
* Clear the lock before releasing the node, as another CPU might see stale
* values if an interrupt occurs after we increment qnodesp->count
* but before node->lock is initialized. The barrier ensures that
* there are no further stores to the node after it has been released.
*/
node->lock = NULL;
barrier();
qnodesp->count--;
}
void queued_spin_lock_slowpath(struct qspinlock *lock)

@@ -287,8 +287,6 @@ void __init mem_init(void)
swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
kasan_late_init();
memblock_free_all();

@@ -149,11 +149,11 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
mmu_mapin_immr();
mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_X, true);
if (debug_pagealloc_enabled_or_kfence()) {
top = boundary;
} else {
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_X, true);
mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
}

@@ -3,7 +3,7 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-y += mmu_context.o tlb.o tlb_low.o kup.o
obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o
obj-$(CONFIG_PPC_BOOK3E_64) += tlb_64e.o tlb_low_64e.o book3e_pgtable.o
obj-$(CONFIG_40x) += 40x.o
obj-$(CONFIG_44x) += 44x.o
obj-$(CONFIG_PPC_8xx) += 8xx.o

@@ -110,28 +110,6 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
};
#endif
/* The variables below are currently only used on 64-bit Book3E
* though this will probably be made common with other nohash
* implementations at some point
*/
#ifdef CONFIG_PPC64
int mmu_pte_psize; /* Page size used for PTE pages */
int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
unsigned long linear_map_top; /* Top of linear mapping */
/*
* Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
* exceptions. This is used for bolted and e6500 TLB miss handlers which
* do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
* this is set to zero.
*/
int extlb_level_exc;
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_E500
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
@@ -358,381 +336,7 @@ void tlb_flush(struct mmu_gather *tlb)
flush_tlb_mm(tlb->mm);
}
/*
* Below are functions specific to the 64-bit variant of Book3E though that
* may change in the future
*/
#ifdef CONFIG_PPC64
/*
* Handling of virtual linear page tables or indirect TLB entries
* flushing when PTE pages are freed
*/
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
int tsize = mmu_psize_defs[mmu_pte_psize].enc;
if (book3e_htw_mode != PPC_HTW_NONE) {
unsigned long start = address & PMD_MASK;
unsigned long end = address + PMD_SIZE;
unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
/* This isn't the most optimal, ideally we would factor out the
* while preempt & CPU mask mucking around, or even the IPI but
* it will do for now
*/
while (start < end) {
__flush_tlb_page(tlb->mm, start, tsize, 1);
start += size;
}
} else {
unsigned long rmask = 0xf000000000000000ul;
unsigned long rid = (address & rmask) | 0x1000000000000000ul;
unsigned long vpte = address & ~rmask;
vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
vpte |= rid;
__flush_tlb_page(tlb->mm, vpte, tsize, 0);
}
}
static void __init setup_page_sizes(void)
{
unsigned int tlb0cfg;
unsigned int tlb0ps;
unsigned int eptcfg;
int i, psize;
#ifdef CONFIG_PPC_E500
unsigned int mmucfg = mfspr(SPRN_MMUCFG);
int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
unsigned int min_pg, max_pg;
min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def;
unsigned int shift;
def = &mmu_psize_defs[psize];
shift = def->shift;
if (shift == 0 || shift & 1)
continue;
/* adjust to be in terms of 4^shift Kb */
shift = (shift - 10) >> 1;
if ((shift >= min_pg) && (shift <= max_pg))
def->flags |= MMU_PAGE_SIZE_DIRECT;
}
goto out;
}
if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
u32 tlb1cfg, tlb1ps;
tlb0cfg = mfspr(SPRN_TLB0CFG);
tlb1cfg = mfspr(SPRN_TLB1CFG);
tlb1ps = mfspr(SPRN_TLB1PS);
eptcfg = mfspr(SPRN_EPTCFG);
if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
book3e_htw_mode = PPC_HTW_E6500;
/*
* We expect 4K subpage size and unrestricted indirect size.
* The lack of a restriction on indirect size is a Freescale
* extension, indicated by PSn = 0 but SPSn != 0.
*/
if (eptcfg != 2)
book3e_htw_mode = PPC_HTW_NONE;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
if (!def->shift)
continue;
if (tlb1ps & (1U << (def->shift - 10))) {
def->flags |= MMU_PAGE_SIZE_DIRECT;
if (book3e_htw_mode && psize == MMU_PAGE_2M)
def->flags |= MMU_PAGE_SIZE_INDIRECT;
}
}
goto out;
}
#endif
tlb0cfg = mfspr(SPRN_TLB0CFG);
tlb0ps = mfspr(SPRN_TLB0PS);
eptcfg = mfspr(SPRN_EPTCFG);
/* Look for supported direct sizes */
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
if (tlb0ps & (1U << (def->shift - 10)))
def->flags |= MMU_PAGE_SIZE_DIRECT;
}
/* Indirect page sizes supported ? */
if ((tlb0cfg & TLBnCFG_IND) == 0 ||
(tlb0cfg & TLBnCFG_PT) == 0)
goto out;
book3e_htw_mode = PPC_HTW_IBM;
/* Now, we only deal with one IND page size for each
* direct size. Hopefully all implementations today are
* unambiguous, but we might want to be careful in the
* future.
*/
for (i = 0; i < 3; i++) {
unsigned int ps, sps;
sps = eptcfg & 0x1f;
eptcfg >>= 5;
ps = eptcfg & 0x1f;
eptcfg >>= 5;
if (!ps || !sps)
continue;
for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
if (ps == (def->shift - 10))
def->flags |= MMU_PAGE_SIZE_INDIRECT;
if (sps == (def->shift - 10))
def->ind = ps + 10;
}
}
out:
/* Cleanup array and print summary */
pr_info("MMU: Supported page sizes\n");
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
const char *__page_type_names[] = {
"unsupported",
"direct",
"indirect",
"direct & indirect"
};
if (def->flags == 0) {
def->shift = 0;
continue;
}
pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
__page_type_names[def->flags & 0x3]);
}
}
static void __init setup_mmu_htw(void)
{
/*
* If we want to use HW tablewalk, enable it by patching the TLB miss
* handlers to branch to the one dedicated to it.
*/
switch (book3e_htw_mode) {
case PPC_HTW_IBM:
patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
break;
#ifdef CONFIG_PPC_E500
case PPC_HTW_E6500:
extlb_level_exc = EX_TLB_SIZE;
patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
break;
#endif
}
pr_info("MMU: Book3E HW tablewalk %s\n",
book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}
/*
* Early initialization of the MMU TLB code
*/
static void early_init_this_mmu(void)
{
unsigned int mas4;
/* Set MAS4 based on page table setting */
mas4 = 0x4 << MAS4_WIMGED_SHIFT;
switch (book3e_htw_mode) {
case PPC_HTW_E6500:
mas4 |= MAS4_INDD;
mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
mas4 |= MAS4_TLBSELD(1);
mmu_pte_psize = MMU_PAGE_2M;
break;
case PPC_HTW_IBM:
mas4 |= MAS4_INDD;
mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
mmu_pte_psize = MMU_PAGE_1M;
break;
case PPC_HTW_NONE:
mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
mmu_pte_psize = mmu_virtual_psize;
break;
}
mtspr(SPRN_MAS4, mas4);
#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned int num_cams;
bool map = true;
/* use a quarter of the TLBCAM for bolted linear map */
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
/*
* Only do the mapping once per core, or else the
* transient mapping would cause problems.
*/
#ifdef CONFIG_SMP
if (hweight32(get_tensr()) > 1)
map = false;
#endif
if (map)
linear_map_top = map_mem_in_cams(linear_map_top,
num_cams, false, true);
}
#endif
/* A sync won't hurt us after mucking around with
* the MMU configuration
*/
mb();
}
static void __init early_init_mmu_global(void)
{
/* XXX This should be decided at runtime based on supported
* page sizes in the TLB, but for now let's assume 16M is
* always there and a good fit (which it probably is)
*
* Freescale booke only supports 4K pages in TLB0, so use that.
*/
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
mmu_vmemmap_psize = MMU_PAGE_4K;
else
mmu_vmemmap_psize = MMU_PAGE_16M;
/* XXX This code only checks for TLB 0 capabilities and doesn't
* check what page size combos are supported by the HW. It
* also doesn't handle the case where a separate array holds
* the IND entries from the array loaded by the PT.
*/
/* Look for supported page sizes */
setup_page_sizes();
/* Look for HW tablewalk support */
setup_mmu_htw();
#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
if (book3e_htw_mode == PPC_HTW_NONE) {
extlb_level_exc = EX_TLB_SIZE;
patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
patch_exception(0x1e0,
exc_instruction_tlb_miss_bolted_book3e);
}
}
#endif
/* Set the global containing the top of the linear mapping
* for use by the TLB miss code
*/
linear_map_top = memblock_end_of_DRAM();
ioremap_bot = IOREMAP_BASE;
}
static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
/*
* Limit memory so we dont have linear faults.
* Unlike memblock_set_current_limit, which limits
* memory available during early boot, this permanently
* reduces the memory available to Linux. We need to
* do this because highmem is not supported on 64-bit.
*/
memblock_enforce_memory_limit(linear_map_top);
}
#endif
memblock_set_current_limit(linear_map_top);
}
/* boot cpu only */
void __init early_init_mmu(void)
{
early_init_mmu_global();
early_init_this_mmu();
early_mmu_set_memory_limit();
}
void early_init_mmu_secondary(void)
{
early_init_this_mmu();
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
* the bolted TLB entry. We know for now that only 1G
* entries are supported though that may eventually
* change.
*
* on FSL Embedded 64-bit, usually all RAM is bolted, but with
* unusual memory sizes it's possible for some RAM to not be mapped
* (such RAM is not used at all by Linux, since we don't support
* highmem on 64-bit). We limit ppc64_rma_size to what would be
* mappable if this memblock is the only one. Additional memblocks
* can only increase, not decrease, the amount that ends up getting
* mapped. We still limit max to 1G even if we'll eventually map
* more. This is due to what the early init code is set up to do.
*
* We crop it to the size of the first MEMBLOCK to
* avoid going over total available memory just in case...
*/
#ifdef CONFIG_PPC_E500
if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned long linear_sz;
unsigned int num_cams;
/* use a quarter of the TLBCAM for bolted linear map */
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
true, true);
ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
} else
#endif
ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
/* Finally limit subsequent allocations */
memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
#ifndef CONFIG_PPC64
void __init early_init_mmu(void)
{
unsigned long root = of_get_flat_dt_root();

@@ -0,0 +1,361 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
* IBM Corp.
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
/* The variables below are currently only used on 64-bit Book3E
* though this will probably be made common with other nohash
* implementations at some point
*/
static int mmu_pte_psize; /* Page size used for PTE pages */
int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
unsigned long linear_map_top; /* Top of linear mapping */
/*
* Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
* exceptions. This is used for bolted and e6500 TLB miss handlers which
* do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
* this is set to zero.
*/
int extlb_level_exc;
/*
* Handling of virtual linear page tables or indirect TLB entries
* flushing when PTE pages are freed
*/
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
int tsize = mmu_psize_defs[mmu_pte_psize].enc;
if (book3e_htw_mode != PPC_HTW_NONE) {
unsigned long start = address & PMD_MASK;
unsigned long end = address + PMD_SIZE;
unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
/* This isn't the most optimal, ideally we would factor out the
* while preempt & CPU mask mucking around, or even the IPI but
* it will do for now
*/
while (start < end) {
__flush_tlb_page(tlb->mm, start, tsize, 1);
start += size;
}
} else {
unsigned long rmask = 0xf000000000000000ul;
unsigned long rid = (address & rmask) | 0x1000000000000000ul;
unsigned long vpte = address & ~rmask;
vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
vpte |= rid;
__flush_tlb_page(tlb->mm, vpte, tsize, 0);
}
}
static void __init setup_page_sizes(void)
{
unsigned int tlb0cfg;
unsigned int eptcfg;
int psize;
#ifdef CONFIG_PPC_E500
unsigned int mmucfg = mfspr(SPRN_MMUCFG);
int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
unsigned int min_pg, max_pg;
min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def;
unsigned int shift;
def = &mmu_psize_defs[psize];
shift = def->shift;
if (shift == 0 || shift & 1)
continue;
/* adjust to be in terms of 4^shift Kb */
shift = (shift - 10) >> 1;
if ((shift >= min_pg) && (shift <= max_pg))
def->flags |= MMU_PAGE_SIZE_DIRECT;
}
goto out;
}
if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
u32 tlb1cfg, tlb1ps;
tlb0cfg = mfspr(SPRN_TLB0CFG);
tlb1cfg = mfspr(SPRN_TLB1CFG);
tlb1ps = mfspr(SPRN_TLB1PS);
eptcfg = mfspr(SPRN_EPTCFG);
if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
book3e_htw_mode = PPC_HTW_E6500;
/*
* We expect 4K subpage size and unrestricted indirect size.
* The lack of a restriction on indirect size is a Freescale
* extension, indicated by PSn = 0 but SPSn != 0.
*/
if (eptcfg != 2)
book3e_htw_mode = PPC_HTW_NONE;
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
if (!def->shift)
continue;
if (tlb1ps & (1U << (def->shift - 10))) {
def->flags |= MMU_PAGE_SIZE_DIRECT;
if (book3e_htw_mode && psize == MMU_PAGE_2M)
def->flags |= MMU_PAGE_SIZE_INDIRECT;
}
}
goto out;
}
#endif
out:
/* Cleanup array and print summary */
pr_info("MMU: Supported page sizes\n");
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
const char *__page_type_names[] = {
"unsupported",
"direct",
"indirect",
"direct & indirect"
};
if (def->flags == 0) {
def->shift = 0;
continue;
}
pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
__page_type_names[def->flags & 0x3]);
}
}
static void __init setup_mmu_htw(void)
{
/*
* If we want to use HW tablewalk, enable it by patching the TLB miss
* handlers to branch to the one dedicated to it.
*/
switch (book3e_htw_mode) {
#ifdef CONFIG_PPC_E500
case PPC_HTW_E6500:
extlb_level_exc = EX_TLB_SIZE;
patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
break;
#endif
}
pr_info("MMU: Book3E HW tablewalk %s\n",
book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}
/*
* Early initialization of the MMU TLB code
*/
static void early_init_this_mmu(void)
{
unsigned int mas4;
/* Set MAS4 based on page table setting */
mas4 = 0x4 << MAS4_WIMGED_SHIFT;
switch (book3e_htw_mode) {
case PPC_HTW_E6500:
mas4 |= MAS4_INDD;
mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
mas4 |= MAS4_TLBSELD(1);
mmu_pte_psize = MMU_PAGE_2M;
break;
case PPC_HTW_NONE:
mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
mmu_pte_psize = mmu_virtual_psize;
break;
}
mtspr(SPRN_MAS4, mas4);
#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned int num_cams;
bool map = true;
/* use a quarter of the TLBCAM for bolted linear map */
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
/*
* Only do the mapping once per core, or else the
* transient mapping would cause problems.
*/
#ifdef CONFIG_SMP
if (hweight32(get_tensr()) > 1)
map = false;
#endif
if (map)
linear_map_top = map_mem_in_cams(linear_map_top,
num_cams, false, true);
}
#endif
/* A sync won't hurt us after mucking around with
* the MMU configuration
*/
mb();
}
static void __init early_init_mmu_global(void)
{
/* XXX This should be decided at runtime based on supported
* page sizes in the TLB, but for now let's assume 16M is
* always there and a good fit (which it probably is)
*
* Freescale booke only supports 4K pages in TLB0, so use that.
*/
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
mmu_vmemmap_psize = MMU_PAGE_4K;
else
mmu_vmemmap_psize = MMU_PAGE_16M;
/* XXX This code only checks for TLB 0 capabilities and doesn't
* check what page size combos are supported by the HW. It
* also doesn't handle the case where a separate array holds
* the IND entries from the array loaded by the PT.
*/
/* Look for supported page sizes */
setup_page_sizes();
/* Look for HW tablewalk support */
setup_mmu_htw();
#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
if (book3e_htw_mode == PPC_HTW_NONE) {
extlb_level_exc = EX_TLB_SIZE;
patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
patch_exception(0x1e0,
exc_instruction_tlb_miss_bolted_book3e);
}
}
#endif
/* Set the global containing the top of the linear mapping
* for use by the TLB miss code
*/
linear_map_top = memblock_end_of_DRAM();
ioremap_bot = IOREMAP_BASE;
}
static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
/*
* Limit memory so we dont have linear faults.
* Unlike memblock_set_current_limit, which limits
* memory available during early boot, this permanently
* reduces the memory available to Linux. We need to
* do this because highmem is not supported on 64-bit.
*/
memblock_enforce_memory_limit(linear_map_top);
}
#endif
memblock_set_current_limit(linear_map_top);
}
/* boot cpu only */
void __init early_init_mmu(void)
{
early_init_mmu_global();
early_init_this_mmu();
early_mmu_set_memory_limit();
}
void early_init_mmu_secondary(void)
{
early_init_this_mmu();
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
* the bolted TLB entry. We know for now that only 1G
* entries are supported though that may eventually
* change.
*
* on FSL Embedded 64-bit, usually all RAM is bolted, but with
* unusual memory sizes it's possible for some RAM to not be mapped
* (such RAM is not used at all by Linux, since we don't support
* highmem on 64-bit). We limit ppc64_rma_size to what would be
* mappable if this memblock is the only one. Additional memblocks
* can only increase, not decrease, the amount that ends up getting
* mapped. We still limit max to 1G even if we'll eventually map
* more. This is due to what the early init code is set up to do.
*
* We crop it to the size of the first MEMBLOCK to
* avoid going over total available memory just in case...
*/
#ifdef CONFIG_PPC_E500
if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned long linear_sz;
unsigned int num_cams;
/* use a quarter of the TLBCAM for bolted linear map */
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
true, true);
ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
} else
#endif
ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
/* Finally limit subsequent allocations */
memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}

@@ -893,201 +893,6 @@ virt_page_table_tlb_miss_whacko_fault:
TLB_MISS_EPILOG_ERROR
b exc_data_storage_book3e
/**************************************************************
* *
* TLB miss handling for Book3E with hw page table support *
* *
**************************************************************/
/* Data TLB miss */
START_EXCEPTION(data_tlb_miss_htw)
TLB_MISS_PROLOG
/* Now we handle the fault proper. We only save DEAR in normal
* fault case since that's the only interesting values here.
* We could probably also optimize by not saving SRR0/1 in the
* linear mapping case but I'll leave that for later
*/
mfspr r14,SPRN_ESR
mfspr r16,SPRN_DEAR /* get faulting address */
srdi r11,r16,44 /* get region */
xoris r11,r11,0xc
cmpldi cr0,r11,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
cmpldi cr1,r11,1 /* vmalloc mapping ? */
/* We do the user/kernel test for the PID here along with the RW test
*/
srdi. r11,r16,60 /* Check for user region */
ld r15,PACAPGD(r13) /* Load user pgdir */
beq htw_tlb_miss
/* XXX replace the RMW cycles with immediate loads + writes */
1: mfspr r10,SPRN_MAS1
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
beq+ cr1,htw_tlb_miss
/* We got a crappy address, just fault with whatever DEAR and ESR
* are here
*/
TLB_MISS_EPILOG_ERROR
b exc_data_storage_book3e
/* Instruction TLB miss */
START_EXCEPTION(instruction_tlb_miss_htw)
TLB_MISS_PROLOG
/* If we take a recursive fault, the second level handler may need
* to know whether we are handling a data or instruction fault in
* order to get to the right store fault handler. We provide that
* info by keeping a crazy value for ESR in r14
*/
li r14,-1 /* store to exception frame is done later */
/* Now we handle the fault proper. We only save DEAR in the non
* linear mapping case since we know the linear mapping case will
* not re-enter. We could indeed optimize and also not save SRR0/1
* in the linear mapping case but I'll leave that for later
*
* Faulting address is SRR0 which is already in r16
*/
srdi r11,r16,44 /* get region */
xoris r11,r11,0xc
cmpldi cr0,r11,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
cmpldi cr1,r11,1 /* vmalloc mapping ? */
/* We do the user/kernel test for the PID here along with the RW test
*/
srdi. r11,r16,60 /* Check for user region */
ld r15,PACAPGD(r13) /* Load user pgdir */
beq htw_tlb_miss
/* XXX replace the RMW cycles with immediate loads + writes */
1: mfspr r10,SPRN_MAS1
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
beq+ htw_tlb_miss
/* We got a crappy address, just fault */
TLB_MISS_EPILOG_ERROR
b exc_instruction_storage_book3e
/*
* This is the guts of the second-level TLB miss handler for direct
* misses. We are entered with:
*
* r16 = virtual page table faulting address
* r15 = PGD pointer
* r14 = ESR
* r13 = PACA
* r12 = TLB exception frame in PACA
* r11 = crap (free to use)
* r10 = crap (free to use)
*
* It can be re-entered by the linear mapping miss handler. However, to
* avoid too much complication, it will save/restore things for us
*/
htw_tlb_miss:
#ifdef CONFIG_PPC_KUAP
mfspr r10,SPRN_MAS1
rlwinm. r10,r10,0,0x3fff0000
beq- htw_tlb_miss_fault /* KUAP fault */
#endif
/* Search if we already have a TLB entry for that virtual address, and
* if we do, bail out.
*
* MAS1:IND should be already set based on MAS4
*/
PPC_TLBSRX_DOT(0,R16)
beq htw_tlb_miss_done
/* Now, we need to walk the page tables. First check if we are in
* range.
*/
rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
bne- htw_tlb_miss_fault
/* Get the PGD pointer */
cmpldi cr0,r15,0
beq- htw_tlb_miss_fault
/* Get to PGD entry */
rldicl r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
clrrdi r10,r11,3
ldx r15,r10,r15
cmpdi cr0,r15,0
bge htw_tlb_miss_fault
/* Get to PUD entry */
rldicl r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
clrrdi r10,r11,3
ldx r15,r10,r15
cmpdi cr0,r15,0
bge htw_tlb_miss_fault
/* Get to PMD entry */
rldicl r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
clrrdi r10,r11,3
ldx r15,r10,r15
cmpdi cr0,r15,0
bge htw_tlb_miss_fault
/* Ok, we're all right, we can now create an indirect entry for
* a 1M or 256M page.
*
* The last trick is now that because we use "half" pages for
* the HTW (1M IND is 2K and 256M IND is 32K) we need to account
* for an added LSB bit to the RPN. For 64K pages, there is no
* problem as we already use 32K arrays (half PTE pages), but for
* 4K page we need to extract a bit from the virtual address and
* insert it into the "PA52" bit of the RPN.
*/
rlwimi r15,r16,32-9,20,20
/* Now we build the MAS:
*
* MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG
* MAS 1 : Almost fully setup
* - PID already updated by caller if necessary
* - TSIZE for now is base ind page size always
* MAS 2 : Use defaults
* MAS 3+7 : Needs to be done
*/
ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
srdi r16,r10,32
mtspr SPRN_MAS3,r10
mtspr SPRN_MAS7,r16
tlbwe
htw_tlb_miss_done:
/* We don't bother with restoring DEAR or ESR since we know we are
* level 0 and just going back to userland. They are only needed
* if you are going to take an access fault
*/
TLB_MISS_EPILOG_SUCCESS
rfi
htw_tlb_miss_fault:
/* We need to check if it was an instruction miss; we can tell
 * because r14 would then contain -1
 */
cmpdi cr0,r14,-1
beq 1f
mtspr SPRN_DEAR,r16
mtspr SPRN_ESR,r14
TLB_MISS_EPILOG_ERROR
b exc_data_storage_book3e
1: TLB_MISS_EPILOG_ERROR
b exc_instruction_storage_book3e
/*
* This is the guts of "any" level TLB miss handler for kernel linear
* mapping misses. We are entered with:


@ -27,6 +27,7 @@ config RISCV
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_MMIOWB
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PMEM_API
@ -489,8 +490,8 @@ config RISCV_ISA_SVPBMT
config TOOLCHAIN_HAS_V
bool
default y
depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64iv)
depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32iv)
depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64imv)
depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32imv)
depends on LLD_VERSION >= 140000 || LD_VERSION >= 23800
depends on AS_HAS_OPTION_ARCH


@ -204,6 +204,8 @@
&mmc0 {
max-frequency = <100000000>;
assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO0_SDCARD>;
assigned-clock-rates = <50000000>;
bus-width = <8>;
cap-mmc-highspeed;
mmc-ddr-1_8v;
@ -220,6 +222,8 @@
&mmc1 {
max-frequency = <100000000>;
assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO1_SDCARD>;
assigned-clock-rates = <50000000>;
bus-width = <4>;
no-sdio;
no-mmc;


@ -18,9 +18,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
pte_t *pte = virt_to_kpte(addr);
if (protect)
set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~_PAGE_PRESENT));
else
set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);


@ -10,6 +10,7 @@
#define __KVM_VCPU_RISCV_PMU_H
#include <linux/perf/riscv_pmu.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/sbi.h>
#ifdef CONFIG_RISCV_PMU_SBI
@ -57,11 +58,11 @@ struct kvm_pmu {
#if defined(CONFIG_32BIT)
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
#else
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
#endif
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
@ -92,8 +93,20 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
struct kvm_pmu {
};
static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask)
{
if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
*val = 0;
return KVM_INSN_CONTINUE_NEXT_SEPC;
} else {
return KVM_INSN_ILLEGAL_TRAP;
}
}
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = 0, .count = 0, .func = NULL },
{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },
static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)


@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_RISCV_MEMBARRIER_H
#define _ASM_RISCV_MEMBARRIER_H
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
{
/*
* Only need the full barrier when switching between processes.
* Barrier when switching from kernel to userspace is not
* required here, given that it is implied by mmdrop(). Barrier
* when switching from userspace to kernel is not needed after
* store to rq->curr.
*/
if (IS_ENABLED(CONFIG_SMP) &&
likely(!(atomic_read(&next->membarrier_state) &
(MEMBARRIER_STATE_PRIVATE_EXPEDITED |
MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
return;
/*
* The membarrier system call requires a full memory barrier
* after storing to rq->curr, before going back to user-space.
* Matches a full barrier in the proximity of the membarrier
* system call entry.
*/
smp_mb();
}
#endif /* _ASM_RISCV_MEMBARRIER_H */
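The smp_mb() above is the kernel half of a pairing: the matching barrier is produced on behalf of userspace by the membarrier(2) system call. As a rough sketch only (this program is not part of the patch; the command names come from the uapi header), a process using the private expedited command looks like this:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical userspace sketch: register once, then each later call
 * forces a full memory barrier on every CPU currently running one of
 * our threads, pairing with kernel-side barriers such as the smp_mb()
 * in membarrier_arch_switch_mm() above. */
static int membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0))
		return 1;
	/* ... later, make prior stores visible to all our threads ... */
	membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
	return 0;
}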


@ -198,7 +198,7 @@ static inline int pud_user(pud_t pud)
static inline void set_pud(pud_t *pudp, pud_t pud)
{
*pudp = pud;
WRITE_ONCE(*pudp, pud);
}
static inline void pud_clear(pud_t *pudp)
@ -274,7 +274,7 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
if (pgtable_l4_enabled)
*p4dp = p4d;
WRITE_ONCE(*p4dp, p4d);
else
set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
}
@ -336,18 +336,12 @@ static inline struct page *p4d_page(p4d_t p4d)
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
if (pgtable_l4_enabled)
return p4d_pgtable(*p4d) + pud_index(address);
return (pud_t *)p4d;
}
pud_t *pud_offset(p4d_t *p4d, unsigned long address);
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
if (pgtable_l5_enabled)
*pgdp = pgd;
WRITE_ONCE(*pgdp, pgd);
else
set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
}
@ -400,12 +394,6 @@ static inline struct page *pgd_page(pgd_t pgd)
#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
#define p4d_offset p4d_offset
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
if (pgtable_l5_enabled)
return pgd_pgtable(*pgd) + p4d_index(address);
return (p4d_t *)pgd;
}
p4d_t *p4d_offset(pgd_t *pgd, unsigned long address);
#endif /* _ASM_RISCV_PGTABLE_64_H */


@ -248,7 +248,7 @@ static inline int pmd_leaf(pmd_t pmd)
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
WRITE_ONCE(*pmdp, pmd);
}
static inline void pmd_clear(pmd_t *pmdp)
@ -515,7 +515,7 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
*/
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
*ptep = pteval;
WRITE_ONCE(*ptep, pteval);
}
void flush_icache_pte(pte_t pte);
@ -549,19 +549,12 @@ static inline void pte_clear(struct mm_struct *mm,
__set_pte_at(ptep, __pte(0));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
if (!pte_same(*ptep, entry))
__set_pte_at(ptep, entry);
/*
* update_mmu_cache will unconditionally execute, handling both
* the case that the PTE changed and the spurious fault case.
*/
return true;
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG /* defined in mm/pgtable.c */
extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
@ -574,16 +567,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return pte;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep)
{
if (!pte_young(*ptep))
return 0;
return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long address, pte_t *ptep)


@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
efi_memory_desc_t *md = data;
pte_t pte = READ_ONCE(*ptep);
pte_t pte = ptep_get(ptep);
unsigned long val;
if (md->attribute & EFI_MEMORY_RO) {


@ -305,6 +305,9 @@ clear_bss_done:
#else
mv a0, a1
#endif /* CONFIG_BUILTIN_DTB */
/* Set trap vector to spin forever to help debug */
la a3, .Lsecondary_park
csrw CSR_TVEC, a3
call setup_vm
#ifdef CONFIG_MMU
la a0, early_pg_dir


@ -62,7 +62,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
perf_callchain_store(entry, regs->epc);
fp = user_backtrace(entry, fp, regs->ra);
while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
while (fp && !(fp & 0x7) && entry->nr < entry->max_stack)
fp = user_backtrace(entry, fp, 0);
}
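The widened mask encodes a stack-layout assumption (stated here for illustration; the wording is not from the patch): on 64-bit RISC-V, saved frame pointers are at least 8-byte aligned, so an fp with any of the low three bits set cannot be a valid frame and the walk should stop. As a tiny model:

/* Hypothetical helper, not from the patch: reject frame pointers that
 * cannot be valid where stack slots are 8-byte aligned. */
static inline int fp_plausible(unsigned long fp)
{
	return fp && !(fp & 0x7);
}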


@ -28,9 +28,8 @@ static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
p->ainsn.api.restore = (unsigned long)p->addr + offset;
patch_text(p->ainsn.api.insn, &p->opcode, 1);
patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
&insn, 1);
patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1);
patch_text_nosync(p->ainsn.api.insn + offset, &insn, 1);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)


@ -103,7 +103,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
*ptep_level = current_level;
ptep = (pte_t *)kvm->arch.pgd;
ptep = &ptep[gstage_pte_index(addr, current_level)];
while (ptep && pte_val(*ptep)) {
while (ptep && pte_val(ptep_get(ptep))) {
if (gstage_pte_leaf(ptep)) {
*ptep_level = current_level;
*ptepp = ptep;
@ -113,7 +113,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
if (current_level) {
current_level--;
*ptep_level = current_level;
ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
ptep = &ptep[gstage_pte_index(addr, current_level)];
} else {
ptep = NULL;
@ -149,25 +149,25 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
if (gstage_pte_leaf(ptep))
return -EEXIST;
if (!pte_val(*ptep)) {
if (!pte_val(ptep_get(ptep))) {
if (!pcache)
return -ENOMEM;
next_ptep = kvm_mmu_memory_cache_alloc(pcache);
if (!next_ptep)
return -ENOMEM;
*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
__pgprot(_PAGE_TABLE));
set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
__pgprot(_PAGE_TABLE)));
} else {
if (gstage_pte_leaf(ptep))
return -EEXIST;
next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
}
current_level--;
ptep = &next_ptep[gstage_pte_index(addr, current_level)];
}
*ptep = *new_pte;
set_pte(ptep, *new_pte);
if (gstage_pte_leaf(ptep))
gstage_remote_tlb_flush(kvm, current_level, addr);
@ -239,11 +239,11 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
BUG_ON(addr & (page_size - 1));
if (!pte_val(*ptep))
if (!pte_val(ptep_get(ptep)))
return;
if (ptep_level && !gstage_pte_leaf(ptep)) {
next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
next_ptep_level = ptep_level - 1;
ret = gstage_level_to_page_size(next_ptep_level,
&next_page_size);
@ -261,7 +261,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
if (op == GSTAGE_OP_CLEAR)
set_pte(ptep, __pte(0));
else if (op == GSTAGE_OP_WP)
set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
gstage_remote_tlb_flush(kvm, ptep_level, addr);
}
}
@ -603,7 +603,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
&ptep, &ptep_level))
return false;
return pte_young(*ptep);
return pte_young(ptep_get(ptep));
}
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,


@ -91,8 +91,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
run->riscv_sbi.args[3] = cp->a3;
run->riscv_sbi.args[4] = cp->a4;
run->riscv_sbi.args[5] = cp->a5;
run->riscv_sbi.ret[0] = cp->a0;
run->riscv_sbi.ret[1] = cp->a1;
run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
run->riscv_sbi.ret[1] = 0;
}
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,


@ -13,10 +13,9 @@ endif
KCOV_INSTRUMENT_init.o := n
obj-y += init.o
obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o
obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
obj-y += cacheflush.o
obj-y += context.o
obj-y += pgtable.o
obj-y += pmem.o
ifeq ($(CONFIG_MMU),y)


@ -323,6 +323,8 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (unlikely(prev == next))
return;
membarrier_arch_switch_mm(prev, next, task);
/*
* Mark the current MM context as inactive, and the next as
* active. This is at least used by the icache flushing


@ -137,24 +137,24 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
pgd = (pgd_t *)pfn_to_virt(pfn) + index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k)) {
if (!pgd_present(pgdp_get(pgd_k))) {
no_context(regs, addr);
return;
}
set_pgd(pgd, *pgd_k);
set_pgd(pgd, pgdp_get(pgd_k));
p4d_k = p4d_offset(pgd_k, addr);
if (!p4d_present(*p4d_k)) {
if (!p4d_present(p4dp_get(p4d_k))) {
no_context(regs, addr);
return;
}
pud_k = pud_offset(p4d_k, addr);
if (!pud_present(*pud_k)) {
if (!pud_present(pudp_get(pud_k))) {
no_context(regs, addr);
return;
}
if (pud_leaf(*pud_k))
if (pud_leaf(pudp_get(pud_k)))
goto flush_tlb;
/*
@ -162,11 +162,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* to copy individual PTEs
*/
pmd_k = pmd_offset(pud_k, addr);
if (!pmd_present(*pmd_k)) {
if (!pmd_present(pmdp_get(pmd_k))) {
no_context(regs, addr);
return;
}
if (pmd_leaf(*pmd_k))
if (pmd_leaf(pmdp_get(pmd_k)))
goto flush_tlb;
/*
@ -176,7 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* silently loop forever.
*/
pte_k = pte_offset_kernel(pmd_k, addr);
if (!pte_present(*pte_k)) {
if (!pte_present(ptep_get(pte_k))) {
no_context(regs, addr);
return;
}


@ -54,7 +54,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
}
if (sz == PMD_SIZE) {
if (want_pmd_share(vma, addr) && pud_none(*pud))
if (want_pmd_share(vma, addr) && pud_none(pudp_get(pud)))
pte = huge_pmd_share(mm, vma, addr, pud);
else
pte = (pte_t *)pmd_alloc(mm, pud, addr);
@ -93,11 +93,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
if (!pgd_present(pgdp_get(pgd)))
return NULL;
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d))
if (!p4d_present(p4dp_get(p4d)))
return NULL;
pud = pud_offset(p4d, addr);
@ -105,7 +105,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
/* must be pud huge, non-present or none */
return (pte_t *)pud;
if (!pud_present(*pud))
if (!pud_present(pudp_get(pud)))
return NULL;
pmd = pmd_offset(pud, addr);
@ -113,7 +113,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
/* must be pmd huge, non-present or none */
return (pte_t *)pmd;
if (!pmd_present(*pmd))
if (!pmd_present(pmdp_get(pmd)))
return NULL;
for_each_napot_order(order) {
@ -351,7 +351,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_t *ptep,
unsigned long sz)
{
pte_t pte = READ_ONCE(*ptep);
pte_t pte = ptep_get(ptep);
int i, pte_num;
if (!pte_napot(pte)) {


@ -235,7 +235,7 @@ static void __init setup_bootmem(void)
* The size of the linear page mapping may restrict the amount of
* usable RAM.
*/
if (IS_ENABLED(CONFIG_64BIT)) {
if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
memblock_cap_memory_range(phys_ram_base,
max_mapped_addr - phys_ram_base);


@ -31,7 +31,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
phys_addr_t phys_addr;
pte_t *ptep, *p;
if (pmd_none(*pmd)) {
if (pmd_none(pmdp_get(pmd))) {
p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@ -39,7 +39,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
ptep = pte_offset_kernel(pmd, vaddr);
do {
if (pte_none(*ptep)) {
if (pte_none(ptep_get(ptep))) {
phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
pmd_t *pmdp, *p;
unsigned long next;
if (pud_none(*pud)) {
if (pud_none(pudp_get(pud))) {
p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@ -63,7 +63,8 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
do {
next = pmd_addr_end(vaddr, end);
if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
(next - vaddr) >= PMD_SIZE) {
phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
if (phys_addr) {
set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
@ -83,7 +84,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
pud_t *pudp, *p;
unsigned long next;
if (p4d_none(*p4d)) {
if (p4d_none(p4dp_get(p4d))) {
p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@ -93,7 +94,8 @@ static void __init kasan_populate_pud(p4d_t *p4d,
do {
next = pud_addr_end(vaddr, end);
if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
(next - vaddr) >= PUD_SIZE) {
phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
if (phys_addr) {
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
@ -113,7 +115,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
p4d_t *p4dp, *p;
unsigned long next;
if (pgd_none(*pgd)) {
if (pgd_none(pgdp_get(pgd))) {
p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@ -123,7 +125,8 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
do {
next = p4d_addr_end(vaddr, end);
if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
(next - vaddr) >= P4D_SIZE) {
phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
if (phys_addr) {
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
@ -145,7 +148,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
do {
next = pgd_addr_end(vaddr, end);
if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
(next - vaddr) >= PGDIR_SIZE) {
phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
if (phys_addr) {
@ -168,7 +171,7 @@ static void __init kasan_early_clear_pud(p4d_t *p4dp,
if (!pgtable_l4_enabled) {
pudp = (pud_t *)p4dp;
} else {
base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
pudp = base_pud + pud_index(vaddr);
}
@ -193,7 +196,7 @@ static void __init kasan_early_clear_p4d(pgd_t *pgdp,
if (!pgtable_l5_enabled) {
p4dp = (p4d_t *)pgdp;
} else {
base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
p4dp = base_p4d + p4d_index(vaddr);
}
@ -239,14 +242,14 @@ static void __init kasan_early_populate_pud(p4d_t *p4dp,
if (!pgtable_l4_enabled) {
pudp = (pud_t *)p4dp;
} else {
base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
pudp = base_pud + pud_index(vaddr);
}
do {
next = pud_addr_end(vaddr, end);
if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
(next - vaddr) >= PUD_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
@ -277,14 +280,14 @@ static void __init kasan_early_populate_p4d(pgd_t *pgdp,
if (!pgtable_l5_enabled) {
p4dp = (p4d_t *)pgdp;
} else {
base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
p4dp = base_p4d + p4d_index(vaddr);
}
do {
next = p4d_addr_end(vaddr, end);
if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
(next - vaddr) >= P4D_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
@ -305,7 +308,7 @@ static void __init kasan_early_populate_pgd(pgd_t *pgdp,
do {
next = pgd_addr_end(vaddr, end);
if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
(next - vaddr) >= PGDIR_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
@ -381,7 +384,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
do {
next = pud_addr_end(vaddr, end);
if (pud_none(*pud_k)) {
if (pud_none(pudp_get(pud_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@ -401,7 +404,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
do {
next = p4d_addr_end(vaddr, end);
if (p4d_none(*p4d_k)) {
if (p4d_none(p4dp_get(p4d_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@ -420,7 +423,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
do {
next = pgd_addr_end(vaddr, end);
if (pgd_none(*pgd_k)) {
if (pgd_none(pgdp_get(pgd_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@ -451,7 +454,7 @@ static void __init create_tmp_mapping(void)
/* Copy the last p4d since it is shared with the kernel mapping. */
if (pgtable_l5_enabled) {
ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
@ -462,7 +465,7 @@ static void __init create_tmp_mapping(void)
/* Copy the last pud since it is shared with the kernel mapping. */
if (pgtable_l4_enabled) {
ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));


@ -29,7 +29,7 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
p4d_t val = READ_ONCE(*p4d);
p4d_t val = p4dp_get(p4d);
if (p4d_leaf(val)) {
val = __p4d(set_pageattr_masks(p4d_val(val), walk));
@ -42,7 +42,7 @@ static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pud_t val = READ_ONCE(*pud);
pud_t val = pudp_get(pud);
if (pud_leaf(val)) {
val = __pud(set_pageattr_masks(pud_val(val), walk));
@ -55,7 +55,7 @@ static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pmd_t val = READ_ONCE(*pmd);
pmd_t val = pmdp_get(pmd);
if (pmd_leaf(val)) {
val = __pmd(set_pageattr_masks(pmd_val(val), walk));
@ -68,7 +68,7 @@ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pte_t val = READ_ONCE(*pte);
pte_t val = ptep_get(pte);
val = __pte(set_pageattr_masks(pte_val(val), walk));
set_pte(pte, val);
@ -108,10 +108,10 @@ static int __split_linear_mapping_pmd(pud_t *pudp,
vaddr <= (vaddr & PMD_MASK) && end >= next)
continue;
if (pmd_leaf(*pmdp)) {
if (pmd_leaf(pmdp_get(pmdp))) {
struct page *pte_page;
unsigned long pfn = _pmd_pfn(*pmdp);
pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
pte_t *ptep_new;
int i;
@ -148,10 +148,10 @@ static int __split_linear_mapping_pud(p4d_t *p4dp,
vaddr <= (vaddr & PUD_MASK) && end >= next)
continue;
if (pud_leaf(*pudp)) {
if (pud_leaf(pudp_get(pudp))) {
struct page *pmd_page;
unsigned long pfn = _pud_pfn(*pudp);
pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
unsigned long pfn = _pud_pfn(pudp_get(pudp));
pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
pmd_t *pmdp_new;
int i;
@ -197,10 +197,10 @@ static int __split_linear_mapping_p4d(pgd_t *pgdp,
vaddr <= (vaddr & P4D_MASK) && end >= next)
continue;
if (p4d_leaf(*p4dp)) {
if (p4d_leaf(p4dp_get(p4dp))) {
struct page *pud_page;
unsigned long pfn = _p4d_pfn(*p4dp);
pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
pud_t *pudp_new;
int i;
@ -427,29 +427,29 @@ bool kernel_page_present(struct page *page)
pte_t *pte;
pgd = pgd_offset_k(addr);
if (!pgd_present(*pgd))
if (!pgd_present(pgdp_get(pgd)))
return false;
if (pgd_leaf(*pgd))
if (pgd_leaf(pgdp_get(pgd)))
return true;
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d))
if (!p4d_present(p4dp_get(p4d)))
return false;
if (p4d_leaf(*p4d))
if (p4d_leaf(p4dp_get(p4d)))
return true;
pud = pud_offset(p4d, addr);
if (!pud_present(*pud))
if (!pud_present(pudp_get(pud)))
return false;
if (pud_leaf(*pud))
if (pud_leaf(pudp_get(pud)))
return true;
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd))
if (!pmd_present(pmdp_get(pmd)))
return false;
if (pmd_leaf(*pmd))
if (pmd_leaf(pmdp_get(pmd)))
return true;
pte = pte_offset_kernel(pmd, addr);
return pte_present(*pte);
return pte_present(ptep_get(pte));
}


@ -5,6 +5,47 @@
#include <linux/kernel.h>
#include <linux/pgtable.h>
int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
if (!pte_same(ptep_get(ptep), entry))
__set_pte_at(ptep, entry);
/*
* update_mmu_cache will unconditionally execute, handling both
* the case that the PTE changed and the spurious fault case.
*/
return true;
}
int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep)
{
if (!pte_young(ptep_get(ptep)))
return 0;
return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
#ifdef CONFIG_64BIT
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
if (pgtable_l4_enabled)
return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);
return (pud_t *)p4d;
}
p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
if (pgtable_l5_enabled)
return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);
return (p4d_t *)pgd;
}
#endif
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
@ -25,7 +66,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
int pud_clear_huge(pud_t *pud)
{
if (!pud_leaf(READ_ONCE(*pud)))
if (!pud_leaf(pudp_get(pud)))
return 0;
pud_clear(pud);
return 1;
@ -33,7 +74,7 @@ int pud_clear_huge(pud_t *pud)
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
pmd_t *pmd = pud_pgtable(*pud);
pmd_t *pmd = pud_pgtable(pudp_get(pud));
int i;
pud_clear(pud);
@ -63,7 +104,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
int pmd_clear_huge(pmd_t *pmd)
{
if (!pmd_leaf(READ_ONCE(*pmd)))
if (!pmd_leaf(pmdp_get(pmd)))
return 0;
pmd_clear(pmd);
return 1;
@ -71,7 +112,7 @@ int pmd_clear_huge(pmd_t *pmd)
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));
pmd_clear(pmd);
@ -88,7 +129,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
/*
* When leaf PTE entries (regular pages) are collapsed into a leaf
* PMD entry (huge page), a valid non-leaf PTE is converted into a


@ -71,6 +71,15 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
.data.rel.ro : {
*(.data.rel.ro .data.rel.ro.*)
}
.got : {
__got_start = .;
*(.got)
__got_end = .;
}
RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
BOOT_DATA_PRESERVED


@ -383,6 +383,7 @@ int setup_one_line(struct line *lines, int n, char *init,
parse_chan_pair(NULL, line, n, opts, error_out);
err = 0;
}
*error_out = "configured as 'none'";
} else {
char *new = kstrdup(init, GFP_KERNEL);
if (!new) {
@ -406,6 +407,7 @@ int setup_one_line(struct line *lines, int n, char *init,
}
}
if (err) {
*error_out = "failed to parse channel pair";
line->init_str = NULL;
line->valid = 0;
kfree(new);


@ -14,6 +14,7 @@
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
/* MMIO direction */
#define EPT_READ 0
@ -362,7 +363,6 @@ static bool mmio_read(int size, unsigned long addr, unsigned long *val)
.r12 = size,
.r13 = EPT_READ,
.r14 = addr,
.r15 = *val,
};
if (__tdx_hypercall_ret(&args))
@ -406,6 +406,11 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
return -EINVAL;
}
if (!fault_in_kernel_space(ve->gla)) {
WARN_ONCE(1, "Access to userspace address is not supported");
return -EINVAL;
}
/*
* Reject EPT violation #VEs that split pages.
*


@ -4465,6 +4465,25 @@ static u8 adl_get_hybrid_cpu_type(void)
return hybrid_big;
}
static inline bool erratum_hsw11(struct perf_event *event)
{
return (event->hw.config & INTEL_ARCH_EVENT_MASK) ==
X86_CONFIG(.event=0xc0, .umask=0x01);
}
/*
* The HSW11 erratum requires a period larger than 100, the same as BDM11.
* A minimum period of 128 is enforced as well for INST_RETIRED.ALL.
*
* The message 'interrupt took too long' can be observed on any counter which
* was armed with a period < 32 and two events expired in the same NMI.
* A minimum period of 32 is enforced for the rest of the events.
*/
static void hsw_limit_period(struct perf_event *event, s64 *left)
{
*left = max(*left, erratum_hsw11(event) ? 128 : 32);
}
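As a worked example of the clamp (a standalone model, not kernel code): an INST_RETIRED.ALL event armed with a period of 100 is raised to 128, while any other event is raised to at least 32.

#include <stdbool.h>
#include <stdint.h>

/* Standalone model of hsw_limit_period(): returns the effective period. */
static int64_t hsw_effective_period(int64_t left, bool inst_retired_all)
{
	int64_t min = inst_retired_all ? 128 : 32;

	return left > min ? left : min;
}

/* hsw_effective_period(100, true) == 128; hsw_effective_period(7, false) == 32 */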
/*
* Broadwell:
*
@ -4482,8 +4501,7 @@ static u8 adl_get_hybrid_cpu_type(void)
*/
static void bdw_limit_period(struct perf_event *event, s64 *left)
{
if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
X86_CONFIG(.event=0xc0, .umask=0x01)) {
if (erratum_hsw11(event)) {
if (*left < 128)
*left = 128;
*left &= ~0x3fULL;
@ -6392,6 +6410,7 @@ __init int intel_pmu_init(void)
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
x86_pmu.limit_period = hsw_limit_period;
x86_pmu.lbr_double_abort = true;
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;


@ -1602,6 +1602,7 @@ static void pt_event_stop(struct perf_event *event, int mode)
* see comment in intel_pt_interrupt().
*/
WRITE_ONCE(pt->handle_nmi, 0);
barrier();
pt_config_stop(event);
@ -1653,11 +1654,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
return 0;
/*
* Here, handle_nmi tells us if the tracing is on
* There is no PT interrupt in this mode, so stop the trace and it will
* remain stopped while the buffer is copied.
*/
if (READ_ONCE(pt->handle_nmi))
pt_config_stop(event);
pt_config_stop(event);
pt_read_offset(buf);
pt_update_head(pt);
@ -1669,11 +1669,10 @@ static long pt_event_snapshot_aux(struct perf_event *event,
ret = perf_output_copy_aux(&pt->handle, handle, from, to);
/*
* If the tracing was on when we turned up, restart it.
* Compiler barrier not needed as we couldn't have been
* preempted by anything that touches pt->handle_nmi.
* Here, handle_nmi tells us if the tracing was on.
* If the tracing was on, restart it.
*/
if (pt->handle_nmi)
if (READ_ONCE(pt->handle_nmi))
pt_config_start(event);
return ret;


@ -35,7 +35,6 @@
#include <clocksource/hyperv_timer.h>
#include <linux/highmem.h>
int hyperv_init_cpuhp;
u64 hv_current_partition_id = ~0ull;
EXPORT_SYMBOL_GPL(hv_current_partition_id);
@ -607,8 +606,6 @@ skip_hypercall_pg_init:
register_syscore_ops(&hv_syscore_ops);
hyperv_init_cpuhp = cpuhp;
if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID)
hv_get_partition_id();
@ -637,7 +634,7 @@ skip_hypercall_pg_init:
clean_guest_os_id:
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
cpuhp_remove_state(cpuhp);
cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
free_ghcb_page:
free_percpu(hv_ghcb_pg);
free_vp_assist_page:


@ -165,6 +165,14 @@ void acpi_generic_reduced_hw_init(void);
void x86_default_set_root_pointer(u64 addr);
u64 x86_default_get_root_pointer(void);
#ifdef CONFIG_XEN_PV
/* A Xen PV domain needs a special acpi_os_ioremap() handling. */
extern void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys,
acpi_size size);
void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
#define acpi_os_ioremap acpi_os_ioremap
#endif
#else /* !CONFIG_ACPI */
#define acpi_lapic 0


@ -589,6 +589,13 @@ struct fpu_state_config {
* even without XSAVE support, i.e. legacy features FP + SSE
*/
u64 legacy_features;
/*
* @independent_features:
*
* Features that are supported by XSAVES, but not managed as part of
* the FPU core, such as LBR
*/
u64 independent_features;
};
/* FPU state configuration information */


@ -63,7 +63,11 @@ extern u64 arch_irq_stat(void);
#define local_softirq_pending_ref pcpu_hot.softirq_pending
#if IS_ENABLED(CONFIG_KVM_INTEL)
static inline void kvm_set_cpu_l1tf_flush_l1d(void)
/*
* This function is called from noinstr interrupt contexts
* and must be inlined to not get instrumentation.
*/
static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void)
{
__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
}
@ -78,7 +82,7 @@ static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
}
#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
#endif /* _ASM_X86_HARDIRQ_H */


@ -13,15 +13,18 @@
#include <asm/irq_stack.h>
typedef void (*idtentry_t)(struct pt_regs *regs);
/**
* DECLARE_IDTENTRY - Declare functions for simple IDT entry points
* No error code pushed by hardware
* @vector: Vector number (ignored for C)
* @func: Function name of the entry point
*
* Declares three functions:
* Declares four functions:
* - The ASM entry point: asm_##func
* - The XEN PV trap entry point: xen_##func (maybe unused)
* - The C handler called from the FRED event dispatcher (maybe unused)
* - The C handler called from the ASM entry point
*
* Note: This is the C variant of DECLARE_IDTENTRY(). As the name says it
@ -31,6 +34,7 @@
#define DECLARE_IDTENTRY(vector, func) \
asmlinkage void asm_##func(void); \
asmlinkage void xen_asm_##func(void); \
void fred_##func(struct pt_regs *regs); \
__visible void func(struct pt_regs *regs)
/**
@ -137,6 +141,17 @@ static __always_inline void __##func(struct pt_regs *regs, \
#define DEFINE_IDTENTRY_RAW(func) \
__visible noinstr void func(struct pt_regs *regs)
/**
* DEFINE_FREDENTRY_RAW - Emit code for raw FRED entry points
* @func: Function name of the entry point
*
* @func is called from the FRED event dispatcher with interrupts disabled.
*
* See @DEFINE_IDTENTRY_RAW for further details.
*/
#define DEFINE_FREDENTRY_RAW(func) \
noinstr void fred_##func(struct pt_regs *regs)
/**
* DECLARE_IDTENTRY_RAW_ERRORCODE - Declare functions for raw IDT entry points
* Error code pushed by hardware
@ -197,8 +212,8 @@ __visible noinstr void func(struct pt_regs *regs, \
irqentry_state_t state = irqentry_enter(regs); \
u32 vector = (u32)(u8)error_code; \
\
kvm_set_cpu_l1tf_flush_l1d(); \
instrumentation_begin(); \
kvm_set_cpu_l1tf_flush_l1d(); \
run_irq_on_irqstack_cond(__##func, regs, vector); \
instrumentation_end(); \
irqentry_exit(regs, state); \
@ -233,17 +248,27 @@ static noinline void __##func(struct pt_regs *regs, u32 vector)
#define DEFINE_IDTENTRY_SYSVEC(func) \
static void __##func(struct pt_regs *regs); \
\
static __always_inline void instr_##func(struct pt_regs *regs) \
{ \
run_sysvec_on_irqstack_cond(__##func, regs); \
} \
\
__visible noinstr void func(struct pt_regs *regs) \
{ \
irqentry_state_t state = irqentry_enter(regs); \
\
kvm_set_cpu_l1tf_flush_l1d(); \
instrumentation_begin(); \
kvm_set_cpu_l1tf_flush_l1d(); \
run_sysvec_on_irqstack_cond(__##func, regs); \
instr_##func (regs); \
instrumentation_end(); \
irqentry_exit(regs, state); \
} \
\
void fred_##func(struct pt_regs *regs) \
{ \
instr_##func (regs); \
} \
\
static noinline void __##func(struct pt_regs *regs)
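The reshuffle above follows one pattern throughout the file: the body that used to sit inline in the noinstr IDT entry point is factored into instr_##func(), which both the IDT entry and the new FRED C handler call. A stripped-down model of that shape (illustrative only, with invented names; it omits the irqentry/instrumentation bookkeeping):

/* One shared body, two entry points. */
#define DEFINE_SHARED_HANDLER(name)					\
static void __##name(void);						\
static inline void instr_##name(void) { __##name(); }			\
void idt_##name(void)  { /* enter */ instr_##name(); /* exit */ }	\
void fred_##name(void) { instr_##name(); }				\
static void __##name(void)

DEFINE_SHARED_HANDLER(demo)
{
	/* this body now runs for both idt_demo() and fred_demo() */
}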
/**
@ -260,19 +285,29 @@ static noinline void __##func(struct pt_regs *regs)
#define DEFINE_IDTENTRY_SYSVEC_SIMPLE(func) \
static __always_inline void __##func(struct pt_regs *regs); \
\
static __always_inline void instr_##func(struct pt_regs *regs) \
{ \
__irq_enter_raw(); \
__##func (regs); \
__irq_exit_raw(); \
} \
\
__visible noinstr void func(struct pt_regs *regs) \
{ \
irqentry_state_t state = irqentry_enter(regs); \
\
kvm_set_cpu_l1tf_flush_l1d(); \
instrumentation_begin(); \
__irq_enter_raw(); \
kvm_set_cpu_l1tf_flush_l1d(); \
__##func (regs); \
__irq_exit_raw(); \
instr_##func (regs); \
instrumentation_end(); \
irqentry_exit(regs, state); \
} \
\
void fred_##func(struct pt_regs *regs) \
{ \
instr_##func (regs); \
} \
\
static __always_inline void __##func(struct pt_regs *regs)
/**
@ -410,15 +445,18 @@ __visible noinstr void func(struct pt_regs *regs, \
/* C-Code mapping */
#define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW
#define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW
#define DEFINE_FREDENTRY_NMI DEFINE_FREDENTRY_RAW
#ifdef CONFIG_X86_64
#define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST
#define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST
#define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST
#define DEFINE_FREDENTRY_MCE DEFINE_FREDENTRY_RAW
#define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST
#define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST
#define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST
#define DEFINE_FREDENTRY_DEBUG DEFINE_FREDENTRY_RAW
#endif
#else /* !__ASSEMBLY__ */
@ -655,23 +693,36 @@ DECLARE_IDTENTRY(RESCHEDULE_VECTOR, sysvec_reschedule_ipi);
DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR, sysvec_reboot);
DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR, sysvec_call_function_single);
DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function);
#else
# define fred_sysvec_reschedule_ipi NULL
# define fred_sysvec_reboot NULL
# define fred_sysvec_call_function_single NULL
# define fred_sysvec_call_function NULL
#endif
#ifdef CONFIG_X86_LOCAL_APIC
# ifdef CONFIG_X86_MCE_THRESHOLD
DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold);
# else
# define fred_sysvec_threshold NULL
# endif
# ifdef CONFIG_X86_MCE_AMD
DECLARE_IDTENTRY_SYSVEC(DEFERRED_ERROR_VECTOR, sysvec_deferred_error);
# else
# define fred_sysvec_deferred_error NULL
# endif
# ifdef CONFIG_X86_THERMAL_VECTOR
DECLARE_IDTENTRY_SYSVEC(THERMAL_APIC_VECTOR, sysvec_thermal);
# else
# define fred_sysvec_thermal NULL
# endif
# ifdef CONFIG_IRQ_WORK
DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work);
# else
# define fred_sysvec_irq_work NULL
# endif
#endif
@ -679,12 +730,16 @@ DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work);
DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi);
DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi);
DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi);
#else
# define fred_sysvec_kvm_posted_intr_ipi NULL
# define fred_sysvec_kvm_posted_intr_wakeup_ipi NULL
# define fred_sysvec_kvm_posted_intr_nested_ipi NULL
#endif
#if IS_ENABLED(CONFIG_HYPERV)
DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback);
DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0);
DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0);
#endif
#if IS_ENABLED(CONFIG_ACRN_GUEST)


@ -40,7 +40,6 @@ static inline unsigned char hv_get_nmi_reason(void)
}
#if IS_ENABLED(CONFIG_HYPERV)
extern int hyperv_init_cpuhp;
extern bool hyperv_paravisor_present;
extern void *hv_hypercall_pg;


@ -17,6 +17,7 @@ extern unsigned long phys_base;
extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;
extern unsigned long physmem_end;
static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{


@ -140,6 +140,10 @@ extern unsigned int ptrs_per_p4d;
# define VMEMMAP_START __VMEMMAP_BASE_L4
#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
#ifdef CONFIG_RANDOMIZE_MEMORY
# define PHYSMEM_END physmem_end
#endif
/*
* End of the region for which vmalloc page tables are pre-allocated.
* For non-KMSAN builds, this is the same as VMALLOC_END.


@ -1901,3 +1901,14 @@ u64 x86_default_get_root_pointer(void)
{
return boot_params.acpi_rsdp_addr;
}
#ifdef CONFIG_XEN_PV
void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
return ioremap_cache(phys, size);
}
void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, acpi_size size) =
x86_acpi_os_ioremap;
EXPORT_SYMBOL_GPL(acpi_os_ioremap);
#endif


@ -1812,12 +1812,9 @@ static __init void apic_set_fixmap(bool read_apic);
static __init void x2apic_disable(void)
{
u32 x2apic_id, state = x2apic_state;
u32 x2apic_id;
x2apic_mode = 0;
x2apic_state = X2APIC_DISABLED;
if (state != X2APIC_ON)
if (x2apic_state < X2APIC_ON)
return;
x2apic_id = read_apic_id();
@ -1830,6 +1827,10 @@ static __init void x2apic_disable(void)
}
__x2apic_disable();
x2apic_mode = 0;
x2apic_state = X2APIC_DISABLED;
/*
* Don't reread the APIC ID as it was already done from
* check_x2apic() and the APIC driver still is a x2APIC variant,


@ -199,8 +199,8 @@ static void hv_machine_shutdown(void)
* Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
* corrupts the old VP Assist Pages and can crash the kexec kernel.
*/
if (kexec_in_progress && hyperv_init_cpuhp > 0)
cpuhp_remove_state(hyperv_init_cpuhp);
if (kexec_in_progress)
cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
/* The function calls stop_other_cpus(). */
native_machine_shutdown();
@ -423,6 +423,7 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
x86_platform.calibrate_tsc = hv_get_tsc_khz;
x86_platform.calibrate_cpu = hv_get_tsc_khz;
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
}
if (ms_hyperv.priv_high & HV_ISOLATION) {


@ -474,24 +474,25 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
{
struct sgx_epc_page *page;
int nid_of_current = numa_node_id();
int nid = nid_of_current;
int nid_start, nid;
if (node_isset(nid_of_current, sgx_numa_mask)) {
page = __sgx_alloc_epc_page_from_node(nid_of_current);
if (page)
return page;
}
/* Fall back to the non-local NUMA nodes: */
while (true) {
nid = next_node_in(nid, sgx_numa_mask);
if (nid == nid_of_current)
break;
/*
* Try local node first. If it doesn't have an EPC section,
* fall back to the non-local NUMA nodes.
*/
if (node_isset(nid_of_current, sgx_numa_mask))
nid_start = nid_of_current;
else
nid_start = next_node_in(nid_of_current, sgx_numa_mask);
nid = nid_start;
do {
page = __sgx_alloc_epc_page_from_node(nid);
if (page)
return page;
}
nid = next_node_in(nid, sgx_numa_mask);
} while (nid != nid_start);
return ERR_PTR(-ENOMEM);
}
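The control flow is the point of this hunk: the rewritten do/while picks a start node that is guaranteed to be in sgx_numa_mask (falling back to the first mask node after the current one), tries it exactly once, and walks every remaining candidate before failing, so the search terminates even when the current node has no EPC section. A self-contained model of the iteration, with invented names:

#include <stddef.h>

/* Illustrative model: try each candidate once, starting at 'start',
 * wrapping around, and stop after one full pass. */
static void *alloc_round_robin(void *(*try_node)(int nid),
			       const int *nodes, size_t nr_nodes,
			       size_t start)
{
	size_t i = start;

	do {
		void *page = try_node(nodes[i]);
		if (page)
			return page;
		i = (i + 1) % nr_nodes;		/* wrap like next_node_in() */
	} while (i != start);

	return NULL;				/* one full pass, nothing free */
}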

Some files were not shown because too many files have changed in this diff.