soc: driver updates for 6.15, part 1

These are the updates for SoC specific drivers and related subsystems:
 
  - Firmware driver updates for SCMI, FF-A and SMCCC firmware interfaces,
    adding support for additional firmware features including SoC
    identification and FF-A SRI callbacks as well as various bugfixes
 
  - Memory controller updates for Nvidia and Mediatek
 
  - Reset controller support for microchip sam9x7 and imx8qxp/imx8qm
 
  - New hardware support for multiple Mediatek, Renesas and Samsung Exynos chips
 
  - Minor updates on Zynq, Qualcomm, Amlogic, TI, Samsung, Nvidia and Apple chips
 
 There will be a follow up with a few more driver updates that are still
 causing build regressions at the moment.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEiK/NIGsWEZVxh/FrYKtH/8kJUicFAmfkIIkACgkQYKtH/8kJ
 UifSJg//cnhEFErOIMxWeNcnNBKY2ly7VXRBwCU10jXXri16VMIz601vEb8du+qU
 Wyoi2LlTeXx6cKkfsPEYgbbpo9iOHli0y8RHpQe2LwfCfSNK4ToXtL+aYPgLJQD8
 4jdEv/QLnPnHsRp9+XW4HND9ZCV7N5CFaqozFe6BLWSczW4OYkUSXVfNN2VUDb8F
 cYy4+bRpZ1MQ6cmSCFXMJJ9BT/aBb4o0WGgr9jKe5p4YgDFjaw7a6sinVqBmoyoi
 bVNbEVSYrOFF03CoSpA+oRblE+lWQneWUHwUaXMPIcfScsbJ/5j2r1jjBhYauiTv
 memmeZJBg4w+gTlLVRlV66dhmRcwQhIOId2Or3yvnOoohnEZbV4KMR0P/cblfVvl
 0TV4uRJh3uKoHYXpDIw3URz68a6ceW86JUx7kfBU+gVhDKKve3YowozFs8DTsE5s
 xUlhXarEnimFlicslckl0vsFudwk8ovoFe0ahdBk1KZ0wBMtbBWdrQMECkWF0PlG
 8D4CilSq78jE9vTpSN7aEZB+xcLGUhYfusCJe2Wut6ZEATVUB9RSLHkgSPG44an2
 szJwm2oI7uVIfj7VhaSglK1JpCXQJPycGC4Y6D2DO1QDjb7UveCs3s2DbMx78G/r
 Rj5NpQh8vZjTozE3nQhvC0dPglrpo4OipsyOlVl2ZMm5u4C5e4s=
 =QcjY
 -----END PGP SIGNATURE-----

Merge tag 'soc-drivers-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc

Pull SoC driver updates from Arnd Bergmann:
 "These are the updates for SoC specific drivers and related subsystems:

   - Firmware driver updates for SCMI, FF-A and SMCCC firmware
     interfaces, adding support for additional firmware features
     including SoC identification and FF-A SRI callbacks as well as
     various bugfixes

   - Memory controller updates for Nvidia and Mediatek

   - Reset controller support for microchip sam9x7 and imx8qxp/imx8qm

   - New hardware support for multiple Mediatek, Renesas and Samsung
     Exynos chips

   - Minor updates on Zynq, Qualcomm, Amlogic, TI, Samsung, Nvidia and
     Apple chips

  There will be a follow up with a few more driver updates that are
  still causing build regressions at the moment"

* tag 'soc-drivers-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (97 commits)
  irqchip: Add support for Amlogic A4 and A5 SoCs
  dt-bindings: interrupt-controller: Add support for Amlogic A4 and A5 SoCs
  reset: imx: fix incorrect module device table
  dt-bindings: power: qcom,kpss-acc-v2: add qcom,msm8916-acc compatible
  bus: qcom-ssc-block-bus: Fix the error handling path of qcom_ssc_block_bus_probe()
  bus: qcom-ssc-block-bus: Remove some duplicated iounmap() calls
  soc: qcom: pd-mapper: Add support for SDM630/636
  reset: imx: Add SCU reset driver for i.MX8QXP and i.MX8QM
  dt-bindings: firmware: imx: add property reset-controller
  dt-bindings: reset: atmel,at91sam9260-reset: add sam9x7
  memory: mtk-smi: Add ostd setting for mt8192
  dt-bindings: soc: samsung: exynos-usi: Drop unnecessary status from example
  firmware: tegra: bpmp: Fix typo in bpmp-abi.h
  soc/tegra: pmc: Use str_enable_disable-like helpers
  soc: samsung: include linux/array_size.h where needed
  firmware: arm_scmi: use ioread64() instead of ioread64_hi_lo()
  soc: mediatek: mtk-socinfo: Add extra entry for MT8395AV/ZA Genio 1200
  soc: mediatek: mt8188-mmsys: Add support for DSC on VDO0
  soc: mediatek: mmsys: Migrate all tables to MMSYS_ROUTE() macro
  soc: mediatek: mt8365-mmsys: Fix routing table masks and values
  ...
commit a9fc230497
Linus Torvalds, 2025-03-27 09:05:55 -07:00
79 changed files with 3264 additions and 1094 deletions


@ -45,6 +45,18 @@ properties:
Keys provided by the SCU
$ref: /schemas/input/fsl,scu-key.yaml
reset-controller:
type: object
properties:
compatible:
const: fsl,imx-scu-reset
'#reset-cells':
const: 1
required:
- compatible
- '#reset-cells'
additionalProperties: false
mboxes:
description:
A list of phandles of TX MU channels followed by a list of phandles of


@ -0,0 +1,50 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
# Copyright 2024 Linaro Ltd.
%YAML 1.2
---
$id: http://devicetree.org/schemas/firmware/google,gs101-acpm-ipc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos ACPM mailbox protocol
maintainers:
- Tudor Ambarus <tudor.ambarus@linaro.org>
description: |
ACPM (Alive Clock and Power Manager) is a firmware that operates on the
APM (Active Power Management) module that handles overall power management
activities. ACPM and masters regard each other as independent hardware
component and communicate with each other using mailbox messages and
shared memory.
This binding is intended to define the interface the firmware implementing
ACPM provides for OSPM in the device tree.
properties:
compatible:
const: google,gs101-acpm-ipc
mboxes:
maxItems: 1
shmem:
description:
List of phandle pointing to the shared memory (SHM) area. The memory
contains channels configuration data and the TX/RX ring buffers that
are used for passing messages to/from the ACPM firmware.
maxItems: 1
required:
- compatible
- mboxes
- shmem
additionalProperties: false
examples:
- |
power-management {
compatible = "google,gs101-acpm-ipc";
mboxes = <&ap2apm_mailbox>;
shmem = <&apm_sram>;
};


@ -19,9 +19,11 @@ properties:
- enum:
- samsung,exynos5433-chipid
- samsung,exynos7-chipid
- samsung,exynos7870-chipid
- const: samsung,exynos4210-chipid
- items:
- enum:
- samsung,exynos2200-chipid
- samsung,exynos7885-chipid
- samsung,exynos8895-chipid
- samsung,exynos9810-chipid


@ -35,6 +35,9 @@ properties:
- amlogic,meson-sm1-gpio-intc
- amlogic,meson-a1-gpio-intc
- amlogic,meson-s4-gpio-intc
- amlogic,a4-gpio-intc
- amlogic,a4-gpio-ao-intc
- amlogic,a5-gpio-intc
- amlogic,c3-gpio-intc
- amlogic,t7-gpio-intc
- const: amlogic,meson-gpio-intc
@ -49,7 +52,7 @@ properties:
amlogic,channel-interrupts:
description: Array with the upstream hwirq numbers
minItems: 8
minItems: 2
maxItems: 12
$ref: /schemas/types.yaml#/definitions/uint32-array
@ -60,6 +63,20 @@ required:
- "#interrupt-cells"
- amlogic,channel-interrupts
if:
properties:
compatible:
contains:
const: amlogic,a4-gpio-ao-intc
then:
properties:
amlogic,channel-interrupts:
maxItems: 2
else:
properties:
amlogic,channel-interrupts:
minItems: 8
additionalProperties: false
examples:


@ -18,7 +18,9 @@ description:
properties:
compatible:
const: qcom,kpss-acc-v2
enum:
- qcom,kpss-acc-v2
- qcom,msm8916-acc
reg:
items:


@ -26,6 +26,10 @@ properties:
- items:
- const: atmel,sama5d3-rstc
- const: atmel,at91sam9g45-rstc
- items:
- enum:
- microchip,sam9x7-rstc
- const: microchip,sam9x60-rstc
reg:
minItems: 1


@ -54,6 +54,10 @@ properties:
dma-coherent: true
firmware-name:
maxItems: 1
description: Specify the name of the QUP firmware to load.
required:
- compatible
- reg
@ -135,6 +139,7 @@ examples:
#address-cells = <2>;
#size-cells = <2>;
ranges;
firmware-name = "qcom/sa8775p/qupv3fw.elf";
i2c0: i2c@a94000 {
compatible = "qcom,geni-i2c";


@ -38,6 +38,7 @@ properties:
- items:
- enum:
- qcom,sm8650-pmic-glink
- qcom,sm8750-pmic-glink
- qcom,x1e80100-pmic-glink
- const: qcom,sm8550-pmic-glink
- const: qcom,pmic-glink


@ -52,6 +52,8 @@ properties:
- const: syscon
- items:
- enum:
- samsung,exynos2200-pmu
- samsung,exynos7870-pmu
- samsung,exynos7885-pmu
- samsung,exynos8895-pmu
- samsung,exynos9810-pmu


@ -191,7 +191,6 @@ examples:
interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cmu_peri 32>, <&cmu_peri 31>;
clock-names = "uart", "clk_uart_baud0";
status = "disabled";
};
hsi2c_0: i2c@13820000 {


@ -18,6 +18,11 @@ properties:
- google,gs101-hsi2-sysreg
- google,gs101-peric0-sysreg
- google,gs101-peric1-sysreg
- samsung,exynos2200-cmgp-sysreg
- samsung,exynos2200-peric0-sysreg
- samsung,exynos2200-peric1-sysreg
- samsung,exynos2200-peric2-sysreg
- samsung,exynos2200-ufs-sysreg
- samsung,exynos3-sysreg
- samsung,exynos4-sysreg
- samsung,exynos5-sysreg


@ -2481,6 +2481,7 @@ X: arch/arm64/boot/dts/freescale/qoriq-*
X: drivers/media/i2c/
N: imx
N: mxs
N: \bmxc[^\d]
ARM/FREESCALE LAYERSCAPE ARM ARCHITECTURE
M: Shawn Guo <shawnguo@kernel.org>
@ -3083,6 +3084,7 @@ F: drivers/*/*s3c24*
F: drivers/*/*s3c64xx*
F: drivers/*/*s5pv210*
F: drivers/clocksource/samsung_pwm_timer.c
F: drivers/firmware/samsung/
F: drivers/mailbox/exynos-mailbox.c
F: drivers/memory/samsung/
F: drivers/pwm/pwm-samsung.c
@ -21088,6 +21090,15 @@ F: arch/arm64/boot/dts/exynos/exynos850*
F: drivers/clk/samsung/clk-exynos850.c
F: include/dt-bindings/clock/exynos850.h
SAMSUNG EXYNOS ACPM MAILBOX PROTOCOL
M: Tudor Ambarus <tudor.ambarus@linaro.org>
L: linux-kernel@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/firmware/google,gs101-acpm-ipc.yaml
F: drivers/firmware/samsung/exynos-acpm*
F: include/linux/firmware/samsung/exynos-acpm-protocol.h
SAMSUNG EXYNOS MAILBOX DRIVER
M: Tudor Ambarus <tudor.ambarus@linaro.org>
L: linux-kernel@vger.kernel.org


@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/fb.h>
#include <linux/gfp.h>
#include <linux/mmc/host.h>
#include <linux/ioport.h>


@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fb.h>
#include <linux/gpio.h>
#include "fb.h"


@ -264,18 +264,6 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
data->pd_names = qcom_ssc_block_pd_names;
data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
/* power domains */
ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
/* low level overrides for when the HW logic doesn't "just work" */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
@ -343,11 +331,30 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
data->ssc_axi_halt = halt_args.args[0];
/* power domains */
data->pd_names = qcom_ssc_block_pd_names;
data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
goto err_detach_pds_bus;
}
qcom_ssc_block_bus_init(&pdev->dev);
of_platform_populate(np, NULL, NULL, &pdev->dev);
return 0;
err_detach_pds_bus:
qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
return ret;
}
static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
@ -356,9 +363,6 @@ static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
qcom_ssc_block_bus_deinit(&pdev->dev);
iounmap(data->reg_mpm_sscaon_config0);
iounmap(data->reg_mpm_sscaon_config1);
qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
pm_runtime_disable(&pdev->dev);


@ -225,6 +225,7 @@ config TH1520_AON_PROTOCOL
config TI_SCI_PROTOCOL
tristate "TI System Control Interface (TISCI) Message Protocol"
depends on TI_MESSAGE_MANAGER
default ARCH_K3
help
TI System Control Interface (TISCI) Message Protocol is used to manage
compute systems such as ARM, DSP etc with the system controller in
@ -277,6 +278,7 @@ source "drivers/firmware/meson/Kconfig"
source "drivers/firmware/microchip/Kconfig"
source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/qcom/Kconfig"
source "drivers/firmware/samsung/Kconfig"
source "drivers/firmware/smccc/Kconfig"
source "drivers/firmware/tegra/Kconfig"
source "drivers/firmware/xilinx/Kconfig"


@ -34,6 +34,7 @@ obj-y += efi/
obj-y += imx/
obj-y += psci/
obj-y += qcom/
obj-y += samsung/
obj-y += smccc/
obj-y += tegra/
obj-y += xilinx/


@ -15,7 +15,7 @@
#include "common.h"
#define SCMI_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
#define FFA_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
static DEFINE_IDA(ffa_bus_id);
@ -68,7 +68,7 @@ static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *e
{
const struct ffa_device *ffa_dev = to_ffa_dev(dev);
return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
return add_uevent_var(env, "MODALIAS=" FFA_UEVENT_MODALIAS_FMT,
ffa_dev->vm_id, &ffa_dev->uuid);
}
@ -77,7 +77,7 @@ static ssize_t modalias_show(struct device *dev,
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
return sysfs_emit(buf, FFA_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
&ffa_dev->uuid);
}
static DEVICE_ATTR_RO(modalias);
@ -160,11 +160,12 @@ static int __ffa_devices_unregister(struct device *dev, void *data)
return 0;
}
static void ffa_devices_unregister(void)
void ffa_devices_unregister(void)
{
bus_for_each_dev(&ffa_bus_type, NULL, NULL,
__ffa_devices_unregister);
}
EXPORT_SYMBOL_GPL(ffa_devices_unregister);
bool ffa_device_is_valid(struct ffa_device *ffa_dev)
{
@ -192,7 +193,6 @@ ffa_device_register(const struct ffa_partition_info *part_info,
const struct ffa_ops *ops)
{
int id, ret;
uuid_t uuid;
struct device *dev;
struct ffa_device *ffa_dev;
@ -212,14 +212,14 @@ ffa_device_register(const struct ffa_partition_info *part_info,
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
dev->dma_mask = &dev->coherent_dma_mask;
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;
ffa_dev->vm_id = part_info->id;
ffa_dev->properties = part_info->properties;
ffa_dev->ops = ops;
import_uuid(&uuid, (u8 *)part_info->uuid);
uuid_copy(&ffa_dev->uuid, &uuid);
uuid_copy(&ffa_dev->uuid, &part_info->uuid);
ret = device_register(&ffa_dev->dev);
if (ret) {


@ -44,7 +44,7 @@
#include "common.h"
#define FFA_DRIVER_VERSION FFA_VERSION_1_1
#define FFA_DRIVER_VERSION FFA_VERSION_1_2
#define FFA_MIN_VERSION FFA_VERSION_1_0
#define SENDER_ID_MASK GENMASK(31, 16)
@ -114,7 +114,6 @@ struct ffa_drv_info {
};
static struct ffa_drv_info *drv_info;
static void ffa_partitions_cleanup(void);
/*
* The driver must be able to support all the versions from the earliest
@ -145,11 +144,19 @@ static int ffa_version_check(u32 *version)
.a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
}, &ver);
if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) {
pr_info("FFA_VERSION returned not supported\n");
return -EOPNOTSUPP;
}
if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
return -EINVAL;
}
if (ver.a0 < FFA_MIN_VERSION) {
pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
@ -276,9 +283,21 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
}
if (buffer && count <= num_partitions)
for (idx = 0; idx < count; idx++)
memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
buf_sz);
for (idx = 0; idx < count; idx++) {
struct ffa_partition_info_le {
__le16 id;
__le16 exec_ctxt;
__le32 properties;
uuid_t uuid;
} *rx_buf = drv_info->rx_buffer + idx * sz;
struct ffa_partition_info *buf = buffer + idx;
buf->id = le16_to_cpu(rx_buf->id);
buf->exec_ctxt = le16_to_cpu(rx_buf->exec_ctxt);
buf->properties = le32_to_cpu(rx_buf->properties);
if (buf_sz > 8)
import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
}
ffa_rx_release();
@ -295,14 +314,24 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
#define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
#define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
#define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
#define PART_INFO_ID_MASK GENMASK(15, 0)
#define PART_INFO_EXEC_CXT_MASK GENMASK(31, 16)
#define PART_INFO_PROPS_MASK GENMASK(63, 32)
#define PART_INFO_ID(x) ((u16)(FIELD_GET(PART_INFO_ID_MASK, (x))))
#define PART_INFO_EXEC_CXT(x) ((u16)(FIELD_GET(PART_INFO_EXEC_CXT_MASK, (x))))
#define PART_INFO_PROPERTIES(x) ((u32)(FIELD_GET(PART_INFO_PROPS_MASK, (x))))
static int
__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
struct ffa_partition_info *buffer, int num_parts)
{
u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
struct ffa_partition_info *buf = buffer;
ffa_value_t partition_info;
do {
__le64 *regs;
int idx;
start_idx = prev_idx ? prev_idx + 1 : 0;
invoke_ffa_fn((ffa_value_t){
@ -326,8 +355,25 @@ __ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
if (buf_sz > sizeof(*buffer))
buf_sz = sizeof(*buffer);
memcpy(buffer + prev_idx * buf_sz, &partition_info.a3,
(cur_idx - start_idx + 1) * buf_sz);
regs = (void *)&partition_info.a3;
for (idx = 0; idx < cur_idx - start_idx + 1; idx++, buf++) {
union {
uuid_t uuid;
u64 regs[2];
} uuid_regs = {
.regs = {
le64_to_cpu(*(regs + 1)),
le64_to_cpu(*(regs + 2)),
}
};
u64 val = *(u64 *)regs;
buf->id = PART_INFO_ID(val);
buf->exec_ctxt = PART_INFO_EXEC_CXT(val);
buf->properties = PART_INFO_PROPERTIES(val);
uuid_copy(&buf->uuid, &uuid_regs.uuid);
regs += 3;
}
prev_idx = cur_idx;
} while (cur_idx < (count - 1));
@ -445,9 +491,9 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
return -EINVAL;
}
static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
static int ffa_msg_send2(struct ffa_device *dev, u16 src_id, void *buf, size_t sz)
{
u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
u32 src_dst_ids = PACK_TARGET_INFO(src_id, dev->vm_id);
struct ffa_indirect_msg_hdr *msg;
ffa_value_t ret;
int retval = 0;
@ -463,6 +509,7 @@ static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
msg->offset = sizeof(*msg);
msg->send_recv_id = src_dst_ids;
msg->size = sz;
uuid_copy(&msg->uuid, &dev->uuid);
memcpy((u8 *)msg + msg->offset, buf, sz);
/* flags = 0, sender VMID = 0 works for both physical/virtual NS */
@ -760,6 +807,13 @@ static int ffa_notification_bitmap_destroy(void)
return 0;
}
enum notify_type {
SECURE_PARTITION,
NON_SECURE_VM,
SPM_FRAMEWORK,
NS_HYP_FRAMEWORK,
};
#define NOTIFICATION_LOW_MASK GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x) \
@ -783,10 +837,22 @@ static int ffa_notification_bitmap_destroy(void)
#define MAX_IDS_32 10
#define PER_VCPU_NOTIFICATION_FLAG BIT(0)
#define SECURE_PARTITION_BITMAP BIT(0)
#define NON_SECURE_VM_BITMAP BIT(1)
#define SPM_FRAMEWORK_BITMAP BIT(2)
#define NS_HYP_FRAMEWORK_BITMAP BIT(3)
#define SECURE_PARTITION_BITMAP_ENABLE BIT(SECURE_PARTITION)
#define NON_SECURE_VM_BITMAP_ENABLE BIT(NON_SECURE_VM)
#define SPM_FRAMEWORK_BITMAP_ENABLE BIT(SPM_FRAMEWORK)
#define NS_HYP_FRAMEWORK_BITMAP_ENABLE BIT(NS_HYP_FRAMEWORK)
#define FFA_BITMAP_SECURE_ENABLE_MASK \
(SECURE_PARTITION_BITMAP_ENABLE | SPM_FRAMEWORK_BITMAP_ENABLE)
#define FFA_BITMAP_NS_ENABLE_MASK \
(NON_SECURE_VM_BITMAP_ENABLE | NS_HYP_FRAMEWORK_BITMAP_ENABLE)
#define FFA_BITMAP_ALL_ENABLE_MASK \
(FFA_BITMAP_SECURE_ENABLE_MASK | FFA_BITMAP_NS_ENABLE_MASK)
#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
#define SPM_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_LOW(x)
#define NS_HYP_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_HIGH(x)
#define FRAMEWORK_NOTIFY_RX_BUFFER_FULL BIT(0)
static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
u32 flags, bool is_bind)
@ -852,9 +918,15 @@ static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
else if (ret.a0 != FFA_SUCCESS)
return -EINVAL; /* Something else went wrong. */
notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);
if (flags & SECURE_PARTITION_BITMAP_ENABLE)
notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
if (flags & NON_SECURE_VM_BITMAP_ENABLE)
notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
if (flags & SPM_FRAMEWORK_BITMAP_ENABLE)
notify->arch_map = SPM_FRAMEWORK_BITMAP(ret.a6);
if (flags & NS_HYP_FRAMEWORK_BITMAP_ENABLE)
notify->arch_map = PACK_NOTIFICATION_BITMAP(notify->arch_map,
ret.a7);
return 0;
}
@ -863,27 +935,32 @@ struct ffa_dev_part_info {
ffa_sched_recv_cb callback;
void *cb_data;
rwlock_t rw_lock;
struct ffa_device *dev;
struct list_head node;
};
static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
struct ffa_dev_part_info *partition;
struct ffa_dev_part_info *partition = NULL, *tmp;
ffa_sched_recv_cb callback;
struct list_head *phead;
void *cb_data;
partition = xa_load(&drv_info->partition_info, part_id);
if (!partition) {
phead = xa_load(&drv_info->partition_info, part_id);
if (!phead) {
pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
return;
}
read_lock(&partition->rw_lock);
callback = partition->callback;
cb_data = partition->cb_data;
read_unlock(&partition->rw_lock);
list_for_each_entry_safe(partition, tmp, phead, node) {
read_lock(&partition->rw_lock);
callback = partition->callback;
cb_data = partition->cb_data;
read_unlock(&partition->rw_lock);
if (callback)
callback(vcpu, is_per_vcpu, cb_data);
if (callback)
callback(vcpu, is_per_vcpu, cb_data);
}
}
static void ffa_notification_info_get(void)
@ -899,7 +976,7 @@ static void ffa_notification_info_get(void)
}, &ret);
if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
if (ret.a2 != FFA_RET_NO_DATA)
if ((s32)ret.a2 != FFA_RET_NO_DATA)
pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
ret.a0, ret.a2);
return;
@ -935,7 +1012,7 @@ static void ffa_notification_info_get(void)
}
/* Per vCPU Notification */
for (idx = 0; idx < ids_count[list]; idx++) {
for (idx = 1; idx < ids_count[list]; idx++) {
if (ids_processed >= max_ids - 1)
break;
@ -1015,17 +1092,17 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
{
return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
return ffa_msg_send2(dev, drv_info->vm_id, buf, sz);
}
static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid,
static int ffa_sync_send_receive2(struct ffa_device *dev,
struct ffa_send_direct_data2 *data)
{
if (!drv_info->msg_direct_req2_supp)
return -EOPNOTSUPP;
return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
uuid, data);
&dev->uuid, data);
}
static int ffa_memory_share(struct ffa_mem_ops_args *args)
@ -1051,35 +1128,39 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args)
return ffa_memory_ops(FFA_MEM_LEND, args);
}
#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
#define ffa_notifications_disabled() (!drv_info->notif_enabled)
enum notify_type {
NON_SECURE_VM,
SECURE_PARTITION,
FRAMEWORK,
};
struct notifier_cb_info {
struct hlist_node hnode;
struct ffa_device *dev;
ffa_fwk_notifier_cb fwk_cb;
ffa_notifier_cb cb;
void *cb_data;
enum notify_type type;
};
static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
void *cb_data, bool is_registration)
static int
ffa_sched_recv_cb_update(struct ffa_device *dev, ffa_sched_recv_cb callback,
void *cb_data, bool is_registration)
{
struct ffa_dev_part_info *partition;
struct ffa_dev_part_info *partition = NULL, *tmp;
struct list_head *phead;
bool cb_valid;
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
partition = xa_load(&drv_info->partition_info, part_id);
phead = xa_load(&drv_info->partition_info, dev->vm_id);
if (!phead) {
pr_err("%s: Invalid partition ID 0x%x\n", __func__, dev->vm_id);
return -EINVAL;
}
list_for_each_entry_safe(partition, tmp, phead, node)
if (partition->dev == dev)
break;
if (!partition) {
pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
pr_err("%s: No such partition ID 0x%x\n", __func__, dev->vm_id);
return -EINVAL;
}
@ -1101,12 +1182,12 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
static int ffa_sched_recv_cb_register(struct ffa_device *dev,
ffa_sched_recv_cb cb, void *cb_data)
{
return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
return ffa_sched_recv_cb_update(dev, cb, cb_data, true);
}
static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
return ffa_sched_recv_cb_update(dev, NULL, NULL, false);
}
static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
@ -1119,27 +1200,69 @@ static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}
/* Should be called while the notify_lock is taken */
static enum notify_type ffa_notify_type_get(u16 vm_id)
{
if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
return SECURE_PARTITION;
else
return NON_SECURE_VM;
}
/* notifier_hnode_get* should be called with notify_lock held */
static struct notifier_cb_info *
notifier_hash_node_get(u16 notify_id, enum notify_type type)
notifier_hnode_get_by_vmid(u16 notify_id, int vmid)
{
struct notifier_cb_info *node;
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
if (type == node->type)
if (node->fwk_cb && vmid == node->dev->vm_id)
return node;
return NULL;
}
static struct notifier_cb_info *
notifier_hnode_get_by_vmid_uuid(u16 notify_id, int vmid, const uuid_t *uuid)
{
struct notifier_cb_info *node;
if (uuid_is_null(uuid))
return notifier_hnode_get_by_vmid(notify_id, vmid);
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
if (node->fwk_cb && vmid == node->dev->vm_id &&
uuid_equal(&node->dev->uuid, uuid))
return node;
return NULL;
}
static struct notifier_cb_info *
notifier_hnode_get_by_type(u16 notify_id, enum notify_type type)
{
struct notifier_cb_info *node;
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
if (node->cb && type == ffa_notify_type_get(node->dev->vm_id))
return node;
return NULL;
}
static int
update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
void *cb_data, bool is_registration)
update_notifier_cb(struct ffa_device *dev, int notify_id, void *cb,
void *cb_data, bool is_registration, bool is_framework)
{
struct notifier_cb_info *cb_info = NULL;
enum notify_type type = ffa_notify_type_get(dev->vm_id);
bool cb_found;
cb_info = notifier_hash_node_get(notify_id, type);
if (is_framework)
cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id,
&dev->uuid);
else
cb_info = notifier_hnode_get_by_type(notify_id, type);
cb_found = !!cb_info;
if (!(is_registration ^ cb_found))
@ -1150,9 +1273,12 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
if (!cb_info)
return -ENOMEM;
cb_info->type = type;
cb_info->cb = cb;
cb_info->dev = dev;
cb_info->cb_data = cb_data;
if (is_framework)
cb_info->fwk_cb = cb;
else
cb_info->cb = cb;
hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
} else {
@ -1162,18 +1288,10 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
return 0;
}
static enum notify_type ffa_notify_type_get(u16 vm_id)
{
if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
return SECURE_PARTITION;
else
return NON_SECURE_VM;
}
static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id,
bool is_framework)
{
int rc;
enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@ -1183,53 +1301,83 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
mutex_lock(&drv_info->notify_lock);
rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
rc = update_notifier_cb(dev, notify_id, NULL, NULL, false,
is_framework);
if (rc) {
pr_err("Could not unregister notification callback\n");
mutex_unlock(&drv_info->notify_lock);
return rc;
}
rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
if (!is_framework)
rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
mutex_unlock(&drv_info->notify_lock);
return rc;
}
static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
{
return __ffa_notify_relinquish(dev, notify_id, false);
}
static int ffa_fwk_notify_relinquish(struct ffa_device *dev, int notify_id)
{
return __ffa_notify_relinquish(dev, notify_id, true);
}
static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
void *cb, void *cb_data,
int notify_id, bool is_framework)
{
int rc;
u32 flags = 0;
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL;
mutex_lock(&drv_info->notify_lock);
if (!is_framework) {
if (is_per_vcpu)
flags = PER_VCPU_NOTIFICATION_FLAG;
rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
if (rc) {
mutex_unlock(&drv_info->notify_lock);
return rc;
}
}
rc = update_notifier_cb(dev, notify_id, cb, cb_data, true,
is_framework);
if (rc) {
pr_err("Failed to register callback for %d - %d\n",
notify_id, rc);
if (!is_framework)
ffa_notification_unbind(dev->vm_id, BIT(notify_id));
}
mutex_unlock(&drv_info->notify_lock);
return rc;
}
static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
ffa_notifier_cb cb, void *cb_data, int notify_id)
{
int rc;
u32 flags = 0;
enum notify_type type = ffa_notify_type_get(dev->vm_id);
return __ffa_notify_request(dev, is_per_vcpu, cb, cb_data, notify_id,
false);
}
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL;
mutex_lock(&drv_info->notify_lock);
if (is_per_vcpu)
flags = PER_VCPU_NOTIFICATION_FLAG;
rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
if (rc) {
mutex_unlock(&drv_info->notify_lock);
return rc;
}
rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
if (rc) {
pr_err("Failed to register callback for %d - %d\n",
notify_id, rc);
ffa_notification_unbind(dev->vm_id, BIT(notify_id));
}
mutex_unlock(&drv_info->notify_lock);
return rc;
static int
ffa_fwk_notify_request(struct ffa_device *dev, ffa_fwk_notifier_cb cb,
void *cb_data, int notify_id)
{
return __ffa_notify_request(dev, false, cb, cb_data, notify_id, true);
}
static int ffa_notify_send(struct ffa_device *dev, int notify_id,
@ -1258,7 +1406,7 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
continue;
mutex_lock(&drv_info->notify_lock);
cb_info = notifier_hash_node_get(notify_id, type);
cb_info = notifier_hnode_get_by_type(notify_id, type);
mutex_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->cb)
@ -1266,21 +1414,68 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
}
}
static void notif_get_and_handle(void *unused)
static void handle_fwk_notif_callbacks(u32 bitmap)
{
void *buf;
uuid_t uuid;
int notify_id = 0, target;
struct ffa_indirect_msg_hdr *msg;
struct notifier_cb_info *cb_info = NULL;
/* Only one framework notification defined and supported for now */
if (!(bitmap & FRAMEWORK_NOTIFY_RX_BUFFER_FULL))
return;
mutex_lock(&drv_info->rx_lock);
msg = drv_info->rx_buffer;
buf = kmemdup((void *)msg + msg->offset, msg->size, GFP_KERNEL);
if (!buf) {
mutex_unlock(&drv_info->rx_lock);
return;
}
target = SENDER_ID(msg->send_recv_id);
if (msg->offset >= sizeof(*msg))
uuid_copy(&uuid, &msg->uuid);
else
uuid_copy(&uuid, &uuid_null);
mutex_unlock(&drv_info->rx_lock);
ffa_rx_release();
mutex_lock(&drv_info->notify_lock);
cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid);
mutex_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->fwk_cb)
cb_info->fwk_cb(notify_id, cb_info->cb_data, buf);
kfree(buf);
}
static void notif_get_and_handle(void *cb_data)
{
int rc;
struct ffa_notify_bitmaps bitmaps;
u32 flags;
struct ffa_drv_info *info = cb_data;
struct ffa_notify_bitmaps bitmaps = { 0 };
rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
SPM_FRAMEWORK_BITMAP, &bitmaps);
if (info->vm_id == 0) /* Non secure physical instance */
flags = FFA_BITMAP_SECURE_ENABLE_MASK;
else
flags = FFA_BITMAP_ALL_ENABLE_MASK;
rc = ffa_notification_get(flags, &bitmaps);
if (rc) {
pr_err("Failed to retrieve notifications with %d!\n", rc);
return;
}
handle_fwk_notif_callbacks(SPM_FRAMEWORK_BITMAP(bitmaps.arch_map));
handle_fwk_notif_callbacks(NS_HYP_FRAMEWORK_BITMAP(bitmaps.arch_map));
handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}
static void
@ -1329,6 +1524,8 @@ static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
.notify_request = ffa_notify_request,
.notify_relinquish = ffa_notify_relinquish,
.fwk_notify_request = ffa_fwk_notify_request,
.fwk_notify_relinquish = ffa_fwk_notify_relinquish,
.notify_send = ffa_notify_send,
};
@ -1384,11 +1581,110 @@ static struct notifier_block ffa_bus_nb = {
.notifier_call = ffa_bus_notifier,
};
static int ffa_xa_add_partition_info(struct ffa_device *dev)
{
struct ffa_dev_part_info *info;
struct list_head *head, *phead;
int ret = -ENOMEM;
phead = xa_load(&drv_info->partition_info, dev->vm_id);
if (phead) {
head = phead;
list_for_each_entry(info, head, node) {
if (info->dev == dev) {
pr_err("%s: duplicate dev %p part ID 0x%x\n",
__func__, dev, dev->vm_id);
return -EEXIST;
}
}
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ret;
rwlock_init(&info->rw_lock);
info->dev = dev;
if (!phead) {
phead = kzalloc(sizeof(*phead), GFP_KERNEL);
if (!phead)
goto free_out;
INIT_LIST_HEAD(phead);
ret = xa_insert(&drv_info->partition_info, dev->vm_id, phead,
GFP_KERNEL);
if (ret) {
pr_err("%s: failed to save part ID 0x%x Ret:%d\n",
__func__, dev->vm_id, ret);
goto free_out;
}
}
list_add(&info->node, phead);
return 0;
free_out:
kfree(phead);
kfree(info);
return ret;
}
static int ffa_setup_host_partition(int vm_id)
{
struct ffa_partition_info buf = { 0 };
struct ffa_device *ffa_dev;
int ret;
buf.id = vm_id;
ffa_dev = ffa_device_register(&buf, &ffa_drv_ops);
if (!ffa_dev) {
pr_err("%s: failed to register host partition ID 0x%x\n",
__func__, vm_id);
return -EINVAL;
}
ret = ffa_xa_add_partition_info(ffa_dev);
if (ret)
return ret;
if (ffa_notifications_disabled())
return 0;
ret = ffa_sched_recv_cb_update(ffa_dev, ffa_self_notif_handle,
drv_info, true);
if (ret)
pr_info("Failed to register driver sched callback %d\n", ret);
return ret;
}
static void ffa_partitions_cleanup(void)
{
struct list_head *phead;
unsigned long idx;
/* Clean up/free all registered devices */
ffa_devices_unregister();
xa_for_each(&drv_info->partition_info, idx, phead) {
struct ffa_dev_part_info *info, *tmp;
xa_erase(&drv_info->partition_info, idx);
list_for_each_entry_safe(info, tmp, phead, node) {
list_del(&info->node);
kfree(info);
}
kfree(phead);
}
xa_destroy(&drv_info->partition_info);
}
static int ffa_setup_partitions(void)
{
int count, idx, ret;
struct ffa_device *ffa_dev;
struct ffa_dev_part_info *info;
struct ffa_partition_info *pbuf, *tpbuf;
if (drv_info->version == FFA_VERSION_1_0) {
@ -1422,59 +1718,30 @@ static int ffa_setup_partitions(void)
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev);
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
if (ffa_xa_add_partition_info(ffa_dev)) {
ffa_device_unregister(ffa_dev);
continue;
}
rwlock_init(&info->rw_lock);
ret = xa_insert(&drv_info->partition_info, tpbuf->id,
info, GFP_KERNEL);
if (ret) {
pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
__func__, tpbuf->id, ret);
ffa_device_unregister(ffa_dev);
kfree(info);
}
}
kfree(pbuf);
/* Allocate for the host */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
/* Already registered devices are freed on bus_exit */
ffa_partitions_cleanup();
return -ENOMEM;
}
/*
* Check if the host is already added as part of partition info
* No multiple UUID possible for the host, so just checking if
* there is an entry will suffice
*/
if (xa_load(&drv_info->partition_info, drv_info->vm_id))
return 0;
rwlock_init(&info->rw_lock);
ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
info, GFP_KERNEL);
if (ret) {
pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
__func__, drv_info->vm_id, ret);
kfree(info);
/* Already registered devices are freed on bus_exit */
/* Allocate for the host */
ret = ffa_setup_host_partition(drv_info->vm_id);
if (ret)
ffa_partitions_cleanup();
}
return ret;
}
static void ffa_partitions_cleanup(void)
{
struct ffa_dev_part_info *info;
unsigned long idx;
xa_for_each(&drv_info->partition_info, idx, info) {
xa_erase(&drv_info->partition_info, idx);
kfree(info);
}
xa_destroy(&drv_info->partition_info);
}
/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT (1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
@ -1777,19 +2044,10 @@ static int __init ffa_init(void)
ffa_notifications_setup();
ret = ffa_setup_partitions();
if (ret) {
pr_err("failed to setup partitions\n");
goto cleanup_notifs;
}
if (!ret)
return ret;
ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
drv_info, true);
if (ret)
pr_info("Failed to register driver sched callback %d\n", ret);
return 0;
cleanup_notifs:
pr_err("failed to setup partitions\n");
ffa_notifications_cleanup();
free_pages:
if (drv_info->tx_buffer)


@ -17,6 +17,8 @@
#include "common.h"
#define SCMI_UEVENT_MODALIAS_FMT "%s:%02x:%s"
BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
@ -42,7 +44,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
* This helper let an SCMI driver request specific devices identified by the
* @id_table to be created for each active SCMI instance.
*
* The requested device name MUST NOT be already existent for any protocol;
* The requested device name MUST NOT be already existent for this protocol;
* at first the freshly requested @id_table is annotated in the IDR table
* @scmi_requested_devices and then the requested device is advertised to any
* registered party via the @scmi_requested_devices_nh notification chain.
@ -52,7 +54,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
int ret = 0;
unsigned int id = 0;
struct list_head *head, *phead = NULL;
struct scmi_requested_dev *rdev;
@ -67,19 +68,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
}
/*
* Search for the matching protocol rdev list and then search
* of any existent equally named device...fails if any duplicate found.
* Find the matching protocol rdev list and then search of any
* existent equally named device...fails if any duplicate found.
*/
mutex_lock(&scmi_requested_devices_mtx);
idr_for_each_entry(&scmi_requested_devices, head, id) {
if (!phead) {
/* A list found registered in the IDR is never empty */
rdev = list_first_entry(head, struct scmi_requested_dev,
node);
if (rdev->id_table->protocol_id ==
id_table->protocol_id)
phead = head;
}
phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
if (phead) {
head = phead;
list_for_each_entry(rdev, head, node) {
if (!strcmp(rdev->id_table->name, id_table->name)) {
pr_err("Ignoring duplicate request [%d] %s\n",
@ -283,11 +278,59 @@ static void scmi_dev_remove(struct device *dev)
scmi_drv->remove(scmi_dev);
}
static int scmi_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct scmi_device *scmi_dev = to_scmi_dev(dev);
return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
scmi_dev->name);
}
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scmi_device *scmi_dev = to_scmi_dev(dev);
return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT,
dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
scmi_dev->name);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t protocol_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scmi_device *scmi_dev = to_scmi_dev(dev);
return sprintf(buf, "0x%02x\n", scmi_dev->protocol_id);
}
static DEVICE_ATTR_RO(protocol_id);
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scmi_device *scmi_dev = to_scmi_dev(dev);
return sprintf(buf, "%s\n", scmi_dev->name);
}
static DEVICE_ATTR_RO(name);
static struct attribute *scmi_device_attributes_attrs[] = {
&dev_attr_protocol_id.attr,
&dev_attr_name.attr,
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(scmi_device_attributes);
const struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
.remove = scmi_dev_remove,
.uevent = scmi_device_uevent,
.dev_groups = scmi_device_attributes_groups,
};
EXPORT_SYMBOL_GPL(scmi_bus_type);
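
[Editor's note, not part of the diff above: a minimal sketch of the client-side registration that ends up exercising scmi_protocol_device_request() described in the kerneldoc comment earlier in this file. The driver name, device name and protocol choice are illustrative assumptions, not anything added by this merge.]

#include <linux/module.h>
#include <linux/scmi_protocol.h>

/* Hypothetical SCMI client driver; one uniquely named device per protocol. */
static int example_scmi_probe(struct scmi_device *sdev)
{
	/* sdev->handle exposes the protocol operations once probed. */
	return 0;
}

static const struct scmi_device_id example_scmi_id_table[] = {
	{ SCMI_PROTOCOL_PERF, "example-perf" },	/* name must be unique within the protocol */
	{ },
};
MODULE_DEVICE_TABLE(scmi, example_scmi_id_table);

static struct scmi_driver example_scmi_driver = {
	.name = "example-scmi-driver",
	.probe = example_scmi_probe,
	.id_table = example_scmi_id_table,
};
module_scmi_driver(example_scmi_driver);
MODULE_LICENSE("GPL");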


@ -1997,17 +1997,7 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
else if (db->width == 4)
SCMI_PROTO_FC_RING_DB(32);
else /* db->width == 8 */
#ifdef CONFIG_64BIT
SCMI_PROTO_FC_RING_DB(64);
#else
{
u64 val = 0;
if (db->mask)
val = ioread64_hi_lo(db->addr) & db->mask;
iowrite64_hi_lo(db->set | val, db->addr);
}
#endif
}
/**


@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
config EXYNOS_ACPM_PROTOCOL
tristate "Exynos Alive Clock and Power Manager (ACPM) Message Protocol"
depends on ARCH_EXYNOS || COMPILE_TEST
depends on MAILBOX
help
Alive Clock and Power Manager (ACPM) Message Protocol is defined for
the purpose of communication between the ACPM firmware and masters
(AP, AOC, ...). ACPM firmware operates on the Active Power Management
(APM) module that handles overall power activities.
This protocol driver provides interface for all the client drivers
making use of the features offered by the APM.
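
[Editor's note, not part of the diff: a rough sketch of what a read through this protocol driver looks like, based only on the acpm_pmic_read_reg() signature added later in this merge (drivers/firmware/samsung/exynos-acpm-pmic.h). The channel ID, register type/number and PMIC channel below are made-up placeholder values, and obtaining the ACPM handle from the provider is not shown.]

#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include "exynos-acpm-pmic.h"

/* Hypothetical caller: read one PMIC register over an ACPM channel. */
static int example_acpm_pmic_read(const struct acpm_handle *handle, u8 *val)
{
	unsigned int acpm_chan_id = 2;		/* placeholder ACPM channel carrying PMIC traffic */
	u8 type = 0x1, reg = 0x23, chan = 0;	/* placeholder PMIC type/register/slave channel */

	/* Returns 0 on success; the register contents are stored in *val. */
	return acpm_pmic_read_reg(handle, acpm_chan_id, type, reg, chan, val);
}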


@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
acpm-protocol-objs := exynos-acpm.o exynos-acpm-pmic.o
obj-$(CONFIG_EXYNOS_ACPM_PROTOCOL) += acpm-protocol.o


@ -0,0 +1,224 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Samsung Electronics Co., Ltd.
* Copyright 2020 Google LLC.
* Copyright 2024 Linaro Ltd.
*/
#include <linux/bitfield.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/ktime.h>
#include <linux/types.h>
#include "exynos-acpm.h"
#include "exynos-acpm-pmic.h"
#define ACPM_PMIC_CHANNEL GENMASK(15, 12)
#define ACPM_PMIC_TYPE GENMASK(11, 8)
#define ACPM_PMIC_REG GENMASK(7, 0)
#define ACPM_PMIC_RETURN GENMASK(31, 24)
#define ACPM_PMIC_MASK GENMASK(23, 16)
#define ACPM_PMIC_VALUE GENMASK(15, 8)
#define ACPM_PMIC_FUNC GENMASK(7, 0)
#define ACPM_PMIC_BULK_SHIFT 8
#define ACPM_PMIC_BULK_MASK GENMASK(7, 0)
#define ACPM_PMIC_BULK_MAX_COUNT 8
enum exynos_acpm_pmic_func {
ACPM_PMIC_READ,
ACPM_PMIC_WRITE,
ACPM_PMIC_UPDATE,
ACPM_PMIC_BULK_READ,
ACPM_PMIC_BULK_WRITE,
};
static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i)
{
return (data & ACPM_PMIC_BULK_MASK) << (ACPM_PMIC_BULK_SHIFT * i);
}
static inline u32 acpm_pmic_get_bulk(u32 data, unsigned int i)
{
return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK;
}
static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd,
unsigned int acpm_chan_id)
{
xfer->txd = cmd;
xfer->rxd = cmd;
xfer->txlen = sizeof(cmd);
xfer->rxlen = sizeof(cmd);
xfer->acpm_chan_id = acpm_chan_id;
}
static void acpm_pmic_init_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan)
{
cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
FIELD_PREP(ACPM_PMIC_REG, reg) |
FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_READ);
cmd[3] = ktime_to_ms(ktime_get());
}
int acpm_pmic_read_reg(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 *buf)
{
struct acpm_xfer xfer;
u32 cmd[4] = {0};
int ret;
acpm_pmic_init_read_cmd(cmd, type, reg, chan);
acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
return ret;
*buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]);
return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
}
static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
u8 count)
{
cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
FIELD_PREP(ACPM_PMIC_REG, reg) |
FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_READ) |
FIELD_PREP(ACPM_PMIC_VALUE, count);
}
int acpm_pmic_bulk_read(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 count, u8 *buf)
{
struct acpm_xfer xfer;
u32 cmd[4] = {0};
int i, ret;
if (count > ACPM_PMIC_BULK_MAX_COUNT)
return -EINVAL;
acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count);
acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
return ret;
ret = FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
if (ret)
return ret;
for (i = 0; i < count; i++) {
if (i < 4)
buf[i] = acpm_pmic_get_bulk(xfer.rxd[2], i);
else
buf[i] = acpm_pmic_get_bulk(xfer.rxd[3], i - 4);
}
return 0;
}
static void acpm_pmic_init_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
u8 value)
{
cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
FIELD_PREP(ACPM_PMIC_REG, reg) |
FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_WRITE) |
FIELD_PREP(ACPM_PMIC_VALUE, value);
cmd[3] = ktime_to_ms(ktime_get());
}
int acpm_pmic_write_reg(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 value)
{
struct acpm_xfer xfer;
u32 cmd[4] = {0};
int ret;
acpm_pmic_init_write_cmd(cmd, type, reg, chan, value);
acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
return ret;
return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
}
static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
u8 count, const u8 *buf)
{
int i;
cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
FIELD_PREP(ACPM_PMIC_REG, reg) |
FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_WRITE) |
FIELD_PREP(ACPM_PMIC_VALUE, count);
for (i = 0; i < count; i++) {
if (i < 4)
cmd[2] |= acpm_pmic_set_bulk(buf[i], i);
else
cmd[3] |= acpm_pmic_set_bulk(buf[i], i - 4);
}
}
int acpm_pmic_bulk_write(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 count, const u8 *buf)
{
struct acpm_xfer xfer;
u32 cmd[4] = {0};
int ret;
if (count > ACPM_PMIC_BULK_MAX_COUNT)
return -EINVAL;
acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf);
acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
return ret;
return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
}
static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
u8 value, u8 mask)
{
cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
FIELD_PREP(ACPM_PMIC_REG, reg) |
FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_UPDATE) |
FIELD_PREP(ACPM_PMIC_VALUE, value) |
FIELD_PREP(ACPM_PMIC_MASK, mask);
cmd[3] = ktime_to_ms(ktime_get());
}
int acpm_pmic_update_reg(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 value, u8 mask)
{
struct acpm_xfer xfer;
u32 cmd[4] = {0};
int ret;
acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask);
acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
return ret;
return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
}


@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2020 Samsung Electronics Co., Ltd.
* Copyright 2020 Google LLC.
* Copyright 2024 Linaro Ltd.
*/
#ifndef __EXYNOS_ACPM_PMIC_H__
#define __EXYNOS_ACPM_PMIC_H__
#include <linux/types.h>
struct acpm_handle;
int acpm_pmic_read_reg(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 *buf);
int acpm_pmic_bulk_read(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 count, u8 *buf);
int acpm_pmic_write_reg(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 value);
int acpm_pmic_bulk_write(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 count, const u8 *buf);
int acpm_pmic_update_reg(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 value, u8 mask);
#endif /* __EXYNOS_ACPM_PMIC_H__ */


@ -0,0 +1,769 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Samsung Electronics Co., Ltd.
* Copyright 2020 Google LLC.
* Copyright 2024 Linaro Ltd.
*/
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mailbox/exynos-message.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/math.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "exynos-acpm.h"
#include "exynos-acpm-pmic.h"
#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
/* The unit of counter is 20 us. 5000 * 20 = 100 ms */
#define ACPM_POLL_TIMEOUT 5000
#define ACPM_TX_TIMEOUT_US 500000
#define ACPM_GS101_INITDATA_BASE 0xa000
/**
* struct acpm_shmem - shared memory configuration information.
* @reserved: unused fields.
* @chans: offset to array of struct acpm_chan_shmem.
* @reserved1: unused fields.
* @num_chans: number of channels.
*/
struct acpm_shmem {
u32 reserved[2];
u32 chans;
u32 reserved1[3];
u32 num_chans;
};
/**
* struct acpm_chan_shmem - descriptor of a shared memory channel.
*
* @id: channel ID.
* @reserved: unused fields.
* @rx_rear: rear pointer of APM RX queue (TX for AP).
* @rx_front: front pointer of APM RX queue (TX for AP).
* @rx_base: base address of APM RX queue (TX for AP).
* @reserved1: unused fields.
* @tx_rear: rear pointer of APM TX queue (RX for AP).
* @tx_front: front pointer of APM TX queue (RX for AP).
* @tx_base: base address of APM TX queue (RX for AP).
* @qlen: queue length. Applies to both TX/RX queues.
* @mlen: message length. Applies to both TX/RX queues.
* @reserved2: unused fields.
* @poll_completion: true when the channel works on polling.
*/
struct acpm_chan_shmem {
u32 id;
u32 reserved[3];
u32 rx_rear;
u32 rx_front;
u32 rx_base;
u32 reserved1[3];
u32 tx_rear;
u32 tx_front;
u32 tx_base;
u32 qlen;
u32 mlen;
u32 reserved2[2];
u32 poll_completion;
};
/**
* struct acpm_queue - exynos acpm queue.
*
* @rear: rear address of the queue.
* @front: front address of the queue.
* @base: base address of the queue.
*/
struct acpm_queue {
void __iomem *rear;
void __iomem *front;
void __iomem *base;
};
/**
* struct acpm_rx_data - RX queue data.
*
* @cmd: pointer to where the data shall be saved.
* @n_cmd: number of 32-bit commands.
* @response: true if the client expects the RX data.
*/
struct acpm_rx_data {
u32 *cmd;
size_t n_cmd;
bool response;
};
#define ACPM_SEQNUM_MAX 64
/**
* struct acpm_chan - driver internal representation of a channel.
* @cl: mailbox client.
* @chan: mailbox channel.
* @acpm: pointer to driver private data.
* @tx: TX queue. The enqueue is done by the host.
* - front index is written by the host.
* - rear index is written by the firmware.
*
* @rx: RX queue. The enqueue is done by the firmware.
* - front index is written by the firmware.
* - rear index is written by the host.
* @tx_lock: protects TX queue.
* @rx_lock: protects RX queue.
* @qlen: queue length. Applies to both TX/RX queues.
* @mlen: message length. Applies to both TX/RX queues.
* @seqnum: sequence number of the last message enqueued on TX queue.
* @id: channel ID.
* @poll_completion: indicates if the transfer needs to be polled for
* completion or interrupt mode is used.
* @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues.
* @rx_data: internal buffer used to drain the RX queue.
*/
struct acpm_chan {
struct mbox_client cl;
struct mbox_chan *chan;
struct acpm_info *acpm;
struct acpm_queue tx;
struct acpm_queue rx;
struct mutex tx_lock;
struct mutex rx_lock;
unsigned int qlen;
unsigned int mlen;
u8 seqnum;
u8 id;
bool poll_completion;
DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1);
struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX];
};
/**
* struct acpm_info - driver's private data.
* @shmem: pointer to the SRAM configuration data.
* @sram_base: base address of SRAM.
* @chans: pointer to the ACPM channel parameters retrieved from SRAM.
* @dev: pointer to the exynos-acpm device.
* @handle: instance of acpm_handle to send to clients.
* @num_chans: number of channels available for this controller.
*/
struct acpm_info {
struct acpm_shmem __iomem *shmem;
void __iomem *sram_base;
struct acpm_chan *chans;
struct device *dev;
struct acpm_handle handle;
u32 num_chans;
};
/**
* struct acpm_match_data - of_device_id data.
* @initdata_base: offset in SRAM where the channels configuration resides.
*/
struct acpm_match_data {
loff_t initdata_base;
};
#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)
/**
* acpm_get_rx() - get response from RX queue.
* @achan: ACPM channel info.
* @xfer: reference to the transfer to get response for.
*
* Return: 0 on success, -errno otherwise.
*/
static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
{
u32 rx_front, rx_seqnum, tx_seqnum, seqnum;
const void __iomem *base, *addr;
struct acpm_rx_data *rx_data;
u32 i, val, mlen;
bool rx_set = false;
guard(mutex)(&achan->rx_lock);
rx_front = readl(achan->rx.front);
i = readl(achan->rx.rear);
/* Bail out if RX is empty. */
if (i == rx_front)
return 0;
base = achan->rx.base;
mlen = achan->mlen;
tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
/* Drain RX queue. */
do {
/* Read RX seqnum. */
addr = base + mlen * i;
val = readl(addr);
rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val);
if (!rx_seqnum)
return -EIO;
/*
* mssg seqnum starts with value 1, whereas the driver considers
* the first mssg at index 0.
*/
seqnum = rx_seqnum - 1;
rx_data = &achan->rx_data[seqnum];
if (rx_data->response) {
if (rx_seqnum == tx_seqnum) {
__ioread32_copy(xfer->rxd, addr,
xfer->rxlen / 4);
rx_set = true;
clear_bit(seqnum, achan->bitmap_seqnum);
} else {
/*
* The RX data corresponds to another request.
* Save the data to drain the queue, but don't
* clear yet the bitmap. It will be cleared
* after the response is copied to the request.
*/
__ioread32_copy(rx_data->cmd, addr,
xfer->rxlen / 4);
}
} else {
clear_bit(seqnum, achan->bitmap_seqnum);
}
i = (i + 1) % achan->qlen;
} while (i != rx_front);
/* We saved all responses, mark RX empty. */
writel(rx_front, achan->rx.rear);
/*
* If the response was not in this iteration of the queue, check if the
* RX data was previously saved.
*/
rx_data = &achan->rx_data[tx_seqnum - 1];
if (!rx_set && rx_data->response) {
rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM,
rx_data->cmd[0]);
if (rx_seqnum == tx_seqnum) {
memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
}
}
return 0;
}
/**
* acpm_dequeue_by_polling() - RX dequeue by polling.
* @achan: ACPM channel info.
* @xfer: reference to the transfer being waited for.
*
* Return: 0 on success, -errno otherwise.
*/
static int acpm_dequeue_by_polling(struct acpm_chan *achan,
const struct acpm_xfer *xfer)
{
struct device *dev = achan->acpm->dev;
unsigned int cnt_20us = 0;
u32 seqnum;
int ret;
seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
do {
ret = acpm_get_rx(achan, xfer);
if (ret)
return ret;
if (!test_bit(seqnum - 1, achan->bitmap_seqnum))
return 0;
/* Determined experimentally. */
usleep_range(20, 30);
cnt_20us++;
} while (cnt_20us < ACPM_POLL_TIMEOUT);
dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %d.\n",
achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us);
return -ETIME;
}
/**
* acpm_wait_for_queue_slots() - wait for queue slots.
*
* @achan: ACPM channel info.
* @next_tx_front: next front index of the TX queue.
*
* Return: 0 on success, -errno otherwise.
*/
static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front)
{
u32 val, ret;
/*
* Wait for RX front to keep up with TX front. Make sure there's at
* least one element between them.
*/
ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0,
ACPM_TX_TIMEOUT_US);
if (ret) {
dev_err(achan->acpm->dev, "RX front can not keep up with TX front.\n");
return ret;
}
ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0,
ACPM_TX_TIMEOUT_US);
if (ret)
dev_err(achan->acpm->dev, "TX queue is full.\n");
return ret;
}
/**
* acpm_prepare_xfer() - prepare a transfer before writing the message to the
* TX queue.
* @achan: ACPM channel info.
* @xfer: reference to the transfer being prepared.
*/
static void acpm_prepare_xfer(struct acpm_chan *achan,
const struct acpm_xfer *xfer)
{
struct acpm_rx_data *rx_data;
u32 *txd = (u32 *)xfer->txd;
/* Prevent chan->seqnum from being re-used */
do {
if (++achan->seqnum == ACPM_SEQNUM_MAX)
achan->seqnum = 1;
} while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum));
txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum);
/* Clear data for upcoming responses */
rx_data = &achan->rx_data[achan->seqnum - 1];
memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd);
if (xfer->rxd)
rx_data->response = true;
/* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */
set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
}
/**
* acpm_wait_for_message_response() - a helper grouping all possible ways of
* waiting for a synchronous message response.
*
* @achan: ACPM channel info.
* @xfer: reference to the transfer being waited for.
*
* Return: 0 on success, -errno otherwise.
*/
static int acpm_wait_for_message_response(struct acpm_chan *achan,
const struct acpm_xfer *xfer)
{
/* Just polling mode supported for now. */
return acpm_dequeue_by_polling(achan, xfer);
}
/**
* acpm_do_xfer() - do one transfer.
* @handle: pointer to the acpm handle.
* @xfer: transfer to initiate and wait for response.
*
* Return: 0 on success, -errno otherwise.
*/
int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
{
struct acpm_info *acpm = handle_to_acpm_info(handle);
struct exynos_mbox_msg msg;
struct acpm_chan *achan;
u32 idx, tx_front;
int ret;
if (xfer->acpm_chan_id >= acpm->num_chans)
return -EINVAL;
achan = &acpm->chans[xfer->acpm_chan_id];
if (!xfer->txd || xfer->txlen > achan->mlen || xfer->rxlen > achan->mlen)
return -EINVAL;
if (!achan->poll_completion) {
dev_err(achan->acpm->dev, "Interrupt mode not supported\n");
return -EOPNOTSUPP;
}
scoped_guard(mutex, &achan->tx_lock) {
tx_front = readl(achan->tx.front);
idx = (tx_front + 1) % achan->qlen;
ret = acpm_wait_for_queue_slots(achan, idx);
if (ret)
return ret;
acpm_prepare_xfer(achan, xfer);
/* Write TX command. */
__iowrite32_copy(achan->tx.base + achan->mlen * tx_front,
xfer->txd, xfer->txlen / 4);
/* Advance TX front. */
writel(idx, achan->tx.front);
}
msg.chan_id = xfer->acpm_chan_id;
msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
ret = mbox_send_message(achan->chan, (void *)&msg);
if (ret < 0)
return ret;
ret = acpm_wait_for_message_response(achan, xfer);
/*
* NOTE: we might prefer not to need the mailbox ticker to manage the
* transfer queueing since the protocol layer queues things by itself.
* Unfortunately, we have to kick the mailbox framework after we have
* received our message.
*/
mbox_client_txdone(achan->chan, ret);
return ret;
}
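For reference, a minimal caller-side sketch of acpm_do_xfer(); the command layout, channel id and buffer sizes below are illustrative assumptions and not part of this series:
/* Hypothetical caller: send one request on a channel and read the reply. */
static int acpm_example_request(const struct acpm_handle *handle)
{
	u32 cmd[4] = { 0 };		/* assumed 16-byte command format */
	u32 reply[4];
	struct acpm_xfer xfer = {
		.txd = cmd,
		.rxd = reply,
		.txlen = sizeof(cmd),
		.rxlen = sizeof(reply),
		.acpm_chan_id = 0,	/* assumed channel id */
	};

	return acpm_do_xfer(handle, &xfer);
}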
/**
* acpm_chan_shmem_get_params() - get channel parameters and addresses of the
* TX/RX queues.
* @achan: ACPM channel info.
* @chan_shmem: __iomem pointer to a channel described in shared memory.
*/
static void acpm_chan_shmem_get_params(struct acpm_chan *achan,
struct acpm_chan_shmem __iomem *chan_shmem)
{
void __iomem *base = achan->acpm->sram_base;
struct acpm_queue *rx = &achan->rx;
struct acpm_queue *tx = &achan->tx;
achan->mlen = readl(&chan_shmem->mlen);
achan->poll_completion = readl(&chan_shmem->poll_completion);
achan->id = readl(&chan_shmem->id);
achan->qlen = readl(&chan_shmem->qlen);
tx->base = base + readl(&chan_shmem->rx_base);
tx->rear = base + readl(&chan_shmem->rx_rear);
tx->front = base + readl(&chan_shmem->rx_front);
rx->base = base + readl(&chan_shmem->tx_base);
rx->rear = base + readl(&chan_shmem->tx_rear);
rx->front = base + readl(&chan_shmem->tx_front);
dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n",
achan->id, achan->poll_completion, achan->mlen, achan->qlen);
}
/**
* acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM
* firmware.
* @achan: ACPM channel info.
*
* Return: 0 on success, -errno otherwise.
*/
static int acpm_achan_alloc_cmds(struct acpm_chan *achan)
{
struct device *dev = achan->acpm->dev;
struct acpm_rx_data *rx_data;
size_t cmd_size, n_cmd;
int i;
if (achan->mlen == 0)
return 0;
cmd_size = sizeof(*(achan->rx_data[0].cmd));
n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size);
for (i = 0; i < ACPM_SEQNUM_MAX; i++) {
rx_data = &achan->rx_data[i];
rx_data->n_cmd = n_cmd;
rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL);
if (!rx_data->cmd)
return -ENOMEM;
}
return 0;
}
/**
* acpm_free_mbox_chans() - free mailbox channels.
* @acpm: pointer to driver data.
*/
static void acpm_free_mbox_chans(struct acpm_info *acpm)
{
int i;
for (i = 0; i < acpm->num_chans; i++)
if (!IS_ERR_OR_NULL(acpm->chans[i].chan))
mbox_free_channel(acpm->chans[i].chan);
}
/**
* acpm_channels_init() - initialize channels based on the configuration data in
* the shared memory.
* @acpm: pointer to driver data.
*
* Return: 0 on success, -errno otherwise.
*/
static int acpm_channels_init(struct acpm_info *acpm)
{
struct acpm_shmem __iomem *shmem = acpm->shmem;
struct acpm_chan_shmem __iomem *chans_shmem;
struct device *dev = acpm->dev;
int i, ret;
acpm->num_chans = readl(&shmem->num_chans);
acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans),
GFP_KERNEL);
if (!acpm->chans)
return -ENOMEM;
chans_shmem = acpm->sram_base + readl(&shmem->chans);
for (i = 0; i < acpm->num_chans; i++) {
struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i];
struct acpm_chan *achan = &acpm->chans[i];
struct mbox_client *cl = &achan->cl;
achan->acpm = acpm;
acpm_chan_shmem_get_params(achan, chan_shmem);
ret = acpm_achan_alloc_cmds(achan);
if (ret)
return ret;
mutex_init(&achan->rx_lock);
mutex_init(&achan->tx_lock);
cl->dev = dev;
achan->chan = mbox_request_channel(cl, 0);
if (IS_ERR(achan->chan)) {
acpm_free_mbox_chans(acpm);
return PTR_ERR(achan->chan);
}
}
return 0;
}
/**
* acpm_setup_ops() - setup the operations structures.
* @acpm: pointer to the driver data.
*/
static void acpm_setup_ops(struct acpm_info *acpm)
{
struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;
pmic_ops->read_reg = acpm_pmic_read_reg;
pmic_ops->bulk_read = acpm_pmic_bulk_read;
pmic_ops->write_reg = acpm_pmic_write_reg;
pmic_ops->bulk_write = acpm_pmic_bulk_write;
pmic_ops->update_reg = acpm_pmic_update_reg;
}
static int acpm_probe(struct platform_device *pdev)
{
const struct acpm_match_data *match_data;
struct device *dev = &pdev->dev;
struct device_node *shmem;
struct acpm_info *acpm;
resource_size_t size;
struct resource res;
int ret;
acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL);
if (!acpm)
return -ENOMEM;
shmem = of_parse_phandle(dev->of_node, "shmem", 0);
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);
if (ret)
return dev_err_probe(dev, ret,
"Failed to get shared memory.\n");
size = resource_size(&res);
acpm->sram_base = devm_ioremap(dev, res.start, size);
if (!acpm->sram_base)
return dev_err_probe(dev, -ENOMEM,
"Failed to ioremap shared memory.\n");
match_data = of_device_get_match_data(dev);
if (!match_data)
return dev_err_probe(dev, -EINVAL,
"Failed to get match data.\n");
acpm->shmem = acpm->sram_base + match_data->initdata_base;
acpm->dev = dev;
ret = acpm_channels_init(acpm);
if (ret)
return ret;
acpm_setup_ops(acpm);
platform_set_drvdata(pdev, acpm);
return 0;
}
/**
* acpm_handle_put() - release the handle acquired by acpm_get_by_phandle.
* @handle: Handle acquired by acpm_get_by_phandle.
*/
static void acpm_handle_put(const struct acpm_handle *handle)
{
struct acpm_info *acpm = handle_to_acpm_info(handle);
struct device *dev = acpm->dev;
module_put(dev->driver->owner);
/* Drop reference taken with of_find_device_by_node(). */
put_device(dev);
}
/**
* devm_acpm_release() - devres release method.
* @dev: pointer to device.
* @res: pointer to resource.
*/
static void devm_acpm_release(struct device *dev, void *res)
{
acpm_handle_put(*(struct acpm_handle **)res);
}
/**
* acpm_get_by_phandle() - get the ACPM handle using DT phandle.
* @dev: device pointer requesting ACPM handle.
* @property: property name containing phandle on ACPM node.
*
* Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/
static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
const char *property)
{
struct platform_device *pdev;
struct device_node *acpm_np;
struct device_link *link;
struct acpm_info *acpm;
acpm_np = of_parse_phandle(dev->of_node, property, 0);
if (!acpm_np)
return ERR_PTR(-ENODEV);
pdev = of_find_device_by_node(acpm_np);
if (!pdev) {
dev_err(dev, "Cannot find device node %s\n", acpm_np->name);
of_node_put(acpm_np);
return ERR_PTR(-EPROBE_DEFER);
}
of_node_put(acpm_np);
acpm = platform_get_drvdata(pdev);
if (!acpm) {
dev_err(dev, "Cannot get drvdata from %s\n",
dev_name(&pdev->dev));
platform_device_put(pdev);
return ERR_PTR(-EPROBE_DEFER);
}
if (!try_module_get(pdev->dev.driver->owner)) {
dev_err(dev, "Cannot get module reference.\n");
platform_device_put(pdev);
return ERR_PTR(-EPROBE_DEFER);
}
link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
if (!link) {
dev_err(&pdev->dev,
"Failed to create device link to consumer %s.\n",
dev_name(dev));
platform_device_put(pdev);
module_put(pdev->dev.driver->owner);
return ERR_PTR(-EINVAL);
}
return &acpm->handle;
}
/**
* devm_acpm_get_by_phandle() - managed get handle using phandle.
* @dev: device pointer requesting ACPM handle.
* @property: property name containing phandle on ACPM node.
*
* Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/
const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
const char *property)
{
const struct acpm_handle **ptr, *handle;
ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
handle = acpm_get_by_phandle(dev, property);
if (!IS_ERR(handle)) {
*ptr = handle;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return handle;
}
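A consumer might obtain the handle as sketched below; the phandle property name is an assumption, and the PMIC accessor prototypes are not part of this hunk, so they are only referenced in a comment:
/* Hypothetical consumer probe: resolve the ACPM protocol node by phandle. */
static int example_consumer_probe(struct platform_device *pdev)
{
	const struct acpm_handle *handle;

	handle = devm_acpm_get_by_phandle(&pdev->dev, "exynos,acpm-ipc");
	if (IS_ERR(handle))
		return PTR_ERR(handle);	/* typically -EPROBE_DEFER early on */

	/* Register accessors are then reached via handle->ops.pmic_ops.* */
	return 0;
}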
static const struct acpm_match_data acpm_gs101 = {
.initdata_base = ACPM_GS101_INITDATA_BASE,
};
static const struct of_device_id acpm_match[] = {
{
.compatible = "google,gs101-acpm-ipc",
.data = &acpm_gs101,
},
{},
};
MODULE_DEVICE_TABLE(of, acpm_match);
static struct platform_driver acpm_driver = {
.probe = acpm_probe,
.driver = {
.name = "exynos-acpm-protocol",
.of_match_table = acpm_match,
},
};
module_platform_driver(acpm_driver);
MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver");
MODULE_LICENSE("GPL");

@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2020 Samsung Electronics Co., Ltd.
* Copyright 2020 Google LLC.
* Copyright 2024 Linaro Ltd.
*/
#ifndef __EXYNOS_ACPM_H__
#define __EXYNOS_ACPM_H__
struct acpm_xfer {
const u32 *txd;
u32 *rxd;
size_t txlen;
size_t rxlen;
unsigned int acpm_chan_id;
};
struct acpm_handle;
int acpm_do_xfer(const struct acpm_handle *handle,
const struct acpm_xfer *xfer);
#endif /* __EXYNOS_ACPM_H__ */

@@ -32,6 +32,85 @@
static struct soc_device *soc_dev;
static struct soc_device_attribute *soc_dev_attr;
#ifdef CONFIG_ARM64
static char __ro_after_init smccc_soc_id_name[136] = "";
static inline void str_fragment_from_reg(char *dst, unsigned long reg)
{
dst[0] = (reg >> 0) & 0xff;
dst[1] = (reg >> 8) & 0xff;
dst[2] = (reg >> 16) & 0xff;
dst[3] = (reg >> 24) & 0xff;
dst[4] = (reg >> 32) & 0xff;
dst[5] = (reg >> 40) & 0xff;
dst[6] = (reg >> 48) & 0xff;
dst[7] = (reg >> 56) & 0xff;
}
static char __init *smccc_soc_name_init(void)
{
struct arm_smccc_1_2_regs args;
struct arm_smccc_1_2_regs res;
size_t len;
/*
* Issue Number 1.6 of the Arm SMC Calling Convention
* specification introduces an optional "name" string
* to the ARM_SMCCC_ARCH_SOC_ID function. Fetch it if
* available.
*/
args.a0 = ARM_SMCCC_ARCH_SOC_ID;
args.a1 = 2; /* SOC_ID name */
arm_smccc_1_2_invoke(&args, &res);
if ((u32)res.a0 == 0) {
/*
* Copy res.a1..res.a17 to the smccc_soc_id_name string
* 8 bytes at a time. As per Issue 1.6 of the Arm SMC
* Calling Convention, the string will be NUL terminated and
* padded with NULs from the end of the string to the end of the
* 136-byte buffer.
*/
str_fragment_from_reg(smccc_soc_id_name + 8 * 0, res.a1);
str_fragment_from_reg(smccc_soc_id_name + 8 * 1, res.a2);
str_fragment_from_reg(smccc_soc_id_name + 8 * 2, res.a3);
str_fragment_from_reg(smccc_soc_id_name + 8 * 3, res.a4);
str_fragment_from_reg(smccc_soc_id_name + 8 * 4, res.a5);
str_fragment_from_reg(smccc_soc_id_name + 8 * 5, res.a6);
str_fragment_from_reg(smccc_soc_id_name + 8 * 6, res.a7);
str_fragment_from_reg(smccc_soc_id_name + 8 * 7, res.a8);
str_fragment_from_reg(smccc_soc_id_name + 8 * 8, res.a9);
str_fragment_from_reg(smccc_soc_id_name + 8 * 9, res.a10);
str_fragment_from_reg(smccc_soc_id_name + 8 * 10, res.a11);
str_fragment_from_reg(smccc_soc_id_name + 8 * 11, res.a12);
str_fragment_from_reg(smccc_soc_id_name + 8 * 12, res.a13);
str_fragment_from_reg(smccc_soc_id_name + 8 * 13, res.a14);
str_fragment_from_reg(smccc_soc_id_name + 8 * 14, res.a15);
str_fragment_from_reg(smccc_soc_id_name + 8 * 15, res.a16);
str_fragment_from_reg(smccc_soc_id_name + 8 * 16, res.a17);
len = strnlen(smccc_soc_id_name, sizeof(smccc_soc_id_name));
if (len) {
if (len == sizeof(smccc_soc_id_name))
pr_warn(FW_BUG "Ignoring improperly formatted name\n");
else
return smccc_soc_id_name;
}
}
return NULL;
}
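As a worked example of the unpacking above: the 17 result registers a1..a17 carry 8 bytes each, which is where the 136-byte name buffer comes from, and each register packs its first character in the lowest byte. The standalone sketch below is illustrative only, not part of the patch:
/* Illustrative only: 0x6f6c6c6548 is "Hello" packed little-endian. */
static void __maybe_unused smccc_soc_name_example(void)
{
	char buf[9] = "";

	str_fragment_from_reg(buf, 0x6f6c6c6548UL);
	/* buf now holds "Hello" followed by NUL padding. */
}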
#else
static char __init *smccc_soc_name_init(void)
{
return NULL;
}
#endif
static int __init smccc_soc_init(void)
{
int soc_id_rev, soc_id_version;
@@ -72,6 +151,7 @@ static int __init smccc_soc_init(void)
soc_dev_attr->soc_id = soc_id_str;
soc_dev_attr->revision = soc_id_rev_str;
soc_dev_attr->family = soc_id_jep106_id_str;
soc_dev_attr->machine = smccc_soc_name_init();
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {

@@ -1139,17 +1139,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
int zynqmp_pm_fpga_get_config_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
u32 buf, lower_addr, upper_addr;
int ret;
if (!value)
return -EINVAL;
lower_addr = lower_32_bits((u64)&buf);
upper_addr = upper_32_bits((u64)&buf);
ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4,
XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr,
XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, 0, 0,
XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG);
*value = ret_payload[1];

@@ -26,8 +26,6 @@
/* use for A1 like chips */
#define REG_PIN_A1_SEL 0x04
/* Used for s4 chips */
#define REG_EDGE_POL_S4 0x1c
/*
* Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
@@ -72,6 +70,7 @@ struct meson_gpio_irq_params {
bool support_edge_both;
unsigned int edge_both_offset;
unsigned int edge_single_offset;
unsigned int edge_pol_reg;
unsigned int pol_low_offset;
unsigned int pin_sel_mask;
struct irq_ctl_ops ops;
@@ -105,6 +104,18 @@ struct meson_gpio_irq_params {
.pin_sel_mask = 0x7f, \
.nr_channels = 8, \
#define INIT_MESON_A4_AO_COMMON_DATA(irqs) \
INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
meson_a1_gpio_irq_sel_pin, \
meson_s4_gpio_irq_set_type) \
.support_edge_both = true, \
.edge_both_offset = 0, \
.edge_single_offset = 12, \
.edge_pol_reg = 0x8, \
.pol_low_offset = 0, \
.pin_sel_mask = 0xff, \
.nr_channels = 2, \
#define INIT_MESON_S4_COMMON_DATA(irqs) \
INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
meson_a1_gpio_irq_sel_pin, \
@@ -112,6 +123,7 @@ struct meson_gpio_irq_params {
.support_edge_both = true, \
.edge_both_offset = 0, \
.edge_single_offset = 12, \
.edge_pol_reg = 0x1c, \
.pol_low_offset = 0, \
.pin_sel_mask = 0xff, \
.nr_channels = 12, \
@@ -146,6 +158,18 @@ static const struct meson_gpio_irq_params a1_params = {
INIT_MESON_A1_COMMON_DATA(62)
};
static const struct meson_gpio_irq_params a4_params = {
INIT_MESON_S4_COMMON_DATA(81)
};
static const struct meson_gpio_irq_params a4_ao_params = {
INIT_MESON_A4_AO_COMMON_DATA(8)
};
static const struct meson_gpio_irq_params a5_params = {
INIT_MESON_S4_COMMON_DATA(99)
};
static const struct meson_gpio_irq_params s4_params = {
INIT_MESON_S4_COMMON_DATA(82)
};
@@ -168,6 +192,9 @@ static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
{ .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
{ .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
{ .compatible = "amlogic,a4-gpio-ao-intc", .data = &a4_ao_params },
{ .compatible = "amlogic,a4-gpio-intc", .data = &a4_params },
{ .compatible = "amlogic,a5-gpio-intc", .data = &a5_params },
{ .compatible = "amlogic,c3-gpio-intc", .data = &c3_params },
{ .compatible = "amlogic,t7-gpio-intc", .data = &t7_params },
{ }
@@ -299,11 +326,10 @@ meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
unsigned int type, u32 *channel_hwirq)
{
u32 val = 0;
const struct meson_gpio_irq_params *params = ctl->params;
unsigned int idx;
const struct meson_gpio_irq_params *params;
u32 val = 0;
params = ctl->params;
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
/*
@@ -356,19 +382,19 @@ static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
unsigned int type, u32 *channel_hwirq)
{
u32 val = 0;
const struct meson_gpio_irq_params *params = ctl->params;
unsigned int idx;
u32 val = 0;
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
type &= IRQ_TYPE_SENSE_MASK;
meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(idx), 0);
meson_gpio_irq_update_bits(ctl, params->edge_pol_reg, BIT(idx), 0);
if (type == IRQ_TYPE_EDGE_BOTH) {
val |= BIT(ctl->params->edge_both_offset + idx);
meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4,
BIT(ctl->params->edge_both_offset + idx), val);
val = BIT(ctl->params->edge_both_offset + idx);
meson_gpio_irq_update_bits(ctl, params->edge_pol_reg, val, val);
return 0;
}
@@ -378,7 +404,7 @@ static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
val |= BIT(ctl->params->edge_single_offset + idx);
meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
meson_gpio_irq_update_bits(ctl, params->edge_pol_reg,
BIT(idx) | BIT(12 + idx), val);
return 0;
};

@@ -332,6 +332,38 @@ static const u8 mtk_smi_larb_mt8188_ostd[][SMI_LARB_PORT_NR_MAX] = {
[25] = {0x01},
};
static const u8 mtk_smi_larb_mt8192_ostd[][SMI_LARB_PORT_NR_MAX] = {
[0] = {0x2, 0x2, 0x28, 0xa, 0xc, 0x28,},
[1] = {0x2, 0x2, 0x18, 0x18, 0x18, 0xa, 0xc, 0x28,},
[2] = {0x5, 0x5, 0x5, 0x5, 0x1,},
[3] = {},
[4] = {0x28, 0x19, 0xb, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x1,},
[5] = {0x1, 0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x16,},
[6] = {},
[7] = {0x1, 0x3, 0x2, 0x1, 0x1, 0x5, 0x2, 0x12, 0x13, 0x4, 0x4, 0x1,
0x4, 0x2, 0x1,},
[8] = {},
[9] = {0xa, 0x7, 0xf, 0x8, 0x1, 0x8, 0x9, 0x3, 0x3, 0x6, 0x7, 0x4,
0xa, 0x3, 0x4, 0xe, 0x1, 0x7, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
0x1, 0x1, 0x1, 0x1, 0x1,},
[10] = {},
[11] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
0x1, 0x1, 0x1, 0xe, 0x1, 0x7, 0x8, 0x7, 0x7, 0x1, 0x6, 0x2,
0xf, 0x8, 0x1, 0x1, 0x1,},
[12] = {},
[13] = {0x2, 0xc, 0xc, 0xe, 0x6, 0x6, 0x6, 0x6, 0x6, 0x12, 0x6, 0x28,
0x2, 0xc, 0xc, 0x28, 0x12, 0x6,},
[14] = {},
[15] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
0x4, 0x2, 0x8, 0x4, 0x4,},
[16] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
0x4, 0x2, 0x8, 0x4, 0x4,},
[17] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
0x4, 0x2, 0x8, 0x4, 0x4,},
[18] = {0x2, 0x2, 0x4, 0x2,},
[19] = {0x9, 0x9, 0x5, 0x5, 0x1, 0x1,},
};
static const u8 mtk_smi_larb_mt8195_ostd[][SMI_LARB_PORT_NR_MAX] = {
[0] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb0 */
[1] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb1 */
@@ -427,6 +459,7 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8188 = {
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
.ostd = mtk_smi_larb_mt8192_ostd,
};
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8195 = {

@@ -1191,10 +1191,8 @@ static int tegra_emc_probe(struct platform_device *pdev)
int irq, err;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "please update your device tree\n");
if (irq < 0)
return irq;
}
emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
if (!emc)

@@ -1873,7 +1873,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
return 0;
ice = of_qcom_ice_get(dev);
ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;

@@ -221,7 +221,7 @@ static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
return APPLE_ANS_MAX_QUEUE_DEPTH;
}
static void apple_nvme_rtkit_crashed(void *cookie)
static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
{
struct apple_nvme *anv = cookie;

@@ -96,6 +96,13 @@ config RESET_HSDK
help
This enables the reset controller driver for HSDK board.
config RESET_IMX_SCU
tristate "i.MX8Q Reset Driver"
depends on IMX_SCU && HAVE_ARM_SMCCC
depends on (ARM64 && ARCH_MXC) || COMPILE_TEST
help
This enables the reset controller driver for i.MX8QM/i.MX8QXP
config RESET_IMX7
tristate "i.MX7/8 Reset Driver"
depends on HAS_IOMEM

@@ -15,6 +15,7 @@ obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
obj-$(CONFIG_RESET_EYEQ) += reset-eyeq.o
obj-$(CONFIG_RESET_GPIO) += reset-gpio.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX_SCU) += reset-imx-scu.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_IMX8MP_AUDIOMIX) += reset-imx8mp-audiomix.o
obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o

@@ -0,0 +1,101 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2025 NXP
* Frank Li <Frank.Li@nxp.com>
*/
#include <linux/firmware/imx/svc/misc.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <dt-bindings/firmware/imx/rsrc.h>
struct imx_scu_reset {
struct reset_controller_dev rc;
struct imx_sc_ipc *ipc_handle;
};
static struct imx_scu_reset *to_imx_scu(struct reset_controller_dev *rc)
{
return container_of(rc, struct imx_scu_reset, rc);
}
struct imx_scu_id_map {
u32 resource_id;
u32 command_id;
};
static const struct imx_scu_id_map imx_scu_id_map[] = {
{ IMX_SC_R_CSI_0, IMX_SC_C_MIPI_RESET },
{ IMX_SC_R_CSI_1, IMX_SC_C_MIPI_RESET },
};
static int imx_scu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
{
struct imx_scu_reset *priv = to_imx_scu(rc);
return imx_sc_misc_set_control(priv->ipc_handle, imx_scu_id_map[id].resource_id,
imx_scu_id_map[id].command_id, true);
}
static const struct reset_control_ops imx_scu_reset_ops = {
.assert = imx_scu_reset_assert,
};
static int imx_scu_xlate(struct reset_controller_dev *rc, const struct of_phandle_args *reset_spec)
{
int i;
for (i = 0; i < rc->nr_resets; i++)
if (reset_spec->args[0] == imx_scu_id_map[i].resource_id)
return i;
return -EINVAL;
}
static int imx_scu_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct imx_scu_reset *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
platform_set_drvdata(pdev, &priv->rc);
ret = imx_scu_get_handle(&priv->ipc_handle);
if (ret)
return dev_err_probe(dev, ret, "sc_misc_MIPI get ipc handle failed!\n");
priv->rc.ops = &imx_scu_reset_ops;
priv->rc.owner = THIS_MODULE;
priv->rc.of_node = dev->of_node;
priv->rc.of_reset_n_cells = 1;
priv->rc.of_xlate = imx_scu_xlate;
priv->rc.nr_resets = ARRAY_SIZE(imx_scu_id_map);
return devm_reset_controller_register(dev, &priv->rc);
}
static const struct of_device_id imx_scu_reset_ids[] = {
{ .compatible = "fsl,imx-scu-reset", },
{}
};
MODULE_DEVICE_TABLE(of, imx_scu_reset_ids);
static struct platform_driver imx_scu_reset_driver = {
.probe = imx_scu_reset_probe,
.driver = {
.name = "scu-reset",
.of_match_table = imx_scu_reset_ids,
},
};
module_platform_driver(imx_scu_reset_driver);
MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>");
MODULE_DESCRIPTION("i.MX scu reset driver");
MODULE_LICENSE("GPL");

@@ -44,6 +44,7 @@ struct apple_rtkit {
struct apple_rtkit_shmem ioreport_buffer;
struct apple_rtkit_shmem crashlog_buffer;
struct apple_rtkit_shmem oslog_buffer;
struct apple_rtkit_shmem syslog_buffer;
char *syslog_msg_buffer;

@@ -12,6 +12,7 @@ enum {
APPLE_RTKIT_PWR_STATE_IDLE = 0x201, /* sleeping, retain state */
APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
APPLE_RTKIT_PWR_STATE_INIT = 0x220, /* init after starting the coproc */
};
enum {
@@ -66,8 +67,9 @@ enum {
#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24)
#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
#define APPLE_RTKIT_OSLOG_INIT 1
#define APPLE_RTKIT_OSLOG_ACK 3
#define APPLE_RTKIT_OSLOG_BUFFER_REQUEST 1
#define APPLE_RTKIT_OSLOG_SIZE GENMASK_ULL(55, 36)
#define APPLE_RTKIT_OSLOG_IOVA GENMASK_ULL(35, 0)
#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
@@ -97,12 +99,19 @@ bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
}
EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
static int apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
u64 msg)
{
int ret;
msg &= ~APPLE_RTKIT_MGMT_TYPE;
msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
ret = apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
if (ret)
dev_err(rtk->dev, "RTKit: Failed to send management message: %d\n", ret);
return ret;
}
static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
@@ -251,15 +260,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
struct apple_rtkit_shmem *buffer,
u8 ep, u64 msg)
{
size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
u64 reply;
int err;
/* The different size vs. IOVA shifts look odd but are indeed correct this way */
if (ep == APPLE_RTKIT_EP_OSLOG) {
buffer->size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg);
buffer->iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12;
} else {
buffer->size = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg) << 12;
buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
}
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->is_mapped = false;
buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
buffer->size = n_4kpages << 12;
dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
buffer->size, &buffer->iova);
@@ -284,17 +299,30 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
}
if (!buffer->is_mapped) {
reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
APPLE_RTKIT_BUFFER_REQUEST);
reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
buffer->iova);
/* oslog uses different fields and needs a shifted IOVA instead of size */
if (ep == APPLE_RTKIT_EP_OSLOG) {
reply = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE,
APPLE_RTKIT_OSLOG_BUFFER_REQUEST);
reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, buffer->size);
reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA,
buffer->iova >> 12);
} else {
reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
APPLE_RTKIT_BUFFER_REQUEST);
reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE,
buffer->size >> 12);
reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
buffer->iova);
}
apple_rtkit_send_message(rtk, ep, reply, NULL, false);
}
return 0;
error:
dev_err(rtk->dev, "RTKit: failed buffer request for 0x%zx bytes (%d)\n",
buffer->size, err);
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->iova = 0;
@@ -360,7 +388,6 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0,
rtk->crashlog_buffer.size);
apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size);
kfree(bfr);
} else {
dev_err(rtk->dev,
"RTKit: Couldn't allocate crashlog shadow buffer\n");
@@ -368,7 +395,9 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
rtk->crashed = true;
if (rtk->ops->crashed)
rtk->ops->crashed(rtk->cookie);
rtk->ops->crashed(rtk->cookie, bfr, rtk->crashlog_buffer.size);
kfree(bfr);
}
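With this change the crashed() callback also receives the crash log contents; a client implementation might now look roughly like the sketch below (the cookie type and the reporting are illustrative assumptions):
/* Hypothetical rtkit client callback matching the new crashed() prototype. */
static void example_rtkit_crashed(void *cookie, const void *crashlog,
				  size_t crashlog_size)
{
	struct device *dev = cookie;	/* whatever the client registered */

	dev_err(dev, "coprocessor crashed, %zu bytes of crash log captured\n",
		crashlog_size);
	/* crashlog may be NULL and is freed by rtkit after this returns. */
}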
static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg)
@@ -448,7 +477,7 @@ static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg)
log_context[sizeof(log_context) - 1] = 0;
msglen = rtk->syslog_msg_size - 1;
msglen = strnlen(rtk->syslog_msg_buffer, rtk->syslog_msg_size - 1);
while (msglen > 0 &&
should_crop_syslog_char(rtk->syslog_msg_buffer[msglen - 1]))
msglen--;
@@ -482,25 +511,18 @@ static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
}
}
static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
{
u64 ack;
dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
}
static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
{
u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
switch (type) {
case APPLE_RTKIT_OSLOG_INIT:
apple_rtkit_oslog_rx_init(rtk, msg);
case APPLE_RTKIT_OSLOG_BUFFER_REQUEST:
apple_rtkit_common_rx_get_buffer(rtk, &rtk->oslog_buffer,
APPLE_RTKIT_EP_OSLOG, msg);
break;
default:
dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n",
msg);
}
}
@@ -588,11 +610,18 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
.msg1 = ep,
};
if (rtk->crashed)
if (rtk->crashed) {
dev_warn(rtk->dev,
"RTKit: Device is crashed, cannot send message\n");
return -EINVAL;
}
if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
!apple_rtkit_is_running(rtk))
!apple_rtkit_is_running(rtk)) {
dev_warn(rtk->dev,
"RTKit: Endpoint 0x%02x is not running, cannot send message\n", ep);
return -EINVAL;
}
/*
* The message will be sent with a MMIO write. We need the barrier
@@ -667,7 +696,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
rtk->mbox->rx = apple_rtkit_rx;
rtk->mbox->cookie = rtk;
rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_HIGHPRI | WQ_MEM_RECLAIM,
dev_name(rtk->dev));
if (!rtk->wq) {
ret = -ENOMEM;
@@ -710,6 +739,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
@@ -742,8 +772,10 @@ static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->ap_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
msg);
ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
msg);
if (ret)
return ret;
ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
if (ret)
@@ -763,8 +795,10 @@ static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->iop_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
msg);
ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
msg);
if (ret)
return ret;
ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
if (ret)
@@ -865,6 +899,7 @@ EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
int apple_rtkit_wake(struct apple_rtkit *rtk)
{
u64 msg;
int ret;
if (apple_rtkit_is_running(rtk))
return -EINVAL;
@@ -875,9 +910,11 @@ int apple_rtkit_wake(struct apple_rtkit *rtk)
* Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot
* will wait for the completion anyway.
*/
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
msg);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_INIT);
ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
msg);
if (ret)
return ret;
return apple_rtkit_boot(rtk);
}
@@ -890,6 +927,7 @@ void apple_rtkit_free(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);

@@ -14,22 +14,21 @@
#define MT8167_DSI0_SEL_IN_RDMA0 0x1
static const struct mtk_mmsys_routes mt8167_mmsys_routing_table[] = {
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_RDMA0,
MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0
}, {
DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0
}, {
DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0
}, {
DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0
},
MMSYS_ROUTE(OVL0, COLOR0,
MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
OVL0_MOUT_EN_COLOR0),
MMSYS_ROUTE(DITHER0, RDMA0,
MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0,
MT8167_DITHER_MOUT_EN_RDMA0),
MMSYS_ROUTE(OVL0, COLOR0,
MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0,
COLOR0_SEL_IN_OVL0),
MMSYS_ROUTE(RDMA0, DSI0,
MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0,
MT8167_DSI0_SEL_IN_RDMA0),
MMSYS_ROUTE(RDMA0, DSI0,
MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0,
MT8167_RDMA0_SOUT_DSI0),
};
#endif /* __SOC_MEDIATEK_MT8167_MMSYS_H */
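The MMSYS_ROUTE() helper used above is defined outside these hunks; judging from the open-coded entries it replaces, it plausibly expands to a positional initializer along these lines (a sketch, not taken from this series):
/* Plausible shape of the helper: component ids are pasted from short names. */
#define MMSYS_ROUTE(from, to, reg, mask, value)				\
	{ DDP_COMPONENT_##from, DDP_COMPONENT_##to, (reg), (mask), (value) }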

@@ -33,63 +33,48 @@
#define MT8173_RDMA0_SOUT_COLOR0 BIT(0)
static const struct mtk_mmsys_routes mt8173_mmsys_routing_table[] = {
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
MT8173_OVL0_MOUT_EN_COLOR0, MT8173_OVL0_MOUT_EN_COLOR0
}, {
DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0,
MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN,
MT8173_OD0_MOUT_EN_RDMA0, MT8173_OD0_MOUT_EN_RDMA0
}, {
DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN,
MT8173_UFOE_MOUT_EN_DSI0, MT8173_UFOE_MOUT_EN_DSI0
}, {
DDP_COMPONENT_COLOR0, DDP_COMPONENT_AAL0,
MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN,
MT8173_COLOR0_SOUT_MERGE, 0 /* SOUT to AAL */
}, {
DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN,
MT8173_RDMA0_SOUT_COLOR0, 0 /* SOUT to UFOE */
}, {
DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
MT8173_COLOR0_SEL_IN_OVL0, MT8173_COLOR0_SEL_IN_OVL0
}, {
DDP_COMPONENT_AAL0, DDP_COMPONENT_COLOR0,
MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN,
MT8173_AAL_SEL_IN_MERGE, 0 /* SEL_IN from COLOR0 */
}, {
DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN,
MT8173_UFOE_SEL_IN_RDMA0, 0 /* SEL_IN from RDMA0 */
}, {
DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
MT8173_DISP_REG_CONFIG_DSI0_SEL_IN,
MT8173_DSI0_SEL_IN_UFOE, 0, /* SEL_IN from UFOE */
}, {
DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN,
MT8173_OVL1_MOUT_EN_COLOR1, MT8173_OVL1_MOUT_EN_COLOR1
}, {
DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1,
MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN,
MT8173_GAMMA_MOUT_EN_RDMA1, MT8173_GAMMA_MOUT_EN_RDMA1
}, {
DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN,
RDMA1_SOUT_MASK, RDMA1_SOUT_DPI0
}, {
DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN,
COLOR1_SEL_IN_OVL1, COLOR1_SEL_IN_OVL1
}, {
DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
MT8173_DISP_REG_CONFIG_DPI_SEL_IN,
MT8173_DPI0_SEL_IN_MASK, MT8173_DPI0_SEL_IN_RDMA1
}
MMSYS_ROUTE(OVL0, COLOR0,
MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, MT8173_OVL0_MOUT_EN_COLOR0,
MT8173_OVL0_MOUT_EN_COLOR0),
MMSYS_ROUTE(OD0, RDMA0,
MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN, MT8173_OD0_MOUT_EN_RDMA0,
MT8173_OD0_MOUT_EN_RDMA0),
MMSYS_ROUTE(UFOE, DSI0,
MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN, MT8173_UFOE_MOUT_EN_DSI0,
MT8173_UFOE_MOUT_EN_DSI0),
MMSYS_ROUTE(COLOR0, AAL0,
MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN, MT8173_COLOR0_SOUT_MERGE,
0 /* SOUT to AAL */),
MMSYS_ROUTE(RDMA0, UFOE,
MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8173_RDMA0_SOUT_COLOR0,
0 /* SOUT to UFOE */),
MMSYS_ROUTE(OVL0, COLOR0,
MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, MT8173_COLOR0_SEL_IN_OVL0,
MT8173_COLOR0_SEL_IN_OVL0),
MMSYS_ROUTE(AAL0, COLOR0,
MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN, MT8173_AAL_SEL_IN_MERGE,
0 /* SEL_IN from COLOR0 */),
MMSYS_ROUTE(RDMA0, UFOE,
MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN, MT8173_UFOE_SEL_IN_RDMA0,
0 /* SEL_IN from RDMA0 */),
MMSYS_ROUTE(UFOE, DSI0,
MT8173_DISP_REG_CONFIG_DSI0_SEL_IN, MT8173_DSI0_SEL_IN_UFOE,
0 /* SEL_IN from UFOE */),
MMSYS_ROUTE(OVL1, COLOR1,
MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, MT8173_OVL1_MOUT_EN_COLOR1,
MT8173_OVL1_MOUT_EN_COLOR1),
MMSYS_ROUTE(GAMMA, RDMA1,
MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, MT8173_GAMMA_MOUT_EN_RDMA1,
MT8173_GAMMA_MOUT_EN_RDMA1),
MMSYS_ROUTE(RDMA1, DPI0,
MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
RDMA1_SOUT_DPI0),
MMSYS_ROUTE(OVL1, COLOR1,
MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1,
COLOR1_SEL_IN_OVL1),
MMSYS_ROUTE(RDMA1, DPI0,
MT8173_DISP_REG_CONFIG_DPI_SEL_IN, MT8173_DPI0_SEL_IN_MASK,
MT8173_DPI0_SEL_IN_RDMA1),
};
#endif /* __SOC_MEDIATEK_MT8173_MMSYS_H */

@@ -28,35 +28,27 @@
#define MT8183_MMSYS_SW0_RST_B 0x140
static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
MT8183_OVL0_MOUT_EN_OVL0_2L
}, {
DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
}, {
DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_RDMA1,
MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
MT8183_OVL1_2L_MOUT_EN_RDMA1
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
MT8183_DITHER0_MOUT_IN_DSI0
}, {
DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
MT8183_DISP_PATH0_SEL_IN_OVL0_2L
}, {
DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
MT8183_DPI0_SEL_IN_RDMA1
}, {
DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
MT8183_RDMA0_SOUT_COLOR0
}
MMSYS_ROUTE(OVL0, OVL_2L0,
MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
MT8183_OVL0_MOUT_EN_OVL0_2L),
MMSYS_ROUTE(OVL_2L0, RDMA0,
MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
MT8183_OVL0_2L_MOUT_EN_DISP_PATH0),
MMSYS_ROUTE(OVL_2L1, RDMA1,
MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
MT8183_OVL1_2L_MOUT_EN_RDMA1),
MMSYS_ROUTE(DITHER0, DSI0,
MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
MT8183_DITHER0_MOUT_IN_DSI0),
MMSYS_ROUTE(OVL_2L0, RDMA0,
MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
MT8183_DISP_PATH0_SEL_IN_OVL0_2L),
MMSYS_ROUTE(RDMA1, DPI0,
MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
MT8183_DPI0_SEL_IN_RDMA1),
MMSYS_ROUTE(RDMA0, COLOR0,
MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
MT8183_RDMA0_SOUT_COLOR0),
};
#endif /* __SOC_MEDIATEK_MT8183_MMSYS_H */

@@ -63,61 +63,39 @@
#define MT8186_MMSYS_SW0_RST_B 0x160
static const struct mtk_mmsys_routes mmsys_mt8186_routing_table[] = {
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
MT8186_OVL0_MOUT_TO_RDMA0
},
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
MT8186_RDMA0_FROM_OVL0
},
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
MT8186_OVL0_GO_BLEND
},
{
DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
MT8186_RDMA0_SOUT_TO_COLOR0
},
{
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
MT8186_DITHER0_MOUT_TO_DSI0,
},
{
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
MT8186_DSI0_FROM_DITHER0
},
{
DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
MT8186_OVL0_2L_MOUT_TO_RDMA1
},
{
DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
MT8186_RDMA1_FROM_OVL0_2L
},
{
DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
MT8186_OVL0_2L_GO_BLEND
},
{
DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
MT8186_RDMA1_MOUT_TO_DPI0_SEL
},
{
DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
MT8186_DPI0_FROM_RDMA1
},
MMSYS_ROUTE(OVL0, RDMA0,
MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
MT8186_OVL0_MOUT_TO_RDMA0),
MMSYS_ROUTE(OVL0, RDMA0,
MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
MT8186_RDMA0_FROM_OVL0),
MMSYS_ROUTE(OVL0, RDMA0,
MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
MT8186_OVL0_GO_BLEND),
MMSYS_ROUTE(RDMA0, COLOR0,
MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
MT8186_RDMA0_SOUT_TO_COLOR0),
MMSYS_ROUTE(DITHER0, DSI0,
MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
MT8186_DITHER0_MOUT_TO_DSI0),
MMSYS_ROUTE(DITHER0, DSI0,
MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
MT8186_DSI0_FROM_DITHER0),
MMSYS_ROUTE(OVL_2L0, RDMA1,
MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
MT8186_OVL0_2L_MOUT_TO_RDMA1),
MMSYS_ROUTE(OVL_2L0, RDMA1,
MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
MT8186_RDMA1_FROM_OVL0_2L),
MMSYS_ROUTE(OVL_2L0, RDMA1,
MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
MT8186_OVL0_2L_GO_BLEND),
MMSYS_ROUTE(RDMA1, DPI0,
MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
MT8186_RDMA1_MOUT_TO_DPI0_SEL),
MMSYS_ROUTE(RDMA1, DPI0,
MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
MT8186_DPI0_FROM_RDMA1),
};
#endif /* __SOC_MEDIATEK_MT8186_MMSYS_H */

@@ -202,158 +202,126 @@ static const u8 mmsys_mt8188_vdo1_rst_tb[] = {
};
static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = {
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0
}, {
DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0
}, {
DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_MERGE0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
MT8188_VDO0_DSC_WARP_SEL,
MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
}, {
DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
MT8188_VDO0_DISP_RDMA_SEL, MT8188_SOUT_DISP_RDMA0_TO_MASK,
MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8188_VDO0_DISP_DITHER0_SEL_OUT,
MT8188_SOUT_DISP_DITHER0_TO_MASK,
MT8188_SOUT_DISP_DITHER0_TO_DSI0
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
MT8188_VDO0_DISP_DITHER0_SEL_OUT,
MT8188_SOUT_DISP_DITHER0_TO_MASK,
MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
MT8188_SOUT_VPP_MERGE_TO_DP_INTF0
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
},
MMSYS_ROUTE(OVL0, RDMA0,
MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0),
MMSYS_ROUTE(OVL0, WDMA0,
MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0),
MMSYS_ROUTE(OVL0, RDMA0,
MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0),
MMSYS_ROUTE(DITHER0, DSI0,
MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0),
MMSYS_ROUTE(DITHER0, MERGE0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0),
MMSYS_ROUTE(DITHER0, DSC0,
MT8188_VDO0_DSC_WARP_SEL, MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0),
MMSYS_ROUTE(DITHER0, DP_INTF0,
MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0),
MMSYS_ROUTE(DSC0, MERGE0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT),
MMSYS_ROUTE(MERGE0, DP_INTF0,
MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
MT8188_SEL_IN_DP_INTF0_FROM_VPP_MERGE),
MMSYS_ROUTE(DSC0, DSI0,
MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT),
MMSYS_ROUTE(RDMA0, COLOR0,
MT8188_VDO0_DISP_RDMA_SEL, GENMASK(1, 0),
MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0),
MMSYS_ROUTE(DITHER0, DSC0,
MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
MT8188_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN),
MMSYS_ROUTE(DITHER0, DSI0,
MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
MT8188_SOUT_DISP_DITHER0_TO_DSI0),
MMSYS_ROUTE(DITHER0, MERGE0,
MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
MT8188_SOUT_DISP_DITHER0_TO_VPP_MERGE0),
MMSYS_ROUTE(DITHER0, DP_INTF0,
MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0),
MMSYS_ROUTE(MERGE0, DP_INTF0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
MT8188_SOUT_VPP_MERGE_TO_DP_INTF0),
MMSYS_ROUTE(MERGE0, DPI0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
MMSYS_ROUTE(MERGE0, WDMA0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0),
MMSYS_ROUTE(MERGE0, DSC0,
MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN),
MMSYS_ROUTE(DSC0, DSI0,
MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0),
MMSYS_ROUTE(DSC0, MERGE0,
MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE),
};
static const struct mtk_mmsys_routes mmsys_mt8188_vdo1_routing_table[] = {
{
DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
MT8188_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
}, {
DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
MT8188_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
}, {
DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
MT8188_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
}, {
DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8188_SOUT_TO_MIXER_IN1_SEL
}, {
DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8188_SOUT_TO_MIXER_IN2_SEL
}, {
DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8188_SOUT_TO_MIXER_IN3_SEL
}, {
DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8188_SOUT_TO_MIXER_IN4_SEL
}, {
DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
MT8188_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
}, {
DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
MT8188_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
}, {
DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
MT8188_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
}, {
DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
MT8188_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
}, {
DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
MT8188_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
}, {
DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
MT8188_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
}, {
DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
MT8188_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
MT8188_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
MT8188_MERGE4_SOUT_TO_DPI1_SEL
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
MT8188_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL
}
MMSYS_ROUTE(MDP_RDMA0, MERGE1,
MT8188_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0),
MMSYS_ROUTE(MDP_RDMA1, MERGE1,
MT8188_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1),
MMSYS_ROUTE(MDP_RDMA2, MERGE2,
MT8188_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2),
MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8188_SOUT_TO_MIXER_IN1_SEL),
MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8188_SOUT_TO_MIXER_IN2_SEL),
MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8188_SOUT_TO_MIXER_IN3_SEL),
MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8188_SOUT_TO_MIXER_IN4_SEL),
MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
MT8188_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL),
MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
MT8188_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT),
MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
MT8188_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT),
MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
MT8188_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT),
MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
MT8188_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT),
MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
MT8188_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER),
MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
MT8188_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT),
MMSYS_ROUTE(MERGE5, DPI1,
MT8188_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT),
MMSYS_ROUTE(MERGE5, DPI1,
MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
MT8188_MERGE4_SOUT_TO_DPI1_SEL),
MMSYS_ROUTE(MERGE5, DP_INTF1,
MT8188_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT),
MMSYS_ROUTE(MERGE5, DP_INTF1,
MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL),
};
#endif /* __SOC_MEDIATEK_MT8188_MMSYS_H */

@@ -31,47 +31,36 @@
#define MT8192_DSI0_SEL_IN_DITHER0 0x1
static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
{
DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
MT8192_OVL0_MOUT_EN_DISP_RDMA0
}, {
DDP_COMPONENT_OVL_2L2, DDP_COMPONENT_RDMA4,
MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
MT8192_OVL2_2L_MOUT_EN_RDMA4
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
MT8192_DITHER0_MOUT_IN_DSI0
}, {
DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
MT8192_RDMA0_SEL_IN_OVL0_2L
}, {
DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
MT8192_AAL0_SEL_IN_CCORR0
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
MT8192_DSI0_SEL_IN_DITHER0
}, {
DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
MT8192_RDMA0_SOUT_COLOR0
}, {
DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
MT8192_CCORR0_SOUT_AAL0
}, {
DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
MT8192_DISP_OVL0_GO_BG
}, {
DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
MT8192_DISP_OVL0_2L_GO_BLEND
}
MMSYS_ROUTE(OVL_2L0, RDMA0,
MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
MT8192_OVL0_MOUT_EN_DISP_RDMA0),
MMSYS_ROUTE(OVL_2L2, RDMA4,
MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
MT8192_OVL2_2L_MOUT_EN_RDMA4),
MMSYS_ROUTE(DITHER0, DSI0,
MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
MT8192_DITHER0_MOUT_IN_DSI0),
MMSYS_ROUTE(OVL_2L0, RDMA0,
MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
MT8192_RDMA0_SEL_IN_OVL0_2L),
MMSYS_ROUTE(CCORR, AAL0,
MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
MT8192_AAL0_SEL_IN_CCORR0),
MMSYS_ROUTE(DITHER0, DSI0,
MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
MT8192_DSI0_SEL_IN_DITHER0),
MMSYS_ROUTE(RDMA0, COLOR0,
MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
MT8192_RDMA0_SOUT_COLOR0),
MMSYS_ROUTE(CCORR, AAL0,
MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
MT8192_CCORR0_SOUT_AAL0),
MMSYS_ROUTE(OVL0, OVL_2L0,
MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
MT8192_DISP_OVL0_GO_BG),
MMSYS_ROUTE(OVL_2L0, RDMA0,
MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
MT8192_DISP_OVL0_2L_GO_BLEND),
};
#endif /* __SOC_MEDIATEK_MT8192_MMSYS_H */

@@ -160,370 +160,278 @@
#define MT8195_SVPP3_MDP_RSZ BIT(5)
static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = {
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0
}, {
DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0
}, {
DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL1,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1
}, {
DDP_COMPONENT_OVL1, DDP_COMPONENT_RDMA1,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1
}, {
DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1
}, {
DDP_COMPONENT_OVL1, DDP_COMPONENT_OVL0,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
MT8195_SEL_IN_DSI1_FROM_VPP_MERGE
}, {
DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
}, {
DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN
}, {
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
MT8195_SOUT_DISP_DITHER0_TO_DSI0
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
}, {
DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_DSI1
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_DP_INTF0
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
}, {
DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
}, {
DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
}, {
DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE
}
MMSYS_ROUTE(OVL0, RDMA0,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0),
MMSYS_ROUTE(OVL0, WDMA0,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0),
MMSYS_ROUTE(OVL0, OVL1,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1),
MMSYS_ROUTE(OVL1, RDMA1,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1),
MMSYS_ROUTE(OVL1, WDMA1,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1),
MMSYS_ROUTE(OVL1, OVL0,
MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0),
MMSYS_ROUTE(DSC0, MERGE0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT),
MMSYS_ROUTE(DITHER1, MERGE0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1),
MMSYS_ROUTE(MERGE5, MERGE0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0),
MMSYS_ROUTE(DITHER0, DSC0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0),
MMSYS_ROUTE(MERGE0, DSC0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE),
MMSYS_ROUTE(DITHER1, DSC1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1),
MMSYS_ROUTE(MERGE0, DSC1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE),
MMSYS_ROUTE(MERGE0, DP_INTF1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
MMSYS_ROUTE(MERGE0, DPI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
MMSYS_ROUTE(MERGE0, DPI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
MMSYS_ROUTE(DSC1, DP_INTF1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
MMSYS_ROUTE(DSC1, DPI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
MMSYS_ROUTE(DSC1, DPI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
MMSYS_ROUTE(DSC0, DP_INTF1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
MMSYS_ROUTE(DSC0, DPI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
MMSYS_ROUTE(DSC0, DPI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
MMSYS_ROUTE(DSC1, DP_INTF0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT),
MMSYS_ROUTE(MERGE0, DP_INTF0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE),
MMSYS_ROUTE(MERGE5, DP_INTF0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0),
MMSYS_ROUTE(DSC0, DSI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT),
MMSYS_ROUTE(DITHER0, DSI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0),
MMSYS_ROUTE(DSC1, DSI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT),
MMSYS_ROUTE(MERGE0, DSI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
MT8195_SEL_IN_DSI1_FROM_VPP_MERGE),
MMSYS_ROUTE(OVL1, WDMA1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1),
MMSYS_ROUTE(MERGE0, WDMA1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE),
MMSYS_ROUTE(DSC1, DSI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
MMSYS_ROUTE(DSC1, DP_INTF0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
MMSYS_ROUTE(DSC1, DP_INTF1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
MMSYS_ROUTE(DSC1, DPI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
MMSYS_ROUTE(DSC1, DPI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
MMSYS_ROUTE(DSC1, MERGE0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
MMSYS_ROUTE(DITHER1, DSI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
MMSYS_ROUTE(DITHER1, DP_INTF0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
MMSYS_ROUTE(DITHER1, DPI0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
MMSYS_ROUTE(DITHER1, DPI1,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
MMSYS_ROUTE(OVL0, WDMA0,
MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0),
MMSYS_ROUTE(DITHER0, DSC0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN),
MMSYS_ROUTE(DITHER0, DSI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
MT8195_SOUT_DISP_DITHER0_TO_DSI0),
MMSYS_ROUTE(DITHER1, DSC1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN),
MMSYS_ROUTE(DITHER1, MERGE0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE),
MMSYS_ROUTE(DITHER1, DSI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
MMSYS_ROUTE(DITHER1, DP_INTF0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
MMSYS_ROUTE(DITHER1, DP_INTF1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
MMSYS_ROUTE(DITHER1, DPI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
MMSYS_ROUTE(DITHER1, DPI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
MMSYS_ROUTE(MERGE5, MERGE0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE),
MMSYS_ROUTE(MERGE5, DP_INTF0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0),
MMSYS_ROUTE(MERGE0, DSI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_DSI1),
MMSYS_ROUTE(MERGE0, DP_INTF0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_DP_INTF0),
MMSYS_ROUTE(MERGE0, DP_INTF1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
MMSYS_ROUTE(MERGE0, DPI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
MMSYS_ROUTE(MERGE0, DPI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
MMSYS_ROUTE(MERGE0, WDMA1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1),
MMSYS_ROUTE(MERGE0, DSC0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN),
MMSYS_ROUTE(MERGE0, DSC1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN),
MMSYS_ROUTE(DSC0, DSI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0),
MMSYS_ROUTE(DSC0, DP_INTF1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
MMSYS_ROUTE(DSC0, DPI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
MMSYS_ROUTE(DSC0, DPI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
MMSYS_ROUTE(DSC0, MERGE0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE),
MMSYS_ROUTE(DSC1, DSI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1),
MMSYS_ROUTE(DSC1, DP_INTF0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0),
MMSYS_ROUTE(DSC1, DP_INTF1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
MMSYS_ROUTE(DSC1, DPI0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
MMSYS_ROUTE(DSC1, DPI1,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
MMSYS_ROUTE(DSC1, MERGE0,
MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE),
};
static const struct mtk_mmsys_routes mmsys_mt8195_vdo1_routing_table[] = {
{
DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
}, {
DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
}, {
DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
}, {
DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8195_SOUT_TO_MIXER_IN1_SEL
}, {
DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8195_SOUT_TO_MIXER_IN2_SEL
}, {
DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8195_SOUT_TO_MIXER_IN3_SEL
}, {
DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8195_SOUT_TO_MIXER_IN4_SEL
}, {
DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
}, {
DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
}, {
DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
}, {
DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
}, {
DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
}, {
DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
}, {
DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
MT8195_MERGE4_SOUT_TO_DPI1_SEL
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
}, {
DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL
}
MMSYS_ROUTE(MDP_RDMA0, MERGE1,
MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0),
MMSYS_ROUTE(MDP_RDMA1, MERGE1,
MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1),
MMSYS_ROUTE(MDP_RDMA2, MERGE2,
MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2),
MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8195_SOUT_TO_MIXER_IN1_SEL),
MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8195_SOUT_TO_MIXER_IN2_SEL),
MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8195_SOUT_TO_MIXER_IN3_SEL),
MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
MT8195_SOUT_TO_MIXER_IN4_SEL),
MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL),
MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT),
MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT),
MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT),
MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT),
MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER),
MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT),
MMSYS_ROUTE(MERGE5, DPI1,
MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT),
MMSYS_ROUTE(MERGE5, DPI1,
MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
MT8195_MERGE4_SOUT_TO_DPI1_SEL),
MMSYS_ROUTE(MERGE5, DP_INTF1,
MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT),
MMSYS_ROUTE(MERGE5, DP_INTF1,
MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL),
};
#endif /* __SOC_MEDIATEK_MT8195_MMSYS_H */

View File

@ -14,8 +14,9 @@
#define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8
#define MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc
#define MT8365_DISP_MS_IN_OUT_MASK GENMASK(3, 0)
#define MT8365_RDMA0_SOUT_COLOR0 0x1
#define MT8365_DITHER_MOUT_EN_DSI0 0x1
#define MT8365_DITHER_MOUT_EN_DSI0 BIT(0)
#define MT8365_DSI0_SEL_IN_DITHER 0x1
#define MT8365_RDMA0_SEL_IN_OVL0 0x0
#define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0
@ -27,56 +28,37 @@
#define MT8365_DPI0_SEL_IN_RDMA1 0x0
static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
MT8365_OVL0_MOUT_PATH0_SEL, MT8365_OVL0_MOUT_PATH0_SEL
},
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
MT8365_RDMA0_SEL_IN_OVL0, MT8365_RDMA0_SEL_IN_OVL0
},
{
DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
MT8365_RDMA0_SOUT_COLOR0, MT8365_RDMA0_SOUT_COLOR0
},
{
DDP_COMPONENT_COLOR0, DDP_COMPONENT_CCORR,
MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
MT8365_DISP_COLOR_SEL_IN_COLOR0,MT8365_DISP_COLOR_SEL_IN_COLOR0
},
{
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
MT8365_DITHER_MOUT_EN_DSI0, MT8365_DITHER_MOUT_EN_DSI0
},
{
DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
MT8365_DSI0_SEL_IN_DITHER, MT8365_DSI0_SEL_IN_DITHER
},
{
DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0
},
{
DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK
},
{
DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1
},
{
DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0
},
MMSYS_ROUTE(OVL0, RDMA0,
MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
MT8365_DISP_MS_IN_OUT_MASK, MT8365_OVL0_MOUT_PATH0_SEL),
MMSYS_ROUTE(OVL0, RDMA0,
MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SEL_IN_OVL0),
MMSYS_ROUTE(RDMA0, COLOR0,
MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SOUT_COLOR0),
MMSYS_ROUTE(COLOR0, CCORR,
MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
MT8365_DISP_MS_IN_OUT_MASK, MT8365_DISP_COLOR_SEL_IN_COLOR0),
MMSYS_ROUTE(DITHER0, DSI0,
MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
MT8365_DISP_MS_IN_OUT_MASK, MT8365_DITHER_MOUT_EN_DSI0),
MMSYS_ROUTE(DITHER0, DSI0,
MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
MT8365_DISP_MS_IN_OUT_MASK, MT8365_DSI0_SEL_IN_DITHER),
MMSYS_ROUTE(RDMA0, COLOR0,
MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0),
MMSYS_ROUTE(RDMA1, DPI0,
MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK,
MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK),
MMSYS_ROUTE(RDMA1, DPI0,
MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
MT8365_DISP_MS_IN_OUT_MASK, MT8365_DPI0_SEL_IN_RDMA1),
MMSYS_ROUTE(RDMA1, DPI0,
MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA1_SOUT_DPI0),
};
#endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */

View File

@ -80,6 +80,20 @@
#define MMSYS_RST_NR(bank, bit) (((bank) * 32) + (bit))
/*
* This macro adds a compile time check to make sure that the in/out
* selection bit(s) fit in the register mask, similar to bitfield
* macros, but this does not transform the value.
*/
#define MMSYS_ROUTE(from, to, reg_addr, reg_mask, selection) \
{ DDP_COMPONENT_##from, DDP_COMPONENT_##to, reg_addr, reg_mask, \
(__BUILD_BUG_ON_ZERO_MSG((reg_mask) == 0, "Invalid mask") + \
__BUILD_BUG_ON_ZERO_MSG(~(reg_mask) & (selection), \
#selection " does not fit in " \
#reg_mask) + \
(selection)) \
}
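/*
 * Editorial note, not part of this patch: every table entry built with
 * MMSYS_ROUTE(), for example
 *
 *	MMSYS_ROUTE(OVL0, RDMA0, MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
 *		    MT8365_DISP_MS_IN_OUT_MASK, MT8365_OVL0_MOUT_PATH0_SEL),
 *
 * expands to the usual { from, to, addr, mask, sel } initializer plus two
 * zero-valued __BUILD_BUG_ON_ZERO_MSG() terms, so an empty mask or a
 * selection with bits outside the mask (say BIT(4) against GENMASK(3, 0))
 * is rejected at compile time rather than going unnoticed.
 */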
struct mtk_mmsys_routes {
u32 from_comp;
u32 to_comp;

View File

@ -155,6 +155,7 @@
#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE3 23
#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE4 24
#define MT8188_MUTEX_MOD_DISP1_DISP_MIXER 30
#define MT8188_MUTEX_MOD_DISP1_DPI1 38
#define MT8188_MUTEX_MOD_DISP1_DP_INTF1 39
#define MT8195_MUTEX_MOD_DISP_OVL0 0
@ -289,6 +290,7 @@
#define MT8188_MUTEX_SOF_DSI0 1
#define MT8188_MUTEX_SOF_DP_INTF0 3
#define MT8188_MUTEX_SOF_DP_INTF1 4
#define MT8188_MUTEX_SOF_DPI1 5
#define MT8195_MUTEX_SOF_DSI0 1
#define MT8195_MUTEX_SOF_DSI1 2
#define MT8195_MUTEX_SOF_DP_INTF0 3
@ -301,6 +303,7 @@
#define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF1 (MT8188_MUTEX_SOF_DP_INTF1 << 7)
#define MT8188_MUTEX_EOF_DPI1 (MT8188_MUTEX_SOF_DPI1 << 7)
#define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7)
#define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7)
#define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7)
@ -472,6 +475,7 @@ static const u8 mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0,
[DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0,
[DDP_COMPONENT_DP_INTF1] = MT8188_MUTEX_MOD_DISP1_DP_INTF1,
[DDP_COMPONENT_DPI1] = MT8188_MUTEX_MOD_DISP1_DPI1,
[DDP_COMPONENT_ETHDR_MIXER] = MT8188_MUTEX_MOD_DISP1_DISP_MIXER,
[DDP_COMPONENT_MDP_RDMA0] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA0,
[DDP_COMPONENT_MDP_RDMA1] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA1,
@ -686,6 +690,8 @@ static const u16 mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] =
MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0,
[MUTEX_SOF_DPI1] =
MT8188_MUTEX_SOF_DPI1 | MT8188_MUTEX_EOF_DPI1,
[MUTEX_SOF_DP_INTF0] =
MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0,
[MUTEX_SOF_DP_INTF1] =

View File

@ -56,29 +56,39 @@ static struct socinfo_data socinfo_data_table[] = {
MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EHZA", "Kompanio 1200", 0x81950304, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EZA", "Kompanio 1380", 0x81950400, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8370", "MT8370AV/AZA", "Genio 510", 0x83700000, 0x00000081),
MTK_SOCINFO_ENTRY("MT8390", "MT8390AV/AZA", "Genio 700", 0x83900000, 0x00000080),
MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950100, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950800, CELL_NOT_USED),
};
static int mtk_socinfo_create_socinfo_node(struct mtk_socinfo *mtk_socinfop)
{
struct soc_device_attribute *attrs;
static char machine[30] = {0};
struct socinfo_data *data = mtk_socinfop->socinfo_data;
static const char *soc_manufacturer = "MediaTek";
attrs = devm_kzalloc(mtk_socinfop->dev, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return -ENOMEM;
snprintf(machine, sizeof(machine), "%s (%s)", mtk_socinfop->socinfo_data->marketing_name,
mtk_socinfop->socinfo_data->soc_name);
attrs->family = soc_manufacturer;
attrs->machine = machine;
if (data->marketing_name != NULL && data->marketing_name[0] != '\0')
attrs->family = devm_kasprintf(mtk_socinfop->dev, GFP_KERNEL, "MediaTek %s",
data->marketing_name);
else
attrs->family = soc_manufacturer;
attrs->soc_id = data->soc_name;
/*
* The "machine" field will be populated automatically with the model
* name from board DTS (if available).
**/
mtk_socinfop->soc_dev = soc_device_register(attrs);
if (IS_ERR(mtk_socinfop->soc_dev))
return PTR_ERR(mtk_socinfop->soc_dev);
dev_info(mtk_socinfop->dev, "%s %s SoC detected.\n", soc_manufacturer, attrs->machine);
dev_info(mtk_socinfop->dev, "%s (%s) SoC detected.\n", attrs->family, attrs->soc_id);
return 0;
}

View File

@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@ -261,7 +262,7 @@ static struct qcom_ice *qcom_ice_create(struct device *dev,
* Return: ICE pointer on success, NULL if there is no ICE data provided by the
* consumer or ERR_PTR() on error.
*/
struct qcom_ice *of_qcom_ice_get(struct device *dev)
static struct qcom_ice *of_qcom_ice_get(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct qcom_ice *ice;
@ -322,7 +323,53 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
return ice;
}
EXPORT_SYMBOL_GPL(of_qcom_ice_get);
static void qcom_ice_put(const struct qcom_ice *ice)
{
struct platform_device *pdev = to_platform_device(ice->dev);
if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"))
platform_device_put(pdev);
}
static void devm_of_qcom_ice_put(struct device *dev, void *res)
{
qcom_ice_put(*(struct qcom_ice **)res);
}
/**
* devm_of_qcom_ice_get() - Devres managed helper to get an ICE instance from
* a DT node.
* @dev: device pointer for the consumer device.
*
* This function will provide an ICE instance either by creating one for the
* consumer device if its DT node provides the 'ice' reg range and the 'ice'
* clock (for legacy DT style). On the other hand, if consumer provides a
* phandle via 'qcom,ice' property to an ICE DT, the ICE instance will already
* be created and so this function will return that instead.
*
* Return: ICE pointer on success, NULL if there is no ICE data provided by the
* consumer or ERR_PTR() on error.
*/
struct qcom_ice *devm_of_qcom_ice_get(struct device *dev)
{
struct qcom_ice *ice, **dr;
dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL);
if (!dr)
return ERR_PTR(-ENOMEM);
ice = of_qcom_ice_get(dev);
if (!IS_ERR_OR_NULL(ice)) {
*dr = ice;
devres_add(dev, dr);
} else {
devres_free(dr);
}
return ice;
}
EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get);
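/*
 * Editorial sketch, not part of this patch: a consumer driver is expected to
 * grab the devres-managed handle in its probe path and treat
 * ERR_PTR(-EOPNOTSUPP) as "no inline crypto available" rather than a fatal
 * error, as the ufs-qcom hunk further below does. The function name is
 * illustrative only.
 */
static int example_ice_consumer_probe(struct device *dev)
{
	struct qcom_ice *ice;

	ice = devm_of_qcom_ice_get(dev);
	if (ice == ERR_PTR(-EOPNOTSUPP))
		ice = NULL;		/* run without inline encryption */
	if (IS_ERR(ice))
		return PTR_ERR(ice);	/* including -EPROBE_DEFER */

	/*
	 * NULL means the DT provides no ICE data; a valid pointer is released
	 * automatically when this consumer device is unbound.
	 */
	return 0;
}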
static int qcom_ice_probe(struct platform_device *pdev)
{

View File

@ -91,7 +91,6 @@ struct servreg_loc_pfr_resp {
struct qmi_response_type_v01 rsp;
};
extern const struct qmi_elem_info servreg_location_entry_ei[];
extern const struct qmi_elem_info servreg_get_domain_list_req_ei[];
extern const struct qmi_elem_info servreg_get_domain_list_resp_ei[];
extern const struct qmi_elem_info servreg_register_listener_req_ei[];

View File

@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/soc/qcom/qcom_aoss.h>
#define CREATE_TRACE_POINTS
@ -358,7 +359,7 @@ static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
qmp_cdev->name, cdev_state ? "on" : "off");
qmp_cdev->name, str_on_off(cdev_state));
if (!ret)
qmp_cdev->state = cdev_state;

View File

@ -429,6 +429,16 @@ static const struct qcom_pdm_domain_data *sc8280xp_domains[] = {
NULL,
};
/* Unlike SDM660, SDM630/636 lack CDSP */
static const struct qcom_pdm_domain_data *sdm630_domains[] = {
&adsp_audio_pd,
&adsp_root_pd,
&adsp_sensor_pd,
&mpss_root_pd,
&mpss_wlan_pd,
NULL,
};
static const struct qcom_pdm_domain_data *sdm660_domains[] = {
&adsp_audio_pd,
&adsp_root_pd,
@ -546,6 +556,8 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sc7280", .data = sc7280_domains, },
{ .compatible = "qcom,sc8180x", .data = sc8180x_domains, },
{ .compatible = "qcom,sc8280xp", .data = sc8280xp_domains, },
{ .compatible = "qcom,sdm630", .data = sdm630_domains, },
{ .compatible = "qcom,sdm636", .data = sdm630_domains, },
{ .compatible = "qcom,sda660", .data = sdm660_domains, },
{ .compatible = "qcom,sdm660", .data = sdm660_domains, },
{ .compatible = "qcom,sdm670", .data = sdm670_domains, },

View File

@ -8,7 +8,7 @@
#include "pdr_internal.h"
const struct qmi_elem_info servreg_location_entry_ei[] = {
static const struct qmi_elem_info servreg_location_entry_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@ -47,7 +47,6 @@ const struct qmi_elem_info servreg_location_entry_ei[] = {
},
{}
};
EXPORT_SYMBOL_GPL(servreg_location_entry_ei);
const struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
{

View File

@ -334,6 +334,7 @@ config ARCH_R9A07G054
config ARCH_R9A08G045
bool "ARM64 Platform support for RZ/G3S"
select ARCH_RZG2L
select SYSC_R9A08G045
help
This enables support for the Renesas RZ/G3S SoC variants.
@ -347,12 +348,14 @@ config ARCH_R9A09G011
config ARCH_R9A09G047
bool "ARM64 Platform support for RZ/G3E"
select SYS_R9A09G047
help
This enables support for the Renesas RZ/G3E SoC variants.
config ARCH_R9A09G057
bool "ARM64 Platform support for RZ/V2H(P)"
select RENESAS_RZV2H_ICU
select SYS_R9A09G057
help
This enables support for the Renesas RZ/V2H(P) SoC variants.
@ -383,4 +386,19 @@ config PWC_RZV2M
config RST_RCAR
bool "Reset Controller support for R-Car" if COMPILE_TEST
config SYSC_RZ
bool "System controller for RZ SoCs" if COMPILE_TEST
config SYSC_R9A08G045
bool "Renesas RZ/G3S System controller support" if COMPILE_TEST
select SYSC_RZ
config SYS_R9A09G047
bool "Renesas RZ/G3E System controller support" if COMPILE_TEST
select SYSC_RZ
config SYS_R9A09G057
bool "Renesas RZ/V2H System controller support" if COMPILE_TEST
select SYSC_RZ
endif # SOC_RENESAS

View File

@ -6,7 +6,11 @@ obj-$(CONFIG_SOC_RENESAS) += renesas-soc.o
ifdef CONFIG_SMP
obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
obj-$(CONFIG_SYSC_R9A08G045) += r9a08g045-sysc.o
obj-$(CONFIG_SYS_R9A09G047) += r9a09g047-sys.o
obj-$(CONFIG_SYS_R9A09G057) += r9a09g057-sys.o
# Family
obj-$(CONFIG_PWC_RZV2M) += pwc-rzv2m.o
obj-$(CONFIG_RST_RCAR) += rcar-rst.o
obj-$(CONFIG_SYSC_RZ) += rz-sysc.o

View File

@ -0,0 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
/*
* RZ/G3S System controller driver
*
* Copyright (C) 2024 Renesas Electronics Corp.
*/
#include <linux/bits.h>
#include <linux/init.h>
#include "rz-sysc.h"
static const struct rz_sysc_soc_id_init_data rzg3s_sysc_soc_id_init_data __initconst = {
.family = "RZ/G3S",
.id = 0x85e0447,
.devid_offset = 0xa04,
.revision_mask = GENMASK(31, 28),
.specific_id_mask = GENMASK(27, 0),
};
const struct rz_sysc_init_data rzg3s_sysc_init_data __initconst = {
.soc_id_init_data = &rzg3s_sysc_soc_id_init_data,
};

View File

@ -0,0 +1,67 @@
// SPDX-License-Identifier: GPL-2.0
/*
* RZ/G3E System controller (SYS) driver
*
* Copyright (C) 2025 Renesas Electronics Corp.
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include "rz-sysc.h"
/* Register Offsets */
#define SYS_LSI_MODE 0x300
/*
* BOOTPLLCA[1:0]
* [0,0] => 1.1GHZ
* [0,1] => 1.5GHZ
* [1,0] => 1.6GHZ
* [1,1] => 1.7GHZ
*/
#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
#define SYS_LSI_PRR 0x308
#define SYS_LSI_PRR_CA55_DIS BIT(8)
#define SYS_LSI_PRR_NPU_DIS BIT(1)
static void rzg3e_sys_print_id(struct device *dev,
void __iomem *sysc_base,
struct soc_device_attribute *soc_dev_attr)
{
bool is_quad_core, npu_enabled;
u32 prr_val, mode_val;
prr_val = readl(sysc_base + SYS_LSI_PRR);
mode_val = readl(sysc_base + SYS_LSI_MODE);
/* Check CPU and NPU configuration */
is_quad_core = !(prr_val & SYS_LSI_PRR_CA55_DIS);
npu_enabled = !(prr_val & SYS_LSI_PRR_NPU_DIS);
dev_info(dev, "Detected Renesas %s Core %s %s Rev %s%s\n",
is_quad_core ? "Quad" : "Dual", soc_dev_attr->family,
soc_dev_attr->soc_id, soc_dev_attr->revision,
npu_enabled ? " with Ethos-U55" : "");
/* Check CA55 PLL configuration */
if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
}
static const struct rz_sysc_soc_id_init_data rzg3e_sys_soc_id_init_data __initconst = {
.family = "RZ/G3E",
.id = 0x8679447,
.devid_offset = 0x304,
.revision_mask = GENMASK(31, 28),
.specific_id_mask = GENMASK(27, 0),
.print_id = rzg3e_sys_print_id,
};
const struct rz_sysc_init_data rzg3e_sys_init_data = {
.soc_id_init_data = &rzg3e_sys_soc_id_init_data,
};

View File

@ -0,0 +1,67 @@
// SPDX-License-Identifier: GPL-2.0
/*
* RZ/V2H System controller (SYS) driver
*
* Copyright (C) 2025 Renesas Electronics Corp.
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include "rz-sysc.h"
/* Register Offsets */
#define SYS_LSI_MODE 0x300
/*
* BOOTPLLCA[1:0]
* [0,0] => 1.1GHZ
* [0,1] => 1.5GHZ
* [1,0] => 1.6GHZ
* [1,1] => 1.7GHZ
*/
#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
#define SYS_LSI_PRR 0x308
#define SYS_LSI_PRR_GPU_DIS BIT(0)
#define SYS_LSI_PRR_ISP_DIS BIT(4)
static void rzv2h_sys_print_id(struct device *dev,
void __iomem *sysc_base,
struct soc_device_attribute *soc_dev_attr)
{
bool gpu_enabled, isp_enabled;
u32 prr_val, mode_val;
prr_val = readl(sysc_base + SYS_LSI_PRR);
mode_val = readl(sysc_base + SYS_LSI_MODE);
/* Check GPU and ISP configuration */
gpu_enabled = !(prr_val & SYS_LSI_PRR_GPU_DIS);
isp_enabled = !(prr_val & SYS_LSI_PRR_ISP_DIS);
dev_info(dev, "Detected Renesas %s %s Rev %s%s%s\n",
soc_dev_attr->family, soc_dev_attr->soc_id, soc_dev_attr->revision,
gpu_enabled ? " with GE3D (Mali-G31)" : "",
isp_enabled ? " with ISP (Mali-C55)" : "");
/* Check CA55 PLL configuration */
if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
}
static const struct rz_sysc_soc_id_init_data rzv2h_sys_soc_id_init_data __initconst = {
.family = "RZ/V2H",
.id = 0x847a447,
.devid_offset = 0x304,
.revision_mask = GENMASK(31, 28),
.specific_id_mask = GENMASK(27, 0),
.print_id = rzv2h_sys_print_id,
};
const struct rz_sysc_init_data rzv2h_sys_init_data = {
.soc_id_init_data = &rzv2h_sys_soc_id_init_data,
};

View File

@ -71,14 +71,6 @@ static const struct renesas_family fam_rzg2ul __initconst __maybe_unused = {
.name = "RZ/G2UL",
};
static const struct renesas_family fam_rzg3s __initconst __maybe_unused = {
.name = "RZ/G3S",
};
static const struct renesas_family fam_rzv2h __initconst __maybe_unused = {
.name = "RZ/V2H",
};
static const struct renesas_family fam_rzv2l __initconst __maybe_unused = {
.name = "RZ/V2L",
};
@ -176,16 +168,6 @@ static const struct renesas_soc soc_rz_g2ul __initconst __maybe_unused = {
.id = 0x8450447,
};
static const struct renesas_soc soc_rz_g3s __initconst __maybe_unused = {
.family = &fam_rzg3s,
.id = 0x85e0447,
};
static const struct renesas_soc soc_rz_v2h __initconst __maybe_unused = {
.family = &fam_rzv2h,
.id = 0x847a447,
};
static const struct renesas_soc soc_rz_v2l __initconst __maybe_unused = {
.family = &fam_rzv2l,
.id = 0x8447447,
@ -289,7 +271,6 @@ static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.id = 0x37,
};
static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R7S72100
{ .compatible = "renesas,r7s72100", .data = &soc_rz_a1h },
@ -410,15 +391,9 @@ static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R9A07G054
{ .compatible = "renesas,r9a07g054", .data = &soc_rz_v2l },
#endif
#ifdef CONFIG_ARCH_R9A08G045
{ .compatible = "renesas,r9a08g045", .data = &soc_rz_g3s },
#endif
#ifdef CONFIG_ARCH_R9A09G011
{ .compatible = "renesas,r9a09g011", .data = &soc_rz_v2m },
#endif
#ifdef CONFIG_ARCH_R9A09G057
{ .compatible = "renesas,r9a09g057", .data = &soc_rz_v2h },
#endif
#ifdef CONFIG_ARCH_SH73A0
{ .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
#endif
@ -444,11 +419,6 @@ static const struct renesas_id id_rzg2l __initconst = {
.mask = 0xfffffff,
};
static const struct renesas_id id_rzv2h __initconst = {
.offset = 0x304,
.mask = 0xfffffff,
};
static const struct renesas_id id_rzv2m __initconst = {
.offset = 0x104,
.mask = 0xff,
@ -466,7 +436,6 @@ static const struct of_device_id renesas_ids[] __initconst = {
{ .compatible = "renesas,r9a07g054-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a08g045-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a09g011-sys", .data = &id_rzv2m },
{ .compatible = "renesas,r9a09g057-sys", .data = &id_rzv2h },
{ .compatible = "renesas,prr", .data = &id_prr },
{ /* sentinel */ }
};
@ -531,7 +500,7 @@ static int __init renesas_soc_init(void)
eslo = product & 0xf;
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
eshi, eslo);
} else if (id == &id_rzg2l || id == &id_rzv2h) {
} else if (id == &id_rzg2l) {
eshi = ((product >> 28) & 0x0f);
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u",
eshi);

View File

@ -0,0 +1,137 @@
// SPDX-License-Identifier: GPL-2.0
/*
* RZ System controller driver
*
* Copyright (C) 2024 Renesas Electronics Corp.
*/
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include "rz-sysc.h"
#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
/**
* struct rz_sysc - RZ SYSC private data structure
* @base: SYSC base address
* @dev: SYSC device pointer
*/
struct rz_sysc {
void __iomem *base;
struct device *dev;
};
static int rz_sysc_soc_init(struct rz_sysc *sysc, const struct of_device_id *match)
{
const struct rz_sysc_init_data *sysc_data = match->data;
const struct rz_sysc_soc_id_init_data *soc_data = sysc_data->soc_id_init_data;
struct soc_device_attribute *soc_dev_attr;
const char *soc_id_start, *soc_id_end;
u32 val, revision, specific_id;
struct soc_device *soc_dev;
char soc_id[32] = {0};
size_t size;
soc_id_start = strchr(match->compatible, ',') + 1;
soc_id_end = strchr(match->compatible, '-');
size = soc_id_end - soc_id_start + 1;
if (size > 32)
size = sizeof(soc_id);
strscpy(soc_id, soc_id_start, size);
soc_dev_attr = devm_kzalloc(sysc->dev, sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
soc_dev_attr->family = devm_kstrdup(sysc->dev, soc_data->family, GFP_KERNEL);
if (!soc_dev_attr->family)
return -ENOMEM;
soc_dev_attr->soc_id = devm_kstrdup(sysc->dev, soc_id, GFP_KERNEL);
if (!soc_dev_attr->soc_id)
return -ENOMEM;
val = readl(sysc->base + soc_data->devid_offset);
revision = field_get(soc_data->revision_mask, val);
specific_id = field_get(soc_data->specific_id_mask, val);
soc_dev_attr->revision = devm_kasprintf(sysc->dev, GFP_KERNEL, "%u", revision);
if (!soc_dev_attr->revision)
return -ENOMEM;
if (soc_data->id && specific_id != soc_data->id) {
dev_warn(sysc->dev, "SoC mismatch (product = 0x%x)\n", specific_id);
return -ENODEV;
}
/* Try to call SoC-specific device identification */
if (soc_data->print_id) {
soc_data->print_id(sysc->dev, sysc->base, soc_dev_attr);
} else {
dev_info(sysc->dev, "Detected Renesas %s %s Rev %s\n",
soc_dev_attr->family, soc_dev_attr->soc_id, soc_dev_attr->revision);
}
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev))
return PTR_ERR(soc_dev);
return 0;
}
static const struct of_device_id rz_sysc_match[] = {
#ifdef CONFIG_SYSC_R9A08G045
{ .compatible = "renesas,r9a08g045-sysc", .data = &rzg3s_sysc_init_data },
#endif
#ifdef CONFIG_SYS_R9A09G047
{ .compatible = "renesas,r9a09g047-sys", .data = &rzg3e_sys_init_data },
#endif
#ifdef CONFIG_SYS_R9A09G057
{ .compatible = "renesas,r9a09g057-sys", .data = &rzv2h_sys_init_data },
#endif
{ }
};
MODULE_DEVICE_TABLE(of, rz_sysc_match);
static int rz_sysc_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct rz_sysc *sysc;
match = of_match_node(rz_sysc_match, dev->of_node);
if (!match)
return -ENODEV;
sysc = devm_kzalloc(dev, sizeof(*sysc), GFP_KERNEL);
if (!sysc)
return -ENOMEM;
sysc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sysc->base))
return PTR_ERR(sysc->base);
sysc->dev = dev;
return rz_sysc_soc_init(sysc, match);
}
static struct platform_driver rz_sysc_driver = {
.driver = {
.name = "renesas-rz-sysc",
.suppress_bind_attrs = true,
.of_match_table = rz_sysc_match
},
.probe = rz_sysc_probe
};
static int __init rz_sysc_init(void)
{
return platform_driver_register(&rz_sysc_driver);
}
subsys_initcall(rz_sysc_init);
MODULE_DESCRIPTION("Renesas RZ System Controller Driver");
MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Renesas RZ System Controller
*
* Copyright (C) 2024 Renesas Electronics Corp.
*/
#ifndef __SOC_RENESAS_RZ_SYSC_H__
#define __SOC_RENESAS_RZ_SYSC_H__
#include <linux/device.h>
#include <linux/sys_soc.h>
#include <linux/types.h>
/**
* struct rz_sysc_soc_id_init_data - RZ SYSC SoC identification initialization data
* @family: RZ SoC family
* @id: RZ SoC expected ID
* @devid_offset: SYSC SoC ID register offset
* @revision_mask: SYSC SoC ID revision mask
* @specific_id_mask: SYSC SoC ID specific ID mask
* @print_id: print SoC-specific extended device identification
*/
struct rz_sysc_soc_id_init_data {
const char * const family;
u32 id;
u32 devid_offset;
u32 revision_mask;
u32 specific_id_mask;
void (*print_id)(struct device *dev, void __iomem *sysc_base,
struct soc_device_attribute *soc_dev_attr);
};
/**
* struct rz_sysc_init_data - RZ SYSC initialization data
* @soc_id_init_data: RZ SYSC SoC ID initialization data
*/
struct rz_sysc_init_data {
const struct rz_sysc_soc_id_init_data *soc_id_init_data;
};
extern const struct rz_sysc_init_data rzg3e_sys_init_data;
extern const struct rz_sysc_init_data rzg3s_sysc_init_data;
extern const struct rz_sysc_init_data rzv2h_sys_init_data;
#endif /* __SOC_RENESAS_RZ_SYSC_H__ */

View File

@ -9,6 +9,7 @@
* Samsung Exynos SoC Adaptive Supply Voltage support
*/
#include <linux/array_size.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/energy_model.h>

View File

@ -12,6 +12,7 @@
* Samsung Exynos SoC Adaptive Supply Voltage and Chip ID support
*/
#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mfd/syscon.h>
@ -55,7 +56,9 @@ static const struct exynos_soc_id {
{ "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
{ "EXYNOS7870", 0xE7870000 },
/* Compatible with: samsung,exynos850-chipid */
{ "EXYNOS2200", 0xE9925000 },
{ "EXYNOS7885", 0xE7885000 },
{ "EXYNOS850", 0xE3830000 },
{ "EXYNOS8895", 0xE8895000 },
@ -134,6 +137,8 @@ static int exynos_chipid_probe(struct platform_device *pdev)
soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%x", soc_info.revision);
if (!soc_dev_attr->revision)
return -ENOMEM;
soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id);
if (!soc_dev_attr->soc_id) {
pr_err("Unknown SoC\n");

View File

@ -5,6 +5,7 @@
//
// Exynos - CPU PMU(Power Management Unit) support
#include <linux/array_size.h>
#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/of_address.h>

View File

@ -6,6 +6,7 @@
* Samsung Exynos USI driver (Universal Serial Interface).
*/
#include <linux/array_size.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@ -16,6 +17,18 @@
#include <dt-bindings/soc/samsung,exynos-usi.h>
/* USIv1: System Register: SW_CONF register bits */
#define USI_V1_SW_CONF_NONE 0x0
#define USI_V1_SW_CONF_I2C0 0x1
#define USI_V1_SW_CONF_I2C1 0x2
#define USI_V1_SW_CONF_I2C0_1 0x3
#define USI_V1_SW_CONF_SPI 0x4
#define USI_V1_SW_CONF_UART 0x8
#define USI_V1_SW_CONF_UART_I2C1 0xa
#define USI_V1_SW_CONF_MASK (USI_V1_SW_CONF_I2C0 | USI_V1_SW_CONF_I2C1 | \
USI_V1_SW_CONF_I2C0_1 | USI_V1_SW_CONF_SPI | \
USI_V1_SW_CONF_UART | USI_V1_SW_CONF_UART_I2C1)
/* USIv2: System Register: SW_CONF register bits */
#define USI_V2_SW_CONF_NONE 0x0
#define USI_V2_SW_CONF_UART BIT(0)
@ -34,7 +47,8 @@
#define USI_OPTION_CLKSTOP_ON BIT(2)
enum exynos_usi_ver {
USI_VER2 = 2,
USI_VER1 = 0,
USI_VER2,
};
struct exynos_usi_variant {
@ -66,19 +80,39 @@ struct exynos_usi_mode {
unsigned int val; /* mode register value */
};
static const struct exynos_usi_mode exynos_usi_modes[] = {
[USI_V2_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
[USI_V2_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
[USI_V2_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
[USI_V2_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
#define USI_MODES_MAX (USI_MODE_UART_I2C1 + 1)
static const struct exynos_usi_mode exynos_usi_modes[][USI_MODES_MAX] = {
[USI_VER1] = {
[USI_MODE_NONE] = { .name = "none", .val = USI_V1_SW_CONF_NONE },
[USI_MODE_UART] = { .name = "uart", .val = USI_V1_SW_CONF_UART },
[USI_MODE_SPI] = { .name = "spi", .val = USI_V1_SW_CONF_SPI },
[USI_MODE_I2C] = { .name = "i2c", .val = USI_V1_SW_CONF_I2C0 },
[USI_MODE_I2C1] = { .name = "i2c1", .val = USI_V1_SW_CONF_I2C1 },
[USI_MODE_I2C0_1] = { .name = "i2c0_1", .val = USI_V1_SW_CONF_I2C0_1 },
[USI_MODE_UART_I2C1] = { .name = "uart_i2c1", .val = USI_V1_SW_CONF_UART_I2C1 },
}, [USI_VER2] = {
[USI_MODE_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
[USI_MODE_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
[USI_MODE_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
[USI_MODE_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
},
};
static const char * const exynos850_usi_clk_names[] = { "pclk", "ipclk" };
static const struct exynos_usi_variant exynos850_usi_data = {
.ver = USI_VER2,
.sw_conf_mask = USI_V2_SW_CONF_MASK,
.min_mode = USI_V2_NONE,
.max_mode = USI_V2_I2C,
.min_mode = USI_MODE_NONE,
.max_mode = USI_MODE_I2C,
.num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
.clk_names = exynos850_usi_clk_names,
};
static const struct exynos_usi_variant exynos8895_usi_data = {
.ver = USI_VER1,
.sw_conf_mask = USI_V1_SW_CONF_MASK,
.min_mode = USI_MODE_NONE,
.max_mode = USI_MODE_UART_I2C1,
.num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
.clk_names = exynos850_usi_clk_names,
};
@ -87,6 +121,9 @@ static const struct of_device_id exynos_usi_dt_match[] = {
{
.compatible = "samsung,exynos850-usi",
.data = &exynos850_usi_data,
}, {
.compatible = "samsung,exynos8895-usi",
.data = &exynos8895_usi_data,
},
{ } /* sentinel */
};
@ -109,14 +146,15 @@ static int exynos_usi_set_sw_conf(struct exynos_usi *usi, size_t mode)
if (mode < usi->data->min_mode || mode > usi->data->max_mode)
return -EINVAL;
val = exynos_usi_modes[mode].val;
val = exynos_usi_modes[usi->data->ver][mode].val;
ret = regmap_update_bits(usi->sysreg, usi->sw_conf,
usi->data->sw_conf_mask, val);
if (ret)
return ret;
usi->mode = mode;
dev_dbg(usi->dev, "protocol: %s\n", exynos_usi_modes[usi->mode].name);
dev_dbg(usi->dev, "protocol: %s\n",
exynos_usi_modes[usi->data->ver][usi->mode].name);
return 0;
}
@ -168,10 +206,42 @@ static int exynos_usi_configure(struct exynos_usi *usi)
if (ret)
return ret;
if (usi->data->ver == USI_VER2)
return exynos_usi_enable(usi);
if (usi->data->ver == USI_VER1)
ret = clk_bulk_prepare_enable(usi->data->num_clks,
usi->clks);
else if (usi->data->ver == USI_VER2)
ret = exynos_usi_enable(usi);
return 0;
return ret;
}
static void exynos_usi_unconfigure(void *data)
{
struct exynos_usi *usi = data;
u32 val;
int ret;
if (usi->data->ver == USI_VER1) {
clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
return;
}
ret = clk_bulk_prepare_enable(usi->data->num_clks, usi->clks);
if (ret)
return;
/* Make sure that we've stopped providing the clock to USI IP */
val = readl(usi->regs + USI_OPTION);
val &= ~USI_OPTION_CLKREQ_ON;
val |= ~USI_OPTION_CLKSTOP_ON;
writel(val, usi->regs + USI_OPTION);
/* Set USI block state to reset */
val = readl(usi->regs + USI_CON);
val |= USI_CON_RESET;
writel(val, usi->regs + USI_CON);
clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
}
static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
@ -186,15 +256,11 @@ static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
return -EINVAL;
usi->mode = mode;
usi->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
usi->sysreg = syscon_regmap_lookup_by_phandle_args(np, "samsung,sysreg",
1, &usi->sw_conf);
if (IS_ERR(usi->sysreg))
return PTR_ERR(usi->sysreg);
ret = of_property_read_u32_index(np, "samsung,sysreg", 1,
&usi->sw_conf);
if (ret)
return ret;
usi->clkreq_on = of_property_read_bool(np, "samsung,clkreq-on");
return 0;
@ -255,6 +321,10 @@ static int exynos_usi_probe(struct platform_device *pdev)
if (ret)
return ret;
ret = devm_add_action_or_reset(&pdev->dev, exynos_usi_unconfigure, usi);
if (ret)
return ret;
/* Make it possible to embed protocol nodes into USI np */
return of_platform_populate(np, NULL, NULL, dev);
}

View File

@ -5,6 +5,7 @@
//
// Exynos3250 - CPU PMU (Power Management Unit) support
#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>

View File

@ -5,6 +5,7 @@
//
// Exynos5250 - CPU PMU (Power Management Unit) support
#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>

View File

@ -5,6 +5,7 @@
//
// Exynos5420 - CPU PMU (Power Management Unit) support
#include <linux/array_size.h>
#include <linux/pm.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>

View File

@ -47,6 +47,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string_choices.h>
#include <linux/syscore_ops.h>
#include <soc/tegra/common.h>
@ -1181,7 +1182,7 @@ static int powergate_show(struct seq_file *s, void *data)
continue;
seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i],
status ? "yes" : "no");
str_yes_no(status));
}
return 0;

View File

@ -105,6 +105,12 @@ err_unknown_variant:
return -ENODEV;
}
static const struct regmap_config k3_chipinfo_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
static int k3_chipinfo_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -112,13 +118,18 @@ static int k3_chipinfo_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct soc_device *soc_dev;
struct regmap *regmap;
void __iomem *base;
u32 partno_id;
u32 variant;
u32 jtag_id;
u32 mfg;
int ret;
regmap = device_node_to_regmap(node);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = regmap_init_mmio(dev, base, &k3_chipinfo_regmap_cfg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
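
The probe change above swaps device_node_to_regmap() for an explicit devm_platform_ioremap_resource() plus regmap_init_mmio() pair. A minimal sketch of that pattern in a hypothetical platform driver (the names, register offset and printout are assumptions):

/*
 * Sketch of the MMIO regmap pattern used in the probe hunk above;
 * not taken from the actual driver.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;
	struct regmap *map;
	u32 id;
	int ret;

	/* Map the first MMIO resource described by the device node */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* Expose the mapping through the regmap API */
	map = regmap_init_mmio(&pdev->dev, base, &example_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = regmap_read(map, 0x0, &id);	/* e.g. an ID register at offset 0 */
	if (ret)
		return ret;

	dev_info(&pdev->dev, "ID register: %#x\n", id);
	return 0;
}

Note that the non-devm regmap in this sketch would need a matching regmap_exit() on the error and teardown paths of a real driver.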

@@ -147,7 +147,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
int err;
int i;
ice = of_qcom_ice_get(dev);
ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;

@@ -654,5 +654,45 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
method; \
})
#ifdef CONFIG_ARM64
#define __fail_smccc_1_2(___res) \
do { \
if (___res) \
___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
} while (0)
/*
* arm_smccc_1_2_invoke() - make an SMCCC v1.2 compliant call
*
* @args: SMC args are in the a0..a17 fields of the arm_smccc_1_2_regs structure
* @res: result values from registers 0 to 17
*
* This macro will make either an HVC call or an SMC call depending on the
* current SMCCC conduit. If no valid conduit is available then -1
* (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied).
*
* The return value also provides the conduit that was used.
*/
#define arm_smccc_1_2_invoke(args, res) ({ \
struct arm_smccc_1_2_regs *__args = args; \
struct arm_smccc_1_2_regs *__res = res; \
int method = arm_smccc_1_1_get_conduit(); \
switch (method) { \
case SMCCC_CONDUIT_HVC: \
arm_smccc_1_2_hvc(__args, __res); \
break; \
case SMCCC_CONDUIT_SMC: \
arm_smccc_1_2_smc(__args, __res); \
break; \
default: \
__fail_smccc_1_2(__res); \
method = SMCCC_CONDUIT_NONE; \
break; \
} \
method; \
})
#endif /*CONFIG_ARM64*/
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
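
The new arm_smccc_1_2_invoke() is arm64-only per the #ifdef above and mirrors arm_smccc_1_1_invoke(): the caller fills a struct arm_smccc_1_2_regs and checks the returned conduit. A hedged sketch of a caller (EXAMPLE_FUNC_ID and the error mapping are illustrative, not a real SMCCC service):

/*
 * Illustrative use of arm_smccc_1_2_invoke(); EXAMPLE_FUNC_ID is a
 * hypothetical function identifier, not a defined SMCCC service.
 */
#include <linux/arm-smccc.h>
#include <linux/errno.h>

#define EXAMPLE_FUNC_ID	0xc2000001UL

static int example_call(unsigned long arg)
{
	struct arm_smccc_1_2_regs args = {
		.a0 = EXAMPLE_FUNC_ID,
		.a1 = arg,
	};
	struct arm_smccc_1_2_regs res;

	/* Picks HVC or SMC based on the discovered conduit */
	if (arm_smccc_1_2_invoke(&args, &res) == SMCCC_CONDUIT_NONE)
		return -EOPNOTSUPP;

	return res.a0 == SMCCC_RET_NOT_SUPPORTED ? -EOPNOTSUPP : 0;
}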

@@ -112,6 +112,7 @@
FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
#define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0)
#define FFA_VERSION_1_1 FFA_PACK_VERSION_INFO(1, 1)
#define FFA_VERSION_1_2 FFA_PACK_VERSION_INFO(1, 2)
/**
* FF-A specification mentions explicitly about '4K pages'. This should
@@ -176,6 +177,7 @@ void ffa_device_unregister(struct ffa_device *ffa_dev);
int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
const char *mod_name);
void ffa_driver_unregister(struct ffa_driver *driver);
void ffa_devices_unregister(void);
bool ffa_device_is_valid(struct ffa_device *ffa_dev);
#else
@@ -188,6 +190,8 @@ ffa_device_register(const struct ffa_partition_info *part_info,
static inline void ffa_device_unregister(struct ffa_device *dev) {}
static inline void ffa_devices_unregister(void) {}
static inline int
ffa_driver_register(struct ffa_driver *driver, struct module *owner,
const char *mod_name)
@@ -237,8 +241,12 @@ struct ffa_partition_info {
#define FFA_PARTITION_NOTIFICATION_RECV BIT(3)
/* partition runs in the AArch64 execution state. */
#define FFA_PARTITION_AARCH64_EXEC BIT(8)
/* partition supports receipt of direct request2 */
#define FFA_PARTITION_DIRECT_REQ2_RECV BIT(9)
/* partition can send direct request2. */
#define FFA_PARTITION_DIRECT_REQ2_SEND BIT(10)
u32 properties;
u32 uuid[4];
uuid_t uuid;
};
static inline
@@ -256,6 +264,10 @@ bool ffa_partition_check_property(struct ffa_device *dev, u32 property)
#define ffa_partition_supports_direct_recv(dev) \
ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_RECV)
#define ffa_partition_supports_direct_req2_recv(dev) \
(ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_REQ2_RECV) && \
!dev->mode_32bit)
/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
struct ffa_send_direct_data {
unsigned long data0; /* w3/x3 */
@@ -271,6 +283,7 @@ struct ffa_indirect_msg_hdr {
u32 offset;
u32 send_recv_id;
u32 size;
uuid_t uuid;
};
/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP}2 which pass data via registers */
@@ -439,7 +452,7 @@ struct ffa_msg_ops {
int (*sync_send_receive)(struct ffa_device *dev,
struct ffa_send_direct_data *data);
int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz);
int (*sync_send_receive2)(struct ffa_device *dev, const uuid_t *uuid,
int (*sync_send_receive2)(struct ffa_device *dev,
struct ffa_send_direct_data2 *data);
};
@@ -455,6 +468,7 @@ struct ffa_cpu_ops {
typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);
typedef void (*ffa_fwk_notifier_cb)(int notify_id, void *cb_data, void *buf);
struct ffa_notifier_ops {
int (*sched_recv_cb_register)(struct ffa_device *dev,
@@ -463,6 +477,10 @@ struct ffa_notifier_ops {
int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
ffa_notifier_cb cb, void *cb_data, int notify_id);
int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
int (*fwk_notify_request)(struct ffa_device *dev,
ffa_fwk_notifier_cb cb, void *cb_data,
int notify_id);
int (*fwk_notify_relinquish)(struct ffa_device *dev, int notify_id);
int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
u16 vcpu);
};
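
The added fwk_notify_request()/fwk_notify_relinquish() hooks take the new buffer-carrying ffa_fwk_notifier_cb, and direct request2 messaging is gated on the new partition property bits. A hedged sketch of how an FF-A client driver might use both (the notification ID and callback body are illustrative, not taken from an existing driver):

/*
 * Illustrative registration of a framework notification handler;
 * EXAMPLE_NOTIFY_ID and the handler body are assumptions.
 */
#include <linux/arm_ffa.h>
#include <linux/printk.h>

#define EXAMPLE_NOTIFY_ID	1

static void example_fwk_notifier(int notify_id, void *cb_data, void *buf)
{
	/* buf points at the framework message payload for this notification */
	pr_debug("FF-A framework notification %d\n", notify_id);
}

static int example_ffa_probe(struct ffa_device *ffa_dev)
{
	const struct ffa_notifier_ops *ops = ffa_dev->ops->notifier_ops;

	/* The helper returns false for partitions running in 32-bit mode */
	if (!ffa_partition_supports_direct_req2_recv(ffa_dev))
		pr_debug("direct request2 not supported by partition\n");

	return ops->fwk_notify_request(ffa_dev, example_fwk_notifier,
				       NULL, EXAMPLE_NOTIFY_ID);
}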

@@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2020 Samsung Electronics Co., Ltd.
* Copyright 2020 Google LLC.
* Copyright 2024 Linaro Ltd.
*/
#ifndef __EXYNOS_ACPM_PROTOCOL_H
#define __EXYNOS_ACPM_PROTOCOL_H
#include <linux/types.h>
struct acpm_handle;
struct acpm_pmic_ops {
int (*read_reg)(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 *buf);
int (*bulk_read)(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 count, u8 *buf);
int (*write_reg)(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 value);
int (*bulk_write)(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 count, const u8 *buf);
int (*update_reg)(const struct acpm_handle *handle,
unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
u8 value, u8 mask);
};
struct acpm_ops {
struct acpm_pmic_ops pmic_ops;
};
/**
* struct acpm_handle - Reference to an initialized protocol instance
* @ops: ACPM operations
*/
struct acpm_handle {
struct acpm_ops ops;
};
struct device;
const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
const char *property);
#endif /* __EXYNOS_ACPM_PROTOCOL_H */
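
A client of the new ACPM handle is expected to look it up via devm_acpm_get_by_phandle() and go through ops.pmic_ops. A minimal sketch, assuming a hypothetical phandle property name and placeholder channel/register values:

/*
 * Illustrative ACPM PMIC register read through the handle above.
 * The "exynos,acpm" property name and the channel/type/register
 * values are assumptions for the sketch only.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/samsung/exynos-acpm-protocol.h>

static int example_read_pmic_reg(struct device *dev)
{
	const struct acpm_handle *acpm;
	u8 val;
	int ret;

	acpm = devm_acpm_get_by_phandle(dev, "exynos,acpm");
	if (IS_ERR(acpm))
		return PTR_ERR(acpm);

	/* acpm_chan_id, type, reg and chan below are placeholders */
	ret = acpm->ops.pmic_ops.read_reg(acpm, 0, 0x1, 0x00, 0x0, &val);
	if (ret)
		return ret;

	dev_dbg(dev, "PMIC register: %#x\n", val);
	return 0;
}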

@@ -56,7 +56,7 @@ struct apple_rtkit_shmem {
* context.
*/
struct apple_rtkit_ops {
void (*crashed)(void *cookie);
void (*crashed)(void *cookie, const void *crashlog, size_t crashlog_size);
void (*recv_message)(void *cookie, u8 endpoint, u64 message);
bool (*recv_message_early)(void *cookie, u8 endpoint, u64 message);
int (*shmem_setup)(void *cookie, struct apple_rtkit_shmem *bfr);
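
With the widened ->crashed() signature, co-processor client drivers now receive the crash log buffer directly. A hedged sketch of a handler written against the new prototype (the dumping behaviour is illustrative, not taken from an existing driver; only the crash hook is shown):

/*
 * Illustrative implementation of the extended ->crashed() callback.
 */
#include <linux/minmax.h>
#include <linux/printk.h>
#include <linux/soc/apple/rtkit.h>

static void example_rtkit_crashed(void *cookie, const void *crashlog,
				  size_t crashlog_size)
{
	pr_err("co-processor crashed, %zu bytes of crash log\n", crashlog_size);

	/* Dump (at most) the first 256 bytes of the crash log */
	if (crashlog && crashlog_size)
		print_hex_dump(KERN_ERR, "rtkit: ", DUMP_PREFIX_OFFSET, 16, 1,
			       crashlog, min_t(size_t, crashlog_size, 256),
			       true);
}

static const struct apple_rtkit_ops example_rtkit_ops = {
	.crashed = example_rtkit_crashed,
};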

@@ -33,5 +33,6 @@ int qcom_ice_program_key(struct qcom_ice *ice,
const u8 crypto_key[], u8 data_unit_size,
int slot);
int qcom_ice_evict_key(struct qcom_ice *ice, int slot);
struct qcom_ice *of_qcom_ice_get(struct device *dev);
struct qcom_ice *devm_of_qcom_ice_get(struct device *dev);
#endif /* __QCOM_ICE_H__ */
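
devm_of_qcom_ice_get() ties the ICE reference to the consumer's device lifetime, which is what lets the ufs-qcom hunk above drop manual cleanup. A minimal sketch of a consumer, assuming a hypothetical probe helper:

/*
 * Illustrative consumer of the device-managed ICE getter; the
 * surrounding driver structure is an assumption for the sketch.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/soc/qcom/ice.h>

static int example_ice_init(struct device *dev, struct qcom_ice **out)
{
	struct qcom_ice *ice;

	/* Reference is released automatically when @dev is unbound */
	ice = devm_of_qcom_ice_get(dev);
	if (ice == ERR_PTR(-EOPNOTSUPP)) {
		dev_warn(dev, "Disabling inline encryption support\n");
		ice = NULL;
	} else if (IS_ERR(ice)) {
		return PTR_ERR(ice);
	}

	*out = ice;
	return 0;
}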

@@ -3755,7 +3755,7 @@ enum mrq_pwr_limit_cmd {
* @defgroup bpmp_pwr_limit_type PWR_LIMIT TYPEs
* @{
*/
/** @brief Limit value specifies traget cap */
/** @brief Limit value specifies target cap */
#define PWR_LIMIT_TYPE_TARGET_CAP 0U
/** @brief Limit value specifies maximum possible target cap */
#define PWR_LIMIT_TYPE_BOUND_MAX 1U