Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ (synced 2025-04-19 20:58:31 +09:00)
Revert "Merge tag 'irq-msi-2025-03-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip"
This reverts commit 36f5f026df6c1cd8a20373adc4388d2b3401ce91, reversing
changes made to 43a7eec035a5b64546c8adefdc9cf96a116da14b.

Thomas says:
 "I just noticed that for some incomprehensible reason, probably sheer
  incompetence when trying to utilize b4, I managed to merge an outdated
  _and_ buggy version of that series. Can you please revert that merge
  completely?"

Done.

Requested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent acb4f33713
commit 112e43e9fd
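For context on what the diff below does: the reverted series had converted open-coded msi_lock_descs()/msi_unlock_descs() pairs to the scope-based guard()/scoped_guard() helpers from <linux/cleanup.h>, so this revert swaps each guard back to an explicit lock/unlock pair with goto-based error paths. A minimal sketch of the two styles, assuming the msi_descs_lock guard class from the diff; do_work_locked() is a hypothetical placeholder, not a function from this commit:

	/* Explicit pair: the style this revert restores. Every early
	 * exit has to route through an unlock path. */
	static int example_explicit(struct device *dev)
	{
		int ret;

		msi_lock_descs(dev);
		ret = do_work_locked(dev);	/* hypothetical helper */
		msi_unlock_descs(dev);
		return ret;
	}

	/* Scope-based guard: the style being reverted. The mutex is
	 * taken when the guard is constructed and dropped automatically
	 * when it goes out of scope, so early returns need no unlock. */
	static int example_guarded(struct device *dev)
	{
		guard(msi_descs_lock)(dev);
		return do_work_locked(dev);	/* hypothetical helper */
	}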
drivers/ntb/msi.c

@@ -106,10 +106,10 @@ int ntb_msi_setup_mws(struct ntb_dev *ntb)
 	if (!ntb->msi)
 		return -EINVAL;
 
-	scoped_guard (msi_descs_lock, &ntb->pdev->dev) {
-		desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
-		addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
-	}
+	msi_lock_descs(&ntb->pdev->dev);
+	desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
+	addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
+	msi_unlock_descs(&ntb->pdev->dev);
 
 	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
 		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
@@ -289,7 +289,7 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
 	if (!ntb->msi)
 		return -EINVAL;
 
-	guard(msi_descs_lock)(dev);
+	msi_lock_descs(dev);
 	msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
 		if (irq_has_action(entry->irq))
 			continue;
@@ -307,11 +307,17 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
 		ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
 		if (ret) {
 			devm_free_irq(&ntb->dev, entry->irq, dev_id);
-			return ret;
+			goto unlock;
 		}
-		return entry->irq;
+
+		ret = entry->irq;
+		goto unlock;
 	}
-	return -ENODEV;
+	ret = -ENODEV;
+
+unlock:
+	msi_unlock_descs(dev);
+	return ret;
 }
 EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
drivers/pci/controller/pci-hyperv.c

@@ -3975,18 +3975,24 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
 {
 	struct irq_data *irq_data;
 	struct msi_desc *entry;
+	int ret = 0;
 
 	if (!pdev->msi_enabled && !pdev->msix_enabled)
 		return 0;
 
-	guard(msi_descs_lock)(&pdev->dev);
+	msi_lock_descs(&pdev->dev);
 	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
 		irq_data = irq_get_irq_data(entry->irq);
-		if (WARN_ON_ONCE(!irq_data))
-			return -EINVAL;
+		if (WARN_ON_ONCE(!irq_data)) {
+			ret = -EINVAL;
+			break;
+		}
 
 		hv_compose_msi_msg(irq_data, &entry->msg);
 	}
-	return 0;
+	msi_unlock_descs(&pdev->dev);
+
+	return ret;
 }
 
 /*
drivers/pci/msi/api.c

@@ -53,9 +53,10 @@ void pci_disable_msi(struct pci_dev *dev)
 	if (!pci_msi_enabled() || !dev || !dev->msi_enabled)
 		return;
 
-	guard(msi_descs_lock)(&dev->dev);
+	msi_lock_descs(&dev->dev);
 	pci_msi_shutdown(dev);
 	pci_free_msi_irqs(dev);
+	msi_unlock_descs(&dev->dev);
 }
 EXPORT_SYMBOL(pci_disable_msi);
 
@@ -195,9 +196,10 @@ void pci_disable_msix(struct pci_dev *dev)
 	if (!pci_msi_enabled() || !dev || !dev->msix_enabled)
 		return;
 
-	guard(msi_descs_lock)(&dev->dev);
+	msi_lock_descs(&dev->dev);
 	pci_msix_shutdown(dev);
 	pci_free_msi_irqs(dev);
+	msi_unlock_descs(&dev->dev);
 }
 EXPORT_SYMBOL(pci_disable_msix);
drivers/pci/msi/msi.c

@@ -335,11 +335,41 @@ static int msi_verify_entries(struct pci_dev *dev)
 	return !entry ? 0 : -EIO;
 }
 
-static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
+/**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
+ *
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI IRQ. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+			       struct irq_affinity *affd)
 {
-	int ret = msi_setup_msi_desc(dev, nvec, masks);
+	struct irq_affinity_desc *masks = NULL;
 	struct msi_desc *entry, desc;
+	int ret;
+
+	/* Reject multi-MSI early on irq domain enabled architectures */
+	if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
+		return 1;
+
+	/*
+	 * Disable MSI during setup in the hardware, but mark it enabled
+	 * so that setup code can evaluate it.
+	 */
+	pci_msi_set_enable(dev, 0);
+	dev->msi_enabled = 1;
+
+	if (affd)
+		masks = irq_create_affinity_masks(nvec, affd);
 
+	msi_lock_descs(&dev->dev);
+	ret = msi_setup_msi_desc(dev, nvec, masks);
 	if (ret)
 		goto fail;
@@ -368,48 +398,19 @@ static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
 
 	pcibios_free_irq(dev);
 	dev->irq = entry->irq;
-	return 0;
+	goto unlock;
 
 err:
 	pci_msi_unmask(&desc, msi_multi_mask(&desc));
 	pci_free_msi_irqs(dev);
 fail:
 	dev->msi_enabled = 0;
+unlock:
+	msi_unlock_descs(&dev->dev);
+	kfree(masks);
 	return ret;
 }
 
-/**
- * msi_capability_init - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
- * @nvec: number of interrupts to allocate
- * @affd: description of automatic IRQ affinity assignments (may be %NULL)
- *
- * Setup the MSI capability structure of the device with the requested
- * number of interrupts. A return value of zero indicates the successful
- * setup of an entry with the new MSI IRQ. A negative return value indicates
- * an error, and a positive return value indicates the number of interrupts
- * which could have been allocated.
- */
-static int msi_capability_init(struct pci_dev *dev, int nvec,
-			       struct irq_affinity *affd)
-{
-	/* Reject multi-MSI early on irq domain enabled architectures */
-	if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
-		return 1;
-
-	/*
-	 * Disable MSI during setup in the hardware, but mark it enabled
-	 * so that setup code can evaluate it.
-	 */
-	pci_msi_set_enable(dev, 0);
-	dev->msi_enabled = 1;
-
-	struct irq_affinity_desc *masks __free(kfree) =
-		affd ? irq_create_affinity_masks(nvec, affd) : NULL;
-
-	guard(msi_descs_lock)(&dev->dev);
-	return __msi_capability_init(dev, nvec, masks);
-}
-
 int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
 			   struct irq_affinity *affd)
 {
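The kernel-doc restored above gives msi_capability_init() a three-way return contract: zero on success, a negative errno on hard failure, or a positive count of interrupts that could have been allocated instead. A simplified caller-side sketch of how a retry loop such as __pci_enable_msi_range() typically consumes that contract (an illustration of the pattern, not the exact function body):

	int nvec = maxvec, rc;

	for (;;) {
		rc = msi_capability_init(dev, nvec, affd);
		if (rc == 0)
			return nvec;	/* success with nvec vectors */
		if (rc < 0)
			return rc;	/* hard error */
		if (rc < minvec)
			return -ENOSPC;	/* feasible count below the minimum */
		nvec = rc;		/* retry with the feasible count */
	}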
@@ -662,41 +663,40 @@ static void msix_mask_all(void __iomem *base, int tsize)
 		writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
 
-static int __msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
-				   int nvec, struct irq_affinity_desc *masks)
+static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+				 int nvec, struct irq_affinity *affd)
 {
-	int ret = msix_setup_msi_descs(dev, entries, nvec, masks);
+	struct irq_affinity_desc *masks = NULL;
+	int ret;
+
+	if (affd)
+		masks = irq_create_affinity_masks(nvec, affd);
+
+	msi_lock_descs(&dev->dev);
+	ret = msix_setup_msi_descs(dev, entries, nvec, masks);
 	if (ret)
-		goto fail;
+		goto out_free;
 
 	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
 	if (ret)
-		goto fail;
+		goto out_free;
 
 	/* Check if all MSI entries honor device restrictions */
 	ret = msi_verify_entries(dev);
 	if (ret)
-		goto fail;
+		goto out_free;
 
 	msix_update_entries(dev, entries);
-	return 0;
+	goto out_unlock;
 
-fail:
+out_free:
 	pci_free_msi_irqs(dev);
+out_unlock:
+	msi_unlock_descs(&dev->dev);
+	kfree(masks);
 	return ret;
 }
 
-static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
-				 int nvec, struct irq_affinity *affd)
-{
-	struct irq_affinity_desc *masks __free(kfree) =
-		affd ? irq_create_affinity_masks(nvec, affd) : NULL;
-
-	guard(msi_descs_lock)(&dev->dev);
-	return __msix_setup_interrupts(dev, entries, nvec, masks);
-}
-
 /**
  * msix_capability_init - configure device's MSI-X capability
  * @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -870,13 +870,13 @@ void __pci_restore_msix_state(struct pci_dev *dev)
 
 	write_msg = arch_restore_msi_irqs(dev);
 
-	scoped_guard (msi_descs_lock, &dev->dev) {
-		msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
-			if (write_msg)
-				__pci_write_msi_msg(entry, &entry->msg);
-			pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
-		}
+	msi_lock_descs(&dev->dev);
+	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+		if (write_msg)
+			__pci_write_msi_msg(entry, &entry->msg);
+		pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
 	}
+	msi_unlock_descs(&dev->dev);
 
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
@@ -915,53 +915,6 @@ void pci_free_msi_irqs(struct pci_dev *dev)
 	}
 }
 
-#ifdef CONFIG_PCIE_TPH
-/**
- * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector
- * @pdev:	The PCIe device to update
- * @index:	The MSI-X index to update
- * @tag:	The tag to write
- *
- * Returns: 0 on success, error code on failure
- */
-int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
-{
-	struct msi_desc *msi_desc;
-	struct irq_desc *irq_desc;
-	unsigned int virq;
-
-	if (!pdev->msix_enabled)
-		return -ENXIO;
-
-	guard(msi_descs_lock)(&pdev->dev);
-	virq = msi_get_virq(&pdev->dev, index);
-	if (!virq)
-		return -ENXIO;
-	/*
-	 * This is a horrible hack, but short of implementing a PCI
-	 * specific interrupt chip callback and a huge pile of
-	 * infrastructure, this is the minor nuissance. It provides the
-	 * protection against concurrent operations on this entry and keeps
-	 * the control word cache in sync.
-	 */
-	irq_desc = irq_to_desc(virq);
-	if (!irq_desc)
-		return -ENXIO;
-
-	guard(raw_spinlock_irq)(&irq_desc->lock);
-	msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data);
-	if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual)
-		return -ENXIO;
-
-	msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;
-	msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
-	pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl);
-	/* Flush the write */
-	readl(pci_msix_desc_addr(msi_desc));
-	return 0;
-}
-#endif
-
 /* Misc. infrastructure */
 
 struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
drivers/pci/pci.h

@@ -989,15 +989,6 @@ int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
 				  const char *name);
 void pcim_release_region(struct pci_dev *pdev, int bar);
 
-#ifdef CONFIG_PCI_MSI
-int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag);
-#else
-static inline int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
-{
-	return -ENODEV;
-}
-#endif
-
 /*
  * Config Address for PCI Configuration Mechanism #1
 *
drivers/pci/tph.c

@@ -204,6 +204,48 @@ static u8 get_rp_completer_type(struct pci_dev *pdev)
 	return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg);
 }
 
+/* Write ST to MSI-X vector control reg - Return 0 if OK, otherwise -errno */
+static int write_tag_to_msix(struct pci_dev *pdev, int msix_idx, u16 tag)
+{
+#ifdef CONFIG_PCI_MSI
+	struct msi_desc *msi_desc = NULL;
+	void __iomem *vec_ctrl;
+	u32 val;
+	int err = 0;
+
+	msi_lock_descs(&pdev->dev);
+
+	/* Find the msi_desc entry with matching msix_idx */
+	msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ASSOCIATED) {
+		if (msi_desc->msi_index == msix_idx)
+			break;
+	}
+
+	if (!msi_desc) {
+		err = -ENXIO;
+		goto err_out;
+	}
+
+	/* Get the vector control register (offset 0xc) pointed by msix_idx */
+	vec_ctrl = pdev->msix_base + msix_idx * PCI_MSIX_ENTRY_SIZE;
+	vec_ctrl += PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+	val = readl(vec_ctrl);
+	val &= ~PCI_MSIX_ENTRY_CTRL_ST;
+	val |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
+	writel(val, vec_ctrl);
+
+	/* Read back to flush the update */
+	val = readl(vec_ctrl);
+
+err_out:
+	msi_unlock_descs(&pdev->dev);
+	return err;
+#else
+	return -ENODEV;
+#endif
+}
+
 /* Write tag to ST table - Return 0 if OK, otherwise -errno */
 static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
 {
@@ -304,7 +346,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
 
 	switch (loc) {
 	case PCI_TPH_LOC_MSIX:
-		err = pci_msix_write_tph_tag(pdev, index, tag);
+		err = write_tag_to_msix(pdev, index, tag);
 		break;
 	case PCI_TPH_LOC_CAP:
 		err = write_tag_to_st_table(pdev, index, tag);
drivers/soc/ti/ti_sci_inta_msi.c

@@ -103,15 +103,19 @@ int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
 	if (ret)
 		return ret;
 
-	guard(msi_descs_lock)(dev);
+	msi_lock_descs(dev);
 	nvec = ti_sci_inta_msi_alloc_descs(dev, res);
-	if (nvec <= 0)
-		return nvec;
+	if (nvec <= 0) {
+		ret = nvec;
+		goto unlock;
+	}
 
 	/* Use alloc ALL as it's unclear whether there are gaps in the indices */
 	ret = msi_domain_alloc_irqs_all_locked(dev, MSI_DEFAULT_DOMAIN, nvec);
 	if (ret)
 		dev_err(dev, "Failed to allocate IRQs %d\n", ret);
+unlock:
+	msi_unlock_descs(dev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
drivers/ufs/host/ufs-qcom.c

@@ -1806,19 +1806,15 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 	ufshcd_mcq_config_esi(hba, msg);
 }
 
-struct ufs_qcom_irq {
-	unsigned int		irq;
-	unsigned int		idx;
-	struct ufs_hba		*hba;
-};
-
 static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
 {
-	struct ufs_qcom_irq *qi = data;
-	struct ufs_hba *hba = qi->hba;
-	struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
+	struct msi_desc *desc = data;
+	struct device *dev = msi_desc_to_dev(desc);
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	u32 id = desc->msi_index;
+	struct ufs_hw_queue *hwq = &hba->uhq[id];
 
-	ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
+	ufshcd_mcq_write_cqis(hba, 0x1, id);
 	ufshcd_mcq_poll_cqe_lock(hba, hwq);
 
 	return IRQ_HANDLED;
@@ -1827,7 +1823,8 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
 static int ufs_qcom_config_esi(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	struct ufs_qcom_irq *qi;
+	struct msi_desc *desc;
+	struct msi_desc *failed_desc = NULL;
 	int nr_irqs, ret;
 
 	if (host->esi_enabled)
@@ -1838,47 +1835,47 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
 	 * 2. Poll queues do not need ESI.
 	 */
 	nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
-	qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
-	if (qi)
-		return -ENOMEM;
-
 	ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
 						      ufs_qcom_write_msi_msg);
 	if (ret) {
 		dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
-		goto cleanup;
+		return ret;
 	}
 
-	for (int idx = 0; idx < nr_irqs; idx++) {
-		qi[idx].irq = msi_get_virq(hba->dev, idx);
-		qi[idx].idx = idx;
-		qi[idx].hba = hba;
-
-		ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
-				       IRQF_SHARED, "qcom-mcq-esi", qi + idx);
+	msi_lock_descs(hba->dev);
+	msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+		ret = devm_request_irq(hba->dev, desc->irq,
+				       ufs_qcom_mcq_esi_handler,
+				       IRQF_SHARED, "qcom-mcq-esi", desc);
 		if (ret) {
 			dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
-				__func__, qi[idx].irq, ret);
-			qi[idx].irq = 0;
-			goto cleanup;
+				__func__, desc->irq, ret);
+			failed_desc = desc;
+			break;
 		}
 	}
+	msi_unlock_descs(hba->dev);
 
-	if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
-	    host->hw_ver.step == 0) {
-		ufshcd_rmwl(hba, ESI_VEC_MASK,
-			    FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
-			    REG_UFS_CFG3);
+	if (ret) {
+		/* Rewind */
+		msi_lock_descs(hba->dev);
+		msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+			if (desc == failed_desc)
+				break;
+			devm_free_irq(hba->dev, desc->irq, hba);
+		}
+		msi_unlock_descs(hba->dev);
+		platform_device_msi_free_irqs_all(hba->dev);
+	} else {
+		if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+		    host->hw_ver.step == 0)
+			ufshcd_rmwl(hba, ESI_VEC_MASK,
+				    FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+				    REG_UFS_CFG3);
+		ufshcd_mcq_enable_esi(hba);
+		host->esi_enabled = true;
 	}
-	ufshcd_mcq_enable_esi(hba);
-	host->esi_enabled = true;
-	return 0;
 
-cleanup:
-	for (int idx = 0; qi[idx].irq; idx++)
-		devm_free_irq(hba->dev, qi[idx].irq, hba);
-	platform_device_msi_free_irqs_all(hba->dev);
-	devm_kfree(hba->dev, qi);
 	return ret;
 }
include/linux/cleanup.h

@@ -216,23 +216,6 @@ const volatile void * __must_check_fn(const volatile void *val)
 
 #define return_ptr(p)	return no_free_ptr(p)
 
-/*
- * Only for situations where an allocation is handed in to another function
- * and consumed by that function on success.
- *
- *	struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
- *
- *	setup(f);
- *	if (some_condition)
- *		return -EINVAL;
- *	....
- *	ret = bar(f);
- *	if (!ret)
- *		retain_ptr(f);
- *	return ret;
- */
-#define retain_ptr(p)		\
-	__get_and_null(p, NULL)
-
 /*
  * DEFINE_CLASS(name, type, exit, init, init_args...):
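The retain_ptr() helper deleted above is the counterpart to __free() auto-cleanup: the attribute frees the pointer on every exit path unless retain_ptr() nulls the cleanup variable once ownership has been handed off, exactly as the removed comment's own example shows. A minimal sketch of the pattern, where consume() is a hypothetical function that takes ownership of f on success:

	struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return -ENOMEM;

	ret = consume(f);	/* hypothetical; owns f on success */
	if (!ret)
		retain_ptr(f);	/* suppress the automatic kfree() */
	return ret;		/* on failure, f is still freed here */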
include/linux/irqdomain.h

@@ -281,8 +281,6 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
 
 void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
 
-DEFINE_FREE(irq_domain_free_fwnode, struct fwnode_handle *, if (_T) irq_domain_free_fwnode(_T))
-
 struct irq_domain_chip_generic_info;
 
 /**
include/linux/msi.h

@@ -80,6 +80,7 @@ struct device_attribute;
 struct irq_domain;
 struct irq_affinity_desc;
 
+void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 #ifdef CONFIG_GENERIC_MSI_IRQ
 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
 #else
@@ -228,11 +229,8 @@ struct msi_dev_domain {
 
 int msi_setup_device_data(struct device *dev);
 
-void __msi_lock_descs(struct device *dev);
-void __msi_unlock_descs(struct device *dev);
-
-DEFINE_LOCK_GUARD_1(msi_descs_lock, struct device, __msi_lock_descs(_T->lock),
-		    __msi_unlock_descs(_T->lock));
+void msi_lock_descs(struct device *dev);
+void msi_unlock_descs(struct device *dev);
 
 struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
 				       enum msi_desc_filter filter);
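The DEFINE_LOCK_GUARD_1() line deleted above is what made guard(msi_descs_lock)(dev) and scoped_guard(msi_descs_lock, ...) in the earlier hunks work: it defines a guard class whose constructor takes the lock and whose destructor releases it when the variable leaves scope. Roughly, and much simplified relative to the real macro in <linux/cleanup.h>, it expands to something like:

	typedef struct {
		struct device *lock;
	} class_msi_descs_lock_t;

	/* constructor: runs when guard(msi_descs_lock)(dev) is declared */
	static inline class_msi_descs_lock_t
	class_msi_descs_lock_constructor(struct device *l)
	{
		class_msi_descs_lock_t _t = { .lock = l };

		__msi_lock_descs(_t.lock);
		return _t;
	}

	/* destructor: runs automatically when the guard goes out of scope */
	static inline void
	class_msi_descs_lock_destructor(class_msi_descs_lock_t *_T)
	{
		__msi_unlock_descs(_T->lock);
	}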
@@ -636,6 +634,8 @@ void msi_remove_device_irq_domain(struct device *dev, unsigned int domid);
 bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
 				 enum irq_domain_bus_token bus_token);
 
+int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
+				       unsigned int first, unsigned int last);
 int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
 				unsigned int first, unsigned int last);
 int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs);
@@ -644,6 +644,8 @@ struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, u
 				       const struct irq_affinity_desc *affdesc,
 				       union msi_instance_cookie *cookie);
 
+void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
+				       unsigned int first, unsigned int last);
 void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
 				unsigned int first, unsigned int last);
 void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid);
kernel/irq/msi.c (169 lines changed)
@@ -270,11 +270,16 @@ fail:
 	return ret;
 }
 
+void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+{
+	*msg = entry->msg;
+}
+
 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
 {
 	struct msi_desc *entry = irq_get_msi_desc(irq);
 
-	*msg = entry->msg;
+	__get_cached_msi_msg(entry, msg);
 }
 EXPORT_SYMBOL_GPL(get_cached_msi_msg);
@@ -338,30 +343,26 @@ int msi_setup_device_data(struct device *dev)
 }
 
 /**
- * __msi_lock_descs - Lock the MSI descriptor storage of a device
+ * msi_lock_descs - Lock the MSI descriptor storage of a device
  * @dev:	Device to operate on
- *
- * Internal function for guard(msi_descs_lock). Don't use in code.
 */
-void __msi_lock_descs(struct device *dev)
+void msi_lock_descs(struct device *dev)
 {
 	mutex_lock(&dev->msi.data->mutex);
 }
-EXPORT_SYMBOL_GPL(__msi_lock_descs);
+EXPORT_SYMBOL_GPL(msi_lock_descs);
 
 /**
- * __msi_unlock_descs - Unlock the MSI descriptor storage of a device
+ * msi_unlock_descs - Unlock the MSI descriptor storage of a device
  * @dev:	Device to operate on
- *
- * Internal function for guard(msi_descs_lock). Don't use in code.
 */
-void __msi_unlock_descs(struct device *dev)
+void msi_unlock_descs(struct device *dev)
 {
 	/* Invalidate the index which was cached by the iterator */
 	dev->msi.data->__iter_idx = MSI_XA_MAX_INDEX;
 	mutex_unlock(&dev->msi.data->mutex);
 }
-EXPORT_SYMBOL_GPL(__msi_unlock_descs);
+EXPORT_SYMBOL_GPL(msi_unlock_descs);
 
 static struct msi_desc *msi_find_desc(struct msi_device_data *md, unsigned int domid,
 				      enum msi_desc_filter filter)
@@ -447,6 +448,7 @@ EXPORT_SYMBOL_GPL(msi_next_desc);
 unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index)
 {
 	struct msi_desc *desc;
+	unsigned int ret = 0;
 	bool pcimsi = false;
 	struct xarray *xa;
 
@@ -460,7 +462,7 @@ unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index)
 	if (dev_is_pci(dev) && domid == MSI_DEFAULT_DOMAIN)
 		pcimsi = to_pci_dev(dev)->msi_enabled;
 
-	guard(msi_descs_lock)(dev);
+	msi_lock_descs(dev);
 	xa = &dev->msi.data->__domains[domid].store;
 	desc = xa_load(xa, pcimsi ? 0 : index);
 	if (desc && desc->irq) {
@@ -469,12 +471,16 @@ unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index)
 		 * PCI-MSIX and platform MSI use a descriptor per
 		 * interrupt.
 		 */
-		if (!pcimsi)
-			return desc->irq;
-		if (index < desc->nvec_used)
-			return desc->irq + index;
+		if (pcimsi) {
+			if (index < desc->nvec_used)
+				ret = desc->irq + index;
+		} else {
+			ret = desc->irq;
+		}
 	}
-	return 0;
+
+	msi_unlock_descs(dev);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(msi_domain_get_virq);
@@ -992,8 +998,9 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
 			 void *chip_data)
 {
 	struct irq_domain *domain, *parent = dev->msi.domain;
+	struct fwnode_handle *fwnode, *fwnalloced = NULL;
+	struct msi_domain_template *bundle;
 	const struct msi_parent_ops *pops;
-	struct fwnode_handle *fwnode;
 
 	if (!irq_domain_is_msi_parent(parent))
 		return false;
@@ -1001,8 +1008,7 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
 	if (domid >= MSI_MAX_DEVICE_IRQDOMAINS)
 		return false;
 
-	struct msi_domain_template *bundle __free(kfree) =
-		kmemdup(template, sizeof(*bundle), GFP_KERNEL);
+	bundle = kmemdup(template, sizeof(*bundle), GFP_KERNEL);
 	if (!bundle)
 		return false;
 
@@ -1025,36 +1031,41 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
 	 * node as they are not guaranteed to have a fwnode. They are never
 	 * looked up and always handled in the context of the device.
 	 */
-	struct fwnode_handle *fwnode_alloced __free(irq_domain_free_fwnode) = NULL;
-
-	if (!(bundle->info.flags & MSI_FLAG_USE_DEV_FWNODE))
-		fwnode = fwnode_alloced = irq_domain_alloc_named_fwnode(bundle->name);
-	else
+	if (bundle->info.flags & MSI_FLAG_USE_DEV_FWNODE)
 		fwnode = dev->fwnode;
+	else
+		fwnode = fwnalloced = irq_domain_alloc_named_fwnode(bundle->name);
 
 	if (!fwnode)
-		return false;
+		goto free_bundle;
 
 	if (msi_setup_device_data(dev))
-		return false;
+		goto free_fwnode;
+
+	msi_lock_descs(dev);
 
-	guard(msi_descs_lock)(dev);
 	if (WARN_ON_ONCE(msi_get_device_domain(dev, domid)))
-		return false;
+		goto fail;
 
 	if (!pops->init_dev_msi_info(dev, parent, parent, &bundle->info))
-		return false;
+		goto fail;
 
 	domain = __msi_create_irq_domain(fwnode, &bundle->info, IRQ_DOMAIN_FLAG_MSI_DEVICE, parent);
 	if (!domain)
-		return false;
+		goto fail;
 
-	/* @bundle and @fwnode_alloced are now in use. Prevent cleanup */
-	retain_ptr(bundle);
-	retain_ptr(fwnode_alloced);
 	domain->dev = dev;
 	dev->msi.data->__domains[domid].domain = domain;
+	msi_unlock_descs(dev);
 	return true;
+
+fail:
+	msi_unlock_descs(dev);
+free_fwnode:
+	irq_domain_free_fwnode(fwnalloced);
+free_bundle:
+	kfree(bundle);
+	return false;
 }
 
 /**
@@ -1068,10 +1079,12 @@ void msi_remove_device_irq_domain(struct device *dev, unsigned int domid)
 	struct msi_domain_info *info;
 	struct irq_domain *domain;
 
-	guard(msi_descs_lock)(dev);
+	msi_lock_descs(dev);
+
 	domain = msi_get_device_domain(dev, domid);
+
 	if (!domain || !irq_domain_is_msi_device(domain))
-		return;
+		goto unlock;
 
 	dev->msi.data->__domains[domid].domain = NULL;
 	info = domain->host_data;
@@ -1080,6 +1093,9 @@ void msi_remove_device_irq_domain(struct device *dev, unsigned int domid)
 	irq_domain_remove(domain);
 	irq_domain_free_fwnode(fwnode);
 	kfree(container_of(info, struct msi_domain_template, info));
+
+unlock:
+	msi_unlock_descs(dev);
 }
 
 /**
@@ -1095,14 +1111,16 @@ bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
 {
 	struct msi_domain_info *info;
 	struct irq_domain *domain;
+	bool ret = false;
 
-	guard(msi_descs_lock)(dev);
+	msi_lock_descs(dev);
 	domain = msi_get_device_domain(dev, domid);
 	if (domain && irq_domain_is_msi_device(domain)) {
 		info = domain->host_data;
-		return info->bus_token == bus_token;
+		ret = info->bus_token == bus_token;
 	}
-	return false;
+	msi_unlock_descs(dev);
+
+	return ret;
 }
 
 static int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
@@ -1333,6 +1351,33 @@ static int msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
 	return ret;
 }
 
+/**
+ * msi_domain_alloc_irqs_range_locked - Allocate interrupts from a MSI interrupt domain
+ * @dev:	Pointer to device struct of the device for which the interrupts
+ *		are allocated
+ * @domid:	Id of the interrupt domain to operate on
+ * @first:	First index to allocate (inclusive)
+ * @last:	Last index to allocate (inclusive)
+ *
+ * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
+ * pair. Use this for MSI irqdomains which implement their own descriptor
+ * allocation/free.
+ *
+ * Return: %0 on success or an error code.
+ */
+int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
+				       unsigned int first, unsigned int last)
+{
+	struct msi_ctrl ctrl = {
+		.domid	= domid,
+		.first	= first,
+		.last	= last,
+		.nirqs	= last + 1 - first,
+	};
+
+	return msi_domain_alloc_locked(dev, &ctrl);
+}
+
 /**
  * msi_domain_alloc_irqs_range - Allocate interrupts from a MSI interrupt domain
  * @dev:	Pointer to device struct of the device for which the interrupts
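The kernel-doc restored above spells out the convention behind the _locked suffix: the caller owns the descriptor mutex across a sequence of operations. A hedged sketch of a caller following that contract, in the shape of the ti_sci example earlier in this diff (alloc_descs() is a hypothetical stand-in for driver-specific descriptor setup):

	int ret;

	msi_lock_descs(dev);
	ret = alloc_descs(dev);		/* hypothetical driver-specific step */
	if (ret >= 0)
		ret = msi_domain_alloc_irqs_all_locked(dev, MSI_DEFAULT_DOMAIN, ret);
	msi_unlock_descs(dev);
	return ret;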
@@ -1346,15 +1391,12 @@ static int msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
 int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
 				unsigned int first, unsigned int last)
 {
-	struct msi_ctrl ctrl = {
-		.domid	= domid,
-		.first	= first,
-		.last	= last,
-		.nirqs	= last + 1 - first,
-	};
+	int ret;
 
-	guard(msi_descs_lock)(dev);
-	return msi_domain_alloc_locked(dev, &ctrl);
+	msi_lock_descs(dev);
+	ret = msi_domain_alloc_irqs_range_locked(dev, domid, first, last);
+	msi_unlock_descs(dev);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(msi_domain_alloc_irqs_range);
@@ -1458,8 +1500,12 @@ struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
 				       const struct irq_affinity_desc *affdesc,
 				       union msi_instance_cookie *icookie)
 {
-	guard(msi_descs_lock)(dev);
-	return __msi_domain_alloc_irq_at(dev, domid, index, affdesc, icookie);
+	struct msi_map map;
+
+	msi_lock_descs(dev);
+	map = __msi_domain_alloc_irq_at(dev, domid, index, affdesc, icookie);
+	msi_unlock_descs(dev);
+	return map;
 }
 
 /**
@@ -1496,11 +1542,13 @@ int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
 
 	icookie.value = ((u64)type << 32) | hwirq;
 
-	guard(msi_descs_lock)(dev);
+	msi_lock_descs(dev);
 	if (WARN_ON_ONCE(msi_get_device_domain(dev, domid) != domain))
 		map.index = -EINVAL;
 	else
 		map = __msi_domain_alloc_irq_at(dev, domid, MSI_ANY_INDEX, NULL, &icookie);
+	msi_unlock_descs(dev);
 
 	return map.index >= 0 ? map.virq : map.index;
 }
@@ -1570,8 +1618,8 @@ static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl)
  * @first:	First index to free (inclusive)
  * @last:	Last index to free (inclusive)
 */
-static void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
-					      unsigned int first, unsigned int last)
+void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
+				       unsigned int first, unsigned int last)
 {
 	struct msi_ctrl ctrl = {
 		.domid	= domid,
@@ -1593,8 +1641,9 @@ static void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
 void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
 				unsigned int first, unsigned int last)
 {
-	guard(msi_descs_lock)(dev);
+	msi_lock_descs(dev);
 	msi_domain_free_irqs_range_locked(dev, domid, first, last);
+	msi_unlock_descs(dev);
 }
 EXPORT_SYMBOL_GPL(msi_domain_free_irqs_all);
@@ -1624,8 +1673,9 @@ void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid)
 */
 void msi_domain_free_irqs_all(struct device *dev, unsigned int domid)
 {
-	guard(msi_descs_lock)(dev);
+	msi_lock_descs(dev);
 	msi_domain_free_irqs_all_locked(dev, domid);
+	msi_unlock_descs(dev);
 }
 
 /**
@@ -1644,11 +1694,12 @@ void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq)
 	if (WARN_ON_ONCE(!dev || !desc || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI))
 		return;
 
-	guard(msi_descs_lock)(dev);
-	if (WARN_ON_ONCE(msi_get_device_domain(dev, MSI_DEFAULT_DOMAIN) != domain))
-		return;
-	msi_domain_free_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, desc->msi_index,
-					  desc->msi_index);
+	msi_lock_descs(dev);
+	if (!WARN_ON_ONCE(msi_get_device_domain(dev, MSI_DEFAULT_DOMAIN) != domain)) {
+		msi_domain_free_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, desc->msi_index,
+						  desc->msi_index);
+	}
+	msi_unlock_descs(dev);
 }
 
 /**