iommu: Pass in old domain to attach_dev callback functions
The IOMMU core attaches each device to a default domain on probe(). Then, every new "attach" operation has a fundamental meaning of two-fold: - detach from its currently attached (old) domain - attach to a given new domain Modern IOMMU drivers following this pattern usually want to clean up the things related to the old domain, so they call iommu_get_domain_for_dev() to fetch the old domain. Pass in the old domain pointer from the core to drivers, aligning with the set_dev_pasid op that does so already. Ensure all low-level attach functions in the core can forward the correct old domain pointer. Thus, rework those functions as well. Suggested-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Kevin Tian <kevin.tian@intel.com> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com> Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com> Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
This commit is contained in:
committed by
Joerg Roedel
parent
2b33598e66
commit
fd714986e4
@@ -1156,7 +1156,8 @@ EXPORT_SYMBOL_GPL(iommu_add_device);
|
||||
*/
|
||||
static int
|
||||
spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
struct iommu_table_group *table_group;
|
||||
@@ -1189,7 +1190,7 @@ static struct iommu_domain spapr_tce_platform_domain = {
|
||||
|
||||
static int
|
||||
spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain,
|
||||
struct device *dev)
|
||||
struct device *dev, struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_group *grp = iommu_group_get(dev);
|
||||
struct iommu_table_group *table_group;
|
||||
|
||||
@@ -70,8 +70,8 @@ int amd_iommu_max_glx_val = -1;
|
||||
*/
|
||||
DEFINE_IDA(pdom_ids);
|
||||
|
||||
static int amd_iommu_attach_device(struct iommu_domain *dom,
|
||||
struct device *dev);
|
||||
static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev,
|
||||
struct iommu_domain *old);
|
||||
|
||||
static void set_dte_entry(struct amd_iommu *iommu,
|
||||
struct iommu_dev_data *dev_data);
|
||||
@@ -2635,7 +2635,8 @@ void amd_iommu_domain_free(struct iommu_domain *dom)
|
||||
}
|
||||
|
||||
static int blocked_domain_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
|
||||
|
||||
@@ -2685,8 +2686,8 @@ void amd_iommu_init_identity_domain(void)
|
||||
protection_domain_init(&identity_domain);
|
||||
}
|
||||
|
||||
static int amd_iommu_attach_device(struct iommu_domain *dom,
|
||||
struct device *dev)
|
||||
static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
|
||||
struct protection_domain *domain = to_pdomain(dom);
|
||||
|
||||
@@ -672,7 +672,8 @@ static int apple_dart_domain_add_streams(struct apple_dart_domain *domain,
|
||||
}
|
||||
|
||||
static int apple_dart_attach_dev_paging(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
int ret, i;
|
||||
struct apple_dart_stream_map *stream_map;
|
||||
@@ -693,7 +694,8 @@ static int apple_dart_attach_dev_paging(struct iommu_domain *domain,
|
||||
}
|
||||
|
||||
static int apple_dart_attach_dev_identity(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
|
||||
struct apple_dart_stream_map *stream_map;
|
||||
@@ -717,7 +719,8 @@ static struct iommu_domain apple_dart_identity_domain = {
|
||||
};
|
||||
|
||||
static int apple_dart_attach_dev_blocked(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
|
||||
struct apple_dart_stream_map *stream_map;
|
||||
|
||||
@@ -138,14 +138,15 @@ void arm_smmu_master_clear_vmaster(struct arm_smmu_master *master)
|
||||
}
|
||||
|
||||
static int arm_smmu_attach_dev_nested(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old_domain)
|
||||
{
|
||||
struct arm_smmu_nested_domain *nested_domain =
|
||||
to_smmu_nested_domain(domain);
|
||||
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
|
||||
struct arm_smmu_attach_state state = {
|
||||
.master = master,
|
||||
.old_domain = iommu_get_domain_for_dev(dev),
|
||||
.old_domain = old_domain,
|
||||
.ssid = IOMMU_NO_PASID,
|
||||
};
|
||||
struct arm_smmu_ste ste;
|
||||
|
||||
@@ -3002,7 +3002,8 @@ void arm_smmu_attach_commit(struct arm_smmu_attach_state *state)
|
||||
master->ats_enabled = state->ats_enabled;
|
||||
}
|
||||
|
||||
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev,
|
||||
struct iommu_domain *old_domain)
|
||||
{
|
||||
int ret = 0;
|
||||
struct arm_smmu_ste target;
|
||||
@@ -3010,7 +3011,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
struct arm_smmu_device *smmu;
|
||||
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
|
||||
struct arm_smmu_attach_state state = {
|
||||
.old_domain = iommu_get_domain_for_dev(dev),
|
||||
.old_domain = old_domain,
|
||||
.ssid = IOMMU_NO_PASID,
|
||||
};
|
||||
struct arm_smmu_master *master;
|
||||
@@ -3186,7 +3187,7 @@ static int arm_smmu_blocking_set_dev_pasid(struct iommu_domain *new_domain,
|
||||
|
||||
/*
|
||||
* When the last user of the CD table goes away downgrade the STE back
|
||||
* to a non-cd_table one.
|
||||
* to a non-cd_table one, by re-attaching its sid_domain.
|
||||
*/
|
||||
if (!arm_smmu_ssids_in_use(&master->cd_table)) {
|
||||
struct iommu_domain *sid_domain =
|
||||
@@ -3194,12 +3195,14 @@ static int arm_smmu_blocking_set_dev_pasid(struct iommu_domain *new_domain,
|
||||
|
||||
if (sid_domain->type == IOMMU_DOMAIN_IDENTITY ||
|
||||
sid_domain->type == IOMMU_DOMAIN_BLOCKED)
|
||||
sid_domain->ops->attach_dev(sid_domain, dev);
|
||||
sid_domain->ops->attach_dev(sid_domain, dev,
|
||||
sid_domain);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
|
||||
struct iommu_domain *old_domain,
|
||||
struct device *dev,
|
||||
struct arm_smmu_ste *ste,
|
||||
unsigned int s1dss)
|
||||
@@ -3207,7 +3210,7 @@ static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
|
||||
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
|
||||
struct arm_smmu_attach_state state = {
|
||||
.master = master,
|
||||
.old_domain = iommu_get_domain_for_dev(dev),
|
||||
.old_domain = old_domain,
|
||||
.ssid = IOMMU_NO_PASID,
|
||||
};
|
||||
|
||||
@@ -3248,14 +3251,16 @@ static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
|
||||
}
|
||||
|
||||
static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old_domain)
|
||||
{
|
||||
struct arm_smmu_ste ste;
|
||||
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
|
||||
|
||||
arm_smmu_master_clear_vmaster(master);
|
||||
arm_smmu_make_bypass_ste(master->smmu, &ste);
|
||||
arm_smmu_attach_dev_ste(domain, dev, &ste, STRTAB_STE_1_S1DSS_BYPASS);
|
||||
arm_smmu_attach_dev_ste(domain, old_domain, dev, &ste,
|
||||
STRTAB_STE_1_S1DSS_BYPASS);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -3269,14 +3274,15 @@ static struct iommu_domain arm_smmu_identity_domain = {
|
||||
};
|
||||
|
||||
static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old_domain)
|
||||
{
|
||||
struct arm_smmu_ste ste;
|
||||
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
|
||||
|
||||
arm_smmu_master_clear_vmaster(master);
|
||||
arm_smmu_make_abort_ste(&ste);
|
||||
arm_smmu_attach_dev_ste(domain, dev, &ste,
|
||||
arm_smmu_attach_dev_ste(domain, old_domain, dev, &ste,
|
||||
STRTAB_STE_1_S1DSS_TERMINATE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1165,7 +1165,8 @@ static void arm_smmu_master_install_s2crs(struct arm_smmu_master_cfg *cfg,
|
||||
}
|
||||
}
|
||||
|
||||
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
|
||||
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
|
||||
@@ -1234,7 +1235,8 @@ static int arm_smmu_attach_dev_type(struct device *dev,
|
||||
}
|
||||
|
||||
static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
return arm_smmu_attach_dev_type(dev, S2CR_TYPE_BYPASS);
|
||||
}
|
||||
@@ -1249,7 +1251,8 @@ static struct iommu_domain arm_smmu_identity_domain = {
|
||||
};
|
||||
|
||||
static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
return arm_smmu_attach_dev_type(dev, S2CR_TYPE_FAULT);
|
||||
}
|
||||
|
||||
@@ -359,7 +359,8 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
|
||||
kfree(qcom_domain);
|
||||
}
|
||||
|
||||
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
static int qcom_iommu_attach_dev(struct iommu_domain *domain,
|
||||
struct device *dev, struct iommu_domain *old)
|
||||
{
|
||||
struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
|
||||
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
|
||||
@@ -388,18 +389,18 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
|
||||
}
|
||||
|
||||
static int qcom_iommu_identity_attach(struct iommu_domain *identity_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
struct qcom_iommu_domain *qcom_domain;
|
||||
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
|
||||
struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
|
||||
unsigned int i;
|
||||
|
||||
if (domain == identity_domain || !domain)
|
||||
if (old == identity_domain || !old)
|
||||
return 0;
|
||||
|
||||
qcom_domain = to_qcom_iommu_domain(domain);
|
||||
qcom_domain = to_qcom_iommu_domain(old);
|
||||
if (WARN_ON(!qcom_domain->iommu))
|
||||
return -EINVAL;
|
||||
|
||||
|
||||
@@ -984,7 +984,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
|
||||
}
|
||||
|
||||
static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
|
||||
struct exynos_iommu_domain *domain;
|
||||
@@ -1035,7 +1036,8 @@ static struct iommu_domain exynos_identity_domain = {
|
||||
};
|
||||
|
||||
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
|
||||
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
|
||||
@@ -1044,7 +1046,7 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
|
||||
unsigned long flags;
|
||||
int err;
|
||||
|
||||
err = exynos_iommu_identity_attach(&exynos_identity_domain, dev);
|
||||
err = exynos_iommu_identity_attach(&exynos_identity_domain, dev, old);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
||||
@@ -238,7 +238,7 @@ static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
|
||||
}
|
||||
|
||||
static int fsl_pamu_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev, struct iommu_domain *old)
|
||||
{
|
||||
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
|
||||
unsigned long flags;
|
||||
@@ -298,9 +298,9 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
|
||||
* switches to what looks like BLOCKING.
|
||||
*/
|
||||
static int fsl_pamu_platform_attach(struct iommu_domain *platform_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
struct fsl_dma_domain *dma_domain;
|
||||
const u32 *prop;
|
||||
int len;
|
||||
@@ -311,11 +311,11 @@ static int fsl_pamu_platform_attach(struct iommu_domain *platform_domain,
|
||||
* Hack to keep things working as they always have, only leaving an
|
||||
* UNMANAGED domain makes it BLOCKING.
|
||||
*/
|
||||
if (domain == platform_domain || !domain ||
|
||||
domain->type != IOMMU_DOMAIN_UNMANAGED)
|
||||
if (old == platform_domain || !old ||
|
||||
old->type != IOMMU_DOMAIN_UNMANAGED)
|
||||
return 0;
|
||||
|
||||
dma_domain = to_fsl_dma_domain(domain);
|
||||
dma_domain = to_fsl_dma_domain(old);
|
||||
|
||||
/*
|
||||
* Use LIODN of the PCI controller while detaching a
|
||||
|
||||
@@ -3230,7 +3230,8 @@ void device_block_translation(struct device *dev)
|
||||
}
|
||||
|
||||
static int blocking_domain_attach_dev(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
|
||||
@@ -3537,7 +3538,8 @@ int paging_domain_compatible(struct iommu_domain *domain, struct device *dev)
|
||||
}
|
||||
|
||||
static int intel_iommu_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@@ -4401,7 +4403,9 @@ static int device_setup_pass_through(struct device *dev)
|
||||
context_setup_pass_through_cb, dev);
|
||||
}
|
||||
|
||||
static int identity_domain_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
static int identity_domain_attach_dev(struct iommu_domain *domain,
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
struct intel_iommu *iommu = info->iommu;
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
#include "pasid.h"
|
||||
|
||||
static int intel_nested_attach_dev(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev, struct iommu_domain *old)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
|
||||
|
||||
@@ -100,7 +100,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
|
||||
unsigned long action, void *data);
|
||||
static void iommu_release_device(struct device *dev);
|
||||
static int __iommu_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev);
|
||||
struct device *dev, struct iommu_domain *old);
|
||||
static int __iommu_attach_group(struct iommu_domain *domain,
|
||||
struct iommu_group *group);
|
||||
static struct iommu_domain *__iommu_paging_domain_alloc_flags(struct device *dev,
|
||||
@@ -114,6 +114,7 @@ enum {
|
||||
static int __iommu_device_set_domain(struct iommu_group *group,
|
||||
struct device *dev,
|
||||
struct iommu_domain *new_domain,
|
||||
struct iommu_domain *old_domain,
|
||||
unsigned int flags);
|
||||
static int __iommu_group_set_domain_internal(struct iommu_group *group,
|
||||
struct iommu_domain *new_domain,
|
||||
@@ -554,7 +555,8 @@ static void iommu_deinit_device(struct device *dev)
|
||||
release_domain == ops->blocked_domain)
|
||||
release_domain = ops->identity_domain;
|
||||
|
||||
release_domain->ops->attach_dev(release_domain, dev);
|
||||
release_domain->ops->attach_dev(release_domain, dev,
|
||||
group->domain);
|
||||
}
|
||||
|
||||
if (ops->release_device)
|
||||
@@ -640,7 +642,8 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
|
||||
if (group->default_domain)
|
||||
iommu_create_device_direct_mappings(group->default_domain, dev);
|
||||
if (group->domain) {
|
||||
ret = __iommu_device_set_domain(group, dev, group->domain, 0);
|
||||
ret = __iommu_device_set_domain(group, dev, group->domain, NULL,
|
||||
0);
|
||||
if (ret)
|
||||
goto err_remove_gdev;
|
||||
} else if (!group->default_domain && !group_list) {
|
||||
@@ -2127,14 +2130,14 @@ static void __iommu_group_set_core_domain(struct iommu_group *group)
|
||||
}
|
||||
|
||||
static int __iommu_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev, struct iommu_domain *old)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (unlikely(domain->ops->attach_dev == NULL))
|
||||
return -ENODEV;
|
||||
|
||||
ret = domain->ops->attach_dev(domain, dev);
|
||||
ret = domain->ops->attach_dev(domain, dev, old);
|
||||
if (ret)
|
||||
return ret;
|
||||
dev->iommu->attach_deferred = 0;
|
||||
@@ -2183,7 +2186,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
|
||||
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
|
||||
{
|
||||
if (dev->iommu && dev->iommu->attach_deferred)
|
||||
return __iommu_attach_device(domain, dev);
|
||||
return __iommu_attach_device(domain, dev, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -2296,6 +2299,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_group);
|
||||
static int __iommu_device_set_domain(struct iommu_group *group,
|
||||
struct device *dev,
|
||||
struct iommu_domain *new_domain,
|
||||
struct iommu_domain *old_domain,
|
||||
unsigned int flags)
|
||||
{
|
||||
int ret;
|
||||
@@ -2321,7 +2325,7 @@ static int __iommu_device_set_domain(struct iommu_group *group,
|
||||
dev->iommu->attach_deferred = 0;
|
||||
}
|
||||
|
||||
ret = __iommu_attach_device(new_domain, dev);
|
||||
ret = __iommu_attach_device(new_domain, dev, old_domain);
|
||||
if (ret) {
|
||||
/*
|
||||
* If we have a blocking domain then try to attach that in hopes
|
||||
@@ -2331,7 +2335,8 @@ static int __iommu_device_set_domain(struct iommu_group *group,
|
||||
if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) &&
|
||||
group->blocking_domain &&
|
||||
group->blocking_domain != new_domain)
|
||||
__iommu_attach_device(group->blocking_domain, dev);
|
||||
__iommu_attach_device(group->blocking_domain, dev,
|
||||
old_domain);
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
@@ -2378,7 +2383,7 @@ static int __iommu_group_set_domain_internal(struct iommu_group *group,
|
||||
result = 0;
|
||||
for_each_group_device(group, gdev) {
|
||||
ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
|
||||
flags);
|
||||
group->domain, flags);
|
||||
if (ret) {
|
||||
result = ret;
|
||||
/*
|
||||
@@ -2413,7 +2418,7 @@ err_revert:
|
||||
*/
|
||||
if (group->domain)
|
||||
WARN_ON(__iommu_device_set_domain(
|
||||
group, gdev->dev, group->domain,
|
||||
group, gdev->dev, group->domain, new_domain,
|
||||
IOMMU_SET_DOMAIN_MUST_SUCCEED));
|
||||
}
|
||||
return ret;
|
||||
|
||||
@@ -216,7 +216,7 @@ static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
|
||||
}
|
||||
|
||||
static int mock_domain_nop_attach(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev, struct iommu_domain *old)
|
||||
{
|
||||
struct mock_dev *mdev = to_mock_dev(dev);
|
||||
struct mock_viommu *new_viommu = NULL;
|
||||
|
||||
@@ -590,7 +590,7 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain)
|
||||
}
|
||||
|
||||
static int ipmmu_attach_device(struct iommu_domain *io_domain,
|
||||
struct device *dev)
|
||||
struct device *dev, struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
|
||||
struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
|
||||
@@ -637,17 +637,17 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
|
||||
}
|
||||
|
||||
static int ipmmu_iommu_identity_attach(struct iommu_domain *identity_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_domain *io_domain = iommu_get_domain_for_dev(dev);
|
||||
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
|
||||
struct ipmmu_vmsa_domain *domain;
|
||||
unsigned int i;
|
||||
|
||||
if (io_domain == identity_domain || !io_domain)
|
||||
if (old == identity_domain || !old)
|
||||
return 0;
|
||||
|
||||
domain = to_vmsa_domain(io_domain);
|
||||
domain = to_vmsa_domain(old);
|
||||
for (i = 0; i < fwspec->num_ids; ++i)
|
||||
ipmmu_utlb_disable(domain, fwspec->ids[i]);
|
||||
|
||||
|
||||
@@ -391,7 +391,8 @@ static struct iommu_device *msm_iommu_probe_device(struct device *dev)
|
||||
return &iommu->iommu;
|
||||
}
|
||||
|
||||
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned long flags;
|
||||
@@ -441,19 +442,19 @@ fail:
|
||||
}
|
||||
|
||||
static int msm_iommu_identity_attach(struct iommu_domain *identity_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
struct msm_priv *priv;
|
||||
unsigned long flags;
|
||||
struct msm_iommu_dev *iommu;
|
||||
struct msm_iommu_ctx_dev *master;
|
||||
int ret = 0;
|
||||
|
||||
if (domain == identity_domain || !domain)
|
||||
if (old == identity_domain || !old)
|
||||
return 0;
|
||||
|
||||
priv = to_msm_priv(domain);
|
||||
priv = to_msm_priv(old);
|
||||
free_io_pgtable_ops(priv->iop);
|
||||
|
||||
spin_lock_irqsave(&msm_iommu_lock, flags);
|
||||
|
||||
@@ -705,7 +705,7 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
|
||||
}
|
||||
|
||||
static int mtk_iommu_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev, struct iommu_domain *old)
|
||||
{
|
||||
struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata;
|
||||
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
|
||||
@@ -773,12 +773,12 @@ err_unlock:
|
||||
}
|
||||
|
||||
static int mtk_iommu_identity_attach(struct iommu_domain *identity_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
|
||||
|
||||
if (domain == identity_domain || !domain)
|
||||
if (old == identity_domain || !old)
|
||||
return 0;
|
||||
|
||||
mtk_iommu_config(data, dev, false, 0);
|
||||
|
||||
@@ -303,7 +303,9 @@ static void mtk_iommu_v1_domain_free(struct iommu_domain *domain)
|
||||
kfree(to_mtk_domain(domain));
|
||||
}
|
||||
|
||||
static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device *dev)
|
||||
static int mtk_iommu_v1_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
|
||||
struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
|
||||
@@ -329,7 +331,8 @@ static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device
|
||||
}
|
||||
|
||||
static int mtk_iommu_v1_identity_attach(struct iommu_domain *identity_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
|
||||
|
||||
|
||||
@@ -1431,8 +1431,8 @@ static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
|
||||
odomain->iommus = NULL;
|
||||
}
|
||||
|
||||
static int
|
||||
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
static int omap_iommu_attach_dev(struct iommu_domain *domain,
|
||||
struct device *dev, struct iommu_domain *old)
|
||||
{
|
||||
struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
|
||||
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
|
||||
@@ -1536,15 +1536,15 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
|
||||
}
|
||||
|
||||
static int omap_iommu_identity_attach(struct iommu_domain *identity_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
struct omap_iommu_domain *omap_domain;
|
||||
|
||||
if (domain == identity_domain || !domain)
|
||||
if (old == identity_domain || !old)
|
||||
return 0;
|
||||
|
||||
omap_domain = to_omap_domain(domain);
|
||||
omap_domain = to_omap_domain(old);
|
||||
spin_lock(&omap_domain->lock);
|
||||
_omap_iommu_detach_dev(omap_domain, dev);
|
||||
spin_unlock(&omap_domain->lock);
|
||||
|
||||
@@ -1321,7 +1321,8 @@ static bool riscv_iommu_pt_supported(struct riscv_iommu_device *iommu, int pgd_m
|
||||
}
|
||||
|
||||
static int riscv_iommu_attach_paging_domain(struct iommu_domain *iommu_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain);
|
||||
struct riscv_iommu_device *iommu = dev_to_iommu(dev);
|
||||
@@ -1426,7 +1427,8 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
|
||||
}
|
||||
|
||||
static int riscv_iommu_attach_blocking_domain(struct iommu_domain *iommu_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct riscv_iommu_device *iommu = dev_to_iommu(dev);
|
||||
struct riscv_iommu_info *info = dev_iommu_priv_get(dev);
|
||||
@@ -1447,7 +1449,8 @@ static struct iommu_domain riscv_iommu_blocking_domain = {
|
||||
};
|
||||
|
||||
static int riscv_iommu_attach_identity_domain(struct iommu_domain *iommu_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct riscv_iommu_device *iommu = dev_to_iommu(dev);
|
||||
struct riscv_iommu_info *info = dev_iommu_priv_get(dev);
|
||||
|
||||
@@ -960,7 +960,8 @@ out_disable_clocks:
|
||||
}
|
||||
|
||||
static int rk_iommu_identity_attach(struct iommu_domain *identity_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct rk_iommu *iommu;
|
||||
struct rk_iommu_domain *rk_domain;
|
||||
@@ -1005,7 +1006,7 @@ static struct iommu_domain rk_identity_domain = {
|
||||
};
|
||||
|
||||
static int rk_iommu_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev, struct iommu_domain *old)
|
||||
{
|
||||
struct rk_iommu *iommu;
|
||||
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
|
||||
@@ -1026,7 +1027,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
|
||||
if (iommu->domain == domain)
|
||||
return 0;
|
||||
|
||||
ret = rk_iommu_identity_attach(&rk_identity_domain, dev);
|
||||
ret = rk_iommu_identity_attach(&rk_identity_domain, dev, old);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -1041,8 +1042,17 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
|
||||
return 0;
|
||||
|
||||
ret = rk_iommu_enable(iommu);
|
||||
if (ret)
|
||||
WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev));
|
||||
if (ret) {
|
||||
/*
|
||||
* Note rk_iommu_identity_attach() might fail before physically
|
||||
* attaching the dev to iommu->domain, in which case the actual
|
||||
* old domain for this revert should be rk_identity_domain v.s.
|
||||
* iommu->domain. Since rk_iommu_identity_attach() does not care
|
||||
* about the old domain argument for now, this is not a problem.
|
||||
*/
|
||||
WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev,
|
||||
iommu->domain));
|
||||
}
|
||||
|
||||
pm_runtime_put(iommu->dev);
|
||||
|
||||
|
||||
@@ -670,7 +670,8 @@ int zpci_iommu_register_ioat(struct zpci_dev *zdev, u8 *status)
|
||||
}
|
||||
|
||||
static int blocking_domain_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct zpci_dev *zdev = to_zpci_dev(dev);
|
||||
struct s390_domain *s390_domain;
|
||||
@@ -694,7 +695,8 @@ static int blocking_domain_attach_device(struct iommu_domain *domain,
|
||||
}
|
||||
|
||||
static int s390_iommu_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct s390_domain *s390_domain = to_s390_domain(domain);
|
||||
struct zpci_dev *zdev = to_zpci_dev(dev);
|
||||
@@ -709,7 +711,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
|
||||
domain->geometry.aperture_end < zdev->start_dma))
|
||||
return -EINVAL;
|
||||
|
||||
blocking_domain_attach_device(&blocking_domain, dev);
|
||||
blocking_domain_attach_device(&blocking_domain, dev, old);
|
||||
|
||||
/* If we fail now DMA remains blocked via blocking domain */
|
||||
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
|
||||
@@ -1131,13 +1133,14 @@ static int __init s390_iommu_init(void)
|
||||
subsys_initcall(s390_iommu_init);
|
||||
|
||||
static int s390_attach_dev_identity(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct zpci_dev *zdev = to_zpci_dev(dev);
|
||||
u8 status;
|
||||
int cc;
|
||||
|
||||
blocking_domain_attach_device(&blocking_domain, dev);
|
||||
blocking_domain_attach_device(&blocking_domain, dev, old);
|
||||
|
||||
/* If we fail now DMA remains blocked via blocking domain */
|
||||
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
|
||||
|
||||
@@ -247,7 +247,8 @@ static void sprd_iommu_domain_free(struct iommu_domain *domain)
|
||||
}
|
||||
|
||||
static int sprd_iommu_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
|
||||
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
|
||||
|
||||
@@ -771,7 +771,8 @@ static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
|
||||
}
|
||||
|
||||
static int sun50i_iommu_identity_attach(struct iommu_domain *identity_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
|
||||
struct sun50i_iommu_domain *sun50i_domain;
|
||||
@@ -797,7 +798,8 @@ static struct iommu_domain sun50i_iommu_identity_domain = {
|
||||
};
|
||||
|
||||
static int sun50i_iommu_attach_device(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
|
||||
struct sun50i_iommu *iommu;
|
||||
@@ -813,7 +815,7 @@ static int sun50i_iommu_attach_device(struct iommu_domain *domain,
|
||||
if (iommu->domain == domain)
|
||||
return 0;
|
||||
|
||||
sun50i_iommu_identity_attach(&sun50i_iommu_identity_domain, dev);
|
||||
sun50i_iommu_identity_attach(&sun50i_iommu_identity_domain, dev, old);
|
||||
|
||||
sun50i_iommu_attach_domain(iommu, sun50i_domain);
|
||||
|
||||
|
||||
@@ -490,7 +490,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
|
||||
}
|
||||
|
||||
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev, struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
|
||||
struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
|
||||
@@ -524,9 +524,9 @@ disable:
|
||||
}
|
||||
|
||||
static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
|
||||
struct tegra_smmu_as *as;
|
||||
struct tegra_smmu *smmu;
|
||||
@@ -535,10 +535,10 @@ static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain,
|
||||
if (!fwspec)
|
||||
return -ENODEV;
|
||||
|
||||
if (domain == identity_domain || !domain)
|
||||
if (old == identity_domain || !old)
|
||||
return 0;
|
||||
|
||||
as = to_smmu_as(domain);
|
||||
as = to_smmu_as(old);
|
||||
smmu = as->smmu;
|
||||
for (index = 0; index < fwspec->num_ids; index++) {
|
||||
tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
|
||||
|
||||
@@ -730,7 +730,8 @@ static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev)
|
||||
return domain;
|
||||
}
|
||||
|
||||
static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
int ret = 0;
|
||||
struct virtio_iommu_req_attach req;
|
||||
@@ -781,7 +782,8 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
}
|
||||
|
||||
static int viommu_attach_identity_domain(struct iommu_domain *domain,
|
||||
struct device *dev)
|
||||
struct device *dev,
|
||||
struct iommu_domain *old)
|
||||
{
|
||||
int ret = 0;
|
||||
struct virtio_iommu_req_attach req;
|
||||
|
||||
@@ -751,7 +751,8 @@ struct iommu_ops {
|
||||
* @free: Release the domain after use.
|
||||
*/
|
||||
struct iommu_domain_ops {
|
||||
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
|
||||
int (*attach_dev)(struct iommu_domain *domain, struct device *dev,
|
||||
struct iommu_domain *old);
|
||||
int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
|
||||
ioasid_t pasid, struct iommu_domain *old);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user