From 8ee37d0bcd40117999fc5e226cc4e5b107ead09b Mon Sep 17 00:00:00 2001
From: Yunfei Wang
Date: Thu, 23 Jun 2022 15:43:53 +0800
Subject: [PATCH] BACKPORT: iommu/dma: Fix race condition during iova_domain
 initialization

When many devices share the same iova domain, iommu_dma_init_domain()
may be called for each of them at the same time. The iovad->start_pfn
check in iommu_dma_init_domain() then evaluates to false for every
caller, and they all enter init_iova_domain() to initialise the same
iovad.

Fix this by protecting init_iova_domain() with iommu_dma_cookie->mutex.

Exception backtrace:
rb_insert_color(param1=0xFFFFFF80CD2BDB40, param3=1) + 64
init_iova_domain() + 180
iommu_setup_dma_ops() + 260
arch_setup_dma_ops() + 132
of_dma_configure_id() + 468
platform_dma_configure() + 32
really_probe() + 1168
driver_probe_device() + 268
__device_attach_driver() + 524
__device_attach() + 524
bus_probe_device() + 64
deferred_probe_work_func() + 260
process_one_work() + 580
worker_thread() + 1076
kthread() + 332
ret_from_fork() + 16

Signed-off-by: Ning Li
Signed-off-by: Yunfei Wang
Acked-by: Robin Murphy
Reviewed-by: Miles Chen
Link: https://lore.kernel.org/r/20220530120748.31733-1-yf.wang@mediatek.com
Signed-off-by: Joerg Roedel
Bug: 236922015
(cherry picked from commit ac9a5d522bb80be50ea84965699e1c8257d745ce
 https://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git core)
[Yunfei: Embed iommu_dma_cookie into iommu_dma_cookie_ext to avoid
 changing struct iommu_dma_cookie]
Signed-off-by: Yunfei Wang
Change-Id: I9b7931bea912837f17d2322713ba68a37122499d
(cherry picked from commit 8a410d778a3c3b1f535acecff7f53c542ffb348c)
---
 drivers/iommu/dma-iommu.c | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2591b973a31b..6f0ba38aaeea 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -50,6 +50,11 @@ struct iommu_dma_cookie {
         struct iommu_domain             *fq_domain;
 };
 
+struct iommu_dma_cookie_ext {
+        struct iommu_dma_cookie cookie;
+        struct mutex            mutex;
+};
+
 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
 {
         if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
@@ -59,14 +64,15 @@ static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
 
 static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
 {
-        struct iommu_dma_cookie *cookie;
+        struct iommu_dma_cookie_ext *cookie;
 
         cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
         if (cookie) {
-                INIT_LIST_HEAD(&cookie->msi_page_list);
-                cookie->type = type;
+                INIT_LIST_HEAD(&cookie->cookie.msi_page_list);
+                cookie->cookie.type = type;
+                mutex_init(&cookie->mutex);
         }
-        return cookie;
+        return &cookie->cookie;
 }
 
 /**
@@ -305,9 +311,11 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                 u64 size, struct device *dev)
 {
         struct iommu_dma_cookie *cookie = domain->iova_cookie;
+        struct iommu_dma_cookie_ext *cookie_ext;
         unsigned long order, base_pfn;
         struct iova_domain *iovad;
         int attr;
+        int ret;
 
         if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                 return -EINVAL;
@@ -331,14 +339,18 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
         }
 
         /* start_pfn is always nonzero for an already-initialised domain */
+        cookie_ext = container_of(cookie, struct iommu_dma_cookie_ext, cookie);
+        mutex_lock(&cookie_ext->mutex);
         if (iovad->start_pfn) {
                 if (1UL << order != iovad->granule ||
                     base_pfn != iovad->start_pfn) {
                         pr_warn("Incompatible range for DMA domain\n");
-                        return -EFAULT;
+                        ret = -EFAULT;
+                        goto done_unlock;
                 }
 
-                return 0;
+                ret = 0;
+                goto done_unlock;
         }
 
         init_iova_domain(iovad, 1UL << order, base_pfn);
@@ -352,10 +364,16 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                 cookie->fq_domain = domain;
         }
 
-        if (!dev)
-                return 0;
+        if (!dev) {
+                ret = 0;
+                goto done_unlock;
+        }
 
-        return iova_reserve_iommu_regions(dev, domain);
+        ret = iova_reserve_iommu_regions(dev, domain);
+
+done_unlock:
+        mutex_unlock(&cookie_ext->mutex);
+        return ret;
 }
 
 static int iommu_dma_deferred_attach(struct device *dev,
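
For readers outside the kernel tree, the pattern the patch applies is a mutex-serialised
check-then-initialise sequence, with the mutex carried by a wrapper struct that is reached
via container_of() so that struct iommu_dma_cookie itself stays unchanged. The following is
a minimal userspace sketch of that pattern, for illustration only: it uses pthreads instead
of kernel mutexes, and the names dma_cookie, dma_cookie_ext, cookie_alloc and cookie_init
are hypothetical stand-ins rather than kernel APIs.

/* Illustrative sketch only -- not kernel code. */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_cookie {                     /* stand-in for struct iommu_dma_cookie */
        unsigned long start_pfn;        /* nonzero once the domain is initialised */
        unsigned long granule;
};

struct dma_cookie_ext {                 /* stand-in for struct iommu_dma_cookie_ext */
        struct dma_cookie cookie;
        pthread_mutex_t mutex;          /* serialises the one-time initialisation */
};

static struct dma_cookie *cookie_alloc(void)
{
        struct dma_cookie_ext *ext = calloc(1, sizeof(*ext));

        if (!ext)
                return NULL;
        pthread_mutex_init(&ext->mutex, NULL);
        return &ext->cookie;            /* callers only ever see the embedded cookie */
}

/* All concurrent callers funnel through here; only the first one initialises. */
static int cookie_init(struct dma_cookie *cookie, unsigned long base_pfn,
                       unsigned long granule)
{
        struct dma_cookie_ext *ext =
                container_of(cookie, struct dma_cookie_ext, cookie);
        int ret = 0;

        pthread_mutex_lock(&ext->mutex);
        if (cookie->start_pfn) {
                /* Already initialised: only verify compatibility. */
                if (cookie->granule != granule || cookie->start_pfn != base_pfn)
                        ret = -EFAULT;
                goto done_unlock;
        }
        cookie->start_pfn = base_pfn;   /* one-time init, done under the mutex */
        cookie->granule = granule;

done_unlock:
        pthread_mutex_unlock(&ext->mutex);
        return ret;
}

int main(void)
{
        struct dma_cookie *c = cookie_alloc();

        if (!c)
                return 1;
        printf("first init:  %d\n", cookie_init(c, 0x1000, 4096));     /* 0 */
        printf("second init: %d\n", cookie_init(c, 0x1000, 4096));     /* 0: already done */
        printf("mismatch:    %d\n", cookie_init(c, 0x2000, 4096));     /* -EFAULT */
        return 0;
}

Carrying the mutex in a wrapper rather than in struct dma_cookie mirrors the backport note
above ("Embed iommu_dma_cookie into iommu_dma_cookie_ext to avoid changing struct
iommu_dma_cookie"), presumably to keep the original structure layout stable; the upstream
commit simply adds the mutex to struct iommu_dma_cookie directly.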