author | David Woodhouse <David.Woodhouse@intel.com> | 2009-06-28 14:01:43 +0100
---|---|---
committer | David Woodhouse <David.Woodhouse@intel.com> | 2009-06-29 13:34:24 +0100
commit | 0ab36de274ab094c3992b50c9c48c5c89072ec94 (patch) |
tree | 62f7e197958fca2d846bb8821d22fc97e9675231 /drivers/pci |
parent | 61df744314079e8cb8cdec75f517cf0e704e41ef (diff) |
intel-iommu: Use domain_pfn_mapping() in __intel_map_single()
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/pci')
-rw-r--r-- | drivers/pci/intel-iommu.c | 18 |
1 file changed, 8 insertions, 10 deletions
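The patch moves __intel_map_single() from byte-based bookkeeping to page-frame-number bookkeeping: after the change, `size` holds a count of VT-d pages rather than bytes, and the mapping call takes pfns instead of byte addresses. The following standalone sketch (my own illustration, not kernel code; it assumes 4KiB VT-d pages, i.e. VTD_PAGE_SHIFT == 12, and models aligned_size() loosely on the kernel helper) shows that arithmetic before the diff itself:

```c
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT	12
#define VTD_PAGE_SIZE	(1ULL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK	(~(VTD_PAGE_SIZE - 1))

/* Round the byte range [paddr, paddr + size) up to whole VT-d pages,
 * modelled loosely on the kernel's aligned_size() helper. */
static uint64_t aligned_size(uint64_t paddr, uint64_t size)
{
	return ((paddr & ~VTD_PAGE_MASK) + size + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK;
}

int main(void)
{
	uint64_t paddr = 0x12345678ULL;	/* arbitrary, not page-aligned */
	uint64_t size  = 0x2000;	/* an 8KiB mapping request */

	/* After the patch, "size" is a page count, not a byte count. */
	uint64_t npages = aligned_size(paddr, size) >> VTD_PAGE_SHIFT;

	/* The IOVA allocator still wants bytes, hence the
	 * size << VTD_PAGE_SHIFT in the __intel_alloc_iova() call. */
	printf("pages = %llu, bytes asked of the IOVA allocator = %llu\n",
	       (unsigned long long)npages,
	       (unsigned long long)(npages << VTD_PAGE_SHIFT));
	return 0;
}
```

Because the rounding accounts for the sub-page offset of paddr, an 8KiB request that starts mid-page counts as three pages rather than two, which is exactly what the pfn-based mapping call needs to cover.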
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 7540ef91d5f..dccd0a7b7a5 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2477,14 +2477,12 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 		return 0;
 
 	iommu = domain_get_iommu(domain);
-	size = aligned_size((u64)paddr, size);
+	size = aligned_size(paddr, size) >> VTD_PAGE_SHIFT;
 
-	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = __intel_alloc_iova(hwdev, domain, size << VTD_PAGE_SHIFT, pdev->dma_mask);
 	if (!iova)
 		goto error;
 
-	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
-
 	/*
 	 * Check if DMAR supports zero-length reads on write only
 	 * mappings..
@@ -2500,20 +2498,20 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	 * might have two guest_addr mapping to the same host paddr, but this
 	 * is not a big problem
 	 */
-	ret = domain_page_mapping(domain, start_paddr,
-				  ((u64)paddr) & PHYSICAL_PAGE_MASK,
-				  size, prot);
+	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
+				 paddr >> VTD_PAGE_SHIFT, size, prot);
 	if (ret)
 		goto error;
 
+	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
+
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, 0, start_paddr,
-				      size >> VTD_PAGE_SHIFT);
+		iommu_flush_iotlb_psi(iommu, 0, start_paddr, size);
 	else
 		iommu_flush_write_buffer(iommu);
 
-	return start_paddr + ((u64)paddr & (~PAGE_MASK));
+	return start_paddr + (paddr & (~PAGE_MASK));
 
 error:
 	if (iova)
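For completeness, a second small sketch (again my own, with made-up values; it assumes PAGE_SHIFT == VTD_PAGE_SHIFT == 12, the x86 4KiB-page case in which mm_to_dma_pfn() is effectively a no-op) of how the patched code composes the DMA handle it returns: the IOVA allocator supplies the page-aligned base via iova->pfn_lo, and the caller's physical address contributes only its sub-page offset.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t iova_pfn_lo = 0x80000;		/* hypothetical pfn handed back by the IOVA allocator */
	uint64_t paddr       = 0x12345678ULL;	/* physical address being mapped */

	/* Page-aligned base of the mapping, as the patch computes start_paddr. */
	uint64_t start_paddr = iova_pfn_lo << PAGE_SHIFT;

	/* The returned handle keeps the caller's sub-page offset. */
	uint64_t dma_handle = start_paddr + (paddr & ~PAGE_MASK);

	printf("dma handle = 0x%llx (base 0x%llx + offset 0x%llx)\n",
	       (unsigned long long)dma_handle,
	       (unsigned long long)start_paddr,
	       (unsigned long long)(paddr & ~PAGE_MASK));
	return 0;
}
```

This also explains why the diff can move the start_paddr assignment below the mapping call: domain_pfn_mapping() consumes iova->pfn_lo directly, so the byte address is only needed for the return value and the IOTLB flush.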