The default domain type of a group may be modified only when
- The group has only one device.
- The device in the group is not bound to any device driver.
So, the users must unbind the appropriate driver before
changing the default domain type.
PASID Life Cycle Management
===========================
-PASID is initialized as INVALID_IOASID (-1) when a process is created.
+PASID is initialized as IOMMU_PASID_INVALID (-1) when a process is created.
Only processes that access SVA-capable devices need to have a PASID
allocated. This allocation happens when a process opens/binds an SVA-capable
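As a rough driver-side illustration (not part of these patches; the device pointer and error handling are abbreviated), the generic SVA bind API that triggers this allocation can be used along these lines::

	/* Sketch only: binding the current process to an SVA-capable device
	 * allocates a PASID for its mm on first bind.
	 */
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);	/* PASID now usable for this mm */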
- qcom,sm8250-smmu-500
- qcom,sm8350-smmu-500
- qcom,sm8450-smmu-500
+ - qcom,sm8550-smmu-500
- const: qcom,smmu-500
- const: arm,mmu-500
- qcom,sm8350-smmu-500
- qcom,sm8450-smmu-500
- const: arm,mmu-500
-
- - description: Qcom Adreno GPUs implementing "arm,smmu-500"
+ - description: Qcom Adreno GPUs implementing "qcom,smmu-500" and "arm,mmu-500"
+ items:
+ - enum:
+ - qcom,sc7280-smmu-500
+ - qcom,sm6115-smmu-500
+ - qcom,sm6125-smmu-500
+ - qcom,sm8150-smmu-500
+ - qcom,sm8250-smmu-500
+ - qcom,sm8350-smmu-500
+ - const: qcom,adreno-smmu
+ - const: qcom,smmu-500
+ - const: arm,mmu-500
+ - description: Qcom Adreno GPUs implementing "arm,mmu-500" (legacy binding)
+ deprecated: true
items:
+ # Do not add additional SoC to this list. Instead use previous list.
- enum:
- qcom,sc7280-smmu-500
- qcom,sm8150-smmu-500
- description: interface clock required to access smmu's registers
through the TCU's programming interface.
+ - if:
+ properties:
+ compatible:
+ items:
+ - enum:
+ - qcom,sm6115-smmu-500
+ - qcom,sm6125-smmu-500
+ - const: qcom,adreno-smmu
+ - const: qcom,smmu-500
+ - const: arm,mmu-500
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: mem
+ - const: hlos
+ - const: iface
+
+ clocks:
+ items:
+ - description: GPU memory bus clock
+ - description: Voter clock required for HLOS SMMU access
+ - description: Interface clock required for register access
+
# Disallow clocks for all other platforms with specific compatibles
- if:
properties:
- qcom,sdm845-smmu-500
- qcom,sdx55-smmu-500
- qcom,sdx65-smmu-500
- - qcom,sm6115-smmu-500
- - qcom,sm6125-smmu-500
- qcom,sm6350-smmu-500
- qcom,sm6375-smmu-500
- qcom,sm8350-smmu-500
- qcom,sm8450-smmu-500
+ - qcom,sm8550-smmu-500
then:
properties:
clock-names: false
renesas,ipmmu-main:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
- - items:
+ - minItems: 1
+ items:
- description: phandle to main IPMMU
- - description: the interrupt bit number associated with the particular
- cache IPMMU device. The interrupt bit number needs to match the main
- IPMMU IMSSTR register. Only used by cache IPMMU instances.
+ - description:
+ The interrupt bit number associated with the particular cache
+ IPMMU device. If present, the interrupt bit number needs to match
+ the main IPMMU IMSSTR register. Only used by cache IPMMU
+ instances.
description:
- Reference to the main IPMMU phandle plus 1 cell. The cell is
- the interrupt bit number associated with the particular cache IPMMU
- device. The interrupt bit number needs to match the main IPMMU IMSSTR
- register. Only used by cache IPMMU instances.
+ Reference to the main IPMMU.
required:
- compatible
required:
- power-domains
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,rcar-gen4-ipmmu-vmsa
+ then:
+ properties:
+ renesas,ipmmu-main:
+ items:
+ - maxItems: 1
+ else:
+ properties:
+ renesas,ipmmu-main:
+ items:
+ - minItems: 2
+
examples:
- |
#include <dt-bindings/clock/r8a7791-cpg-mssr.h>
Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml for details.
Ports are according to the HW.
- dma-ranges:
- maxItems: 1
- description: |
- Describes the physical address space of IOMMU maps to memory.
-
"#address-cells":
const: 2
- compatible
- power-domains
- iommus
- - dma-ranges
- ranges
additionalProperties: false
<&iommu_vpp M4U_PORT_L19_JPGDEC_BSDMA1>,
<&iommu_vpp M4U_PORT_L19_JPGDEC_BUFF_OFFSET1>,
<&iommu_vpp M4U_PORT_L19_JPGDEC_BUFF_OFFSET0>;
- dma-ranges = <0x1 0x0 0x0 0x40000000 0x0 0xfff00000>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml for details.
Ports are according to the HW.
- dma-ranges:
- maxItems: 1
- description: |
- Describes the physical address space of IOMMU maps to memory.
-
"#address-cells":
const: 2
- compatible
- power-domains
- iommus
- - dma-ranges
- ranges
additionalProperties: false
<&iommu_vpp M4U_PORT_L20_JPGENC_C_RDMA>,
<&iommu_vpp M4U_PORT_L20_JPGENC_Q_TABLE>,
<&iommu_vpp M4U_PORT_L20_JPGENC_BSDMA>;
- dma-ranges = <0x1 0x0 0x0 0x40000000 0x0 0xfff00000>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
List of the hardware port in respective IOMMU block for current SoCs.
Refer to bindings/iommu/mediatek,iommu.yaml.
- dma-ranges:
- maxItems: 1
- description: |
- Describes the physical address space of IOMMU maps to memory.
-
mediatek,vpu:
$ref: /schemas/types.yaml#/definitions/phandle
description:
List of the hardware port in respective IOMMU block for current SoCs.
Refer to bindings/iommu/mediatek,iommu.yaml.
- dma-ranges:
- maxItems: 1
- description: |
- Describes the physical address space of IOMMU maps to memory.
-
mediatek,vpu:
$ref: /schemas/types.yaml#/definitions/phandle
description:
Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml for details.
Ports are according to the HW.
- dma-ranges:
- maxItems: 1
- description: |
- Describes the physical address space of IOMMU maps to memory.
-
required:
- compatible
- reg
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
+ dma-ranges = <0x0 0x0 0x0 0x0 0x4 0x0>;
ranges;
gic: interrupt-controller@c000000 {
#size-cells = <2>;
compatible = "simple-bus";
ranges;
+ dma-ranges = <0x0 0x0 0x0 0x0 0x4 0x0>;
gic: interrupt-controller@c000000 {
compatible = "arm,gic-v3";
power-domains = <&spm MT8195_POWER_DOMAIN_VENC>;
#address-cells = <2>;
#size-cells = <2>;
- dma-ranges = <0x1 0x0 0x0 0x40000000 0x0 0xfff00000>;
};
jpgdec-master {
<&iommu_vdo M4U_PORT_L19_JPGDEC_BSDMA1>,
<&iommu_vdo M4U_PORT_L19_JPGDEC_BUFF_OFFSET1>,
<&iommu_vdo M4U_PORT_L19_JPGDEC_BUFF_OFFSET0>;
- dma-ranges = <0x1 0x0 0x0 0x40000000 0x0 0xfff00000>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
<&iommu_vpp M4U_PORT_L20_JPGENC_C_RDMA>,
<&iommu_vpp M4U_PORT_L20_JPGENC_Q_TABLE>,
<&iommu_vpp M4U_PORT_L20_JPGENC_BSDMA>;
- dma-ranges = <0x1 0x0 0x0 0x40000000 0x0 0xfff00000>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>
+#include <linux/iommu.h>
#include <asm/processor.h>
#include <asm/pkru.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>
-#include <linux/ioasid.h>
+#include <linux/iommu.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
{
union msix_perm mperm;
- if (ie->pasid == INVALID_IOASID)
+ if (ie->pasid == IOMMU_PASID_INVALID)
return;
mperm.bits = 0;
idxd_device_clear_perm_entry(idxd, ie);
ie->vector = -1;
ie->int_handle = INVALID_INT_HANDLE;
- ie->pasid = INVALID_IOASID;
+ ie->pasid = IOMMU_PASID_INVALID;
}
int idxd_wq_request_irq(struct idxd_wq *wq)
ie = &wq->ie;
ie->vector = pci_irq_vector(pdev, ie->id);
- ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
+ ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : IOMMU_PASID_INVALID;
idxd_device_set_perm_entry(idxd, ie);
rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
free_irq(ie->vector, ie);
err_irq:
idxd_device_clear_perm_entry(idxd, ie);
- ie->pasid = INVALID_IOASID;
+ ie->pasid = IOMMU_PASID_INVALID;
return rc;
}
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
-#include <linux/ioasid.h>
#include <linux/bitmap.h>
#include <linux/perf_event.h>
+#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
ie = idxd_get_ie(idxd, msix_idx);
ie->id = msix_idx;
ie->int_handle = INVALID_INT_HANDLE;
- ie->pasid = INVALID_IOASID;
+ ie->pasid = IOMMU_PASID_INVALID;
spin_lock_init(&ie->list_lock);
init_llist_head(&ie->pending_llist);
idxd->sva = NULL;
}
+static int idxd_enable_sva(struct pci_dev *pdev)
+{
+ int ret;
+
+ ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+ if (ret)
+ return ret;
+
+ ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ if (ret)
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+
+ return ret;
+}
+
+static void idxd_disable_sva(struct pci_dev *pdev)
+{
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
+}
+
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
+ if (idxd_enable_sva(pdev)) {
dev_warn(dev, "Unable to turn on user SVA feature.\n");
} else {
set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
if (device_user_pasid_enabled(idxd))
- iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ idxd_disable_sva(pdev);
return rc;
}
static void idxd_cleanup(struct idxd_device *idxd)
{
- struct device *dev = &idxd->pdev->dev;
-
perfmon_pmu_remove(idxd);
idxd_cleanup_interrupts(idxd);
idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
if (device_user_pasid_enabled(idxd))
- iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ idxd_disable_sva(idxd->pdev);
}
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_free_irq_vectors(pdev);
pci_iounmap(pdev, idxd->reg_base);
if (device_user_pasid_enabled(idxd))
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ idxd_disable_sva(pdev);
pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd);
desc.opcode = DSA_OPCODE_DRAIN;
desc.priv = 1;
- if (ie->pasid != INVALID_IOASID)
+ if (ie->pasid != IOMMU_PASID_INVALID)
desc.pasid = ie->pasid;
desc.int_handle = ie->int_handle;
portal = idxd_wq_portal_addr(wq);
config IOMMU_IOVA
tristate
-# The IOASID library may also be used by non-IOMMU_API users
-config IOASID
- tristate
-
# IOMMU_API always gets selected by whoever wants it.
config IOMMU_API
bool
bool "ARMv7/v8 Long Descriptor Format"
select IOMMU_IO_PGTABLE
depends on ARM || ARM64 || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for cpmxchg64()
+ depends on !GENERIC_ATOMIC64 # for cmpxchg64()
help
Enable support for the ARM long descriptor pagetable format.
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
bool "Apple DART Formats"
select IOMMU_IO_PGTABLE
depends on ARM64 || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for cpmxchg64()
+ depends on !GENERIC_ATOMIC64 # for cmpxchg64()
help
Enable support for the Apple DART pagetable formats. These include
the t8020 and t6000/t8110 DART formats used in Apple M1/M2 family
# Shared Virtual Addressing
config IOMMU_SVA
bool
- select IOASID
config FSL_PAMU
bool "Freescale IOMMU support"
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_DART) += io-pgtable-dart.o
-obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
extern int amd_iommu_enable_faulting(void);
extern int amd_iommu_guest_ir;
extern enum io_pgtable_fmt amd_iommu_pgtable;
+extern int amd_iommu_gpt_level;
/* IOMMUv2 specific functions */
struct iommu_domain;
return PCI_SEG_DEVID_TO_SBDF(seg, devid);
}
+static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
+{
+ struct page *page;
+
+ page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);
+ return page ? page_address(page) : NULL;
+}
+
extern bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct device *dev);
extern int __init add_special_device(u8 type, u8 id, u32 *devid,
#define FEATURE_GA (1ULL<<7)
#define FEATURE_HE (1ULL<<8)
#define FEATURE_PC (1ULL<<9)
+#define FEATURE_GATS_SHIFT (12)
+#define FEATURE_GATS_MASK (3ULL)
#define FEATURE_GAM_VAPIC (1ULL<<21)
#define FEATURE_GIOSUP (1ULL<<48)
#define FEATURE_EPHSUP (1ULL<<50)
#define PAGE_MODE_6_LEVEL 0x06
#define PAGE_MODE_7_LEVEL 0x07
+#define GUEST_PGTABLE_4_LEVEL 0x00
+#define GUEST_PGTABLE_5_LEVEL 0x01
+
#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9))
#define PM_LEVEL_SIZE(x) (((x) < 6) ? \
((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
#define DTE_GCR3_SHIFT_B 16
#define DTE_GCR3_SHIFT_C 43
+#define DTE_GPT_LEVEL_SHIFT 54
+
#define GCR3_VALID 0x01ULL
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
spinlock_t lock; /* mostly used to lock the page table*/
u16 id; /* the domain id written to the device table */
int glx; /* Number of levels for GCR3 table */
+ int nid; /* Node ID */
u64 *gcr3_tbl; /* Guest CR3 table */
unsigned long flags; /* flags to find out type of domain */
unsigned dev_cnt; /* devices assigned to this domain */
*/
struct irq_cfg *cfg;
int ga_vector;
- int ga_root_ptr;
- int ga_tag;
+ u64 ga_root_ptr;
+ u32 ga_tag;
};
struct amd_irte_ops {
bool amd_iommu_irq_remap __read_mostly;
enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
+/* Guest page table level */
+int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
return !!(amd_iommu_efr & mask);
}
+static inline int check_feature_gpt_level(void)
+{
+ return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
+}
+
/*
* For IVHD type 0x11/0x40, EFR is also available via IVHD.
* Default to IVHD EFR since it is available sooner
char *buf)
{
struct amd_iommu *iommu = dev_to_amd_iommu(dev);
- return sprintf(buf, "%x\n", iommu->cap);
+ return sysfs_emit(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
char *buf)
{
struct amd_iommu *iommu = dev_to_amd_iommu(dev);
- return sprintf(buf, "%llx:%llx\n", iommu->features2, iommu->features);
+ return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n");
}
- if (amd_iommu_pgtable == AMD_IOMMU_V2)
- pr_info("V2 page table enabled\n");
+ if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ pr_info("V2 page table enabled (Paging mode : %d level)\n",
+ amd_iommu_gpt_level);
+ }
}
static int __init amd_iommu_init_pci(void)
struct irq_domain *domain;
struct irq_alloc_info info;
int irq, ret;
+ int node = dev_to_node(&iommu->dev->dev);
domain = iommu_get_irqdomain();
if (!domain)
info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
info.data = iommu;
- irq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
+ irq = irq_domain_alloc_irqs(domain, 1, node, &info);
if (irq < 0) {
irq_domain_remove(domain);
return irq;
if (ret)
goto out;
+ /* 5 level guest page table */
+ if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+ check_feature_gpt_level() == GUEST_PGTABLE_5_LEVEL)
+ amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
+
/* Disable any previously enabled IOMMUs */
if (!is_kdump_kernel() || amd_iommu_disabled)
disable_iommus();
bool amd_iommu_v2_supported(void)
{
+ /* CPU page table size should match IOMMU guest page table size */
+ if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+ amd_iommu_gpt_level != PAGE_MODE_5_LEVEL)
+ return false;
+
/*
* Since DTE[Mode]=0 is prohibited on SNP-enabled system
* (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without
bool ret = true;
u64 *pte;
- pte = (void *)get_zeroed_page(gfp);
+ pte = alloc_pgtable_page(domain->nid, gfp);
if (!pte)
return false;
if (!IOMMU_PTE_PRESENT(__pte) ||
pte_level == PAGE_MODE_NONE) {
- page = (u64 *)get_zeroed_page(gfp);
+ page = alloc_pgtable_page(domain->nid, gfp);
if (!page)
return NULL;
static inline int get_pgtable_level(void)
{
- /* 5 level page table is not supported */
- return PAGE_MODE_4_LEVEL;
+ return amd_iommu_gpt_level;
}
static inline bool is_large_pte(u64 pte)
return (pte & IOMMU_PAGE_PSE);
}
-static inline void *alloc_pgtable_page(void)
-{
- return (void *)get_zeroed_page(GFP_KERNEL);
-}
-
static inline u64 set_pgtable_attr(u64 *page)
{
u64 prot;
}
/* Allocate page table */
-static u64 *v2_alloc_pte(u64 *pgd, unsigned long iova,
- unsigned long pg_size, bool *updated)
+static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
+ unsigned long pg_size, gfp_t gfp, bool *updated)
{
u64 *pte, *page;
int level, end_level;
}
if (!IOMMU_PTE_PRESENT(__pte)) {
- page = alloc_pgtable_page();
+ page = alloc_pgtable_page(nid, gfp);
if (!page)
return NULL;
while (mapped_size < size) {
map_size = get_alloc_page_size(pgsize);
- pte = v2_alloc_pte(pdom->iop.pgd, iova, map_size, &updated);
+ pte = v2_alloc_pte(pdom->nid, pdom->iop.pgd,
+ iova, map_size, gfp, &updated);
if (!pte) {
ret = -EINVAL;
goto out;
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
struct protection_domain *pdom = (struct protection_domain *)cookie;
int ret;
+ int ias = IOMMU_IN_ADDR_BIT_SIZE;
- pgtable->pgd = alloc_pgtable_page();
+ pgtable->pgd = alloc_pgtable_page(pdom->nid, GFP_ATOMIC);
if (!pgtable->pgd)
return NULL;
if (ret)
goto err_free_pgd;
+ if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
+ ias = 57;
+
pgtable->iop.ops.map_pages = iommu_v2_map_pages;
pgtable->iop.ops.unmap_pages = iommu_v2_unmap_pages;
pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;
cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2,
- cfg->ias = IOMMU_IN_ADDR_BIT_SIZE,
+ cfg->ias = ias,
cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
cfg->tlb = &v2_flush_ops;
tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
flags |= tmp;
+ if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
+ dev_table[devid].data[2] |=
+ ((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
+ }
+
if (domain->flags & PD_GIOV_MASK)
pte_root |= DTE_FLAG_GIOV;
}
dev_data->domain = domain;
list_add(&dev_data->list, &domain->dev_list);
+ /* Update NUMA Node ID */
+ if (domain->nid == NUMA_NO_NODE)
+ domain->nid = dev_to_node(dev_data->dev);
+
/* Do reference counting */
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
- /* Override supported page sizes */
- if (domain->flags & PD_GIOV_MASK)
- domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
-
/* Update device table */
set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2);
domain->flags |= PD_GIOV_MASK;
+ domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
+
if (domain_enable_v2(domain, 1)) {
domain_id_free(domain->id);
return -ENOMEM;
if (type == IOMMU_DOMAIN_IDENTITY)
return domain;
+ domain->nid = NUMA_NO_NODE;
+
pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
if (!pgtbl_ops) {
domain_id_free(domain->id);
return ret;
}
-static int apple_dart_remove(struct platform_device *pdev)
+static void apple_dart_remove(struct platform_device *pdev)
{
struct apple_dart *dart = platform_get_drvdata(pdev);
iommu_device_sysfs_remove(&dart->iommu);
clk_bulk_disable_unprepare(dart->num_clks, dart->clks);
-
- return 0;
}
static const struct apple_dart_hw apple_dart_hw_t8103 = {
.pm = pm_sleep_ptr(&apple_dart_pm_ops),
},
.probe = apple_dart_probe,
- .remove = apple_dart_remove,
+ .remove_new = apple_dart_remove,
};
module_platform_driver(apple_dart_driver);
q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
}
+static void queue_sync_cons_ovf(struct arm_smmu_queue *q)
+{
+ struct arm_smmu_ll_queue *llq = &q->llq;
+
+ if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))
+ return;
+
+ llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
+ Q_IDX(llq, llq->cons);
+ queue_sync_cons_out(q);
+}
+
static int queue_sync_prod_in(struct arm_smmu_queue *q)
{
u32 prod;
} while (!queue_empty(llq));
/* Sync our overflow flag, as we believe we're up to speed */
- llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
- Q_IDX(llq, llq->cons);
+ queue_sync_cons_ovf(q);
return IRQ_HANDLED;
}
} while (!queue_empty(llq));
/* Sync our overflow flag, as we believe we're up to speed */
- llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
- Q_IDX(llq, llq->cons);
- queue_sync_cons_out(q);
+ queue_sync_cons_ovf(q);
return IRQ_HANDLED;
}
master->domain = smmu_domain;
+ /*
+ * The SMMU does not support enabling ATS with bypass. When the STE is
+ * in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests and
+ * Translated transactions are denied as though ATS is disabled for the
+ * stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
+ * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
+ */
if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
master->ats_enabled = arm_smmu_ats_supported(master);
return 0;
}
-static int arm_smmu_device_remove(struct platform_device *pdev)
+static void arm_smmu_device_remove(struct platform_device *pdev)
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
iommu_device_sysfs_remove(&smmu->iommu);
arm_smmu_device_disable(smmu);
iopf_queue_free(smmu->evtq.iopf);
-
- return 0;
}
static void arm_smmu_device_shutdown(struct platform_device *pdev)
.suppress_bind_attrs = true,
},
.probe = arm_smmu_device_probe,
- .remove = arm_smmu_device_remove,
+ .remove_new = arm_smmu_device_remove,
.shutdown = arm_smmu_device_shutdown,
};
module_driver(arm_smmu_driver, platform_driver_register,
static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
- unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ unsigned int last_s2cr;
u32 reg;
u32 smr;
int i;
+ /*
+ * Some platforms support more than the Arm SMMU architected maximum of
+ * 128 stream matching groups. For unknown reasons, the additional
+ * groups don't exhibit the same behavior as the architected registers,
+ * so limit the groups to 128 until the behavior is fixed for the other
+ * groups.
+ */
+ if (smmu->num_mapping_groups > 128) {
+ dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
+ smmu->num_mapping_groups = 128;
+ }
+
+ last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
+
/*
* With some firmware versions writes to S2CR of type FAULT are
* ignored, and writing BYPASS will end up written as FAULT in the
int err;
np = dev_get_dev_node(dev);
- if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
+ if (!np || !of_property_present(np, "#stream-id-cells")) {
of_node_put(np);
return -ENODEV;
}
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
- if (!smmu)
- return;
-
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
dev_notice(&pdev->dev, "disabling translation\n");
clk_bulk_unprepare(smmu->num_clks, smmu->clks);
}
-static int arm_smmu_device_remove(struct platform_device *pdev)
+static void arm_smmu_device_remove(struct platform_device *pdev)
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
- if (!smmu)
- return -ENODEV;
-
iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu);
arm_smmu_device_shutdown(pdev);
-
- return 0;
}
static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
.suppress_bind_attrs = true,
},
.probe = arm_smmu_device_probe,
- .remove = arm_smmu_device_remove,
+ .remove_new = arm_smmu_device_remove,
.shutdown = arm_smmu_device_shutdown,
};
module_platform_driver(arm_smmu_driver);
return 0;
}
-static int qcom_iommu_ctx_remove(struct platform_device *pdev)
+static void qcom_iommu_ctx_remove(struct platform_device *pdev)
{
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
qcom_iommu->ctxs[ctx->asid - 1] = NULL;
-
- return 0;
}
static const struct of_device_id ctx_of_match[] = {
.of_match_table = ctx_of_match,
},
.probe = qcom_iommu_ctx_probe,
- .remove = qcom_iommu_ctx_remove,
+ .remove_new = qcom_iommu_ctx_remove,
};
static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
return ret;
}
-static int qcom_iommu_device_remove(struct platform_device *pdev)
+static void qcom_iommu_device_remove(struct platform_device *pdev)
{
struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
iommu_device_sysfs_remove(&qcom_iommu->iommu);
iommu_device_unregister(&qcom_iommu->iommu);
-
- return 0;
}
static int __maybe_unused qcom_iommu_resume(struct device *dev)
.pm = &qcom_iommu_pm_ops,
},
.probe = qcom_iommu_device_probe,
- .remove = qcom_iommu_device_remove,
+ .remove_new = qcom_iommu_device_remove,
};
static int __init qcom_iommu_init(void)
return ret;
}
- data->clk = devm_clk_get(dev, "sysmmu");
- if (PTR_ERR(data->clk) == -ENOENT)
- data->clk = NULL;
- else if (IS_ERR(data->clk))
+ data->clk = devm_clk_get_optional(dev, "sysmmu");
+ if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
- data->aclk = devm_clk_get(dev, "aclk");
- if (PTR_ERR(data->aclk) == -ENOENT)
- data->aclk = NULL;
- else if (IS_ERR(data->aclk))
+ data->aclk = devm_clk_get_optional(dev, "aclk");
+ if (IS_ERR(data->aclk))
return PTR_ERR(data->aclk);
- data->pclk = devm_clk_get(dev, "pclk");
- if (PTR_ERR(data->pclk) == -ENOENT)
- data->pclk = NULL;
- else if (IS_ERR(data->pclk))
+ data->pclk = devm_clk_get_optional(dev, "pclk");
+ if (IS_ERR(data->pclk))
return PTR_ERR(data->pclk);
if (!data->clk && (!data->aclk || !data->pclk)) {
return -ENOSYS;
}
- data->clk_master = devm_clk_get(dev, "master");
- if (PTR_ERR(data->clk_master) == -ENOENT)
- data->clk_master = NULL;
- else if (IS_ERR(data->clk_master))
+ data->clk_master = devm_clk_get_optional(dev, "master");
+ if (IS_ERR(data->clk_master))
return PTR_ERR(data->clk_master);
data->sysmmu = dev;
}
/**
- * pamu_config_paace() - Sets up PPAACE entry for specified liodn
+ * pamu_config_ppaace() - Sets up PPAACE entry for specified liodn
*
* @liodn: Logical IO device number
* @omi: Operation mapping index -- if ~omi == 0 then omi not defined
/**
* get_ome_index() - Returns the index in the operation mapping table
* for device.
- * @*omi_index: pointer for storing the index value
+ * @omi_index: pointer for storing the index value
+ * @dev: target device
*
*/
void get_ome_index(u32 *omi_index, struct device *dev)
#define QMAN_PORTAL_PAACE 2
#define BMAN_PAACE 3
-/**
+/*
* Setup operation mapping and stash destinations for QMAN and QMAN portal.
* Memory accesses to QMAN and BMAN private memory need not be coherent, so
* clear the PAACE entry coherency attribute for them.
}
}
-/**
+/*
* Setup the operation mapping table for various devices. This is a static
* table where each table index corresponds to a particular device. PAMU uses
* this table to translate device transaction to appropriate corenet
select NEED_DMA_MAP_STATE
select DMAR_TABLE
select SWIOTLB
- select IOASID
select PCI_ATS
select PCI_PRI
select PCI_PASID
CHECK_FEATURE_MISMATCH(a, b, ecap, slts, ECAP_SLTS_MASK);
CHECK_FEATURE_MISMATCH(a, b, ecap, nwfs, ECAP_NWFS_MASK);
CHECK_FEATURE_MISMATCH(a, b, ecap, slads, ECAP_SLADS_MASK);
- CHECK_FEATURE_MISMATCH(a, b, ecap, vcs, ECAP_VCS_MASK);
CHECK_FEATURE_MISMATCH(a, b, ecap, smts, ECAP_SMTS_MASK);
CHECK_FEATURE_MISMATCH(a, b, ecap, pds, ECAP_PDS_MASK);
CHECK_FEATURE_MISMATCH(a, b, ecap, dit, ECAP_DIT_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slts, ECAP_SLTS_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, nwfs, ECAP_NWFS_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, slads, ECAP_SLADS_MASK);
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, vcs, ECAP_VCS_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, smts, ECAP_SMTS_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, pds, ECAP_PDS_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, ecap, dit, ECAP_DIT_MASK);
struct pci_dev *tmp;
struct dmar_pci_notify_info *info;
- BUG_ON(dev->is_virtfn);
-
/*
* Ignore devices that have a domain number higher than what can
* be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
get_device(dev));
return 1;
}
- BUG_ON(i >= devices_cnt);
+ if (WARN_ON(i >= devices_cnt))
+ return -EINVAL;
}
return 0;
warn_invalid_dmar(phys_addr, " returns all ones");
goto unmap;
}
- if (ecap_vcs(iommu->ecap))
- iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
* is present.
*/
if (ecap_smts(iommu->ecap))
- val |= (1 << 11) | 1;
+ val |= BIT_ULL(11) | BIT_ULL(0);
raw_spin_lock_irqsave(&iommu->register_lock, flags);
return 0;
}
- if (pasid == INVALID_IOASID)
+ if (pasid == IOMMU_PASID_INVALID)
pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
type ? "DMA Read" : "DMA Write",
source_id >> 8, PCI_SLOT(source_id & 0xFF),
if (!ratelimited)
/* Using pasid -1 if pasid is not present */
dmar_fault_do_one(iommu, type, fault_reason,
- pasid_present ? pasid : INVALID_IOASID,
+ pasid_present ? pasid : IOMMU_PASID_INVALID,
source_id, guest_addr);
fault_index++;
return;
}
/* For request-without-pasid, get the pasid from context entry */
- if (intel_iommu_sm && pasid == INVALID_IOASID)
+ if (intel_iommu_sm && pasid == IOMMU_PASID_INVALID)
pasid = PASID_RID2PASID;
dir_index = pasid >> PASID_PDE_SHIFT;
int level = agaw_to_level(domain->agaw);
int offset;
- BUG_ON(!domain->pgd);
-
if (!domain_pfn_supported(domain, pfn))
/* Address beyond IOMMU's addressing capabilities. */
return NULL;
unsigned int large_page;
struct dma_pte *first_pte, *pte;
- BUG_ON(!domain_pfn_supported(domain, start_pfn));
- BUG_ON(!domain_pfn_supported(domain, last_pfn));
- BUG_ON(start_pfn > last_pfn);
+ if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
+ WARN_ON(start_pfn > last_pfn))
+ return;
/* we don't need lock here; nobody else touches the iova range */
do {
static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
unsigned long last_pfn, struct list_head *freelist)
{
- BUG_ON(!domain_pfn_supported(domain, start_pfn));
- BUG_ON(!domain_pfn_supported(domain, last_pfn));
- BUG_ON(start_pfn > last_pfn);
+ if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
+ WARN_ON(start_pfn > last_pfn))
+ return;
/* we don't need lock here; nobody else touches the iova range */
dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
break;
default:
- BUG();
+ pr_warn("%s: Unexpected context-cache invalidation type 0x%llx\n",
+ iommu->name, type);
+ return;
}
val |= DMA_CCMD_ICC;
val_iva = size_order | addr;
break;
default:
- BUG();
+ pr_warn("%s: Unexpected iotlb invalidation type 0x%llx\n",
+ iommu->name, type);
+ return;
}
/* Note: set drain read/write */
#if 0
return;
pdev = to_pci_dev(info->dev);
- /* For IOMMU that supports device IOTLB throttling (DIT), we assign
- * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
- * queue depth at PF level. If DIT is not set, PFSID will be treated as
- * reserved, which should be set to 0.
- */
- if (!ecap_dit(info->iommu->ecap))
- info->pfsid = 0;
- else {
- struct pci_dev *pf_pdev;
-
- /* pdev will be returned if device is not a vf */
- pf_pdev = pci_physfn(pdev);
- info->pfsid = pci_dev_id(pf_pdev);
- }
/* The PCIe spec, in its wisdom, declares that the behaviour of
the device if you enable PASID support after ATS support is
if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
info->pasid_enabled = 1;
- if (info->pri_supported &&
- (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
- !pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH))
- info->pri_enabled = 1;
-
if (info->ats_supported && pci_ats_page_aligned(pdev) &&
!pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
domain_update_iotlb(info->domain);
- info->ats_qdep = pci_ats_queue_depth(pdev);
}
}
domain_update_iotlb(info->domain);
}
- if (info->pri_enabled) {
- pci_disable_pri(pdev);
- info->pri_enabled = 0;
- }
-
if (info->pasid_enabled) {
pci_disable_pasid(pdev);
info->pasid_enabled = 0;
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
u16 did = domain_id_iommu(domain, iommu);
- BUG_ON(pages == 0);
+ if (WARN_ON(!pages))
+ return;
if (ih)
ih = 1 << 6;
if (ecap_prs(iommu->ecap))
intel_svm_finish_prq(iommu);
}
- if (vccap_pasid(iommu->vccap))
- ioasid_unregister_allocator(&iommu->pasid_allocator);
-
#endif
}
*/
static inline void context_set_sm_dte(struct context_entry *context)
{
- context->lo |= (1 << 2);
+ context->lo |= BIT_ULL(2);
}
/*
*/
static inline void context_set_sm_pre(struct context_entry *context)
{
- context->lo |= (1 << 4);
+ context->lo |= BIT_ULL(4);
}
/* Convert value to context PASID directory size field coding. */
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- BUG_ON(!domain->pgd);
-
spin_lock(&iommu->lock);
ret = -ENOMEM;
context = iommu_context_addr(iommu, bus, devfn, 1);
phys_addr_t pteval;
u64 attr;
- BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
+ if (unlikely(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)))
+ return -EINVAL;
if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
return -EINVAL;
if (level != 4 && level != 5)
return -EINVAL;
- if (pasid != PASID_RID2PASID)
- flags |= PASID_FLAG_SUPERVISOR_MODE;
if (level == 5)
flags |= PASID_FLAG_FL5LP;
return ret;
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
-static ioasid_t intel_vcmd_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
-{
- struct intel_iommu *iommu = data;
- ioasid_t ioasid;
-
- if (!iommu)
- return INVALID_IOASID;
- /*
- * VT-d virtual command interface always uses the full 20 bit
- * PASID range. Host can partition guest PASID range based on
- * policies but it is out of guest's control.
- */
- if (min < PASID_MIN || max > intel_pasid_max_id)
- return INVALID_IOASID;
-
- if (vcmd_alloc_pasid(iommu, &ioasid))
- return INVALID_IOASID;
-
- return ioasid;
-}
-
-static void intel_vcmd_ioasid_free(ioasid_t ioasid, void *data)
-{
- struct intel_iommu *iommu = data;
-
- if (!iommu)
- return;
- /*
- * Sanity check the ioasid owner is done at upper layer, e.g. VFIO
- * We can only free the PASID when all the devices are unbound.
- */
- if (ioasid_find(NULL, ioasid, NULL)) {
- pr_alert("Cannot free active IOASID %d\n", ioasid);
- return;
- }
- vcmd_free_pasid(iommu, ioasid);
-}
-
-static void register_pasid_allocator(struct intel_iommu *iommu)
-{
- /*
- * If we are running in the host, no need for custom allocator
- * in that PASIDs are allocated from the host system-wide.
- */
- if (!cap_caching_mode(iommu->cap))
- return;
-
- if (!sm_supported(iommu)) {
- pr_warn("VT-d Scalable Mode not enabled, no PASID allocation\n");
- return;
- }
-
- /*
- * Register a custom PASID allocator if we are running in a guest,
- * guest PASID must be obtained via virtual command interface.
- * There can be multiple vIOMMUs in each guest but only one allocator
- * is active. All vIOMMU allocators will eventually be calling the same
- * host allocator.
- */
- if (!vccap_pasid(iommu->vccap))
- return;
-
- pr_info("Register custom PASID allocator\n");
- iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
- iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
- iommu->pasid_allocator.pdata = (void *)iommu;
- if (ioasid_register_allocator(&iommu->pasid_allocator)) {
- pr_warn("Custom PASID allocator failed, scalable mode disabled\n");
- /*
- * Disable scalable mode on this IOMMU if there
- * is no custom allocator. Mixing SM capable vIOMMU
- * and non-SM vIOMMU are not supported.
- */
- intel_iommu_sm = 0;
- }
-}
-#endif
-
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
*/
for_each_active_iommu(iommu, drhd) {
iommu_flush_write_buffer(iommu);
-#ifdef CONFIG_INTEL_IOMMU_SVM
- register_pasid_allocator(iommu);
-#endif
iommu_set_root_entry(iommu);
}
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
u32 ver = readl(iommu->reg + DMAR_VER_REG);
- return sprintf(buf, "%d:%d\n",
- DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
+ return sysfs_emit(buf, "%d:%d\n",
+ DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR_RO(version);
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
- return sprintf(buf, "%llx\n", iommu->reg_phys);
+ return sysfs_emit(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR_RO(address);
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
- return sprintf(buf, "%llx\n", iommu->cap);
+ return sysfs_emit(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR_RO(cap);
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
- return sprintf(buf, "%llx\n", iommu->ecap);
+ return sysfs_emit(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR_RO(ecap);
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
- return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
+ return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap));
}
static DEVICE_ATTR_RO(domains_supported);
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
- return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
- cap_ndoms(iommu->cap)));
+ return sysfs_emit(buf, "%d\n",
+ bitmap_weight(iommu->domain_ids,
+ cap_ndoms(iommu->cap)));
}
static DEVICE_ATTR_RO(domains_used);
/* Cope with horrid API which requires us to unmap more than the
size argument if it happens to be a large-page mapping. */
- BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
- GFP_ATOMIC));
+ if (unlikely(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT,
+ &level, GFP_ATOMIC)))
+ return 0;
if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
size = VTD_PAGE_SIZE << level_to_offset_bits(level);
dmar_ats_supported(pdev, iommu)) {
info->ats_supported = 1;
info->dtlb_extra_inval = dev_needs_extra_dtlb_flush(pdev);
+
+ /*
+ * For IOMMU that supports device IOTLB throttling
+ * (DIT), we assign PFSID to the invalidation desc
+ * of a VF such that IOMMU HW can gauge queue depth
+ * at PF level. If DIT is not set, PFSID will be
+ * treated as reserved, which should be set to 0.
+ */
+ if (ecap_dit(iommu->ecap))
+ info->pfsid = pci_dev_id(pci_physfn(pdev));
+ info->ats_qdep = pci_ats_queue_depth(pdev);
}
if (sm_supported(iommu)) {
if (pasid_supported(iommu)) {
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
- int ret;
if (!info || dmar_disabled)
return -EINVAL;
if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
return -ENODEV;
- if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
+ if (!info->pasid_enabled || !info->ats_enabled)
return -EINVAL;
- ret = iopf_queue_add_device(iommu->iopf_queue, dev);
- if (ret)
- return ret;
+ /*
+ * Devices having device-specific I/O fault handling should not
+ * support PCI/PRI. The IOMMU side has no means to check the
+ * capability of device-specific IOPF. Therefore, IOMMU can only
+ * default that if the device driver enables SVA on a non-PRI
+ * device, it will handle IOPF in its own way.
+ */
+ if (!info->pri_supported)
+ return 0;
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
- if (ret)
- iopf_queue_remove_device(iommu->iopf_queue, dev);
+ /* Devices supporting PRI should have it enabled. */
+ if (!info->pri_enabled)
+ return -EINVAL;
- return ret;
+ return 0;
}
-static int intel_iommu_disable_sva(struct device *dev)
+static int intel_iommu_enable_iopf(struct device *dev)
{
+ struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu = info->iommu;
+ struct intel_iommu *iommu;
int ret;
- ret = iommu_unregister_device_fault_handler(dev);
+ if (!pdev || !info || !info->ats_enabled || !info->pri_supported)
+ return -ENODEV;
+
+ if (info->pri_enabled)
+ return -EBUSY;
+
+ iommu = info->iommu;
+ if (!iommu)
+ return -EINVAL;
+
+ /* PASID is required in PRG Response Message. */
+ if (info->pasid_enabled && !pci_prg_resp_pasid_required(pdev))
+ return -EINVAL;
+
+ ret = pci_reset_pri(pdev);
+ if (ret)
+ return ret;
+
+ ret = iopf_queue_add_device(iommu->iopf_queue, dev);
if (ret)
return ret;
- ret = iopf_queue_remove_device(iommu->iopf_queue, dev);
+ ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+ if (ret)
+ goto iopf_remove_device;
+
+ ret = pci_enable_pri(pdev, PRQ_DEPTH);
if (ret)
- iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+ goto iopf_unregister_handler;
+ info->pri_enabled = 1;
+
+ return 0;
+
+iopf_unregister_handler:
+ iommu_unregister_device_fault_handler(dev);
+iopf_remove_device:
+ iopf_queue_remove_device(iommu->iopf_queue, dev);
return ret;
}
-static int intel_iommu_enable_iopf(struct device *dev)
+static int intel_iommu_disable_iopf(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
- if (info && info->pri_supported)
- return 0;
+ if (!info->pri_enabled)
+ return -EINVAL;
+
+ /*
+ * PCIe spec states that by clearing PRI enable bit, the Page
+ * Request Interface will not issue new page requests, but has
+ * outstanding page requests that have been transmitted or are
+ * queued for transmission. This is supposed to be called after
+ * the device driver has stopped DMA, all PASIDs have been
+ * unbound and the outstanding PRQs have been drained.
+ */
+ pci_disable_pri(to_pci_dev(dev));
+ info->pri_enabled = 0;
- return -ENODEV;
+ /*
+ * With PRI disabled and outstanding PRQs drained, unregistering
+ * fault handler and removing device from iopf queue should never
+ * fail.
+ */
+ WARN_ON(iommu_unregister_device_fault_handler(dev));
+ WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev));
+
+ return 0;
}
static int
{
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
- return 0;
+ return intel_iommu_disable_iopf(dev);
case IOMMU_DEV_FEAT_SVA:
- return intel_iommu_disable_sva(dev);
+ return 0;
default:
return -ENODEV;
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>
-#include <linux/ioasid.h>
#include <linux/bitfield.h>
#include <linux/xarray.h>
#include <linux/perf_event.h>
#define ecap_flts(e) (((e) >> 47) & 0x1)
#define ecap_slts(e) (((e) >> 46) & 0x1)
#define ecap_slads(e) (((e) >> 45) & 0x1)
-#define ecap_vcs(e) (((e) >> 44) & 0x1)
#define ecap_smts(e) (((e) >> 43) & 0x1)
#define ecap_dit(e) (((e) >> 41) & 0x1)
#define ecap_pds(e) (((e) >> 42) & 0x1)
unsigned char prq_name[16]; /* Name for PRQ interrupt */
unsigned long prq_seq_number;
struct completion prq_complete;
- struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif
struct iopf_queue *iopf_queue;
unsigned char iopfq_name[16];
return (context->lo & 1);
}
-extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);
-extern int dmar_enable_qi(struct intel_iommu *iommu);
-extern void dmar_disable_qi(struct intel_iommu *iommu);
-extern int dmar_reenable_qi(struct intel_iommu *iommu);
-extern void qi_global_iec(struct intel_iommu *iommu);
+int dmar_enable_qi(struct intel_iommu *iommu);
+void dmar_disable_qi(struct intel_iommu *iommu);
+int dmar_reenable_qi(struct intel_iommu *iommu);
+void qi_global_iec(struct intel_iommu *iommu);
-extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type);
-extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+void qi_flush_context(struct intel_iommu *iommu, u16 did,
+ u16 sid, u8 fm, u64 type);
+void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u16 qdep, u64 addr, unsigned mask);
void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
*/
#define QI_OPT_WAIT_DRAIN BIT(0)
-extern int dmar_ir_support(void);
+int dmar_ir_support(void);
void *alloc_pgtable_page(int node, gfp_t gfp);
void free_pgtable_page(void *vaddr);
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
#ifdef CONFIG_INTEL_IOMMU_SVM
-extern void intel_svm_check(struct intel_iommu *iommu);
-extern int intel_svm_enable_prq(struct intel_iommu *iommu);
-extern int intel_svm_finish_prq(struct intel_iommu *iommu);
+void intel_svm_check(struct intel_iommu *iommu);
+int intel_svm_enable_prq(struct intel_iommu *iommu);
+int intel_svm_finish_prq(struct intel_iommu *iommu);
int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(void);
#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_sm;
-extern int iommu_calculate_agaw(struct intel_iommu *iommu);
-extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+int iommu_calculate_agaw(struct intel_iommu *iommu);
+int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob);
static inline bool ecmd_has_pmu_essential(struct intel_iommu *iommu)
goto out_free_table;
}
- bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
+ bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_KERNEL);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
goto out_free_pages;
pasid_set_bits(&pe->val[0], 1 << 1, 0);
}
-/*
- * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
- * scalable mode PASID entry.
- */
-static inline void pasid_set_sre(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 0, 1);
-}
-
/*
* Setup the WPE(Write Protect Enable) field (Bit 132) of a
* scalable mode PASID entry.
return -EINVAL;
}
- if (flags & PASID_FLAG_SUPERVISOR_MODE) {
-#ifdef CONFIG_X86
- unsigned long cr0 = read_cr0();
-
- /* CR0.WP is normally set but just to be sure */
- if (unlikely(!(cr0 & X86_CR0_WP))) {
- pr_err("No CPU write protect!\n");
- return -EINVAL;
- }
-#endif
- if (!ecap_srs(iommu->ecap)) {
- pr_err("No supervisor request support on %s\n",
- iommu->name);
- return -EINVAL;
- }
- }
-
if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
pr_err("No 5-level paging support for first-level on %s\n",
iommu->name);
/* Setup the first level page table pointer: */
pasid_set_flptr(pte, (u64)__pa(pgd));
- if (flags & PASID_FLAG_SUPERVISOR_MODE) {
- pasid_set_sre(pte);
- pasid_set_wpe(pte);
- }
if (flags & PASID_FLAG_FL5LP)
pasid_set_flpm(pte, 1);
pasid_set_fault_enable(pte);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
- /*
- * Since it is a second level only translation setup, we should
- * set SRE bit as well (addresses are expected to be GPAs).
- */
- if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap))
- pasid_set_sre(pte);
pasid_set_present(pte);
spin_unlock(&iommu->lock);
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
pasid_set_fault_enable(pte);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
-
- /*
- * We should set SRE bit as well since the addresses are expected
- * to be GPAs.
- */
- if (ecap_srs(iommu->ecap))
- pasid_set_sre(pte);
pasid_set_present(pte);
spin_unlock(&iommu->lock);
#define FLPT_DEFAULT_DID 1
#define NUM_RESERVED_DID 2
-/*
- * The SUPERVISOR_MODE flag indicates a first level translation which
- * can be used for access to kernel addresses. It is valid only for
- * access to the kernel's static 1:1 mapping of physical memory — not
- * to vmalloc or even module mappings.
- */
-#define PASID_FLAG_SUPERVISOR_MODE BIT(0)
#define PASID_FLAG_NESTED BIT(1)
#define PASID_FLAG_PAGE_SNOOP BIT(2)
#include <linux/interrupt.h>
#include <linux/mm_types.h>
#include <linux/xarray.h>
-#include <linux/ioasid.h>
#include <asm/page.h>
#include <asm/fpu/api.h>
if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
return -EINVAL;
- if (pasid == INVALID_IOASID || pasid >= PASID_MAX)
+ if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
return -EINVAL;
svm = pasid_private_find(pasid);
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * I/O Address Space ID allocator. There is one global IOASID space, split into
- * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
- * free IOASIDs with ioasid_alloc() and ioasid_free().
- */
-#include <linux/ioasid.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/xarray.h>
-
-struct ioasid_data {
- ioasid_t id;
- struct ioasid_set *set;
- void *private;
- struct rcu_head rcu;
-};
-
-/*
- * struct ioasid_allocator_data - Internal data structure to hold information
- * about an allocator. There are two types of allocators:
- *
- * - Default allocator always has its own XArray to track the IOASIDs allocated.
- * - Custom allocators may share allocation helpers with different private data.
- * Custom allocators that share the same helper functions also share the same
- * XArray.
- * Rules:
- * 1. Default allocator is always available, not dynamically registered. This is
- * to prevent race conditions with early boot code that want to register
- * custom allocators or allocate IOASIDs.
- * 2. Custom allocators take precedence over the default allocator.
- * 3. When all custom allocators sharing the same helper functions are
- * unregistered (e.g. due to hotplug), all outstanding IOASIDs must be
- * freed. Otherwise, outstanding IOASIDs will be lost and orphaned.
- * 4. When switching between custom allocators sharing the same helper
- * functions, outstanding IOASIDs are preserved.
- * 5. When switching between custom allocator and default allocator, all IOASIDs
- * must be freed to ensure unadulterated space for the new allocator.
- *
- * @ops: allocator helper functions and its data
- * @list: registered custom allocators
- * @slist: allocators share the same ops but different data
- * @flags: attributes of the allocator
- * @xa: xarray holds the IOASID space
- * @rcu: used for kfree_rcu when unregistering allocator
- */
-struct ioasid_allocator_data {
- struct ioasid_allocator_ops *ops;
- struct list_head list;
- struct list_head slist;
-#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
- unsigned long flags;
- struct xarray xa;
- struct rcu_head rcu;
-};
-
-static DEFINE_SPINLOCK(ioasid_allocator_lock);
-static LIST_HEAD(allocators_list);
-
-static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
-static void default_free(ioasid_t ioasid, void *opaque);
-
-static struct ioasid_allocator_ops default_ops = {
- .alloc = default_alloc,
- .free = default_free,
-};
-
-static struct ioasid_allocator_data default_allocator = {
- .ops = &default_ops,
- .flags = 0,
- .xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
-};
-
-static struct ioasid_allocator_data *active_allocator = &default_allocator;
-
-static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
-{
- ioasid_t id;
-
- if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
- pr_err("Failed to alloc ioasid from %d to %d\n", min, max);
- return INVALID_IOASID;
- }
-
- return id;
-}
-
-static void default_free(ioasid_t ioasid, void *opaque)
-{
- struct ioasid_data *ioasid_data;
-
- ioasid_data = xa_erase(&default_allocator.xa, ioasid);
- kfree_rcu(ioasid_data, rcu);
-}
-
-/* Allocate and initialize a new custom allocator with its helper functions */
-static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
-{
- struct ioasid_allocator_data *ia_data;
-
- ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
- if (!ia_data)
- return NULL;
-
- xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
- INIT_LIST_HEAD(&ia_data->slist);
- ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
- ia_data->ops = ops;
-
- /* For tracking custom allocators that share the same ops */
- list_add_tail(&ops->list, &ia_data->slist);
-
- return ia_data;
-}
-
-static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
-{
- return (a->free == b->free) && (a->alloc == b->alloc);
-}
-
-/**
- * ioasid_register_allocator - register a custom allocator
- * @ops: the custom allocator ops to be registered
- *
- * Custom allocators take precedence over the default xarray based allocator.
- * Private data associated with the IOASID allocated by the custom allocators
- * are managed by IOASID framework similar to data stored in xa by default
- * allocator.
- *
- * There can be multiple allocators registered but only one is active. In case
- * of runtime removal of a custom allocator, the next one is activated based
- * on the registration ordering.
- *
- * Multiple allocators can share the same alloc() function, in this case the
- * IOASID space is shared.
- */
-int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
-{
- struct ioasid_allocator_data *ia_data;
- struct ioasid_allocator_data *pallocator;
- int ret = 0;
-
- spin_lock(&ioasid_allocator_lock);
-
- ia_data = ioasid_alloc_allocator(ops);
- if (!ia_data) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- /*
- * No particular preference, we activate the first one and keep
- * the later registered allocators in a list in case the first one gets
- * removed due to hotplug.
- */
- if (list_empty(&allocators_list)) {
- WARN_ON(active_allocator != &default_allocator);
- /* Use this new allocator if default is not active */
- if (xa_empty(&active_allocator->xa)) {
- rcu_assign_pointer(active_allocator, ia_data);
- list_add_tail(&ia_data->list, &allocators_list);
- goto out_unlock;
- }
- pr_warn("Default allocator active with outstanding IOASID\n");
- ret = -EAGAIN;
- goto out_free;
- }
-
- /* Check if the allocator is already registered */
- list_for_each_entry(pallocator, &allocators_list, list) {
- if (pallocator->ops == ops) {
- pr_err("IOASID allocator already registered\n");
- ret = -EEXIST;
- goto out_free;
- } else if (use_same_ops(pallocator->ops, ops)) {
- /*
- * If the new allocator shares the same ops,
- * then they will share the same IOASID space.
- * We should put them under the same xarray.
- */
- list_add_tail(&ops->list, &pallocator->slist);
- goto out_free;
- }
- }
- list_add_tail(&ia_data->list, &allocators_list);
-
- spin_unlock(&ioasid_allocator_lock);
- return 0;
-out_free:
- kfree(ia_data);
-out_unlock:
- spin_unlock(&ioasid_allocator_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ioasid_register_allocator);
-
-/**
- * ioasid_unregister_allocator - Remove a custom IOASID allocator ops
- * @ops: the custom allocator to be removed
- *
- * Remove an allocator from the list, activate the next allocator in
- * the order it was registered. Or revert to default allocator if all
- * custom allocators are unregistered without outstanding IOASIDs.
- */
-void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
-{
- struct ioasid_allocator_data *pallocator;
- struct ioasid_allocator_ops *sops;
-
- spin_lock(&ioasid_allocator_lock);
- if (list_empty(&allocators_list)) {
- pr_warn("No custom IOASID allocators active!\n");
- goto exit_unlock;
- }
-
- list_for_each_entry(pallocator, &allocators_list, list) {
- if (!use_same_ops(pallocator->ops, ops))
- continue;
-
- if (list_is_singular(&pallocator->slist)) {
- /* No shared helper functions */
- list_del(&pallocator->list);
- /*
- * All IOASIDs should have been freed before
- * the last allocator that shares the same ops
- * is unregistered.
- */
- WARN_ON(!xa_empty(&pallocator->xa));
- if (list_empty(&allocators_list)) {
- pr_info("No custom IOASID allocators, switch to default.\n");
- rcu_assign_pointer(active_allocator, &default_allocator);
- } else if (pallocator == active_allocator) {
- rcu_assign_pointer(active_allocator,
- list_first_entry(&allocators_list,
- struct ioasid_allocator_data, list));
- pr_info("IOASID allocator changed");
- }
- kfree_rcu(pallocator, rcu);
- break;
- }
- /*
- * Find the matching shared ops to delete,
- * but keep outstanding IOASIDs
- */
- list_for_each_entry(sops, &pallocator->slist, list) {
- if (sops == ops) {
- list_del(&ops->list);
- break;
- }
- }
- break;
- }
-
-exit_unlock:
- spin_unlock(&ioasid_allocator_lock);
-}
-EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);
-
-/**
- * ioasid_set_data - Set private data for an allocated ioasid
- * @ioasid: the ID to set data
- * @data: the private data
- *
- * For IOASID that is already allocated, private data can be set
- * via this API. Future lookup can be done via ioasid_find.
- */
-int ioasid_set_data(ioasid_t ioasid, void *data)
-{
- struct ioasid_data *ioasid_data;
- int ret = 0;
-
- spin_lock(&ioasid_allocator_lock);
- ioasid_data = xa_load(&active_allocator->xa, ioasid);
- if (ioasid_data)
- rcu_assign_pointer(ioasid_data->private, data);
- else
- ret = -ENOENT;
- spin_unlock(&ioasid_allocator_lock);
-
- /*
- * Wait for readers to stop accessing the old private data, so the
- * caller can free it.
- */
- if (!ret)
- synchronize_rcu();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ioasid_set_data);
-
-/**
- * ioasid_alloc - Allocate an IOASID
- * @set: the IOASID set
- * @min: the minimum ID (inclusive)
- * @max: the maximum ID (inclusive)
- * @private: data private to the caller
- *
- * Allocate an ID between @min and @max. The @private pointer is stored
- * internally and can be retrieved with ioasid_find().
- *
- * Return: the allocated ID on success, or %INVALID_IOASID on failure.
- */
-ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
- void *private)
-{
- struct ioasid_data *data;
- void *adata;
- ioasid_t id;
-
- data = kzalloc(sizeof(*data), GFP_ATOMIC);
- if (!data)
- return INVALID_IOASID;
-
- data->set = set;
- data->private = private;
-
- /*
- * Custom allocator needs allocator data to perform platform specific
- * operations.
- */
- spin_lock(&ioasid_allocator_lock);
- adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
- id = active_allocator->ops->alloc(min, max, adata);
- if (id == INVALID_IOASID) {
- pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
- goto exit_free;
- }
-
- if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
- xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
- /* Custom allocator needs framework to store and track allocation results */
- pr_err("Failed to alloc ioasid from %d\n", id);
- active_allocator->ops->free(id, active_allocator->ops->pdata);
- goto exit_free;
- }
- data->id = id;
-
- spin_unlock(&ioasid_allocator_lock);
- return id;
-exit_free:
- spin_unlock(&ioasid_allocator_lock);
- kfree(data);
- return INVALID_IOASID;
-}
-EXPORT_SYMBOL_GPL(ioasid_alloc);
-
-/**
- * ioasid_free - Free an ioasid
- * @ioasid: the ID to remove
- */
-void ioasid_free(ioasid_t ioasid)
-{
- struct ioasid_data *ioasid_data;
-
- spin_lock(&ioasid_allocator_lock);
- ioasid_data = xa_load(&active_allocator->xa, ioasid);
- if (!ioasid_data) {
- pr_err("Trying to free unknown IOASID %u\n", ioasid);
- goto exit_unlock;
- }
-
- active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
- /* Custom allocator needs additional steps to free the xa element */
- if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
- ioasid_data = xa_erase(&active_allocator->xa, ioasid);
- kfree_rcu(ioasid_data, rcu);
- }
-
-exit_unlock:
- spin_unlock(&ioasid_allocator_lock);
-}
-EXPORT_SYMBOL_GPL(ioasid_free);
-
-/**
- * ioasid_find - Find IOASID data
- * @set: the IOASID set
- * @ioasid: the IOASID to find
- * @getter: function to call on the found object
- *
- * The optional getter function allows to take a reference to the found object
- * under the rcu lock. The function can also check if the object is still valid:
- * if @getter returns false, then the object is invalid and NULL is returned.
- *
- * If the IOASID exists, return the private pointer passed to ioasid_alloc.
- * Private data can be NULL if not set. Return an error if the IOASID is not
- * found, or if @set is not NULL and the IOASID does not belong to the set.
- */
-void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
- bool (*getter)(void *))
-{
- void *priv;
- struct ioasid_data *ioasid_data;
- struct ioasid_allocator_data *idata;
-
- rcu_read_lock();
- idata = rcu_dereference(active_allocator);
- ioasid_data = xa_load(&idata->xa, ioasid);
- if (!ioasid_data) {
- priv = ERR_PTR(-ENOENT);
- goto unlock;
- }
- if (set && ioasid_data->set != set) {
- /* data found but does not belong to the set */
- priv = ERR_PTR(-EACCES);
- goto unlock;
- }
- /* Now IOASID and its set is verified, we can return the private data */
- priv = rcu_dereference(ioasid_data->private);
- if (getter && !getter(priv))
- priv = NULL;
-unlock:
- rcu_read_unlock();
-
- return priv;
-}
-EXPORT_SYMBOL_GPL(ioasid_find);
-
-MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
-MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
-MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
-MODULE_LICENSE("GPL");
#include "iommu-sva.h"
static DEFINE_MUTEX(iommu_sva_lock);
-static DECLARE_IOASID_SET(iommu_sva_pasid);
+static DEFINE_IDA(iommu_global_pasid_ida);
-/**
- * iommu_sva_alloc_pasid - Allocate a PASID for the mm
- * @mm: the mm
- * @min: minimum PASID value (inclusive)
- * @max: maximum PASID value (inclusive)
- *
- * Try to allocate a PASID for this mm, or take a reference to the existing one
- * provided it fits within the [@min, @max] range. On success the PASID is
- * available in mm->pasid and will be available for the lifetime of the mm.
- *
- * Returns 0 on success and < 0 on error.
- */
-int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
+/* Allocate a PASID for the mm within range (inclusive) */
+static int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
{
int ret = 0;
- ioasid_t pasid;
- if (min == INVALID_IOASID || max == INVALID_IOASID ||
+ if (min == IOMMU_PASID_INVALID ||
+ max == IOMMU_PASID_INVALID ||
min == 0 || max < min)
return -EINVAL;
mutex_lock(&iommu_sva_lock);
/* Is a PASID already associated with this mm? */
if (mm_valid_pasid(mm)) {
- if (mm->pasid < min || mm->pasid >= max)
+ if (mm->pasid < min || mm->pasid > max)
ret = -EOVERFLOW;
goto out;
}
- pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
- if (pasid == INVALID_IOASID)
- ret = -ENOMEM;
- else
- mm_pasid_set(mm, pasid);
+ ret = ida_alloc_range(&iommu_global_pasid_ida, min, max, GFP_KERNEL);
+ if (ret < min)
+ goto out;
+ mm->pasid = ret;
+ ret = 0;
out:
mutex_unlock(&iommu_sva_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
-
-/* ioasid_find getter() requires a void * argument */
-static bool __mmget_not_zero(void *mm)
-{
- return mmget_not_zero(mm);
-}
-
-/**
- * iommu_sva_find() - Find mm associated to the given PASID
- * @pasid: Process Address Space ID assigned to the mm
- *
- * On success a reference to the mm is taken, and must be released with mmput().
- *
- * Returns the mm corresponding to this PASID, or an error if not found.
- */
-struct mm_struct *iommu_sva_find(ioasid_t pasid)
-{
- return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero);
-}
-EXPORT_SYMBOL_GPL(iommu_sva_find);
/**
* iommu_sva_bind_device() - Bind a process address space to a device
return status;
}
+
+void mm_pasid_drop(struct mm_struct *mm)
+{
+ if (likely(!mm_valid_pasid(mm)))
+ return;
+
+ ida_free(&iommu_global_pasid_ida, mm->pasid);
+}
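
With the custom IOASID allocator gone, an SVA PASID is just an entry in the global IDA: the first bind allocates it within the caller-supplied range, later binds reuse mm->pasid, and mm_pasid_drop() returns it to iommu_global_pasid_ida when the mm goes away. Below is a minimal consumer-side sketch of that flow, illustrative only (error handling trimmed; the device is assumed to already support SVA):

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/mm_types.h>

/* Illustrative only: obtain and use the per-mm PASID through the SVA API. */
static int example_enable_sva(struct device *dev, struct mm_struct *mm)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, mm);   /* first bind allocates mm->pasid */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);       /* same value for every bind of this mm */
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	/* program @pasid into the device's work submission path here */
	return 0;
}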
#ifndef _IOMMU_SVA_H
#define _IOMMU_SVA_H
-#include <linux/ioasid.h>
#include <linux/mm_types.h>
-int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max);
-struct mm_struct *iommu_sva_find(ioasid_t pasid);
-
/* I/O Page fault */
struct device;
struct iommu_fault;
static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data);
+static void iommu_release_device(struct device *dev);
static int iommu_alloc_default_domain(struct iommu_group *group,
struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
}
-void iommu_release_device(struct device *dev)
+/*
+ * Remove a device from a group's device list and return the group device
+ * if successful.
+ */
+static struct group_device *
+__iommu_group_remove_device(struct iommu_group *group, struct device *dev)
{
+ struct group_device *device;
+
+ lockdep_assert_held(&group->mutex);
+ list_for_each_entry(device, &group->devices, list) {
+ if (device->dev == dev) {
+ list_del(&device->list);
+ return device;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Release a device from its group and decrement the iommu group reference
+ * count.
+ */
+static void __iommu_group_release_device(struct iommu_group *group,
+ struct group_device *grp_dev)
+{
+ struct device *dev = grp_dev->dev;
+
+ sysfs_remove_link(group->devices_kobj, grp_dev->name);
+ sysfs_remove_link(&dev->kobj, "iommu_group");
+
+ trace_remove_device_from_group(group->id, dev);
+
+ kfree(grp_dev->name);
+ kfree(grp_dev);
+ dev->iommu_group = NULL;
+ kobject_put(group->devices_kobj);
+}
+
+static void iommu_release_device(struct device *dev)
+{
+ struct iommu_group *group = dev->iommu_group;
+ struct group_device *device;
const struct iommu_ops *ops;
- if (!dev->iommu)
+ if (!dev->iommu || !group)
return;
iommu_device_unlink(dev->iommu->iommu_dev, dev);
+ mutex_lock(&group->mutex);
+ device = __iommu_group_remove_device(group, dev);
+
+ /*
+ * If the group has become empty then ownership must have been released,
+ * and the current domain must be set back to NULL or the default
+ * domain.
+ */
+ if (list_empty(&group->devices))
+ WARN_ON(group->owner_cnt ||
+ group->domain != group->default_domain);
+
+ /*
+ * release_device() must stop using any attached domain on the device.
+ * If there are still other devices in the group, they are not affected
+ * by this callback.
+ *
+ * The IOMMU driver must set the device to either an identity or
+ * blocking translation and stop using any domain pointer, as it is
+ * going to be freed.
+ */
ops = dev_iommu_ops(dev);
if (ops->release_device)
ops->release_device(dev);
+ mutex_unlock(&group->mutex);
+
+ if (device)
+ __iommu_group_release_device(group, device);
- iommu_group_remove_device(dev);
module_put(ops->owner);
dev_iommu_free(dev);
}
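
The comments above spell out the release_device() contract now that it runs under group->mutex: the driver must quiesce translation for this device and stop referencing the attached domain before the core frees it. A hedged sketch of what such a driver callback might look like; the mydrv_* names are assumptions, not real kernel symbols:

#include <linux/iommu.h>
#include <linux/slab.h>

/* Assumed hardware helper: park the device's stream in bypass/blocked mode. */
static void mydrv_hw_set_bypass(struct device *dev)
{
	/* register writes would go here */
}

static void mydrv_release_device(struct device *dev)
{
	void *priv = dev_iommu_priv_get(dev);

	/*
	 * Runs under group->mutex from iommu_release_device(): stop using
	 * whatever domain was attached, since it is about to be freed.
	 */
	mydrv_hw_set_bypass(dev);

	kfree(priv);
	dev_iommu_priv_set(dev, NULL);
}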
static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
- return sprintf(buf, "%s\n", group->name);
+ return sysfs_emit(buf, "%s\n", group->name);
}
/**
{
struct iommu_resv_region *region, *next;
struct list_head group_resv_regions;
- char *str = buf;
+ int offset = 0;
INIT_LIST_HEAD(&group_resv_regions);
iommu_get_group_resv_regions(group, &group_resv_regions);
list_for_each_entry_safe(region, next, &group_resv_regions, list) {
- str += sprintf(str, "0x%016llx 0x%016llx %s\n",
- (long long int)region->start,
- (long long int)(region->start +
- region->length - 1),
- iommu_group_resv_type_string[region->type]);
+ offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n",
+ (long long)region->start,
+ (long long)(region->start +
+ region->length - 1),
+ iommu_group_resv_type_string[region->type]);
kfree(region);
}
- return (str - buf);
+ return offset;
}
static ssize_t iommu_group_show_type(struct iommu_group *group,
char *buf)
{
- char *type = "unknown\n";
+ char *type = "unknown";
mutex_lock(&group->mutex);
if (group->default_domain) {
switch (group->default_domain->type) {
case IOMMU_DOMAIN_BLOCKED:
- type = "blocked\n";
+ type = "blocked";
break;
case IOMMU_DOMAIN_IDENTITY:
- type = "identity\n";
+ type = "identity";
break;
case IOMMU_DOMAIN_UNMANAGED:
- type = "unmanaged\n";
+ type = "unmanaged";
break;
case IOMMU_DOMAIN_DMA:
- type = "DMA\n";
+ type = "DMA";
break;
case IOMMU_DOMAIN_DMA_FQ:
- type = "DMA-FQ\n";
+ type = "DMA-FQ";
break;
}
}
mutex_unlock(&group->mutex);
- strcpy(buf, type);
- return strlen(type);
+ return sysfs_emit(buf, "%s\n", type);
}
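
The sprintf()/strcpy() conversions above follow the standard sysfs_emit() pattern: output is clamped to PAGE_SIZE, and multi-line attributes track the offset with sysfs_emit_at(). A minimal sketch of that pattern, using an illustrative attribute that is not part of this patch:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	int offset = 0;
	int i;

	/* sysfs_emit_at() clamps output to PAGE_SIZE, no manual bounds checks */
	for (i = 0; i < 3; i++)
		offset += sysfs_emit_at(buf, offset, "entry %d\n", i);

	return offset;
}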
static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
kfree(group);
}
-static struct kobj_type iommu_group_ktype = {
+static const struct kobj_type iommu_group_ktype = {
.sysfs_ops = &iommu_group_sysfs_ops,
.release = iommu_group_release,
};
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
-struct iommu_group *iommu_group_get_by_id(int id)
-{
- struct kobject *group_kobj;
- struct iommu_group *group;
- const char *name;
-
- if (!iommu_group_kset)
- return NULL;
-
- name = kasprintf(GFP_KERNEL, "%d", id);
- if (!name)
- return NULL;
-
- group_kobj = kset_find_obj(iommu_group_kset, name);
- kfree(name);
-
- if (!group_kobj)
- return NULL;
-
- group = container_of(group_kobj, struct iommu_group, kobj);
- BUG_ON(group->id != id);
-
- kobject_get(group->devices_kobj);
- kobject_put(&group->kobj);
-
- return group;
-}
-EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
-
/**
* iommu_group_get_iommudata - retrieve iommu_data registered for a group
* @group: the group
void iommu_group_remove_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
- struct group_device *tmp_device, *device = NULL;
+ struct group_device *device;
if (!group)
return;
dev_info(dev, "Removing from iommu group %d\n", group->id);
mutex_lock(&group->mutex);
- list_for_each_entry(tmp_device, &group->devices, list) {
- if (tmp_device->dev == dev) {
- device = tmp_device;
- list_del(&device->list);
- break;
- }
- }
+ device = __iommu_group_remove_device(group, dev);
mutex_unlock(&group->mutex);
- if (!device)
- return;
-
- sysfs_remove_link(group->devices_kobj, device->name);
- sysfs_remove_link(&dev->kobj, "iommu_group");
-
- trace_remove_device_from_group(group->id, dev);
-
- kfree(device->name);
- kfree(device);
- dev->iommu_group = NULL;
- kobject_put(group->devices_kobj);
+ if (device)
+ __iommu_group_release_device(group, device);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
return NULL;
domain->type = type;
- /* Assume all sizes by default; the driver may override this later */
- domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+ /*
+ * If not already set, assume all sizes by default; the driver
+ * may override this later
+ */
+ if (!domain->pgsize_bitmap)
+ domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+
if (!domain->ops)
domain->ops = bus->iommu_ops->default_domain_ops;
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
/*
- * Changes the default domain of an iommu group that has *only* one device
+ * Changes the default domain of an iommu group
*
* @group: The group for which the default domain should be changed
- * @prev_dev: The device in the group (this is used to make sure that the device
- * hasn't changed after the caller has called this function)
+ * @dev: The first device in the group
* @type: The type of the new default domain that gets associated with the group
*
* Returns 0 on success and error code on failure
* Please take a closer look if intended to use for other purposes.
*/
static int iommu_change_dev_def_domain(struct iommu_group *group,
- struct device *prev_dev, int type)
+ struct device *dev, int type)
{
+ struct __group_domain_type gtype = {NULL, 0};
struct iommu_domain *prev_dom;
- struct group_device *grp_dev;
- int ret, dev_def_dom;
- struct device *dev;
-
- mutex_lock(&group->mutex);
-
- if (group->default_domain != group->domain) {
- dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
- ret = -EBUSY;
- goto out;
- }
-
- /*
- * iommu group wasn't locked while acquiring device lock in
- * iommu_group_store_type(). So, make sure that the device count hasn't
- * changed while acquiring device lock.
- *
- * Changing default domain of an iommu group with two or more devices
- * isn't supported because there could be a potential deadlock. Consider
- * the following scenario. T1 is trying to acquire device locks of all
- * the devices in the group and before it could acquire all of them,
- * there could be another thread T2 (from different sub-system and use
- * case) that has already acquired some of the device locks and might be
- * waiting for T1 to release other device locks.
- */
- if (iommu_group_device_count(group) != 1) {
- dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
- ret = -EINVAL;
- goto out;
- }
+ int ret;
- /* Since group has only one device */
- grp_dev = list_first_entry(&group->devices, struct group_device, list);
- dev = grp_dev->dev;
-
- if (prev_dev != dev) {
- dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
- ret = -EBUSY;
- goto out;
- }
+ lockdep_assert_held(&group->mutex);
prev_dom = group->default_domain;
- if (!prev_dom) {
- ret = -EINVAL;
- goto out;
- }
-
- dev_def_dom = iommu_get_def_domain_type(dev);
+ __iommu_group_for_each_dev(group, &gtype,
+ probe_get_default_domain_type);
if (!type) {
/*
* If the user hasn't requested any specific type of domain and
* if the device supports both the domains, then default to the
* domain the device was booted with
*/
- type = dev_def_dom ? : iommu_def_domain_type;
- } else if (dev_def_dom && type != dev_def_dom) {
- dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
+ type = gtype.type ? : iommu_def_domain_type;
+ } else if (gtype.type && type != gtype.type) {
+ dev_err_ratelimited(dev, "Device cannot be in %s domain\n",
iommu_domain_type_str(type));
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
* Switch to a new domain only if the requested domain type is different
* from the existing default domain type
*/
- if (prev_dom->type == type) {
- ret = 0;
- goto out;
- }
+ if (prev_dom->type == type)
+ return 0;
- /* We can bring up a flush queue without tearing down the domain */
- if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) {
- ret = iommu_dma_init_fq(prev_dom);
- if (!ret)
- prev_dom->type = IOMMU_DOMAIN_DMA_FQ;
- goto out;
- }
+ group->default_domain = NULL;
+ group->domain = NULL;
/* Sets group->default_domain to the newly allocated domain */
ret = iommu_group_alloc_default_domain(dev->bus, group, type);
if (ret)
- goto out;
+ goto restore_old_domain;
- ret = iommu_create_device_direct_mappings(group, dev);
+ ret = iommu_group_create_direct_mappings(group);
if (ret)
goto free_new_domain;
- ret = __iommu_attach_device(group->default_domain, dev);
+ ret = __iommu_attach_group(group->default_domain, group);
if (ret)
goto free_new_domain;
- group->domain = group->default_domain;
-
- /*
- * Release the mutex here because ops->probe_finalize() call-back of
- * some vendor IOMMU drivers calls arm_iommu_attach_device() which
- * in-turn might call back into IOMMU core code, where it tries to take
- * group->mutex, resulting in a deadlock.
- */
- mutex_unlock(&group->mutex);
-
- /* Make sure dma_ops is appropriatley set */
- iommu_group_do_probe_finalize(dev, group->default_domain);
iommu_domain_free(prev_dom);
+
return 0;
free_new_domain:
iommu_domain_free(group->default_domain);
+restore_old_domain:
group->default_domain = prev_dom;
group->domain = prev_dom;
-out:
- mutex_unlock(&group->mutex);
-
return ret;
}
* transition. Return failure if this isn't met.
*
* We need to consider the race between this and the device release path.
- * device_lock(dev) is used here to guarantee that the device release path
+ * group->mutex is used here to guarantee that the device release path
* will not be entered at the same time.
*/
static ssize_t iommu_group_store_type(struct iommu_group *group,
else
return -EINVAL;
- /*
- * Lock/Unlock the group mutex here before device lock to
- * 1. Make sure that the iommu group has only one device (this is a
- * prerequisite for step 2)
- * 2. Get struct *dev which is needed to lock device
- */
mutex_lock(&group->mutex);
- if (iommu_group_device_count(group) != 1) {
+ /* We can bring up a flush queue without tearing down the domain. */
+ if (req_type == IOMMU_DOMAIN_DMA_FQ &&
+ group->default_domain->type == IOMMU_DOMAIN_DMA) {
+ ret = iommu_dma_init_fq(group->default_domain);
+ if (!ret)
+ group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;
mutex_unlock(&group->mutex);
- pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
- return -EINVAL;
+
+ return ret ?: count;
+ }
+
+ /* Otherwise, ensure that a device exists and no driver is bound. */
+ if (list_empty(&group->devices) || group->owner_cnt) {
+ mutex_unlock(&group->mutex);
+ return -EPERM;
}
- /* Since group has only one device */
grp_dev = list_first_entry(&group->devices, struct group_device, list);
dev = grp_dev->dev;
- get_device(dev);
+
+ ret = iommu_change_dev_def_domain(group, dev, req_type);
/*
- * Don't hold the group mutex because taking group mutex first and then
- * the device lock could potentially cause a deadlock as below. Assume
- * two threads T1 and T2. T1 is trying to change default domain of an
- * iommu group and T2 is trying to hot unplug a device or release [1] VF
- * of a PCIe device which is in the same iommu group. T1 takes group
- * mutex and before it could take device lock assume T2 has taken device
- * lock and is yet to take group mutex. Now, both the threads will be
- * waiting for the other thread to release lock. Below, lock order was
- * suggested.
- * device_lock(dev);
- * mutex_lock(&group->mutex);
- * iommu_change_dev_def_domain();
- * mutex_unlock(&group->mutex);
- * device_unlock(dev);
- *
- * [1] Typical device release path
- * device_lock() from device/driver core code
- * -> bus_notifier()
- * -> iommu_bus_notifier()
- * -> iommu_release_device()
- * -> ops->release_device() vendor driver calls back iommu core code
- * -> mutex_lock() from iommu core code
+ * Release the mutex here because the ops->probe_finalize() callback of
+ * some vendor IOMMU drivers calls arm_iommu_attach_device(), which
+ * in turn might call back into IOMMU core code, where it tries to take
+ * group->mutex, resulting in a deadlock.
*/
mutex_unlock(&group->mutex);
- /* Check if the device in the group still has a driver bound to it */
- device_lock(dev);
- if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
- group->default_domain->type == IOMMU_DOMAIN_DMA)) {
- pr_err_ratelimited("Device is still bound to driver\n");
- ret = -EBUSY;
- goto out;
- }
-
- ret = iommu_change_dev_def_domain(group, dev, req_type);
- ret = ret ?: count;
-
-out:
- device_unlock(dev);
- put_device(dev);
+ /* Make sure dma_ops is appropriately set */
+ if (!ret)
+ __iommu_group_dma_finalize(group);
- return ret;
+ return ret ?: count;
}
static bool iommu_is_default_domain(struct iommu_group *group)
#define arm_iommu_create_mapping(...) NULL
#define arm_iommu_attach_device(...) -ENODEV
#define arm_iommu_release_mapping(...) do {} while (0)
-#define arm_iommu_detach_device(...) do {} while (0)
#endif
#define IPMMU_CTX_MAX 16U
static const struct soc_device_attribute soc_denylist[] = {
{ .soc_id = "r8a774a1", },
- { .soc_id = "r8a7795", .revision = "ES1.*" },
{ .soc_id = "r8a7795", .revision = "ES2.*" },
{ .soc_id = "r8a7796", },
{ /* sentinel */ }
static void ipmmu_release_device(struct device *dev)
{
- arm_iommu_detach_device(dev);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
+ unsigned int i;
+
+ for (i = 0; i < fwspec->num_ids; ++i) {
+ unsigned int utlb = fwspec->ids[i];
+
+ ipmmu_imuctr_write(mmu, utlb, 0);
+ mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
+ }
+
+ arm_iommu_release_mapping(mmu->mapping);
}
static struct iommu_group *ipmmu_find_group(struct device *dev)
* the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
*/
if (!mmu->features->has_cache_leaf_nodes ||
- !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
+ !of_property_present(pdev->dev.of_node, "renesas,ipmmu-main"))
mmu->root = mmu;
else
mmu->root = ipmmu_find_root();
return 0;
}
-static int ipmmu_remove(struct platform_device *pdev)
+static void ipmmu_remove(struct platform_device *pdev)
{
struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);
arm_iommu_release_mapping(mmu->mapping);
ipmmu_device_reset(mmu);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
.pm = DEV_PM_OPS,
},
.probe = ipmmu_probe,
- .remove = ipmmu_remove,
+ .remove_new = ipmmu_remove,
};
builtin_platform_driver(ipmmu_driver);
{}
};
-static int msm_iommu_remove(struct platform_device *pdev)
+static void msm_iommu_remove(struct platform_device *pdev)
{
struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);
clk_unprepare(iommu->clk);
clk_unprepare(iommu->pclk);
- return 0;
}
static struct platform_driver msm_iommu_driver = {
.of_match_table = msm_iommu_dt_match,
},
.probe = msm_iommu_probe,
- .remove = msm_iommu_remove,
+ .remove_new = msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
-#include <linux/dma-direct.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
char *pericfg_comp_str;
struct list_head *hw_list;
- unsigned int iova_region_nr;
- const struct mtk_iommu_iova_region *iova_region;
- u8 banks_num;
- bool banks_enable[MTK_IOMMU_BANK_MAX];
- unsigned int banks_portmsk[MTK_IOMMU_BANK_MAX];
+ /*
+ * The IOMMU HW may support a 16GB IOVA space. To balance the IOVA ranges,
+ * different masters are put in different IOVA ranges, for example vcodec
+ * in 4G-8G and cam in 8G-12G. Meanwhile, some masters have special IOVA
+ * range requirements, e.g. CCU can only use addresses
+ * 0x40000000-0x44000000.
+ * This lists the IOVA ranges this SoC supports and which larbs/ports are
+ * in which region.
+ *
+ * The whole 16GB IOVA space uses one pgtable, but each region is an
+ * iommu group.
+ */
+ struct {
+ unsigned int iova_region_nr;
+ const struct mtk_iommu_iova_region *iova_region;
+ /*
+ * Indicate the correspondence between larbs, ports and regions.
+ *
+ * The index is the same as for iova_region, and larb port numbers are
+ * described as bit positions.
+ * For example, storing BIT(0) at index 2,1 means "larb 1, port0 is in region 2":
+ * [2] = { [1] = BIT(0) }
+ * (see the worked example after this struct definition).
+ */
+ const u32 (*iova_region_larb_msk)[MTK_LARB_NR_MAX];
+ };
+
+ /*
+ * The IOMMU HW may have 5 banks. Each bank has an independent pgtable.
+ * This lists how many banks this SoC supports/enables and which ports are in which bank.
+ */
+ struct {
+ u8 banks_num;
+ bool banks_enable[MTK_IOMMU_BANK_MAX];
+ unsigned int banks_portmsk[MTK_IOMMU_BANK_MAX];
+ };
+
unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX];
};
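
The iova_region_larb_msk encoding documented in the struct above is easiest to read with a concrete value: the outer index is the region, the inner index is the larb, and each set bit is a port. A worked example with made-up numbers (not taken from any real SoC table):

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_LARB_NR	32	/* stands in for MTK_LARB_NR_MAX */

/* Region 2 contains larb1 ports 0 and 3; everything else is unassigned. */
static const u32 example_region_larb_msk[3][EXAMPLE_LARB_NR] = {
	[2] = { [1] = BIT(0) | BIT(3) },
};

/* A master belongs to a region only if all of its ports are covered. */
static bool example_in_region(unsigned int rgn, unsigned int larb, u32 portmsk)
{
	return (example_region_larb_msk[rgn][larb] & portmsk) == portmsk;
}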
#define for_each_m4u(data, head) list_for_each_entry(data, head, list)
+#define MTK_IOMMU_IOVA_SZ_4G (SZ_4G - SZ_8M) /* 8M as gap */
+
static const struct mtk_iommu_iova_region single_domain[] = {
- {.iova_base = 0, .size = SZ_4G},
+ {.iova_base = 0, .size = MTK_IOMMU_IOVA_SZ_4G},
};
-static const struct mtk_iommu_iova_region mt8192_multi_dom[] = {
- { .iova_base = 0x0, .size = SZ_4G}, /* 0 ~ 4G */
+#define MT8192_MULTI_REGION_NR_MAX 6
+
+#define MT8192_MULTI_REGION_NR (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) ? \
+ MT8192_MULTI_REGION_NR_MAX : 1)
+
+static const struct mtk_iommu_iova_region mt8192_multi_dom[MT8192_MULTI_REGION_NR] = {
+ { .iova_base = 0x0, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 0 ~ 4G */
#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
- { .iova_base = SZ_4G, .size = SZ_4G}, /* 4G ~ 8G */
- { .iova_base = SZ_4G * 2, .size = SZ_4G}, /* 8G ~ 12G */
- { .iova_base = SZ_4G * 3, .size = SZ_4G}, /* 12G ~ 16G */
+ { .iova_base = SZ_4G, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 4G ~ 8G */
+ { .iova_base = SZ_4G * 2, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 8G ~ 12G */
+ { .iova_base = SZ_4G * 3, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 12G ~ 16G */
{ .iova_base = 0x240000000ULL, .size = 0x4000000}, /* CCU0 */
{ .iova_base = 0x244000000ULL, .size = 0x4000000}, /* CCU1 */
static int mtk_iommu_get_iova_region_id(struct device *dev,
const struct mtk_iommu_plat_data *plat_data)
{
- const struct mtk_iommu_iova_region *rgn = plat_data->iova_region;
- const struct bus_dma_region *dma_rgn = dev->dma_range_map;
- int i, candidate = -1;
- dma_addr_t dma_end;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ unsigned int portidmsk = 0, larbid;
+ const u32 *rgn_larb_msk;
+ int i;
- if (!dma_rgn || plat_data->iova_region_nr == 1)
+ if (plat_data->iova_region_nr == 1)
return 0;
- dma_end = dma_rgn->dma_start + dma_rgn->size - 1;
- for (i = 0; i < plat_data->iova_region_nr; i++, rgn++) {
- /* Best fit. */
- if (dma_rgn->dma_start == rgn->iova_base &&
- dma_end == rgn->iova_base + rgn->size - 1)
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ for (i = 0; i < fwspec->num_ids; i++)
+ portidmsk |= BIT(MTK_M4U_TO_PORT(fwspec->ids[i]));
+
+ for (i = 0; i < plat_data->iova_region_nr; i++) {
+ rgn_larb_msk = plat_data->iova_region_larb_msk[i];
+ if (!rgn_larb_msk)
+ continue;
+
+ if ((rgn_larb_msk[larbid] & portidmsk) == portidmsk)
return i;
- /* ok if it is inside this region. */
- if (dma_rgn->dma_start >= rgn->iova_base &&
- dma_end < rgn->iova_base + rgn->size)
- candidate = i;
}
- if (candidate >= 0)
- return candidate;
- dev_err(dev, "Can NOT find the iommu domain id(%pad 0x%llx).\n",
- &dma_rgn->dma_start, dma_rgn->size);
+ dev_err(dev, "Can NOT find the region for larb(%d-%x).\n",
+ larbid, portidmsk);
return -EINVAL;
}
}
mutex_unlock(&data->mutex);
+ if (region_id > 0) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(34));
+ if (ret) {
+ dev_err(m4udev, "Failed to set dma_mask for %s(%d).\n", dev_name(dev), ret);
+ return ret;
+ }
+ }
+
return mtk_iommu_config(data, dev, true, region_id);
err_unlock:
return PTR_ERR(data->bclk);
}
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN)) {
+ ret = dma_set_mask(dev, DMA_BIT_MASK(35));
+ if (ret) {
+ dev_err(dev, "Failed to set dma_mask 35.\n");
+ return ret;
+ }
+ }
+
pm_runtime_enable(dev);
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
return ret;
}
-static int mtk_iommu_remove(struct platform_device *pdev)
+static void mtk_iommu_remove(struct platform_device *pdev)
{
struct mtk_iommu_data *data = platform_get_drvdata(pdev);
struct mtk_iommu_bank_data *bank;
continue;
devm_free_irq(&pdev->dev, bank->irq, bank);
}
- return 0;
}
static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev)
.larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
};
+static const unsigned int mt8186_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
+ [0] = {~0, ~0, ~0}, /* Region0: all ports for larb0/1/2 */
+ [1] = {0, 0, 0, 0, ~0, 0, 0, ~0}, /* Region1: larb4/7 */
+ [2] = {0, 0, 0, 0, 0, 0, 0, 0, /* Region2: larb8/9/11/13/16/17/19/20 */
+ ~0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), 0, 0,
+ /* larb13: the other ports except port9/10 */
+ ~0, ~0, 0, ~0, ~0},
+ [3] = {0},
+ [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */
+ [5] = {[14] = ~0}, /* larb14 */
+};
+
static const struct mtk_iommu_plat_data mt8186_data_mm = {
.m4u_plat = M4U_MT8186,
.flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
.banks_enable = {true},
.iova_region = mt8192_multi_dom,
.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8186_larb_region_msk,
+};
+
+static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
+ [0] = {~0, ~0}, /* Region0: larb0/1 */
+ [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */
+ [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */
+ 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0,
+ ~0, ~0, ~0, ~0, ~0},
+ [3] = {0},
+ [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */
+ [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */
};
static const struct mtk_iommu_plat_data mt8192_data = {
.banks_enable = {true},
.iova_region = mt8192_multi_dom,
.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8192_larb_region_msk,
.larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
{0, 14, 16}, {0, 13, 18, 17}},
};
.iova_region_nr = ARRAY_SIZE(single_domain),
};
+static const unsigned int mt8195_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
+ [0] = {~0, ~0, ~0, ~0}, /* Region0: all ports for larb0/1/2/3 */
+ [1] = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, ~0, ~0, ~0, ~0, ~0, /* Region1: larb19/20/21/22/23/24 */
+ ~0},
+ [2] = {0, 0, 0, 0, ~0, ~0, ~0, ~0, /* Region2: the other larbs. */
+ ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0,
+ ~0, ~0, 0, 0, 0, 0, 0, 0,
+ 0, ~0, ~0, ~0, ~0},
+ [3] = {0},
+ [4] = {[18] = BIT(0) | BIT(1)}, /* Only larb18 port0/1 */
+ [5] = {[18] = BIT(2) | BIT(3)}, /* Only larb18 port2/3 */
+};
+
static const struct mtk_iommu_plat_data mt8195_data_vdo = {
.m4u_plat = M4U_MT8195,
.flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
.banks_enable = {true},
.iova_region = mt8192_multi_dom,
.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8195_larb_region_msk,
.larbid_remap = {{2, 0}, {21}, {24}, {7}, {19}, {9, 10, 11},
{13, 17, 15/* 17b */, 25}, {5}},
};
.banks_enable = {true},
.iova_region = mt8192_multi_dom,
.iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8195_larb_region_msk,
.larbid_remap = {{1}, {3},
{22, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 23},
{8}, {20}, {12},
static struct platform_driver mtk_iommu_driver = {
.probe = mtk_iommu_probe,
- .remove = mtk_iommu_remove,
+ .remove_new = mtk_iommu_remove,
.driver = {
.name = "mtk-iommu",
.of_match_table = mtk_iommu_of_ids,
return ret;
}
-static int mtk_iommu_v1_remove(struct platform_device *pdev)
+static void mtk_iommu_v1_remove(struct platform_device *pdev)
{
struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev);
clk_disable_unprepare(data->bclk);
devm_free_irq(&pdev->dev, data->irq, data);
component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
- return 0;
}
static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev)
static struct platform_driver mtk_iommu_v1_driver = {
.probe = mtk_iommu_v1_probe,
- .remove = mtk_iommu_v1_remove,
+ .remove_new = mtk_iommu_v1_remove,
.driver = {
.name = "mtk-iommu-v1",
.of_match_table = mtk_iommu_v1_of_ids,
return err;
if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
return -EINVAL;
- if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
+ if (of_property_read_bool(of, "ti,iommu-bus-err-back"))
obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
obj->dev = &pdev->dev;
return err;
}
-static int omap_iommu_remove(struct platform_device *pdev)
+static void omap_iommu_remove(struct platform_device *pdev)
{
struct omap_iommu *obj = platform_get_drvdata(pdev);
pm_runtime_disable(obj->dev);
dev_info(&pdev->dev, "%s removed\n", obj->name);
- return 0;
}
static const struct dev_pm_ops omap_iommu_pm_ops = {
static struct platform_driver omap_iommu_driver = {
.probe = omap_iommu_probe,
- .remove = omap_iommu_remove,
+ .remove_new = omap_iommu_remove,
.driver = {
.name = "omap-iommu",
.pm = &omap_iommu_pm_ops,
static struct device *dma_dev;
static const struct rk_iommu_ops *rk_ops;
+static struct iommu_domain rk_identity_domain;
static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
unsigned int count)
* Ignore the return code, though, since we always zap cache
* and clear the page fault anyway.
*/
- if (iommu->domain)
+ if (iommu->domain != &rk_identity_domain)
report_iommu_fault(iommu->domain, iommu->dev, iova,
flags);
else
return ret;
}
-static void rk_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+static int rk_iommu_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev)
{
struct rk_iommu *iommu;
- struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ struct rk_iommu_domain *rk_domain;
unsigned long flags;
int ret;
/* Allow 'virtual devices' (eg drm) to detach from domain */
iommu = rk_iommu_from_dev(dev);
if (!iommu)
- return;
+ return -ENODEV;
+
+ rk_domain = to_rk_domain(iommu->domain);
dev_dbg(dev, "Detaching from iommu domain\n");
- /* iommu already detached */
- if (iommu->domain != domain)
- return;
+ if (iommu->domain == identity_domain)
+ return 0;
- iommu->domain = NULL;
+ iommu->domain = identity_domain;
spin_lock_irqsave(&rk_domain->iommus_lock, flags);
list_del_init(&iommu->node);
rk_iommu_disable(iommu);
pm_runtime_put(iommu->dev);
}
+
+ return 0;
}
+static void rk_iommu_identity_free(struct iommu_domain *domain)
+{
+}
+
+static struct iommu_domain_ops rk_identity_ops = {
+ .attach_dev = rk_iommu_identity_attach,
+ .free = rk_iommu_identity_free,
+};
+
+static struct iommu_domain rk_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &rk_identity_ops,
+};
+
+#ifdef CONFIG_ARM
+static void rk_iommu_set_platform_dma(struct device *dev)
+{
+ WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev));
+}
+#endif
+
static int rk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
if (iommu->domain == domain)
return 0;
- if (iommu->domain)
- rk_iommu_detach_device(iommu->domain, dev);
+ ret = rk_iommu_identity_attach(&rk_identity_domain, dev);
+ if (ret)
+ return ret;
iommu->domain = domain;
ret = rk_iommu_enable(iommu);
if (ret)
- rk_iommu_detach_device(iommu->domain, dev);
+ WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev));
pm_runtime_put(iommu->dev);
{
struct rk_iommu_domain *rk_domain;
+ if (type == IOMMU_DOMAIN_IDENTITY)
+ return &rk_identity_domain;
+
if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
return NULL;
iommu_dev = of_find_device_by_node(args->np);
data->iommu = platform_get_drvdata(iommu_dev);
+ data->iommu->domain = &rk_identity_domain;
dev_iommu_priv_set(dev, data);
platform_device_put(iommu_dev);
.probe_device = rk_iommu_probe_device,
.release_device = rk_iommu_release_device,
.device_group = rk_iommu_device_group,
+#ifdef CONFIG_ARM
+ .set_platform_dma_ops = rk_iommu_set_platform_dma,
+#endif
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
.of_xlate = rk_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
{
struct rk_iommu *iommu = dev_get_drvdata(dev);
- if (!iommu->domain)
+ if (iommu->domain == &rk_identity_domain)
return 0;
rk_iommu_disable(iommu);
{
struct rk_iommu *iommu = dev_get_drvdata(dev);
- if (!iommu->domain)
+ if (iommu->domain == &rk_identity_domain)
return 0;
return rk_iommu_enable(iommu);
* @eb: gate clock which controls IOMMU access
*/
struct sprd_iommu_device {
+ struct sprd_iommu_domain *dom;
enum sprd_iommu_version ver;
u32 *prot_page_va;
dma_addr_t prot_page_pa;
return &dom->domain;
}
-static void sprd_iommu_domain_free(struct iommu_domain *domain)
-{
- struct sprd_iommu_domain *dom = to_sprd_domain(domain);
-
- kfree(dom);
-}
-
static void sprd_iommu_first_vpn(struct sprd_iommu_domain *dom)
{
struct sprd_iommu_device *sdev = dom->sdev;
sprd_iommu_update_bits(sdev, reg_cfg, mask, 0, val);
}
+static void sprd_iommu_cleanup(struct sprd_iommu_domain *dom)
+{
+ size_t pgt_size;
+
+ /* Nothing to do if the domain was never attached */
+ if (!dom->sdev)
+ return;
+
+ pgt_size = sprd_iommu_pgt_size(&dom->domain);
+ sprd_iommu_hw_en(dom->sdev, false);
+ dma_free_coherent(dom->sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
+ dom->sdev = NULL;
+}
+
+static void sprd_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct sprd_iommu_domain *dom = to_sprd_domain(domain);
+
+ sprd_iommu_cleanup(dom);
+ kfree(dom);
+}
+
static int sprd_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
size_t pgt_size = sprd_iommu_pgt_size(domain);
- if (dom->sdev)
- return -EINVAL;
+ /* The device is already attached to this domain */
+ if (sdev->dom == dom)
+ return 0;
- dom->pgt_va = dma_alloc_coherent(sdev->dev, pgt_size, &dom->pgt_pa, GFP_KERNEL);
- if (!dom->pgt_va)
- return -ENOMEM;
+ /* The first time the domain is attached to a device */
+ if (!dom->pgt_va) {
+ dom->pgt_va = dma_alloc_coherent(sdev->dev, pgt_size, &dom->pgt_pa, GFP_KERNEL);
+ if (!dom->pgt_va)
+ return -ENOMEM;
+
+ dom->sdev = sdev;
+ }
- dom->sdev = sdev;
+ sdev->dom = dom;
+ /*
+ * One sprd IOMMU serves one client device only; disable it before
+ * configuring the mapping table to avoid access conflicts in case
+ * another mapping table is already installed.
+ */
+ sprd_iommu_hw_en(sdev, false);
sprd_iommu_first_ppn(dom);
sprd_iommu_first_vpn(dom);
sprd_iommu_vpn_range(dom);
return ret;
}
-static int sprd_iommu_remove(struct platform_device *pdev)
+static void sprd_iommu_remove(struct platform_device *pdev)
{
struct sprd_iommu_device *sdev = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
iommu_device_sysfs_remove(&sdev->iommu);
iommu_device_unregister(&sdev->iommu);
-
- return 0;
}
static struct platform_driver sprd_iommu_driver = {
.suppress_bind_attrs = true,
},
.probe = sprd_iommu_probe,
- .remove = sprd_iommu_remove,
+ .remove_new = sprd_iommu_remove,
};
module_platform_driver(sprd_iommu_driver);
jpeg->vdev->device_caps = V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_M2M_MPLANE;
- if (of_property_present(pdev->dev.of_node, "dma-ranges"))
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
-
ret = video_register_device(jpeg->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
}
}
- if (of_property_present(pdev->dev.of_node, "dma-ranges")) {
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
- if (ret) {
- mtk_v4l2_err("Failed to set mask");
- goto err_core_workq;
- }
- }
-
for (i = 0; i < MTK_VDEC_HW_MAX; i++)
mutex_init(&dev->dec_mutex[i]);
mutex_init(&dev->dev_mutex);
goto err_event_workq;
}
- if (of_property_present(pdev->dev.of_node, "dma-ranges"))
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
-
ret = video_register_device(vfd_enc, VFL_TYPE_VIDEO, -1);
if (ret) {
mtk_v4l2_err("Failed to register video device");
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_IOASID_H
-#define __LINUX_IOASID_H
-
-#include <linux/types.h>
-#include <linux/errno.h>
-
-#define INVALID_IOASID ((ioasid_t)-1)
-typedef unsigned int ioasid_t;
-typedef ioasid_t (*ioasid_alloc_fn_t)(ioasid_t min, ioasid_t max, void *data);
-typedef void (*ioasid_free_fn_t)(ioasid_t ioasid, void *data);
-
-struct ioasid_set {
- int dummy;
-};
-
-/**
- * struct ioasid_allocator_ops - IOASID allocator helper functions and data
- *
- * @alloc: helper function to allocate IOASID
- * @free: helper function to free IOASID
- * @list: for tracking ops that share helper functions but not data
- * @pdata: data belong to the allocator, provided when calling alloc()
- */
-struct ioasid_allocator_ops {
- ioasid_alloc_fn_t alloc;
- ioasid_free_fn_t free;
- struct list_head list;
- void *pdata;
-};
-
-#define DECLARE_IOASID_SET(name) struct ioasid_set name = { 0 }
-
-#if IS_ENABLED(CONFIG_IOASID)
-ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
- void *private);
-void ioasid_free(ioasid_t ioasid);
-void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
- bool (*getter)(void *));
-int ioasid_register_allocator(struct ioasid_allocator_ops *allocator);
-void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator);
-int ioasid_set_data(ioasid_t ioasid, void *data);
-
-#else /* !CONFIG_IOASID */
-static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min,
- ioasid_t max, void *private)
-{
- return INVALID_IOASID;
-}
-
-static inline void ioasid_free(ioasid_t ioasid) { }
-
-static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
- bool (*getter)(void *))
-{
- return NULL;
-}
-
-static inline int ioasid_register_allocator(struct ioasid_allocator_ops *allocator)
-{
- return -ENOTSUPP;
-}
-
-static inline void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator)
-{
-}
-
-static inline int ioasid_set_data(ioasid_t ioasid, void *data)
-{
- return -ENOTSUPP;
-}
-
-#endif /* CONFIG_IOASID */
-#endif /* __LINUX_IOASID_H */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>
#define IOMMU_READ (1 << 0)
};
#define IOMMU_PASID_INVALID (-1U)
+typedef unsigned int ioasid_t;
#ifdef CONFIG_IOMMU_API
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
-extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
}
int iommu_probe_device(struct device *dev);
-void iommu_release_device(struct device *dev);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
return NULL;
}
-static inline struct iommu_group *iommu_group_get_by_id(int id)
-{
- return NULL;
-}
-
static inline void iommu_domain_free(struct iommu_domain *domain)
{
}
}
#ifdef CONFIG_IOMMU_SVA
+static inline void mm_pasid_init(struct mm_struct *mm)
+{
+ mm->pasid = IOMMU_PASID_INVALID;
+}
+static inline bool mm_valid_pasid(struct mm_struct *mm)
+{
+ return mm->pasid != IOMMU_PASID_INVALID;
+}
+void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
{
return IOMMU_PASID_INVALID;
}
+static inline void mm_pasid_init(struct mm_struct *mm) {}
+static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
+static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_SVA */
#endif /* __LINUX_IOMMU_H */
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
-#include <linux/ioasid.h>
/*
* Routines for handling mm_structs
}
#endif
-#ifdef CONFIG_IOMMU_SVA
-static inline void mm_pasid_init(struct mm_struct *mm)
-{
- mm->pasid = INVALID_IOASID;
-}
-
-static inline bool mm_valid_pasid(struct mm_struct *mm)
-{
- return mm->pasid != INVALID_IOASID;
-}
-
-/* Associate a PASID with an mm_struct: */
-static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid)
-{
- mm->pasid = pasid;
-}
-
-static inline void mm_pasid_drop(struct mm_struct *mm)
-{
- if (mm_valid_pasid(mm)) {
- ioasid_free(mm->pasid);
- mm->pasid = INVALID_IOASID;
- }
-}
-#else
-static inline void mm_pasid_init(struct mm_struct *mm) {}
-static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
-static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid) {}
-static inline void mm_pasid_drop(struct mm_struct *mm) {}
-#endif
-
#endif /* _LINUX_SCHED_MM_H */
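
The helpers removed from <linux/sched/mm.h> above now live in <linux/iommu.h>, so the mm-side life cycle is unchanged; only the backing allocator moved to the global IDA. Roughly, and simplified (the real call sites are in kernel/fork.c; the example_* wrappers are illustrative):

#include <linux/iommu.h>
#include <linux/mm_types.h>

static void example_mm_create(struct mm_struct *mm)
{
	mm_pasid_init(mm);	/* mm->pasid = IOMMU_PASID_INVALID */
}

static void example_mm_destroy(struct mm_struct *mm)
{
	/* Returns the PASID to iommu_global_pasid_ida if one was allocated. */
	mm_pasid_drop(mm);
}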
#include <linux/bpf.h>
#include <linux/stackprotector.h>
#include <linux/user_events.h>
+#include <linux/iommu.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/user_namespace.h>
-#include <linux/ioasid.h>
+#include <linux/iommu.h>
#include <asm/mmu.h>
#ifndef INIT_MM_CONTEXT
.user_ns = &init_user_ns,
.cpu_bitmap = CPU_BITS_NONE,
#ifdef CONFIG_IOMMU_SVA
- .pasid = INVALID_IOASID,
+ .pasid = IOMMU_PASID_INVALID,
#endif
INIT_MM_CONTEXT(init_mm)
};