Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd
[sfrench/cifs-2.6.git] / drivers / iommu / intel / iommu.h
index 7dac94f62b4ec661af7030b475103ef4ac184fee..d796d0d9b114a4cf29bda9202636a05df091421d 100644 (file)
@@ -25,6 +25,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
+#include <uapi/linux/iommufd.h>
 
 /*
  * VT-d hardware uses 4KiB page size regardless of host page size.
@@ -48,6 +49,9 @@
 #define DMA_FL_PTE_DIRTY       BIT_ULL(6)
 #define DMA_FL_PTE_XD          BIT_ULL(63)
 
+#define DMA_SL_PTE_DIRTY_BIT   9
+#define DMA_SL_PTE_DIRTY       BIT_ULL(DMA_SL_PTE_DIRTY_BIT)
+
 #define ADDR_WIDTH_5LEVEL      (57)
 #define ADDR_WIDTH_4LEVEL      (48)
 
@@ -539,6 +543,10 @@ enum {
 #define sm_supported(iommu)    (intel_iommu_sm && ecap_smts((iommu)->ecap))
 #define pasid_supported(iommu) (sm_supported(iommu) &&                 \
                                 ecap_pasid((iommu)->ecap))
+#define ssads_supported(iommu) (sm_supported(iommu) &&                 \
+                               ecap_slads((iommu)->ecap))
+#define nested_supported(iommu)        (sm_supported(iommu) &&                 \
+                                ecap_nest((iommu)->ecap))
 
 struct pasid_entry;
 struct pasid_state_entry;
@@ -592,20 +600,45 @@ struct dmar_domain {
                                         * otherwise, goes through the second
                                         * level.
                                         */
+       u8 dirty_tracking:1;            /* Dirty tracking is enabled */
+       u8 nested_parent:1;             /* Has other domains nested on it */
 
        spinlock_t lock;                /* Protect device tracking lists */
        struct list_head devices;       /* all devices' list */
        struct list_head dev_pasids;    /* all attached pasids */
 
-       struct dma_pte  *pgd;           /* virtual address */
-       int             gaw;            /* max guest address width */
-
-       /* adjusted guest address width, 0 is level 2 30-bit */
-       int             agaw;
        int             iommu_superpage;/* Level of superpages supported:
                                           0 == 4KiB (no superpages), 1 == 2MiB,
                                           2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
-       u64             max_addr;       /* maximum mapped address */
+       union {
+               /* DMA remapping domain */
+               struct {
+                       /* virtual address */
+                       struct dma_pte  *pgd;
+                       /* max guest address width */
+                       int             gaw;
+                       /*
+                        * adjusted guest address width:
+                        *   0: level 2 30-bit
+                        *   1: level 3 39-bit
+                        *   2: level 4 48-bit
+                        *   3: level 5 57-bit
+                        */
+                       int             agaw;
+                       /* maximum mapped address */
+                       u64             max_addr;
+               };
+
+               /* Nested user domain */
+               struct {
+                       /* parent page table which the user domain is nested on */
+                       struct dmar_domain *s2_domain;
+                       /* user page table pointer (in GPA) */
+                       unsigned long s1_pgtbl;
+                       /* page table attributes */
+                       struct iommu_hwpt_vtd_s1 s1_cfg;
+               };
+       };
 
        struct iommu_domain domain;     /* generic domain data structure for
                                           iommu core */
@@ -781,6 +814,16 @@ static inline bool dma_pte_present(struct dma_pte *pte)
        return (pte->val & 3) != 0;
 }
 
+static inline bool dma_sl_pte_test_and_clear_dirty(struct dma_pte *pte,
+                                                  unsigned long flags)
+{
+       if (flags & IOMMU_DIRTY_NO_CLEAR)
+               return (pte->val & DMA_SL_PTE_DIRTY) != 0;
+
+       return test_and_clear_bit(DMA_SL_PTE_DIRTY_BIT,
+                                 (unsigned long *)&pte->val);
+}
+
 static inline bool dma_pte_superpage(struct dma_pte *pte)
 {
        return (pte->val & DMA_PTE_LARGE_PAGE);
@@ -836,12 +879,21 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
  */
 #define QI_OPT_WAIT_DRAIN              BIT(0)
 
+int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
+void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
+void device_block_translation(struct device *dev);
+int prepare_domain_attach_device(struct iommu_domain *domain,
+                                struct device *dev);
+void domain_update_iommu_cap(struct dmar_domain *domain);
+
 int dmar_ir_support(void);
 
 void *alloc_pgtable_page(int node, gfp_t gfp);
 void free_pgtable_page(void *vaddr);
 void iommu_flush_write_buffer(struct intel_iommu *iommu);
 struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
+struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
+                                              const struct iommu_user_data *user_data);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
 void intel_svm_check(struct intel_iommu *iommu);