drivers/iommu/intel-iommu.c
1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  *          Joerg Roedel <jroedel@suse.de>
19  */
20
21 #define pr_fmt(fmt)     "DMAR: " fmt
22
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/cpu.h>
37 #include <linux/timer.h>
38 #include <linux/io.h>
39 #include <linux/iova.h>
40 #include <linux/iommu.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/syscore_ops.h>
43 #include <linux/tboot.h>
44 #include <linux/dmi.h>
45 #include <linux/pci-ats.h>
46 #include <linux/memblock.h>
47 #include <linux/dma-contiguous.h>
48 #include <linux/crash_dump.h>
49 #include <asm/irq_remapping.h>
50 #include <asm/cacheflush.h>
51 #include <asm/iommu.h>
52
53 #include "irq_remapping.h"
54
55 #define ROOT_SIZE               VTD_PAGE_SIZE
56 #define CONTEXT_SIZE            VTD_PAGE_SIZE
57
58 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
59 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
60 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
61 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
62
63 #define IOAPIC_RANGE_START      (0xfee00000)
64 #define IOAPIC_RANGE_END        (0xfeefffff)
65 #define IOVA_START_ADDR         (0x1000)
66
67 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
68
69 #define MAX_AGAW_WIDTH 64
70 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
71
72 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
73 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
74
75 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
76    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
77 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
78                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
79 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
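
/*
 * For example, with the default gaw of 48 and VTD_PAGE_SHIFT == 12,
 * __DOMAIN_MAX_PFN(48) is (1ULL << 36) - 1.  On 64-bit this is also
 * DOMAIN_MAX_PFN(48); on 32-bit it is clamped to ULONG_MAX so PFNs still
 * fit in an unsigned long.  DOMAIN_MAX_ADDR(48) uses the unclamped value
 * and works out to (1ULL << 48) - VTD_PAGE_SIZE.
 */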
80
81 /* IO virtual address start page frame number */
82 #define IOVA_START_PFN          (1)
83
84 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
85
86 /* page table handling */
87 #define LEVEL_STRIDE            (9)
88 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
89
90 /*
91  * This bitmap is used to advertise the page sizes our hardware supports
92  * to the IOMMU core, which will then use this information to split
93  * physically contiguous memory regions it is mapping into page sizes
94  * that we support.
95  *
96  * Traditionally the IOMMU core just handed us the mappings directly,
97  * after making sure the size is an order of a 4KiB page and that the
98  * mapping has natural alignment.
99  *
100  * To retain this behavior, we currently advertise that we support
101  * all page sizes that are an order of 4KiB.
102  *
103  * If at some point we'd like to utilize the IOMMU core's new behavior,
104  * we could change this to advertise the real page sizes we support.
105  */
106 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
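
/*
 * ~0xFFFUL sets every bit from bit 12 upwards, i.e. it advertises every
 * power-of-two size that is a multiple of 4KiB (4KiB, 8KiB, ..., 2MiB,
 * 1GiB, ...), matching the "any order of 4KiB" behaviour described above.
 */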
107
108 static inline int agaw_to_level(int agaw)
109 {
110         return agaw + 2;
111 }
112
113 static inline int agaw_to_width(int agaw)
114 {
115         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
116 }
117
118 static inline int width_to_agaw(int width)
119 {
120         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
121 }
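
/*
 * Example: a 48-bit address width gives width_to_agaw(48) ==
 * DIV_ROUND_UP(18, 9) == 2, i.e. a 4-level page table
 * (agaw_to_level(2) == 4) covering agaw_to_width(2) == 48 bits of IOVA.
 * A 39-bit width maps to agaw 1, a 3-level table.
 */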
122
123 static inline unsigned int level_to_offset_bits(int level)
124 {
125         return (level - 1) * LEVEL_STRIDE;
126 }
127
128 static inline int pfn_level_offset(unsigned long pfn, int level)
129 {
130         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
131 }
132
133 static inline unsigned long level_mask(int level)
134 {
135         return -1UL << level_to_offset_bits(level);
136 }
137
138 static inline unsigned long level_size(int level)
139 {
140         return 1UL << level_to_offset_bits(level);
141 }
142
143 static inline unsigned long align_to_level(unsigned long pfn, int level)
144 {
145         return (pfn + level_size(level) - 1) & level_mask(level);
146 }
147
148 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
149 {
150         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
151 }
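
/*
 * Example: at level 2, level_to_offset_bits(2) == 9, so pfn_level_offset()
 * selects bits 9-17 of the pfn, level_size(2) == 512 pfns (2MiB of IOVA),
 * and lvl_to_nr_pages(2) == 512, the number of 4KiB pages a single
 * level-2 superpage entry maps.
 */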
152
153 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
154    are never going to work. */
155 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
156 {
157         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
158 }
159
160 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
161 {
162         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
163 }
164 static inline unsigned long page_to_dma_pfn(struct page *pg)
165 {
166         return mm_to_dma_pfn(page_to_pfn(pg));
167 }
168 static inline unsigned long virt_to_dma_pfn(void *p)
169 {
170         return page_to_dma_pfn(virt_to_page(p));
171 }
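
/*
 * On x86 both PAGE_SHIFT and VTD_PAGE_SHIFT are 12, so these conversions
 * are currently identity operations; the helpers just keep the distinction
 * between MM pfns and VT-d (DMA) pfns explicit in case the two page sizes
 * ever differ.
 */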
172
173 /* global iommu list, set NULL for ignored DMAR units */
174 static struct intel_iommu **g_iommus;
175
176 static void __init check_tylersburg_isoch(void);
177 static int rwbf_quirk;
178
179 /*
180  * set to 1 to panic kernel if can't successfully enable VT-d
181  * (used when kernel is launched w/ TXT)
182  */
183 static int force_on = 0;
184 int intel_iommu_tboot_noforce;
185
186 /*
187  * 0: Present
188  * 1-11: Reserved
189  * 12-63: Context Ptr (12 - (haw-1))
190  * 64-127: Reserved
191  */
192 struct root_entry {
193         u64     lo;
194         u64     hi;
195 };
196 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
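
/*
 * With a 4KiB VTD_PAGE_SIZE and 16-byte root entries this works out to
 * 256 entries, one root entry per PCI bus number.
 */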
197
198 /*
199  * Take a root_entry and return the Lower Context Table Pointer (LCTP)
200  * if marked present.
201  */
202 static phys_addr_t root_entry_lctp(struct root_entry *re)
203 {
204         if (!(re->lo & 1))
205                 return 0;
206
207         return re->lo & VTD_PAGE_MASK;
208 }
209
210 /*
211  * Take a root_entry and return the Upper Context Table Pointer (UCTP)
212  * if marked present.
213  */
214 static phys_addr_t root_entry_uctp(struct root_entry *re)
215 {
216         if (!(re->hi & 1))
217                 return 0;
218
219         return re->hi & VTD_PAGE_MASK;
220 }
221 /*
222  * low 64 bits:
223  * 0: present
224  * 1: fault processing disable
225  * 2-3: translation type
226  * 12-63: address space root
227  * high 64 bits:
228  * 0-2: address width
229  * 3-6: aval
230  * 8-23: domain id
231  */
232 struct context_entry {
233         u64 lo;
234         u64 hi;
235 };
236
237 static inline void context_clear_pasid_enable(struct context_entry *context)
238 {
239         context->lo &= ~(1ULL << 11);
240 }
241
242 static inline bool context_pasid_enabled(struct context_entry *context)
243 {
244         return !!(context->lo & (1ULL << 11));
245 }
246
247 static inline void context_set_copied(struct context_entry *context)
248 {
249         context->hi |= (1ull << 3);
250 }
251
252 static inline bool context_copied(struct context_entry *context)
253 {
254         return !!(context->hi & (1ULL << 3));
255 }
256
257 static inline bool __context_present(struct context_entry *context)
258 {
259         return (context->lo & 1);
260 }
261
262 static inline bool context_present(struct context_entry *context)
263 {
264         return context_pasid_enabled(context) ?
265              __context_present(context) :
266              __context_present(context) && !context_copied(context);
267 }
268
269 static inline void context_set_present(struct context_entry *context)
270 {
271         context->lo |= 1;
272 }
273
274 static inline void context_set_fault_enable(struct context_entry *context)
275 {
276         context->lo &= (((u64)-1) << 2) | 1;
277 }
278
279 static inline void context_set_translation_type(struct context_entry *context,
280                                                 unsigned long value)
281 {
282         context->lo &= (((u64)-1) << 4) | 3;
283         context->lo |= (value & 3) << 2;
284 }
285
286 static inline void context_set_address_root(struct context_entry *context,
287                                             unsigned long value)
288 {
289         context->lo &= ~VTD_PAGE_MASK;
290         context->lo |= value & VTD_PAGE_MASK;
291 }
292
293 static inline void context_set_address_width(struct context_entry *context,
294                                              unsigned long value)
295 {
296         context->hi |= value & 7;
297 }
298
299 static inline void context_set_domain_id(struct context_entry *context,
300                                          unsigned long value)
301 {
302         context->hi |= (value & ((1 << 16) - 1)) << 8;
303 }
304
305 static inline int context_domain_id(struct context_entry *c)
306 {
307         return((c->hi >> 8) & 0xffff);
308 }
309
310 static inline void context_clear_entry(struct context_entry *context)
311 {
312         context->lo = 0;
313         context->hi = 0;
314 }
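
/*
 * Taken together, these helpers are used to build a context entry roughly
 * as follows (illustrative sketch only; domain_context_mapping_one() later
 * in this file is the real user):
 *
 *	context_clear_entry(context);
 *	context_set_domain_id(context, did);
 *	context_set_address_root(context, virt_to_phys(domain->pgd));
 *	context_set_address_width(context, domain->agaw);
 *	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(context);
 *	context_set_present(context);
 */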
315
316 /*
317  * 0: readable
318  * 1: writable
319  * 2-6: reserved
320  * 7: super page
321  * 8-10: available
322  * 11: snoop behavior
323  * 12-63: Host physical address
324  */
325 struct dma_pte {
326         u64 val;
327 };
328
329 static inline void dma_clear_pte(struct dma_pte *pte)
330 {
331         pte->val = 0;
332 }
333
334 static inline u64 dma_pte_addr(struct dma_pte *pte)
335 {
336 #ifdef CONFIG_64BIT
337         return pte->val & VTD_PAGE_MASK;
338 #else
339         /* Must have a full atomic 64-bit read */
340         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
341 #endif
342 }
343
344 static inline bool dma_pte_present(struct dma_pte *pte)
345 {
346         return (pte->val & 3) != 0;
347 }
348
349 static inline bool dma_pte_superpage(struct dma_pte *pte)
350 {
351         return (pte->val & DMA_PTE_LARGE_PAGE);
352 }
353
354 static inline int first_pte_in_page(struct dma_pte *pte)
355 {
356         return !((unsigned long)pte & ~VTD_PAGE_MASK);
357 }
358
359 /*
360  * This domain is a static identity mapping domain.
361  *      1. This domain creates a static 1:1 mapping to all usable memory.
362  *      2. It maps to each iommu if successful.
363  *      3. Each iommu maps to this domain if successful.
364  */
365 static struct dmar_domain *si_domain;
366 static int hw_pass_through = 1;
367
368 /*
369  * Domain represents a virtual machine; more than one device
370  * across iommus may be owned by one domain, e.g. a kvm guest.
371  */
372 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)
373
374 /* si_domain contains multiple devices */
375 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)
376
377 #define for_each_domain_iommu(idx, domain)                      \
378         for (idx = 0; idx < g_num_of_iommus; idx++)             \
379                 if (domain->iommu_refcnt[idx])
380
381 struct dmar_domain {
382         int     nid;                    /* node id */
383
384         unsigned        iommu_refcnt[DMAR_UNITS_SUPPORTED];
385                                         /* Refcount of devices per iommu */
386
387
388         u16             iommu_did[DMAR_UNITS_SUPPORTED];
389                                         /* Domain ids per IOMMU. Use u16 since
390                                          * domain ids are 16 bit wide according
391                                          * to VT-d spec, section 9.3 */
392
393         bool has_iotlb_device;
394         struct list_head devices;       /* all devices' list */
395         struct iova_domain iovad;       /* iova's that belong to this domain */
396
397         struct dma_pte  *pgd;           /* virtual address */
398         int             gaw;            /* max guest address width */
399
400         /* adjusted guest address width, 0 is level 2 30-bit */
401         int             agaw;
402
403         int             flags;          /* flags to find out type of domain */
404
405         int             iommu_coherency;/* indicate coherency of iommu access */
406         int             iommu_snooping; /* indicate snooping control feature*/
407         int             iommu_count;    /* reference count of iommu */
408         int             iommu_superpage;/* Level of superpages supported:
409                                            0 == 4KiB (no superpages), 1 == 2MiB,
410                                            2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
411         u64             max_addr;       /* maximum mapped address */
412
413         struct iommu_domain domain;     /* generic domain data structure for
414                                            iommu core */
415 };
416
417 /* PCI domain-device relationship */
418 struct device_domain_info {
419         struct list_head link;  /* link to domain siblings */
420         struct list_head global; /* link to global list */
421         u8 bus;                 /* PCI bus number */
422         u8 devfn;               /* PCI devfn number */
423         u8 pasid_supported:3;
424         u8 pasid_enabled:1;
425         u8 pri_supported:1;
426         u8 pri_enabled:1;
427         u8 ats_supported:1;
428         u8 ats_enabled:1;
429         u8 ats_qdep;
430         struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
431         struct intel_iommu *iommu; /* IOMMU used by this device */
432         struct dmar_domain *domain; /* pointer to domain */
433 };
434
435 struct dmar_rmrr_unit {
436         struct list_head list;          /* list of rmrr units   */
437         struct acpi_dmar_header *hdr;   /* ACPI header          */
438         u64     base_address;           /* reserved base address*/
439         u64     end_address;            /* reserved end address */
440         struct dmar_dev_scope *devices; /* target devices */
441         int     devices_cnt;            /* target device count */
442         struct iommu_resv_region *resv; /* reserved region handle */
443 };
444
445 struct dmar_atsr_unit {
446         struct list_head list;          /* list of ATSR units */
447         struct acpi_dmar_header *hdr;   /* ACPI header */
448         struct dmar_dev_scope *devices; /* target devices */
449         int devices_cnt;                /* target device count */
450         u8 include_all:1;               /* include all ports */
451 };
452
453 static LIST_HEAD(dmar_atsr_units);
454 static LIST_HEAD(dmar_rmrr_units);
455
456 #define for_each_rmrr_units(rmrr) \
457         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
458
459 /* number of registered IOMMUs; used to size and index g_iommus */
460 static int g_num_of_iommus;
461
462 static void domain_exit(struct dmar_domain *domain);
463 static void domain_remove_dev_info(struct dmar_domain *domain);
464 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
465                                      struct device *dev);
466 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
467 static void domain_context_clear(struct intel_iommu *iommu,
468                                  struct device *dev);
469 static int domain_detach_iommu(struct dmar_domain *domain,
470                                struct intel_iommu *iommu);
471
472 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
473 int dmar_disabled = 0;
474 #else
475 int dmar_disabled = 1;
476 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
477
478 int intel_iommu_enabled = 0;
479 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
480
481 static int dmar_map_gfx = 1;
482 static int dmar_forcedac;
483 static int intel_iommu_strict;
484 static int intel_iommu_superpage = 1;
485 static int intel_iommu_ecs = 1;
486 static int intel_iommu_pasid28;
487 static int iommu_identity_mapping;
488
489 #define IDENTMAP_ALL            1
490 #define IDENTMAP_GFX            2
491 #define IDENTMAP_AZALIA         4
492
493 /* Broadwell and Skylake have broken ECS support — normal so-called "second
494  * level" translation of DMA requests-without-PASID doesn't actually happen
495  * unless you also set the NESTE bit in an extended context-entry. Which of
496  * course means that SVM doesn't work because it's trying to do nested
497  * translation of the physical addresses it finds in the process page tables,
498  * through the IOVA->phys mapping found in the "second level" page tables.
499  *
500  * The VT-d specification was retroactively changed to change the definition
501  * of the capability bits and pretend that Broadwell/Skylake never happened...
502  * but unfortunately the wrong bit was changed. It's ECS which is broken, but
503  * for some reason it was the PASID capability bit which was redefined (from
504  * bit 28 on BDW/SKL to bit 40 in future).
505  *
506  * So our test for ECS needs to eschew those implementations which set the old
507  * PASID capability bit 28, since those are the ones on which ECS is broken.
508  * Unless we are working around the 'pasid28' limitations, that is, by putting
509  * the device into passthrough mode for normal DMA and thus masking the bug.
510  */
511 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
512                             (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
513 /* PASID support is thus enabled if ECS is enabled and *either* of the old
514  * or new capability bits are set. */
515 #define pasid_enabled(iommu) (ecs_enabled(iommu) &&                     \
516                               (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
517
518 int intel_iommu_gfx_mapped;
519 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
520
521 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
522 static DEFINE_SPINLOCK(device_domain_lock);
523 static LIST_HEAD(device_domain_list);
524
525 const struct iommu_ops intel_iommu_ops;
526
527 static bool translation_pre_enabled(struct intel_iommu *iommu)
528 {
529         return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
530 }
531
532 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
533 {
534         iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
535 }
536
537 static void init_translation_status(struct intel_iommu *iommu)
538 {
539         u32 gsts;
540
541         gsts = readl(iommu->reg + DMAR_GSTS_REG);
542         if (gsts & DMA_GSTS_TES)
543                 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
544 }
545
546 /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
547 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
548 {
549         return container_of(dom, struct dmar_domain, domain);
550 }
551
552 static int __init intel_iommu_setup(char *str)
553 {
554         if (!str)
555                 return -EINVAL;
556         while (*str) {
557                 if (!strncmp(str, "on", 2)) {
558                         dmar_disabled = 0;
559                         pr_info("IOMMU enabled\n");
560                 } else if (!strncmp(str, "off", 3)) {
561                         dmar_disabled = 1;
562                         pr_info("IOMMU disabled\n");
563                 } else if (!strncmp(str, "igfx_off", 8)) {
564                         dmar_map_gfx = 0;
565                         pr_info("Disable GFX device mapping\n");
566                 } else if (!strncmp(str, "forcedac", 8)) {
567                         pr_info("Forcing DAC for PCI devices\n");
568                         dmar_forcedac = 1;
569                 } else if (!strncmp(str, "strict", 6)) {
570                         pr_info("Disable batched IOTLB flush\n");
571                         intel_iommu_strict = 1;
572                 } else if (!strncmp(str, "sp_off", 6)) {
573                         pr_info("Disable supported super page\n");
574                         intel_iommu_superpage = 0;
575                 } else if (!strncmp(str, "ecs_off", 7)) {
576                         printk(KERN_INFO
577                                 "Intel-IOMMU: disable extended context table support\n");
578                         intel_iommu_ecs = 0;
579                 } else if (!strncmp(str, "pasid28", 7)) {
580                         printk(KERN_INFO
581                                 "Intel-IOMMU: enable pre-production PASID support\n");
582                         intel_iommu_pasid28 = 1;
583                         iommu_identity_mapping |= IDENTMAP_GFX;
584                 } else if (!strncmp(str, "tboot_noforce", 13)) {
585                         printk(KERN_INFO
586                                 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
587                         intel_iommu_tboot_noforce = 1;
588                 }
589
590                 str += strcspn(str, ",");
591                 while (*str == ',')
592                         str++;
593         }
594         return 0;
595 }
596 __setup("intel_iommu=", intel_iommu_setup);
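
/*
 * Example: booting with "intel_iommu=on,sp_off,strict" walks the
 * comma-separated options above and ends up with dmar_disabled == 0,
 * intel_iommu_superpage == 0 and intel_iommu_strict == 1.
 */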
597
598 static struct kmem_cache *iommu_domain_cache;
599 static struct kmem_cache *iommu_devinfo_cache;
600
601 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
602 {
603         struct dmar_domain **domains;
604         int idx = did >> 8;
605
606         domains = iommu->domains[idx];
607         if (!domains)
608                 return NULL;
609
610         return domains[did & 0xff];
611 }
612
613 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
614                              struct dmar_domain *domain)
615 {
616         struct dmar_domain **domains;
617         int idx = did >> 8;
618
619         if (!iommu->domains[idx]) {
620                 size_t size = 256 * sizeof(struct dmar_domain *);
621                 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
622         }
623
624         domains = iommu->domains[idx];
625         if (WARN_ON(!domains))
626                 return;
627         else
628                 domains[did & 0xff] = domain;
629 }
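
/*
 * The per-iommu domain lookup above is a lazily allocated two-level table:
 * the high byte of the 16-bit domain id selects one of 256 second-level
 * arrays (allocated on first use), and the low byte indexes the 256
 * dmar_domain pointers within it.
 */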
630
631 static inline void *alloc_pgtable_page(int node)
632 {
633         struct page *page;
634         void *vaddr = NULL;
635
636         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
637         if (page)
638                 vaddr = page_address(page);
639         return vaddr;
640 }
641
642 static inline void free_pgtable_page(void *vaddr)
643 {
644         free_page((unsigned long)vaddr);
645 }
646
647 static inline void *alloc_domain_mem(void)
648 {
649         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
650 }
651
652 static void free_domain_mem(void *vaddr)
653 {
654         kmem_cache_free(iommu_domain_cache, vaddr);
655 }
656
657 static inline void * alloc_devinfo_mem(void)
658 {
659         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
660 }
661
662 static inline void free_devinfo_mem(void *vaddr)
663 {
664         kmem_cache_free(iommu_devinfo_cache, vaddr);
665 }
666
667 static inline int domain_type_is_vm(struct dmar_domain *domain)
668 {
669         return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
670 }
671
672 static inline int domain_type_is_si(struct dmar_domain *domain)
673 {
674         return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
675 }
676
677 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
678 {
679         return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
680                                 DOMAIN_FLAG_STATIC_IDENTITY);
681 }
682
683 static inline int domain_pfn_supported(struct dmar_domain *domain,
684                                        unsigned long pfn)
685 {
686         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
687
688         return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
689 }
690
691 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
692 {
693         unsigned long sagaw;
694         int agaw = -1;
695
696         sagaw = cap_sagaw(iommu->cap);
697         for (agaw = width_to_agaw(max_gaw);
698              agaw >= 0; agaw--) {
699                 if (test_bit(agaw, &sagaw))
700                         break;
701         }
702
703         return agaw;
704 }
705
706 /*
707  * Calculate max SAGAW for each iommu.
708  */
709 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
710 {
711         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
712 }
713
714 /*
715  * Calculate agaw for each iommu.
716  * "SAGAW" may differ across iommus, so use a default agaw and fall back
717  * to a smaller supported agaw for iommus that don't support the default.
718  */
719 int iommu_calculate_agaw(struct intel_iommu *iommu)
720 {
721         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
722 }
723
724 /* This function only returns a single iommu in a domain */
725 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
726 {
727         int iommu_id;
728
729         /* si_domain and vm domain should not get here. */
730         BUG_ON(domain_type_is_vm_or_si(domain));
731         for_each_domain_iommu(iommu_id, domain)
732                 break;
733
734         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
735                 return NULL;
736
737         return g_iommus[iommu_id];
738 }
739
740 static void domain_update_iommu_coherency(struct dmar_domain *domain)
741 {
742         struct dmar_drhd_unit *drhd;
743         struct intel_iommu *iommu;
744         bool found = false;
745         int i;
746
747         domain->iommu_coherency = 1;
748
749         for_each_domain_iommu(i, domain) {
750                 found = true;
751                 if (!ecap_coherent(g_iommus[i]->ecap)) {
752                         domain->iommu_coherency = 0;
753                         break;
754                 }
755         }
756         if (found)
757                 return;
758
759         /* No hardware attached; use lowest common denominator */
760         rcu_read_lock();
761         for_each_active_iommu(iommu, drhd) {
762                 if (!ecap_coherent(iommu->ecap)) {
763                         domain->iommu_coherency = 0;
764                         break;
765                 }
766         }
767         rcu_read_unlock();
768 }
769
770 static int domain_update_iommu_snooping(struct intel_iommu *skip)
771 {
772         struct dmar_drhd_unit *drhd;
773         struct intel_iommu *iommu;
774         int ret = 1;
775
776         rcu_read_lock();
777         for_each_active_iommu(iommu, drhd) {
778                 if (iommu != skip) {
779                         if (!ecap_sc_support(iommu->ecap)) {
780                                 ret = 0;
781                                 break;
782                         }
783                 }
784         }
785         rcu_read_unlock();
786
787         return ret;
788 }
789
790 static int domain_update_iommu_superpage(struct intel_iommu *skip)
791 {
792         struct dmar_drhd_unit *drhd;
793         struct intel_iommu *iommu;
794         int mask = 0xf;
795
796         if (!intel_iommu_superpage) {
797                 return 0;
798         }
799
800         /* set iommu_superpage to the smallest common denominator */
801         rcu_read_lock();
802         for_each_active_iommu(iommu, drhd) {
803                 if (iommu != skip) {
804                         mask &= cap_super_page_val(iommu->cap);
805                         if (!mask)
806                                 break;
807                 }
808         }
809         rcu_read_unlock();
810
811         return fls(mask);
812 }
813
814 /* Some capabilities may be different across iommus */
815 static void domain_update_iommu_cap(struct dmar_domain *domain)
816 {
817         domain_update_iommu_coherency(domain);
818         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
819         domain->iommu_superpage = domain_update_iommu_superpage(NULL);
820 }
821
822 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
823                                                        u8 bus, u8 devfn, int alloc)
824 {
825         struct root_entry *root = &iommu->root_entry[bus];
826         struct context_entry *context;
827         u64 *entry;
828
829         entry = &root->lo;
830         if (ecs_enabled(iommu)) {
831                 if (devfn >= 0x80) {
832                         devfn -= 0x80;
833                         entry = &root->hi;
834                 }
835                 devfn *= 2;
836         }
837         if (*entry & 1)
838                 context = phys_to_virt(*entry & VTD_PAGE_MASK);
839         else {
840                 unsigned long phy_addr;
841                 if (!alloc)
842                         return NULL;
843
844                 context = alloc_pgtable_page(iommu->node);
845                 if (!context)
846                         return NULL;
847
848                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
849                 phy_addr = virt_to_phys((void *)context);
850                 *entry = phy_addr | 1;
851                 __iommu_flush_cache(iommu, entry, sizeof(*entry));
852         }
853         return &context[devfn];
854 }
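
/*
 * In extended (ECS) mode a context entry is 256 bits rather than 128, so
 * one 4KiB context table only covers 128 devfns: devfns 0-127 hang off
 * root_entry.lo and devfns 128-255 off root_entry.hi, which is what the
 * devfn folding above implements.
 */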
855
856 static int iommu_dummy(struct device *dev)
857 {
858         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
859 }
860
861 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
862 {
863         struct dmar_drhd_unit *drhd = NULL;
864         struct intel_iommu *iommu;
865         struct device *tmp;
866         struct pci_dev *ptmp, *pdev = NULL;
867         u16 segment = 0;
868         int i;
869
870         if (iommu_dummy(dev))
871                 return NULL;
872
873         if (dev_is_pci(dev)) {
874                 struct pci_dev *pf_pdev;
875
876                 pdev = to_pci_dev(dev);
877
878 #ifdef CONFIG_X86
879                 /* VMD child devices currently cannot be handled individually */
880                 if (is_vmd(pdev->bus))
881                         return NULL;
882 #endif
883
884                 /* VFs aren't listed in scope tables; we need to look up
885                  * the PF instead to find the IOMMU. */
886                 pf_pdev = pci_physfn(pdev);
887                 dev = &pf_pdev->dev;
888                 segment = pci_domain_nr(pdev->bus);
889         } else if (has_acpi_companion(dev))
890                 dev = &ACPI_COMPANION(dev)->dev;
891
892         rcu_read_lock();
893         for_each_active_iommu(iommu, drhd) {
894                 if (pdev && segment != drhd->segment)
895                         continue;
896
897                 for_each_active_dev_scope(drhd->devices,
898                                           drhd->devices_cnt, i, tmp) {
899                         if (tmp == dev) {
900                                 /* For a VF use its original BDF# not that of the PF
901                                  * which we used for the IOMMU lookup. Strictly speaking
902                                  * we could do this for all PCI devices; we only need to
903                                  * get the BDF# from the scope table for ACPI matches. */
904                                 if (pdev && pdev->is_virtfn)
905                                         goto got_pdev;
906
907                                 *bus = drhd->devices[i].bus;
908                                 *devfn = drhd->devices[i].devfn;
909                                 goto out;
910                         }
911
912                         if (!pdev || !dev_is_pci(tmp))
913                                 continue;
914
915                         ptmp = to_pci_dev(tmp);
916                         if (ptmp->subordinate &&
917                             ptmp->subordinate->number <= pdev->bus->number &&
918                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
919                                 goto got_pdev;
920                 }
921
922                 if (pdev && drhd->include_all) {
923                 got_pdev:
924                         *bus = pdev->bus->number;
925                         *devfn = pdev->devfn;
926                         goto out;
927                 }
928         }
929         iommu = NULL;
930  out:
931         rcu_read_unlock();
932
933         return iommu;
934 }
935
936 static void domain_flush_cache(struct dmar_domain *domain,
937                                void *addr, int size)
938 {
939         if (!domain->iommu_coherency)
940                 clflush_cache_range(addr, size);
941 }
942
943 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
944 {
945         struct context_entry *context;
946         int ret = 0;
947         unsigned long flags;
948
949         spin_lock_irqsave(&iommu->lock, flags);
950         context = iommu_context_addr(iommu, bus, devfn, 0);
951         if (context)
952                 ret = context_present(context);
953         spin_unlock_irqrestore(&iommu->lock, flags);
954         return ret;
955 }
956
957 static void free_context_table(struct intel_iommu *iommu)
958 {
959         int i;
960         unsigned long flags;
961         struct context_entry *context;
962
963         spin_lock_irqsave(&iommu->lock, flags);
964         if (!iommu->root_entry) {
965                 goto out;
966         }
967         for (i = 0; i < ROOT_ENTRY_NR; i++) {
968                 context = iommu_context_addr(iommu, i, 0, 0);
969                 if (context)
970                         free_pgtable_page(context);
971
972                 if (!ecs_enabled(iommu))
973                         continue;
974
975                 context = iommu_context_addr(iommu, i, 0x80, 0);
976                 if (context)
977                         free_pgtable_page(context);
978
979         }
980         free_pgtable_page(iommu->root_entry);
981         iommu->root_entry = NULL;
982 out:
983         spin_unlock_irqrestore(&iommu->lock, flags);
984 }
985
986 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
987                                       unsigned long pfn, int *target_level)
988 {
989         struct dma_pte *parent, *pte = NULL;
990         int level = agaw_to_level(domain->agaw);
991         int offset;
992
993         BUG_ON(!domain->pgd);
994
995         if (!domain_pfn_supported(domain, pfn))
996                 /* Address beyond IOMMU's addressing capabilities. */
997                 return NULL;
998
999         parent = domain->pgd;
1000
1001         while (1) {
1002                 void *tmp_page;
1003
1004                 offset = pfn_level_offset(pfn, level);
1005                 pte = &parent[offset];
1006                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
1007                         break;
1008                 if (level == *target_level)
1009                         break;
1010
1011                 if (!dma_pte_present(pte)) {
1012                         uint64_t pteval;
1013
1014                         tmp_page = alloc_pgtable_page(domain->nid);
1015
1016                         if (!tmp_page)
1017                                 return NULL;
1018
1019                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1020                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1021                         if (cmpxchg64(&pte->val, 0ULL, pteval))
1022                                 /* Someone else set it while we were thinking; use theirs. */
1023                                 free_pgtable_page(tmp_page);
1024                         else
1025                                 domain_flush_cache(domain, pte, sizeof(*pte));
1026                 }
1027                 if (level == 1)
1028                         break;
1029
1030                 parent = phys_to_virt(dma_pte_addr(pte));
1031                 level--;
1032         }
1033
1034         if (!*target_level)
1035                 *target_level = level;
1036
1037         return pte;
1038 }
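
/*
 * pfn_to_dma_pte() walks (and, where needed, grows) the page table from
 * the top level down.  A *target_level of 0 means "stop at whatever leaf
 * or superpage already exists"; a non-zero value stops at that level,
 * allocating any missing intermediate tables on the way.  The cmpxchg64()
 * above makes that allocation safe against a concurrent walker installing
 * the same table first.
 */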
1039
1040
1041 /* return the address's pte at a specific level */
1042 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1043                                          unsigned long pfn,
1044                                          int level, int *large_page)
1045 {
1046         struct dma_pte *parent, *pte = NULL;
1047         int total = agaw_to_level(domain->agaw);
1048         int offset;
1049
1050         parent = domain->pgd;
1051         while (level <= total) {
1052                 offset = pfn_level_offset(pfn, total);
1053                 pte = &parent[offset];
1054                 if (level == total)
1055                         return pte;
1056
1057                 if (!dma_pte_present(pte)) {
1058                         *large_page = total;
1059                         break;
1060                 }
1061
1062                 if (dma_pte_superpage(pte)) {
1063                         *large_page = total;
1064                         return pte;
1065                 }
1066
1067                 parent = phys_to_virt(dma_pte_addr(pte));
1068                 total--;
1069         }
1070         return NULL;
1071 }
1072
1073 /* clear last level pte; a tlb flush should follow */
1074 static void dma_pte_clear_range(struct dmar_domain *domain,
1075                                 unsigned long start_pfn,
1076                                 unsigned long last_pfn)
1077 {
1078         unsigned int large_page = 1;
1079         struct dma_pte *first_pte, *pte;
1080
1081         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1082         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1083         BUG_ON(start_pfn > last_pfn);
1084
1085         /* we don't need lock here; nobody else touches the iova range */
1086         do {
1087                 large_page = 1;
1088                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1089                 if (!pte) {
1090                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1091                         continue;
1092                 }
1093                 do {
1094                         dma_clear_pte(pte);
1095                         start_pfn += lvl_to_nr_pages(large_page);
1096                         pte++;
1097                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1098
1099                 domain_flush_cache(domain, first_pte,
1100                                    (void *)pte - (void *)first_pte);
1101
1102         } while (start_pfn && start_pfn <= last_pfn);
1103 }
1104
1105 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1106                                int retain_level, struct dma_pte *pte,
1107                                unsigned long pfn, unsigned long start_pfn,
1108                                unsigned long last_pfn)
1109 {
1110         pfn = max(start_pfn, pfn);
1111         pte = &pte[pfn_level_offset(pfn, level)];
1112
1113         do {
1114                 unsigned long level_pfn;
1115                 struct dma_pte *level_pte;
1116
1117                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1118                         goto next;
1119
1120                 level_pfn = pfn & level_mask(level);
1121                 level_pte = phys_to_virt(dma_pte_addr(pte));
1122
1123                 if (level > 2) {
1124                         dma_pte_free_level(domain, level - 1, retain_level,
1125                                            level_pte, level_pfn, start_pfn,
1126                                            last_pfn);
1127                 }
1128
1129                 /*
1130                  * Free the page table if we're below the level we want to
1131                  * retain and the range covers the entire table.
1132                  */
1133                 if (level < retain_level && !(start_pfn > level_pfn ||
1134                       last_pfn < level_pfn + level_size(level) - 1)) {
1135                         dma_clear_pte(pte);
1136                         domain_flush_cache(domain, pte, sizeof(*pte));
1137                         free_pgtable_page(level_pte);
1138                 }
1139 next:
1140                 pfn += level_size(level);
1141         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1142 }
1143
1144 /*
1145  * clear last level (leaf) ptes and free page table pages below the
1146  * level we wish to keep intact.
1147  */
1148 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1149                                    unsigned long start_pfn,
1150                                    unsigned long last_pfn,
1151                                    int retain_level)
1152 {
1153         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1154         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1155         BUG_ON(start_pfn > last_pfn);
1156
1157         dma_pte_clear_range(domain, start_pfn, last_pfn);
1158
1159         /* We don't need lock here; nobody else touches the iova range */
1160         dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1161                            domain->pgd, 0, start_pfn, last_pfn);
1162
1163         /* free pgd */
1164         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1165                 free_pgtable_page(domain->pgd);
1166                 domain->pgd = NULL;
1167         }
1168 }
1169
1170 /* When a page at a given level is being unlinked from its parent, we don't
1171    need to *modify* it at all. All we need to do is make a list of all the
1172    pages which can be freed just as soon as we've flushed the IOTLB and we
1173    know the hardware page-walk will no longer touch them.
1174    The 'pte' argument is the *parent* PTE, pointing to the page that is to
1175    be freed. */
1176 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1177                                             int level, struct dma_pte *pte,
1178                                             struct page *freelist)
1179 {
1180         struct page *pg;
1181
1182         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1183         pg->freelist = freelist;
1184         freelist = pg;
1185
1186         if (level == 1)
1187                 return freelist;
1188
1189         pte = page_address(pg);
1190         do {
1191                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1192                         freelist = dma_pte_list_pagetables(domain, level - 1,
1193                                                            pte, freelist);
1194                 pte++;
1195         } while (!first_pte_in_page(pte));
1196
1197         return freelist;
1198 }
1199
1200 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1201                                         struct dma_pte *pte, unsigned long pfn,
1202                                         unsigned long start_pfn,
1203                                         unsigned long last_pfn,
1204                                         struct page *freelist)
1205 {
1206         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1207
1208         pfn = max(start_pfn, pfn);
1209         pte = &pte[pfn_level_offset(pfn, level)];
1210
1211         do {
1212                 unsigned long level_pfn;
1213
1214                 if (!dma_pte_present(pte))
1215                         goto next;
1216
1217                 level_pfn = pfn & level_mask(level);
1218
1219                 /* If range covers entire pagetable, free it */
1220                 if (start_pfn <= level_pfn &&
1221                     last_pfn >= level_pfn + level_size(level) - 1) {
1222                         /* These subordinate page tables are going away entirely. Don't
1223                            bother to clear them; we're just going to *free* them. */
1224                         if (level > 1 && !dma_pte_superpage(pte))
1225                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1226
1227                         dma_clear_pte(pte);
1228                         if (!first_pte)
1229                                 first_pte = pte;
1230                         last_pte = pte;
1231                 } else if (level > 1) {
1232                         /* Recurse down into a level that isn't *entirely* obsolete */
1233                         freelist = dma_pte_clear_level(domain, level - 1,
1234                                                        phys_to_virt(dma_pte_addr(pte)),
1235                                                        level_pfn, start_pfn, last_pfn,
1236                                                        freelist);
1237                 }
1238 next:
1239                 pfn += level_size(level);
1240         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1241
1242         if (first_pte)
1243                 domain_flush_cache(domain, first_pte,
1244                                    (void *)++last_pte - (void *)first_pte);
1245
1246         return freelist;
1247 }
1248
1249 /* We can't just free the pages because the IOMMU may still be walking
1250    the page tables, and may have cached the intermediate levels. The
1251    pages can only be freed after the IOTLB flush has been done. */
1252 static struct page *domain_unmap(struct dmar_domain *domain,
1253                                  unsigned long start_pfn,
1254                                  unsigned long last_pfn)
1255 {
1256         struct page *freelist = NULL;
1257
1258         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1259         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1260         BUG_ON(start_pfn > last_pfn);
1261
1262         /* we don't need lock here; nobody else touches the iova range */
1263         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1264                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1265
1266         /* free pgd */
1267         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1268                 struct page *pgd_page = virt_to_page(domain->pgd);
1269                 pgd_page->freelist = freelist;
1270                 freelist = pgd_page;
1271
1272                 domain->pgd = NULL;
1273         }
1274
1275         return freelist;
1276 }
1277
1278 static void dma_free_pagelist(struct page *freelist)
1279 {
1280         struct page *pg;
1281
1282         while ((pg = freelist)) {
1283                 freelist = pg->freelist;
1284                 free_pgtable_page(page_address(pg));
1285         }
1286 }
1287
1288 static void iova_entry_free(unsigned long data)
1289 {
1290         struct page *freelist = (struct page *)data;
1291
1292         dma_free_pagelist(freelist);
1293 }
1294
1295 /* iommu handling */
1296 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1297 {
1298         struct root_entry *root;
1299         unsigned long flags;
1300
1301         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1302         if (!root) {
1303                 pr_err("Allocating root entry for %s failed\n",
1304                         iommu->name);
1305                 return -ENOMEM;
1306         }
1307
1308         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1309
1310         spin_lock_irqsave(&iommu->lock, flags);
1311         iommu->root_entry = root;
1312         spin_unlock_irqrestore(&iommu->lock, flags);
1313
1314         return 0;
1315 }
1316
1317 static void iommu_set_root_entry(struct intel_iommu *iommu)
1318 {
1319         u64 addr;
1320         u32 sts;
1321         unsigned long flag;
1322
1323         addr = virt_to_phys(iommu->root_entry);
1324         if (ecs_enabled(iommu))
1325                 addr |= DMA_RTADDR_RTT;
1326
1327         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1328         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1329
1330         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1331
1332         /* Make sure hardware complete it */
1333         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1334                       readl, (sts & DMA_GSTS_RTPS), sts);
1335
1336         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1337 }
1338
1339 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1340 {
1341         u32 val;
1342         unsigned long flag;
1343
1344         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1345                 return;
1346
1347         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1348         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1349
1350         /* Make sure hardware complete it */
1351         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1352                       readl, (!(val & DMA_GSTS_WBFS)), val);
1353
1354         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1355 }
1356
1357 /* return value determines if we need a write buffer flush */
1358 static void __iommu_flush_context(struct intel_iommu *iommu,
1359                                   u16 did, u16 source_id, u8 function_mask,
1360                                   u64 type)
1361 {
1362         u64 val = 0;
1363         unsigned long flag;
1364
1365         switch (type) {
1366         case DMA_CCMD_GLOBAL_INVL:
1367                 val = DMA_CCMD_GLOBAL_INVL;
1368                 break;
1369         case DMA_CCMD_DOMAIN_INVL:
1370                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1371                 break;
1372         case DMA_CCMD_DEVICE_INVL:
1373                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1374                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1375                 break;
1376         default:
1377                 BUG();
1378         }
1379         val |= DMA_CCMD_ICC;
1380
1381         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1382         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1383
1384         /* Make sure hardware complete it */
1385         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1386                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1387
1388         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1389 }
1390
1391 /* return value determines if we need a write buffer flush */
1392 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1393                                 u64 addr, unsigned int size_order, u64 type)
1394 {
1395         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1396         u64 val = 0, val_iva = 0;
1397         unsigned long flag;
1398
1399         switch (type) {
1400         case DMA_TLB_GLOBAL_FLUSH:
1401                 /* global flush doesn't need set IVA_REG */
1402                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1403                 break;
1404         case DMA_TLB_DSI_FLUSH:
1405                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1406                 break;
1407         case DMA_TLB_PSI_FLUSH:
1408                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1409                 /* IH bit is passed in as part of address */
1410                 val_iva = size_order | addr;
1411                 break;
1412         default:
1413                 BUG();
1414         }
1415         /* Note: set drain read/write */
1416 #if 0
1417         /*
1418          * This is probably to be super secure.. Looks like we can
1419          * This is probably meant to be extra safe; it looks like we can
1420          */
1421         if (cap_read_drain(iommu->cap))
1422                 val |= DMA_TLB_READ_DRAIN;
1423 #endif
1424         if (cap_write_drain(iommu->cap))
1425                 val |= DMA_TLB_WRITE_DRAIN;
1426
1427         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1428         /* Note: Only uses first TLB reg currently */
1429         if (val_iva)
1430                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1431         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1432
1433         /* Make sure hardware complete it */
1434         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1435                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1436
1437         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1438
1439         /* check IOTLB invalidation granularity */
1440         if (DMA_TLB_IAIG(val) == 0)
1441                 pr_err("Flush IOTLB failed\n");
1442         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1443                 pr_debug("TLB flush request %Lx, actual %Lx\n",
1444                         (unsigned long long)DMA_TLB_IIRG(type),
1445                         (unsigned long long)DMA_TLB_IAIG(val));
1446 }
1447
1448 static struct device_domain_info *
1449 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1450                          u8 bus, u8 devfn)
1451 {
1452         struct device_domain_info *info;
1453
1454         assert_spin_locked(&device_domain_lock);
1455
1456         if (!iommu->qi)
1457                 return NULL;
1458
1459         list_for_each_entry(info, &domain->devices, link)
1460                 if (info->iommu == iommu && info->bus == bus &&
1461                     info->devfn == devfn) {
1462                         if (info->ats_supported && info->dev)
1463                                 return info;
1464                         break;
1465                 }
1466
1467         return NULL;
1468 }
1469
1470 static void domain_update_iotlb(struct dmar_domain *domain)
1471 {
1472         struct device_domain_info *info;
1473         bool has_iotlb_device = false;
1474
1475         assert_spin_locked(&device_domain_lock);
1476
1477         list_for_each_entry(info, &domain->devices, link) {
1478                 struct pci_dev *pdev;
1479
1480                 if (!info->dev || !dev_is_pci(info->dev))
1481                         continue;
1482
1483                 pdev = to_pci_dev(info->dev);
1484                 if (pdev->ats_enabled) {
1485                         has_iotlb_device = true;
1486                         break;
1487                 }
1488         }
1489
1490         domain->has_iotlb_device = has_iotlb_device;
1491 }
1492
1493 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1494 {
1495         struct pci_dev *pdev;
1496
1497         assert_spin_locked(&device_domain_lock);
1498
1499         if (!info || !dev_is_pci(info->dev))
1500                 return;
1501
1502         pdev = to_pci_dev(info->dev);
1503
1504 #ifdef CONFIG_INTEL_IOMMU_SVM
1505         /* The PCIe spec, in its wisdom, declares that the behaviour of
1506            the device if you enable PASID support after ATS support is
1507            undefined. So always enable PASID support on devices which
1508            have it, even if we can't yet know if we're ever going to
1509            use it. */
1510         if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1511                 info->pasid_enabled = 1;
1512
1513         if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1514                 info->pri_enabled = 1;
1515 #endif
1516         if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1517                 info->ats_enabled = 1;
1518                 domain_update_iotlb(info->domain);
1519                 info->ats_qdep = pci_ats_queue_depth(pdev);
1520         }
1521 }
1522
1523 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1524 {
1525         struct pci_dev *pdev;
1526
1527         assert_spin_locked(&device_domain_lock);
1528
1529         if (!dev_is_pci(info->dev))
1530                 return;
1531
1532         pdev = to_pci_dev(info->dev);
1533
1534         if (info->ats_enabled) {
1535                 pci_disable_ats(pdev);
1536                 info->ats_enabled = 0;
1537                 domain_update_iotlb(info->domain);
1538         }
1539 #ifdef CONFIG_INTEL_IOMMU_SVM
1540         if (info->pri_enabled) {
1541                 pci_disable_pri(pdev);
1542                 info->pri_enabled = 0;
1543         }
1544         if (info->pasid_enabled) {
1545                 pci_disable_pasid(pdev);
1546                 info->pasid_enabled = 0;
1547         }
1548 #endif
1549 }
1550
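/*
 * Issue device-IOTLB (ATS) invalidations to every ATS-enabled device in
 * the domain for the region starting at @addr with an order of @mask
 * pages.  Skipped entirely when no attached device has ATS enabled.
 */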
1551 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1552                                   u64 addr, unsigned mask)
1553 {
1554         u16 sid, qdep;
1555         unsigned long flags;
1556         struct device_domain_info *info;
1557
1558         if (!domain->has_iotlb_device)
1559                 return;
1560
1561         spin_lock_irqsave(&device_domain_lock, flags);
1562         list_for_each_entry(info, &domain->devices, link) {
1563                 if (!info->ats_enabled)
1564                         continue;
1565
1566                 sid = info->bus << 8 | info->devfn;
1567                 qdep = info->ats_qdep;
1568                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1569         }
1570         spin_unlock_irqrestore(&device_domain_lock, flags);
1571 }
1572
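/*
 * Page-selective IOTLB flush for @pages pages starting at @pfn, falling
 * back to a domain-selective flush when the hardware lacks PSI support
 * or the (power-of-two rounded) range exceeds the maximum address mask.
 * A non-zero @ih is encoded as bit 6 of the address to pass the
 * invalidation hint; @map marks a non-present to present change, for
 * which the device IOTLB need not be flushed in caching mode.
 */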
1573 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1574                                   struct dmar_domain *domain,
1575                                   unsigned long pfn, unsigned int pages,
1576                                   int ih, int map)
1577 {
1578         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1579         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1580         u16 did = domain->iommu_did[iommu->seq_id];
1581
1582         BUG_ON(pages == 0);
1583
1584         if (ih)
1585                 ih = 1 << 6;
1586         /*
1587          * Fall back to a domain-selective flush if there is no PSI support
1588          * or the size is too big.
1589          * PSI requires the page size to be a power of two, and the base
1590          * address to be naturally aligned to that size.
1591          */
1592         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1593                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1594                                                 DMA_TLB_DSI_FLUSH);
1595         else
1596                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1597                                                 DMA_TLB_PSI_FLUSH);
1598
1599         /*
1600          * In caching mode, changes of pages from non-present to present require a
1601          * flush. However, the device IOTLB doesn't need to be flushed in this case.
1602          */
1603         if (!cap_caching_mode(iommu->cap) || !map)
1604                 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1605                                       addr, mask);
1606 }
1607
1608 static void iommu_flush_iova(struct iova_domain *iovad)
1609 {
1610         struct dmar_domain *domain;
1611         int idx;
1612
1613         domain = container_of(iovad, struct dmar_domain, iovad);
1614
1615         for_each_domain_iommu(idx, domain) {
1616                 struct intel_iommu *iommu = g_iommus[idx];
1617                 u16 did = domain->iommu_did[iommu->seq_id];
1618
1619                 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
1620
1621                 if (!cap_caching_mode(iommu->cap))
1622                         iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1623                                               0, MAX_AGAW_PFN_WIDTH);
1624         }
1625 }
1626
1627 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1628 {
1629         u32 pmen;
1630         unsigned long flags;
1631
1632         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1633         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1634         pmen &= ~DMA_PMEN_EPM;
1635         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1636
1637         /* wait for the protected region status bit to clear */
1638         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1639                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1640
1641         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1642 }
1643
1644 static void iommu_enable_translation(struct intel_iommu *iommu)
1645 {
1646         u32 sts;
1647         unsigned long flags;
1648
1649         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1650         iommu->gcmd |= DMA_GCMD_TE;
1651         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1652
1653         /* Make sure the hardware completes it */
1654         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1655                       readl, (sts & DMA_GSTS_TES), sts);
1656
1657         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1658 }
1659
1660 static void iommu_disable_translation(struct intel_iommu *iommu)
1661 {
1662         u32 sts;
1663         unsigned long flag;
1664
1665         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1666         iommu->gcmd &= ~DMA_GCMD_TE;
1667         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1668
1669         /* Make sure the hardware completes it */
1670         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1671                       readl, (!(sts & DMA_GSTS_TES)), sts);
1672
1673         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1674 }
1675
1676
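/*
 * Allocate the per-IOMMU domain bookkeeping: a bitmap of domain-ids and
 * a two-level array of domain pointers, managed in chunks of 256 with
 * only the first chunk populated up front.  Domain-id 0 is reserved.
 */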
1677 static int iommu_init_domains(struct intel_iommu *iommu)
1678 {
1679         u32 ndomains, nlongs;
1680         size_t size;
1681
1682         ndomains = cap_ndoms(iommu->cap);
1683         pr_debug("%s: Number of Domains supported <%d>\n",
1684                  iommu->name, ndomains);
1685         nlongs = BITS_TO_LONGS(ndomains);
1686
1687         spin_lock_init(&iommu->lock);
1688
1689         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1690         if (!iommu->domain_ids) {
1691                 pr_err("%s: Allocating domain id array failed\n",
1692                        iommu->name);
1693                 return -ENOMEM;
1694         }
1695
1696         size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1697         iommu->domains = kzalloc(size, GFP_KERNEL);
1698
1699         if (iommu->domains) {
1700                 size = 256 * sizeof(struct dmar_domain *);
1701                 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1702         }
1703
1704         if (!iommu->domains || !iommu->domains[0]) {
1705                 pr_err("%s: Allocating domain array failed\n",
1706                        iommu->name);
1707                 kfree(iommu->domain_ids);
1708                 kfree(iommu->domains);
1709                 iommu->domain_ids = NULL;
1710                 iommu->domains    = NULL;
1711                 return -ENOMEM;
1712         }
1713
1714
1715
1716         /*
1717          * If Caching mode is set, then invalid translations are tagged
1718          * with domain-id 0, hence we need to pre-allocate it. We also
1719          * use domain-id 0 as a marker for non-allocated domain-id, so
1720          * make sure it is not used for a real domain.
1721          */
1722         set_bit(0, iommu->domain_ids);
1723
1724         return 0;
1725 }
1726
1727 static void disable_dmar_iommu(struct intel_iommu *iommu)
1728 {
1729         struct device_domain_info *info, *tmp;
1730         unsigned long flags;
1731
1732         if (!iommu->domains || !iommu->domain_ids)
1733                 return;
1734
1735 again:
1736         spin_lock_irqsave(&device_domain_lock, flags);
1737         list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1738                 struct dmar_domain *domain;
1739
1740                 if (info->iommu != iommu)
1741                         continue;
1742
1743                 if (!info->dev || !info->domain)
1744                         continue;
1745
1746                 domain = info->domain;
1747
1748                 __dmar_remove_one_dev_info(info);
1749
1750                 if (!domain_type_is_vm_or_si(domain)) {
1751                         /*
1752                          * The domain_exit() function can't be called under
1753                          * device_domain_lock, as it takes this lock itself.
1754                          * So release the lock here and re-run the loop
1755                          * afterwards.
1756                          */
1757                         spin_unlock_irqrestore(&device_domain_lock, flags);
1758                         domain_exit(domain);
1759                         goto again;
1760                 }
1761         }
1762         spin_unlock_irqrestore(&device_domain_lock, flags);
1763
1764         if (iommu->gcmd & DMA_GCMD_TE)
1765                 iommu_disable_translation(iommu);
1766 }
1767
1768 static void free_dmar_iommu(struct intel_iommu *iommu)
1769 {
1770         if ((iommu->domains) && (iommu->domain_ids)) {
1771                 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1772                 int i;
1773
1774                 for (i = 0; i < elems; i++)
1775                         kfree(iommu->domains[i]);
1776                 kfree(iommu->domains);
1777                 kfree(iommu->domain_ids);
1778                 iommu->domains = NULL;
1779                 iommu->domain_ids = NULL;
1780         }
1781
1782         g_iommus[iommu->seq_id] = NULL;
1783
1784         /* free context mapping */
1785         free_context_table(iommu);
1786
1787 #ifdef CONFIG_INTEL_IOMMU_SVM
1788         if (pasid_enabled(iommu)) {
1789                 if (ecap_prs(iommu->ecap))
1790                         intel_svm_finish_prq(iommu);
1791                 intel_svm_free_pasid_tables(iommu);
1792         }
1793 #endif
1794 }
1795
1796 static struct dmar_domain *alloc_domain(int flags)
1797 {
1798         struct dmar_domain *domain;
1799
1800         domain = alloc_domain_mem();
1801         if (!domain)
1802                 return NULL;
1803
1804         memset(domain, 0, sizeof(*domain));
1805         domain->nid = -1;
1806         domain->flags = flags;
1807         domain->has_iotlb_device = false;
1808         INIT_LIST_HEAD(&domain->devices);
1809
1810         return domain;
1811 }
1812
1813 /* Must be called with iommu->lock */
1814 static int domain_attach_iommu(struct dmar_domain *domain,
1815                                struct intel_iommu *iommu)
1816 {
1817         unsigned long ndomains;
1818         int num;
1819
1820         assert_spin_locked(&device_domain_lock);
1821         assert_spin_locked(&iommu->lock);
1822
1823         domain->iommu_refcnt[iommu->seq_id] += 1;
1824         domain->iommu_count += 1;
1825         if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1826                 ndomains = cap_ndoms(iommu->cap);
1827                 num      = find_first_zero_bit(iommu->domain_ids, ndomains);
1828
1829                 if (num >= ndomains) {
1830                         pr_err("%s: No free domain ids\n", iommu->name);
1831                         domain->iommu_refcnt[iommu->seq_id] -= 1;
1832                         domain->iommu_count -= 1;
1833                         return -ENOSPC;
1834                 }
1835
1836                 set_bit(num, iommu->domain_ids);
1837                 set_iommu_domain(iommu, num, domain);
1838
1839                 domain->iommu_did[iommu->seq_id] = num;
1840                 domain->nid                      = iommu->node;
1841
1842                 domain_update_iommu_cap(domain);
1843         }
1844
1845         return 0;
1846 }
1847
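/*
 * Drop one of the domain's references on @iommu.  When the last device
 * behind this IOMMU detaches, release the domain-id back to the bitmap
 * and clear the iommu's domain slot.  Returns the remaining attachment
 * count across all IOMMUs.  Must be called with device_domain_lock and
 * iommu->lock held.
 */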
1848 static int domain_detach_iommu(struct dmar_domain *domain,
1849                                struct intel_iommu *iommu)
1850 {
1851         int num, count = INT_MAX;
1852
1853         assert_spin_locked(&device_domain_lock);
1854         assert_spin_locked(&iommu->lock);
1855
1856         domain->iommu_refcnt[iommu->seq_id] -= 1;
1857         count = --domain->iommu_count;
1858         if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1859                 num = domain->iommu_did[iommu->seq_id];
1860                 clear_bit(num, iommu->domain_ids);
1861                 set_iommu_domain(iommu, num, NULL);
1862
1863                 domain_update_iommu_cap(domain);
1864                 domain->iommu_did[iommu->seq_id] = 0;
1865         }
1866
1867         return count;
1868 }
1869
1870 static struct iova_domain reserved_iova_list;
1871 static struct lock_class_key reserved_rbtree_key;
1872
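/*
 * Build the global list of IOVA ranges that must never be handed out to
 * devices: the IOAPIC MMIO window and every PCI MMIO resource, the
 * latter to keep DMA from being routed as peer-to-peer traffic.
 */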
1873 static int dmar_init_reserved_ranges(void)
1874 {
1875         struct pci_dev *pdev = NULL;
1876         struct iova *iova;
1877         int i;
1878
1879         init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
1880
1881         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1882                 &reserved_rbtree_key);
1883
1884         /* IOAPIC ranges shouldn't be accessed by DMA */
1885         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1886                 IOVA_PFN(IOAPIC_RANGE_END));
1887         if (!iova) {
1888                 pr_err("Reserve IOAPIC range failed\n");
1889                 return -ENODEV;
1890         }
1891
1892         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1893         for_each_pci_dev(pdev) {
1894                 struct resource *r;
1895
1896                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1897                         r = &pdev->resource[i];
1898                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1899                                 continue;
1900                         iova = reserve_iova(&reserved_iova_list,
1901                                             IOVA_PFN(r->start),
1902                                             IOVA_PFN(r->end));
1903                         if (!iova) {
1904                                 pr_err("Reserve iova failed\n");
1905                                 return -ENODEV;
1906                         }
1907                 }
1908         }
1909         return 0;
1910 }
1911
1912 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1913 {
1914         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1915 }
1916
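/*
 * Round a guest address width up to the next width the page-table
 * structure can represent: 12 offset bits plus a whole number of 9-bit
 * levels, capped at 64.  For example, gaw = 48 is already 12 + 4 * 9 and
 * stays 48, while gaw = 50 rounds up to 57 (12 + 5 * 9).
 */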
1917 static inline int guestwidth_to_adjustwidth(int gaw)
1918 {
1919         int agaw;
1920         int r = (gaw - 12) % 9;
1921
1922         if (r == 0)
1923                 agaw = gaw;
1924         else
1925                 agaw = gaw + 9 - r;
1926         if (agaw > 64)
1927                 agaw = 64;
1928         return agaw;
1929 }
1930
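/*
 * Initialize a freshly allocated domain for use with @iommu: set up its
 * IOVA allocator and flush queue, copy the reserved ranges, derive the
 * adjusted address width from the AGAWs the hardware supports, record
 * coherency/snooping/superpage capabilities and allocate the top-level
 * page directory.
 */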
1931 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1932                        int guest_width)
1933 {
1934         int adjust_width, agaw;
1935         unsigned long sagaw;
1936         int err;
1937
1938         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
1939
1940         err = init_iova_flush_queue(&domain->iovad,
1941                                     iommu_flush_iova, iova_entry_free);
1942         if (err)
1943                 return err;
1944
1945         domain_reserve_special_ranges(domain);
1946
1947         /* calculate AGAW */
1948         if (guest_width > cap_mgaw(iommu->cap))
1949                 guest_width = cap_mgaw(iommu->cap);
1950         domain->gaw = guest_width;
1951         adjust_width = guestwidth_to_adjustwidth(guest_width);
1952         agaw = width_to_agaw(adjust_width);
1953         sagaw = cap_sagaw(iommu->cap);
1954         if (!test_bit(agaw, &sagaw)) {
1955                 /* hardware doesn't support it, choose a bigger one */
1956                 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1957                 agaw = find_next_bit(&sagaw, 5, agaw);
1958                 if (agaw >= 5)
1959                         return -ENODEV;
1960         }
1961         domain->agaw = agaw;
1962
1963         if (ecap_coherent(iommu->ecap))
1964                 domain->iommu_coherency = 1;
1965         else
1966                 domain->iommu_coherency = 0;
1967
1968         if (ecap_sc_support(iommu->ecap))
1969                 domain->iommu_snooping = 1;
1970         else
1971                 domain->iommu_snooping = 0;
1972
1973         if (intel_iommu_superpage)
1974                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1975         else
1976                 domain->iommu_superpage = 0;
1977
1978         domain->nid = iommu->node;
1979
1980         /* always allocate the top pgd */
1981         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1982         if (!domain->pgd)
1983                 return -ENOMEM;
1984         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1985         return 0;
1986 }
1987
1988 static void domain_exit(struct dmar_domain *domain)
1989 {
1990         struct page *freelist = NULL;
1991
1992         /* Domain 0 is reserved, so don't process it */
1993         if (!domain)
1994                 return;
1995
1996         /* Remove associated devices and clear attached or cached domains */
1997         rcu_read_lock();
1998         domain_remove_dev_info(domain);
1999         rcu_read_unlock();
2000
2001         /* destroy iovas */
2002         put_iova_domain(&domain->iovad);
2003
2004         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
2005
2006         dma_free_pagelist(freelist);
2007
2008         free_domain_mem(domain);
2009 }
2010
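/*
 * Install the context entry for (@bus, @devfn) on @iommu so that it
 * points at @domain's page tables, or selects pass-through for the
 * static identity domain when hardware pass-through is in use.  Performs
 * the context/IOTLB flushes that caching mode (and copied kdump entries)
 * require, and does nothing if a present entry is already installed.
 */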
2011 static int domain_context_mapping_one(struct dmar_domain *domain,
2012                                       struct intel_iommu *iommu,
2013                                       u8 bus, u8 devfn)
2014 {
2015         u16 did = domain->iommu_did[iommu->seq_id];
2016         int translation = CONTEXT_TT_MULTI_LEVEL;
2017         struct device_domain_info *info = NULL;
2018         struct context_entry *context;
2019         unsigned long flags;
2020         struct dma_pte *pgd;
2021         int ret, agaw;
2022
2023         WARN_ON(did == 0);
2024
2025         if (hw_pass_through && domain_type_is_si(domain))
2026                 translation = CONTEXT_TT_PASS_THROUGH;
2027
2028         pr_debug("Set context mapping for %02x:%02x.%d\n",
2029                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2030
2031         BUG_ON(!domain->pgd);
2032
2033         spin_lock_irqsave(&device_domain_lock, flags);
2034         spin_lock(&iommu->lock);
2035
2036         ret = -ENOMEM;
2037         context = iommu_context_addr(iommu, bus, devfn, 1);
2038         if (!context)
2039                 goto out_unlock;
2040
2041         ret = 0;
2042         if (context_present(context))
2043                 goto out_unlock;
2044
2045         /*
2046          * For kdump cases, old valid entries may be cached due to the
2047          * in-flight DMA and copied pgtable, but there is no unmapping
2048          * behaviour for them, thus we need an explicit cache flush for
2049          * the newly-mapped device. For kdump, at this point, the device
2050          * is supposed to finish reset at its driver probe stage, so no
2051          * in-flight DMA will exist, and we don't need to worry anymore
2052          * in-flight DMA will exist, and we don't need to worry about it
2053          * hereafter.
2054         if (context_copied(context)) {
2055                 u16 did_old = context_domain_id(context);
2056
2057                 if (did_old < cap_ndoms(iommu->cap)) {
2058                         iommu->flush.flush_context(iommu, did_old,
2059                                                    (((u16)bus) << 8) | devfn,
2060                                                    DMA_CCMD_MASK_NOBIT,
2061                                                    DMA_CCMD_DEVICE_INVL);
2062                         iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2063                                                  DMA_TLB_DSI_FLUSH);
2064                 }
2065         }
2066
2067         pgd = domain->pgd;
2068
2069         context_clear_entry(context);
2070         context_set_domain_id(context, did);
2071
2072         /*
2073          * Skip top levels of page tables for an iommu whose agaw is less
2074          * than the default.  Unnecessary for PT mode.
2075          */
2076         if (translation != CONTEXT_TT_PASS_THROUGH) {
2077                 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
2078                         ret = -ENOMEM;
2079                         pgd = phys_to_virt(dma_pte_addr(pgd));
2080                         if (!dma_pte_present(pgd))
2081                                 goto out_unlock;
2082                 }
2083
2084                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2085                 if (info && info->ats_supported)
2086                         translation = CONTEXT_TT_DEV_IOTLB;
2087                 else
2088                         translation = CONTEXT_TT_MULTI_LEVEL;
2089
2090                 context_set_address_root(context, virt_to_phys(pgd));
2091                 context_set_address_width(context, iommu->agaw);
2092         } else {
2093                 /*
2094                  * In pass through mode, AW must be programmed to
2095                  * indicate the largest AGAW value supported by
2096                  * hardware. And ASR is ignored by hardware.
2097                  */
2098                 context_set_address_width(context, iommu->msagaw);
2099         }
2100
2101         context_set_translation_type(context, translation);
2102         context_set_fault_enable(context);
2103         context_set_present(context);
2104         domain_flush_cache(domain, context, sizeof(*context));
2105
2106         /*
2107          * It's a non-present to present mapping. If the hardware doesn't cache
2108          * non-present entries we only need to flush the write-buffer. If it
2109          * _does_ cache non-present entries, then it does so in the special
2110          * domain #0, which we have to flush:
2111          */
2112         if (cap_caching_mode(iommu->cap)) {
2113                 iommu->flush.flush_context(iommu, 0,
2114                                            (((u16)bus) << 8) | devfn,
2115                                            DMA_CCMD_MASK_NOBIT,
2116                                            DMA_CCMD_DEVICE_INVL);
2117                 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2118         } else {
2119                 iommu_flush_write_buffer(iommu);
2120         }
2121         iommu_enable_dev_iotlb(info);
2122
2123         ret = 0;
2124
2125 out_unlock:
2126         spin_unlock(&iommu->lock);
2127         spin_unlock_irqrestore(&device_domain_lock, flags);
2128
2129         return ret;
2130 }
2131
2132 struct domain_context_mapping_data {
2133         struct dmar_domain *domain;
2134         struct intel_iommu *iommu;
2135 };
2136
2137 static int domain_context_mapping_cb(struct pci_dev *pdev,
2138                                      u16 alias, void *opaque)
2139 {
2140         struct domain_context_mapping_data *data = opaque;
2141
2142         return domain_context_mapping_one(data->domain, data->iommu,
2143                                           PCI_BUS_NUM(alias), alias & 0xff);
2144 }
2145
2146 static int
2147 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2148 {
2149         struct intel_iommu *iommu;
2150         u8 bus, devfn;
2151         struct domain_context_mapping_data data;
2152
2153         iommu = device_to_iommu(dev, &bus, &devfn);
2154         if (!iommu)
2155                 return -ENODEV;
2156
2157         if (!dev_is_pci(dev))
2158                 return domain_context_mapping_one(domain, iommu, bus, devfn);
2159
2160         data.domain = domain;
2161         data.iommu = iommu;
2162
2163         return pci_for_each_dma_alias(to_pci_dev(dev),
2164                                       &domain_context_mapping_cb, &data);
2165 }
2166
2167 static int domain_context_mapped_cb(struct pci_dev *pdev,
2168                                     u16 alias, void *opaque)
2169 {
2170         struct intel_iommu *iommu = opaque;
2171
2172         return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2173 }
2174
2175 static int domain_context_mapped(struct device *dev)
2176 {
2177         struct intel_iommu *iommu;
2178         u8 bus, devfn;
2179
2180         iommu = device_to_iommu(dev, &bus, &devfn);
2181         if (!iommu)
2182                 return -ENODEV;
2183
2184         if (!dev_is_pci(dev))
2185                 return device_context_mapped(iommu, bus, devfn);
2186
2187         return !pci_for_each_dma_alias(to_pci_dev(dev),
2188                                        domain_context_mapped_cb, iommu);
2189 }
2190
2191 /* Returns a number of VTD pages, but aligned to MM page size */
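/* e.g. with 4KiB pages, host_addr = 0x1234 and size = 0x2000 span
   0x234 + 0x2000 = 0x2234 bytes from the page boundary, which rounds
   up to 0x3000: three VT-d pages. */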
2192 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2193                                             size_t size)
2194 {
2195         host_addr &= ~PAGE_MASK;
2196         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2197 }
2198
2199 /* Return largest possible superpage level for a given mapping */
2200 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2201                                           unsigned long iov_pfn,
2202                                           unsigned long phy_pfn,
2203                                           unsigned long pages)
2204 {
2205         int support, level = 1;
2206         unsigned long pfnmerge;
2207
2208         support = domain->iommu_superpage;
2209
2210         /* To use a large page, the virtual *and* physical addresses
2211            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2212            of them will mean we have to use smaller pages. So just
2213            merge them and check both at once. */
2214         pfnmerge = iov_pfn | phy_pfn;
2215
2216         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2217                 pages >>= VTD_STRIDE_SHIFT;
2218                 if (!pages)
2219                         break;
2220                 pfnmerge >>= VTD_STRIDE_SHIFT;
2221                 level++;
2222                 support--;
2223         }
2224         return level;
2225 }
2226
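/*
 * Core mapping loop shared by the sg and pfn helpers below: walk the
 * IOVA range, pick the largest superpage level that the addresses and
 * remaining length allow, populate PTEs with cmpxchg64_local() (warning
 * if an entry is unexpectedly set already) and flush the CPU cache
 * whenever a page-table page has been filled, the superpage level has
 * to drop, or the mapping is complete.
 */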
2227 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2228                             struct scatterlist *sg, unsigned long phys_pfn,
2229                             unsigned long nr_pages, int prot)
2230 {
2231         struct dma_pte *first_pte = NULL, *pte = NULL;
2232         phys_addr_t uninitialized_var(pteval);
2233         unsigned long sg_res = 0;
2234         unsigned int largepage_lvl = 0;
2235         unsigned long lvl_pages = 0;
2236
2237         BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2238
2239         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2240                 return -EINVAL;
2241
2242         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2243
2244         if (!sg) {
2245                 sg_res = nr_pages;
2246                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2247         }
2248
2249         while (nr_pages > 0) {
2250                 uint64_t tmp;
2251
2252                 if (!sg_res) {
2253                         unsigned int pgoff = sg->offset & ~PAGE_MASK;
2254
2255                         sg_res = aligned_nrpages(sg->offset, sg->length);
2256                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
2257                         sg->dma_length = sg->length;
2258                         pteval = (sg_phys(sg) - pgoff) | prot;
2259                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
2260                 }
2261
2262                 if (!pte) {
2263                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2264
2265                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2266                         if (!pte)
2267                                 return -ENOMEM;
2268                         /* It is a large page */
2269                         if (largepage_lvl > 1) {
2270                                 unsigned long nr_superpages, end_pfn;
2271
2272                                 pteval |= DMA_PTE_LARGE_PAGE;
2273                                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2274
2275                                 nr_superpages = sg_res / lvl_pages;
2276                                 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2277
2278                                 /*
2279                                  * Ensure that old small page tables are
2280                                  * removed to make room for superpage(s).
2281                                  * We're adding new large pages, so make sure
2282                                  * we don't remove their parent tables.
2283                                  */
2284                                 dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
2285                                                        largepage_lvl + 1);
2286                         } else {
2287                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2288                         }
2289
2290                 }
2291                 /* We don't need a lock here; nobody else
2292                  * touches the iova range.
2293                  */
2294                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2295                 if (tmp) {
2296                         static int dumps = 5;
2297                         pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2298                                 iov_pfn, tmp, (unsigned long long)pteval);
2299                         if (dumps) {
2300                                 dumps--;
2301                                 debug_dma_dump_mappings(NULL);
2302                         }
2303                         WARN_ON(1);
2304                 }
2305
2306                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2307
2308                 BUG_ON(nr_pages < lvl_pages);
2309                 BUG_ON(sg_res < lvl_pages);
2310
2311                 nr_pages -= lvl_pages;
2312                 iov_pfn += lvl_pages;
2313                 phys_pfn += lvl_pages;
2314                 pteval += lvl_pages * VTD_PAGE_SIZE;
2315                 sg_res -= lvl_pages;
2316
2317                 /* If the next PTE would be the first in a new page, then we
2318                    need to flush the cache on the entries we've just written.
2319                    And then we'll need to recalculate 'pte', so clear it and
2320                    let it get set again in the if (!pte) block above.
2321
2322                    If we're done (!nr_pages) we need to flush the cache too.
2323
2324                    Also if we've been setting superpages, we may need to
2325                    recalculate 'pte' and switch back to smaller pages for the
2326                    end of the mapping, if the trailing size is not enough to
2327                    use another superpage (i.e. sg_res < lvl_pages). */
2328                 pte++;
2329                 if (!nr_pages || first_pte_in_page(pte) ||
2330                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2331                         domain_flush_cache(domain, first_pte,
2332                                            (void *)pte - (void *)first_pte);
2333                         pte = NULL;
2334                 }
2335
2336                 if (!sg_res && nr_pages)
2337                         sg = sg_next(sg);
2338         }
2339         return 0;
2340 }
2341
2342 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2343                                     struct scatterlist *sg, unsigned long nr_pages,
2344                                     int prot)
2345 {
2346         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2347 }
2348
2349 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2350                                      unsigned long phys_pfn, unsigned long nr_pages,
2351                                      int prot)
2352 {
2353         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2354 }
2355
2356 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2357 {
2358         unsigned long flags;
2359         struct context_entry *context;
2360         u16 did_old;
2361
2362         if (!iommu)
2363                 return;
2364
2365         spin_lock_irqsave(&iommu->lock, flags);
2366         context = iommu_context_addr(iommu, bus, devfn, 0);
2367         if (!context) {
2368                 spin_unlock_irqrestore(&iommu->lock, flags);
2369                 return;
2370         }
2371         did_old = context_domain_id(context);
2372         context_clear_entry(context);
2373         __iommu_flush_cache(iommu, context, sizeof(*context));
2374         spin_unlock_irqrestore(&iommu->lock, flags);
2375         iommu->flush.flush_context(iommu,
2376                                    did_old,
2377                                    (((u16)bus) << 8) | devfn,
2378                                    DMA_CCMD_MASK_NOBIT,
2379                                    DMA_CCMD_DEVICE_INVL);
2380         iommu->flush.flush_iotlb(iommu,
2381                                  did_old,
2382                                  0,
2383                                  0,
2384                                  DMA_TLB_DSI_FLUSH);
2385 }
2386
2387 static inline void unlink_domain_info(struct device_domain_info *info)
2388 {
2389         assert_spin_locked(&device_domain_lock);
2390         list_del(&info->link);
2391         list_del(&info->global);
2392         if (info->dev)
2393                 info->dev->archdata.iommu = NULL;
2394 }
2395
2396 static void domain_remove_dev_info(struct dmar_domain *domain)
2397 {
2398         struct device_domain_info *info, *tmp;
2399         unsigned long flags;
2400
2401         spin_lock_irqsave(&device_domain_lock, flags);
2402         list_for_each_entry_safe(info, tmp, &domain->devices, link)
2403                 __dmar_remove_one_dev_info(info);
2404         spin_unlock_irqrestore(&device_domain_lock, flags);
2405 }
2406
2407 /*
2408  * find_domain
2409  * Note: struct device->archdata.iommu stores the info
2410  */
2411 static struct dmar_domain *find_domain(struct device *dev)
2412 {
2413         struct device_domain_info *info;
2414
2415         /* No lock here, assumes no domain exit in normal case */
2416         info = dev->archdata.iommu;
2417         if (likely(info))
2418                 return info->domain;
2419         return NULL;
2420 }
2421
2422 static inline struct device_domain_info *
2423 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2424 {
2425         struct device_domain_info *info;
2426
2427         list_for_each_entry(info, &device_domain_list, global)
2428                 if (info->iommu->segment == segment && info->bus == bus &&
2429                     info->devfn == devfn)
2430                         return info;
2431
2432         return NULL;
2433 }
2434
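/*
 * Allocate a device_domain_info for (@bus, @devfn), probe ATS/PASID/PRI
 * support for PCI devices, attach @domain to @iommu and link the info
 * into the domain and global lists.  If the device (or its DMA alias)
 * already has a domain, the new info is freed and the existing domain is
 * returned; the caller must then dispose of the domain it passed in.
 */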
2435 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2436                                                     int bus, int devfn,
2437                                                     struct device *dev,
2438                                                     struct dmar_domain *domain)
2439 {
2440         struct dmar_domain *found = NULL;
2441         struct device_domain_info *info;
2442         unsigned long flags;
2443         int ret;
2444
2445         info = alloc_devinfo_mem();
2446         if (!info)
2447                 return NULL;
2448
2449         info->bus = bus;
2450         info->devfn = devfn;
2451         info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2452         info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2453         info->ats_qdep = 0;
2454         info->dev = dev;
2455         info->domain = domain;
2456         info->iommu = iommu;
2457
2458         if (dev && dev_is_pci(dev)) {
2459                 struct pci_dev *pdev = to_pci_dev(info->dev);
2460
2461                 if (ecap_dev_iotlb_support(iommu->ecap) &&
2462                     pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2463                     dmar_find_matched_atsr_unit(pdev))
2464                         info->ats_supported = 1;
2465
2466                 if (ecs_enabled(iommu)) {
2467                         if (pasid_enabled(iommu)) {
2468                                 int features = pci_pasid_features(pdev);
2469                                 if (features >= 0)
2470                                         info->pasid_supported = features | 1;
2471                         }
2472
2473                         if (info->ats_supported && ecap_prs(iommu->ecap) &&
2474                             pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2475                                 info->pri_supported = 1;
2476                 }
2477         }
2478
2479         spin_lock_irqsave(&device_domain_lock, flags);
2480         if (dev)
2481                 found = find_domain(dev);
2482
2483         if (!found) {
2484                 struct device_domain_info *info2;
2485                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2486                 if (info2) {
2487                         found      = info2->domain;
2488                         info2->dev = dev;
2489                 }
2490         }
2491
2492         if (found) {
2493                 spin_unlock_irqrestore(&device_domain_lock, flags);
2494                 free_devinfo_mem(info);
2495                 /* Caller must free the original domain */
2496                 return found;
2497         }
2498
2499         spin_lock(&iommu->lock);
2500         ret = domain_attach_iommu(domain, iommu);
2501         spin_unlock(&iommu->lock);
2502
2503         if (ret) {
2504                 spin_unlock_irqrestore(&device_domain_lock, flags);
2505                 free_devinfo_mem(info);
2506                 return NULL;
2507         }
2508
2509         list_add(&info->link, &domain->devices);
2510         list_add(&info->global, &device_domain_list);
2511         if (dev)
2512                 dev->archdata.iommu = info;
2513         spin_unlock_irqrestore(&device_domain_lock, flags);
2514
2515         if (dev && domain_context_mapping(domain, dev)) {
2516                 pr_err("Domain context map for %s failed\n", dev_name(dev));
2517                 dmar_remove_one_dev_info(domain, dev);
2518                 return NULL;
2519         }
2520
2521         return domain;
2522 }
2523
2524 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2525 {
2526         *(u16 *)opaque = alias;
2527         return 0;
2528 }
2529
2530 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2531 {
2532         struct device_domain_info *info = NULL;
2533         struct dmar_domain *domain = NULL;
2534         struct intel_iommu *iommu;
2535         u16 req_id, dma_alias;
2536         unsigned long flags;
2537         u8 bus, devfn;
2538
2539         iommu = device_to_iommu(dev, &bus, &devfn);
2540         if (!iommu)
2541                 return NULL;
2542
2543         req_id = ((u16)bus << 8) | devfn;
2544
2545         if (dev_is_pci(dev)) {
2546                 struct pci_dev *pdev = to_pci_dev(dev);
2547
2548                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2549
2550                 spin_lock_irqsave(&device_domain_lock, flags);
2551                 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2552                                                       PCI_BUS_NUM(dma_alias),
2553                                                       dma_alias & 0xff);
2554                 if (info) {
2555                         iommu = info->iommu;
2556                         domain = info->domain;
2557                 }
2558                 spin_unlock_irqrestore(&device_domain_lock, flags);
2559
2560                 /* DMA alias already has a domain, use it */
2561                 if (info)
2562                         goto out;
2563         }
2564
2565         /* Allocate and initialize new domain for the device */
2566         domain = alloc_domain(0);
2567         if (!domain)
2568                 return NULL;
2569         if (domain_init(domain, iommu, gaw)) {
2570                 domain_exit(domain);
2571                 return NULL;
2572         }
2573
2574 out:
2575
2576         return domain;
2577 }
2578
2579 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2580                                               struct dmar_domain *domain)
2581 {
2582         struct intel_iommu *iommu;
2583         struct dmar_domain *tmp;
2584         u16 req_id, dma_alias;
2585         u8 bus, devfn;
2586
2587         iommu = device_to_iommu(dev, &bus, &devfn);
2588         if (!iommu)
2589                 return NULL;
2590
2591         req_id = ((u16)bus << 8) | devfn;
2592
2593         if (dev_is_pci(dev)) {
2594                 struct pci_dev *pdev = to_pci_dev(dev);
2595
2596                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2597
2598                 /* register PCI DMA alias device */
2599                 if (req_id != dma_alias) {
2600                         tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2601                                         dma_alias & 0xff, NULL, domain);
2602
2603                         if (!tmp || tmp != domain)
2604                                 return tmp;
2605                 }
2606         }
2607
2608         tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2609         if (!tmp || tmp != domain)
2610                 return tmp;
2611
2612         return domain;
2613 }
2614
2615 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2616 {
2617         struct dmar_domain *domain, *tmp;
2618
2619         domain = find_domain(dev);
2620         if (domain)
2621                 goto out;
2622
2623         domain = find_or_alloc_domain(dev, gaw);
2624         if (!domain)
2625                 goto out;
2626
2627         tmp = set_domain_for_dev(dev, domain);
2628         if (!tmp || domain != tmp) {
2629                 domain_exit(domain);
2630                 domain = tmp;
2631         }
2632
2633 out:
2634
2635         return domain;
2636 }
2637
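/*
 * Create a 1:1 mapping for [@start, @end] in @domain: reserve the IOVA
 * range so the allocator never hands it out, clear any existing PTEs
 * (an RMRR may overlap ordinary RAM) and map every page onto itself
 * with read/write permission.
 */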
2638 static int iommu_domain_identity_map(struct dmar_domain *domain,
2639                                      unsigned long long start,
2640                                      unsigned long long end)
2641 {
2642         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2643         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2644
2645         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2646                           dma_to_mm_pfn(last_vpfn))) {
2647                 pr_err("Reserving iova failed\n");
2648                 return -ENOMEM;
2649         }
2650
2651         pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2652         /*
2653          * RMRR range might have overlap with physical memory range,
2654          * clear it first
2655          */
2656         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2657
2658         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2659                                   last_vpfn - first_vpfn + 1,
2660                                   DMA_PTE_READ|DMA_PTE_WRITE);
2661 }
2662
2663 static int domain_prepare_identity_map(struct device *dev,
2664                                        struct dmar_domain *domain,
2665                                        unsigned long long start,
2666                                        unsigned long long end)
2667 {
2668         /* For _hardware_ passthrough, don't bother. But for software
2669            passthrough, we do it anyway -- it may indicate a memory
2670            range which is reserved in E820, and so didn't get set
2671            up to start with in si_domain */
2672         if (domain == si_domain && hw_pass_through) {
2673                 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2674                         dev_name(dev), start, end);
2675                 return 0;
2676         }
2677
2678         pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2679                 dev_name(dev), start, end);
2680
2681         if (end < start) {
2682                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2683                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2684                         dmi_get_system_info(DMI_BIOS_VENDOR),
2685                         dmi_get_system_info(DMI_BIOS_VERSION),
2686                         dmi_get_system_info(DMI_PRODUCT_VERSION));
2687                 return -EIO;
2688         }
2689
2690         if (end >> agaw_to_width(domain->agaw)) {
2691                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2692                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2693                      agaw_to_width(domain->agaw),
2694                      dmi_get_system_info(DMI_BIOS_VENDOR),
2695                      dmi_get_system_info(DMI_BIOS_VERSION),
2696                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2697                 return -EIO;
2698         }
2699
2700         return iommu_domain_identity_map(domain, start, end);
2701 }
2702
2703 static int iommu_prepare_identity_map(struct device *dev,
2704                                       unsigned long long start,
2705                                       unsigned long long end)
2706 {
2707         struct dmar_domain *domain;
2708         int ret;
2709
2710         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2711         if (!domain)
2712                 return -ENOMEM;
2713
2714         ret = domain_prepare_identity_map(dev, domain, start, end);
2715         if (ret)
2716                 domain_exit(domain);
2717
2718         return ret;
2719 }
2720
2721 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2722                                          struct device *dev)
2723 {
2724         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2725                 return 0;
2726         return iommu_prepare_identity_map(dev, rmrr->base_address,
2727                                           rmrr->end_address);
2728 }
2729
2730 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2731 static inline void iommu_prepare_isa(void)
2732 {
2733         struct pci_dev *pdev;
2734         int ret;
2735
2736         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2737         if (!pdev)
2738                 return;
2739
2740         pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2741         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2742
2743         if (ret)
2744                 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2745
2746         pci_dev_put(pdev);
2747 }
2748 #else
2749 static inline void iommu_prepare_isa(void)
2750 {
2751         return;
2752 }
2753 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2754
2755 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2756
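/*
 * Set up the static identity (si) domain used for pass-through devices.
 * With hardware pass-through nothing needs to be mapped; otherwise every
 * usable RAM range on every online node is identity-mapped into it.
 */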
2757 static int __init si_domain_init(int hw)
2758 {
2759         int nid, ret = 0;
2760
2761         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2762         if (!si_domain)
2763                 return -EFAULT;
2764
2765         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2766                 domain_exit(si_domain);
2767                 return -EFAULT;
2768         }
2769
2770         pr_debug("Identity mapping domain allocated\n");
2771
2772         if (hw)
2773                 return 0;
2774
2775         for_each_online_node(nid) {
2776                 unsigned long start_pfn, end_pfn;
2777                 int i;
2778
2779                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2780                         ret = iommu_domain_identity_map(si_domain,
2781                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2782                         if (ret)
2783                                 return ret;
2784                 }
2785         }
2786
2787         return 0;
2788 }
2789
2790 static int identity_mapping(struct device *dev)
2791 {
2792         struct device_domain_info *info;
2793
2794         if (likely(!iommu_identity_mapping))
2795                 return 0;
2796
2797         info = dev->archdata.iommu;
2798         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2799                 return (info->domain == si_domain);
2800
2801         return 0;
2802 }
2803
2804 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2805 {
2806         struct dmar_domain *ndomain;
2807         struct intel_iommu *iommu;
2808         u8 bus, devfn;
2809
2810         iommu = device_to_iommu(dev, &bus, &devfn);
2811         if (!iommu)
2812                 return -ENODEV;
2813
2814         ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2815         if (ndomain != domain)
2816                 return -EBUSY;
2817
2818         return 0;
2819 }
2820
2821 static bool device_has_rmrr(struct device *dev)
2822 {
2823         struct dmar_rmrr_unit *rmrr;
2824         struct device *tmp;
2825         int i;
2826
2827         rcu_read_lock();
2828         for_each_rmrr_units(rmrr) {
2829                 /*
2830                  * Return TRUE if this RMRR contains the device that
2831                  * is passed in.
2832                  */
2833                 for_each_active_dev_scope(rmrr->devices,
2834                                           rmrr->devices_cnt, i, tmp)
2835                         if (tmp == dev) {
2836                                 rcu_read_unlock();
2837                                 return true;
2838                         }
2839         }
2840         rcu_read_unlock();
2841         return false;
2842 }
2843
2844 /*
2845  * There are a couple cases where we need to restrict the functionality of
2846  * devices associated with RMRRs.  The first is when evaluating a device for
2847  * identity mapping because problems exist when devices are moved in and out
2848  * of domains and their respective RMRR information is lost.  This means that
2849  * a device with associated RMRRs will never be in a "passthrough" domain.
2850  * The second is use of the device through the IOMMU API.  This interface
2851  * expects to have full control of the IOVA space for the device.  We cannot
2852  * satisfy both the requirement that RMRR access is maintained and have an
2853  * unencumbered IOVA space.  We also have no ability to quiesce the device's
2854  * use of the RMRR space or even inform the IOMMU API user of the restriction.
2855  * We therefore prevent devices associated with an RMRR from participating in
2856  * the IOMMU API, which eliminates them from device assignment.
2857  *
2858  * In both cases we assume that PCI USB devices with RMRRs have them largely
2859  * for historical reasons and that the RMRR space is not actively used post
2860  * boot.  This exclusion may change if vendors begin to abuse it.
2861  *
2862  * The same exception is made for graphics devices, with the requirement that
2863  * any use of the RMRR regions will be torn down before assigning the device
2864  * to a guest.
2865  */
2866 static bool device_is_rmrr_locked(struct device *dev)
2867 {
2868         if (!device_has_rmrr(dev))
2869                 return false;
2870
2871         if (dev_is_pci(dev)) {
2872                 struct pci_dev *pdev = to_pci_dev(dev);
2873
2874                 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2875                         return false;
2876         }
2877
2878         return true;
2879 }
2880
2881 static int iommu_should_identity_map(struct device *dev, int startup)
2882 {
2883
2884         if (dev_is_pci(dev)) {
2885                 struct pci_dev *pdev = to_pci_dev(dev);
2886
2887                 if (device_is_rmrr_locked(dev))
2888                         return 0;
2889
2890                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2891                         return 1;
2892
2893                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2894                         return 1;
2895
2896                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2897                         return 0;
2898
2899                 /*
2900                  * We want to start off with all devices in the 1:1 domain, and
2901                  * take them out later if we find they can't access all of memory.
2902                  *
2903                  * However, we can't do this for PCI devices behind bridges,
2904                  * because all PCI devices behind the same bridge will end up
2905                  * with the same source-id on their transactions.
2906                  *
2907                  * Practically speaking, we can't change things around for these
2908                  * devices at run-time, because we can't be sure there'll be no
2909                  * DMA transactions in flight for any of their siblings.
2910                  *
2911                  * So PCI devices (unless they're on the root bus) as well as
2912                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2913                  * the 1:1 domain, just in _case_ one of their siblings turns out
2914                  * not to be able to map all of memory.
2915                  */
2916                 if (!pci_is_pcie(pdev)) {
2917                         if (!pci_is_root_bus(pdev->bus))
2918                                 return 0;
2919                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2920                                 return 0;
2921                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2922                         return 0;
2923         } else {
2924                 if (device_has_rmrr(dev))
2925                         return 0;
2926         }
2927
2928         /*
2929          * At boot time, we don't yet know if devices will be 64-bit capable.
2930          * Assume that they will -- if they turn out not to be, then we can
2931          * take them out of the 1:1 domain later.
2932          */
2933         if (!startup) {
2934                 /*
2935                  * If the device's dma_mask is less than the system's memory
2936                  * size then this is not a candidate for identity mapping.
2937                  */
2938                 u64 dma_mask = *dev->dma_mask;
2939
2940                 if (dev->coherent_dma_mask &&
2941                     dev->coherent_dma_mask < dma_mask)
2942                         dma_mask = dev->coherent_dma_mask;
2943
2944                 return dma_mask >= dma_get_required_mask(dev);
2945         }
2946
2947         return 1;
2948 }
2949
2950 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2951 {
2952         int ret;
2953
2954         if (!iommu_should_identity_map(dev, 1))
2955                 return 0;
2956
2957         ret = domain_add_dev_info(si_domain, dev);
2958         if (!ret)
2959                 pr_info("%s identity mapping for device %s\n",
2960                         hw ? "Hardware" : "Software", dev_name(dev));
2961         else if (ret == -ENODEV)
2962                 /* device not associated with an iommu */
2963                 ret = 0;
2964
2965         return ret;
2966 }
2967
2968
2969 static int __init iommu_prepare_static_identity_mapping(int hw)
2970 {
2971         struct pci_dev *pdev = NULL;
2972         struct dmar_drhd_unit *drhd;
2973         struct intel_iommu *iommu;
2974         struct device *dev;
2975         int i;
2976         int ret = 0;
2977
2978         for_each_pci_dev(pdev) {
2979                 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2980                 if (ret)
2981                         return ret;
2982         }
2983
2984         for_each_active_iommu(iommu, drhd)
2985                 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2986                         struct acpi_device_physical_node *pn;
2987                         struct acpi_device *adev;
2988
2989                         if (dev->bus != &acpi_bus_type)
2990                                 continue;
2991
2992                         adev = to_acpi_device(dev);
2993                         mutex_lock(&adev->physical_node_lock);
2994                         list_for_each_entry(pn, &adev->physical_node_list, node) {
2995                                 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2996                                 if (ret)
2997                                         break;
2998                         }
2999                         mutex_unlock(&adev->physical_node_lock);
3000                         if (ret)
3001                                 return ret;
3002                 }
3003
3004         return 0;
3005 }
3006
3007 static void intel_iommu_init_qi(struct intel_iommu *iommu)
3008 {
3009         /*
3010          * Start from a sane IOMMU hardware state.
3011          * If queued invalidation was already initialized by us (for
3012          * example, while enabling interrupt remapping), then things are
3013          * already rolling from a sane state.
3014          */
3015         if (!iommu->qi) {
3016                 /*
3017                  * Clear any previous faults.
3018                  */
3019                 dmar_fault(-1, iommu);
3020                 /*
3021                  * Disable queued invalidation if supported and already enabled
3022                  * before OS handover.
3023                  */
3024                 dmar_disable_qi(iommu);
3025         }
3026
3027         if (dmar_enable_qi(iommu)) {
3028                 /*
3029                  * Queued Invalidate not enabled, use Register Based Invalidate
3030                  */
3031                 iommu->flush.flush_context = __iommu_flush_context;
3032                 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3033                 pr_info("%s: Using Register based invalidation\n",
3034                         iommu->name);
3035         } else {
3036                 iommu->flush.flush_context = qi_flush_context;
3037                 iommu->flush.flush_iotlb = qi_flush_iotlb;
3038                 pr_info("%s: Using Queued invalidation\n", iommu->name);
3039         }
3040 }
3041
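/*
 * Copy one bus's context-entry table left in memory by the previous
 * kernel (kexec/kdump handover).  Present entries are copied into a
 * freshly allocated table, marked as "copied", and their domain IDs
 * are reserved so this kernel does not hand them out again while the
 * old mappings are still in use.
 */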
3042 static int copy_context_table(struct intel_iommu *iommu,
3043                               struct root_entry *old_re,
3044                               struct context_entry **tbl,
3045                               int bus, bool ext)
3046 {
3047         int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
3048         struct context_entry *new_ce = NULL, ce;
3049         struct context_entry *old_ce = NULL;
3050         struct root_entry re;
3051         phys_addr_t old_ce_phys;
3052
3053         tbl_idx = ext ? bus * 2 : bus;
3054         memcpy(&re, old_re, sizeof(re));
3055
3056         for (devfn = 0; devfn < 256; devfn++) {
3057                 /* First calculate the correct index */
3058                 idx = (ext ? devfn * 2 : devfn) % 256;
3059
3060                 if (idx == 0) {
3061                         /* First save what we may have and clean up */
3062                         if (new_ce) {
3063                                 tbl[tbl_idx] = new_ce;
3064                                 __iommu_flush_cache(iommu, new_ce,
3065                                                     VTD_PAGE_SIZE);
3066                                 pos = 1;
3067                         }
3068
3069                         if (old_ce)
3070                                 iounmap(old_ce);
3071
3072                         ret = 0;
3073                         if (devfn < 0x80)
3074                                 old_ce_phys = root_entry_lctp(&re);
3075                         else
3076                                 old_ce_phys = root_entry_uctp(&re);
3077
3078                         if (!old_ce_phys) {
3079                                 if (ext && devfn == 0) {
3080                                         /* No LCTP, try UCTP */
3081                                         devfn = 0x7f;
3082                                         continue;
3083                                 } else {
3084                                         goto out;
3085                                 }
3086                         }
3087
3088                         ret = -ENOMEM;
3089                         old_ce = memremap(old_ce_phys, PAGE_SIZE,
3090                                         MEMREMAP_WB);
3091                         if (!old_ce)
3092                                 goto out;
3093
3094                         new_ce = alloc_pgtable_page(iommu->node);
3095                         if (!new_ce)
3096                                 goto out_unmap;
3097
3098                         ret = 0;
3099                 }
3100
3101                 /* Now copy the context entry */
3102                 memcpy(&ce, old_ce + idx, sizeof(ce));
3103
3104                 if (!__context_present(&ce))
3105                         continue;
3106
3107                 did = context_domain_id(&ce);
3108                 if (did >= 0 && did < cap_ndoms(iommu->cap))
3109                         set_bit(did, iommu->domain_ids);
3110
3111                 /*
3112                  * We need a marker for copied context entries. This
3113                  * marker needs to work for the old format as well as
3114                  * for extended context entries.
3115                  *
3116                  * Bit 67 of the context entry is used. In the old
3117                  * format this bit is available to software, in the
3118                  * extended format it is the PGE bit, but PGE is ignored
3119                  * by HW if PASIDs are disabled (and thus still
3120                  * available).
3121                  *
3122                  * So disable PASIDs first and then mark the entry
3123                  * copied. This means that we don't copy PASID
3124                  * translations from the old kernel, but this is fine as
3125                  * faults there are not fatal.
3126                  */
3127                 context_clear_pasid_enable(&ce);
3128                 context_set_copied(&ce);
3129
3130                 new_ce[idx] = ce;
3131         }
3132
3133         tbl[tbl_idx + pos] = new_ce;
3134
3135         __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3136
3137 out_unmap:
3138         memunmap(old_ce);
3139
3140 out:
3141         return ret;
3142 }
3143
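/*
 * If the previous kernel left DMA translation enabled, re-map its
 * root/context tables and copy them over, so that DMA which is still
 * in flight keeps working until the drivers reset their devices.
 */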
3144 static int copy_translation_tables(struct intel_iommu *iommu)
3145 {
3146         struct context_entry **ctxt_tbls;
3147         struct root_entry *old_rt;
3148         phys_addr_t old_rt_phys;
3149         int ctxt_table_entries;
3150         unsigned long flags;
3151         u64 rtaddr_reg;
3152         int bus, ret;
3153         bool new_ext, ext;
3154
3155         rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3156         ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
3157         new_ext    = !!ecap_ecs(iommu->ecap);
3158
3159         /*
3160          * The RTT bit can only be changed when translation is disabled,
3161          * but disabling translation would open a window for data
3162          * corruption. So bail out and don't copy anything if we would
3163          * have to change the bit.
3164          */
3165         if (new_ext != ext)
3166                 return -EINVAL;
3167
3168         old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3169         if (!old_rt_phys)
3170                 return -EINVAL;
3171
3172         old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3173         if (!old_rt)
3174                 return -ENOMEM;
3175
3176         /* This is too big for the stack - allocate it from slab */
3177         ctxt_table_entries = ext ? 512 : 256;
3178         ret = -ENOMEM;
3179         ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3180         if (!ctxt_tbls)
3181                 goto out_unmap;
3182
3183         for (bus = 0; bus < 256; bus++) {
3184                 ret = copy_context_table(iommu, &old_rt[bus],
3185                                          ctxt_tbls, bus, ext);
3186                 if (ret) {
3187                         pr_err("%s: Failed to copy context table for bus %d\n",
3188                                 iommu->name, bus);
3189                         continue;
3190                 }
3191         }
3192
3193         spin_lock_irqsave(&iommu->lock, flags);
3194
3195         /* Context tables are copied, now write them to the root_entry table */
3196         for (bus = 0; bus < 256; bus++) {
3197                 int idx = ext ? bus * 2 : bus;
3198                 u64 val;
3199
3200                 if (ctxt_tbls[idx]) {
3201                         val = virt_to_phys(ctxt_tbls[idx]) | 1;
3202                         iommu->root_entry[bus].lo = val;
3203                 }
3204
3205                 if (!ext || !ctxt_tbls[idx + 1])
3206                         continue;
3207
3208                 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3209                 iommu->root_entry[bus].hi = val;
3210         }
3211
3212         spin_unlock_irqrestore(&iommu->lock, flags);
3213
3214         kfree(ctxt_tbls);
3215
3216         __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3217
3218         ret = 0;
3219
3220 out_unmap:
3221         memunmap(old_rt);
3222
3223         return ret;
3224 }
3225
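/*
 * Boot-time bring-up of all DMAR units: allocate per-IOMMU state, set
 * up (or copy pre-enabled) root and context tables, establish the
 * static identity, RMRR and ISA mappings, then enable fault reporting
 * and translation.
 */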
3226 static int __init init_dmars(void)
3227 {
3228         struct dmar_drhd_unit *drhd;
3229         struct dmar_rmrr_unit *rmrr;
3230         bool copied_tables = false;
3231         struct device *dev;
3232         struct intel_iommu *iommu;
3233         int i, ret;
3234
3235         /*
3236          * for each drhd
3237          *    allocate root
3238          *    initialize and program root entry to not present
3239          * endfor
3240          */
3241         for_each_drhd_unit(drhd) {
3242                 /*
3243                  * Lock not needed: this is only incremented in the single-
3244                  * threaded kernel __init code path; all other accesses are
3245                  * read-only.
3246                  */
3247                 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3248                         g_num_of_iommus++;
3249                         continue;
3250                 }
3251                 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3252         }
3253
3254         /* Preallocate enough resources for IOMMU hot-addition */
3255         if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3256                 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3257
3258         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3259                         GFP_KERNEL);
3260         if (!g_iommus) {
3261                 pr_err("Allocating global iommu array failed\n");
3262                 ret = -ENOMEM;
3263                 goto error;
3264         }
3265
3266         for_each_active_iommu(iommu, drhd) {
3267                 g_iommus[iommu->seq_id] = iommu;
3268
3269                 intel_iommu_init_qi(iommu);
3270
3271                 ret = iommu_init_domains(iommu);
3272                 if (ret)
3273                         goto free_iommu;
3274
3275                 init_translation_status(iommu);
3276
3277                 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3278                         iommu_disable_translation(iommu);
3279                         clear_translation_pre_enabled(iommu);
3280                         pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3281                                 iommu->name);
3282                 }
3283
3284                 /*
3285                  * TBD:
3286                  * we could share the same root & context tables
3287                  * among all IOMMUs. Needs to be split out later.
3288                  */
3289                 ret = iommu_alloc_root_entry(iommu);
3290                 if (ret)
3291                         goto free_iommu;
3292
3293                 if (translation_pre_enabled(iommu)) {
3294                         pr_info("Translation already enabled - trying to copy translation structures\n");
3295
3296                         ret = copy_translation_tables(iommu);
3297                         if (ret) {
3298                                 /*
3299                                  * We found the IOMMU with translation
3300                                  * enabled - but failed to copy over the
3301                                  * old root-entry table. Try to proceed
3302                                  * by disabling translation now and
3303                                  * allocating a clean root-entry table.
3304                                  * This might cause DMAR faults, but
3305                                  * probably the dump will still succeed.
3306                                  */
3307                                 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3308                                        iommu->name);
3309                                 iommu_disable_translation(iommu);
3310                                 clear_translation_pre_enabled(iommu);
3311                         } else {
3312                                 pr_info("Copied translation tables from previous kernel for %s\n",
3313                                         iommu->name);
3314                                 copied_tables = true;
3315                         }
3316                 }
3317
3318                 if (!ecap_pass_through(iommu->ecap))
3319                         hw_pass_through = 0;
3320 #ifdef CONFIG_INTEL_IOMMU_SVM
3321                 if (pasid_enabled(iommu))
3322                         intel_svm_alloc_pasid_tables(iommu);
3323 #endif
3324         }
3325
3326         /*
3327          * Now that qi is enabled on all iommus, set the root entry and flush
3328          * caches. This is required on some Intel X58 chipsets, otherwise the
3329          * flush_context function will loop forever and the boot hangs.
3330          */
3331         for_each_active_iommu(iommu, drhd) {
3332                 iommu_flush_write_buffer(iommu);
3333                 iommu_set_root_entry(iommu);
3334                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3335                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3336         }
3337
3338         if (iommu_pass_through)
3339                 iommu_identity_mapping |= IDENTMAP_ALL;
3340
3341 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3342         iommu_identity_mapping |= IDENTMAP_GFX;
3343 #endif
3344
3345         check_tylersburg_isoch();
3346
3347         if (iommu_identity_mapping) {
3348                 ret = si_domain_init(hw_pass_through);
3349                 if (ret)
3350                         goto free_iommu;
3351         }
3352
3353
3354         /*
3355          * If we copied translations from a previous kernel in the kdump
3356          * case, we cannot assign the devices to domains now, as that
3357          * would eliminate the old mappings. So skip this part and defer
3358          * the assignment to device driver initialization time.
3359          */
3360         if (copied_tables)
3361                 goto domains_done;
3362
3363         /*
3364          * If pass-through is not set or not enabled, set up context entries
3365          * for identity mappings for rmrr, gfx, and isa, possibly falling
3366          * back to static identity mapping if iommu_identity_mapping is set.
3367          */
3368         if (iommu_identity_mapping) {
3369                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3370                 if (ret) {
3371                         pr_crit("Failed to setup IOMMU pass-through\n");
3372                         goto free_iommu;
3373                 }
3374         }
3375         /*
3376          * For each rmrr
3377          *   for each dev attached to rmrr
3378          *   do
3379          *     locate drhd for dev, alloc domain for dev
3380          *     allocate free domain
3381          *     allocate page table entries for rmrr
3382          *     if context not allocated for bus
3383          *           allocate and init context
3384          *           set present in root table for this bus
3385          *     init context with domain, translation etc
3386          *    endfor
3387          * endfor
3388          */
3389         pr_info("Setting RMRR:\n");
3390         for_each_rmrr_units(rmrr) {
3391                 /* some BIOSes list non-existent devices in the DMAR table. */
3392                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3393                                           i, dev) {
3394                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
3395                         if (ret)
3396                                 pr_err("Mapping reserved region failed\n");
3397                 }
3398         }
3399
3400         iommu_prepare_isa();
3401
3402 domains_done:
3403
3404         /*
3405          * for each drhd
3406          *   enable fault log
3407          *   global invalidate context cache
3408          *   global invalidate iotlb
3409          *   enable translation
3410          */
3411         for_each_iommu(iommu, drhd) {
3412                 if (drhd->ignored) {
3413                         /*
3414                          * we always have to disable PMRs or DMA may fail on
3415                          * this device
3416                          */
3417                         if (force_on)
3418                                 iommu_disable_protect_mem_regions(iommu);
3419                         continue;
3420                 }
3421
3422                 iommu_flush_write_buffer(iommu);
3423
3424 #ifdef CONFIG_INTEL_IOMMU_SVM
3425                 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3426                         ret = intel_svm_enable_prq(iommu);
3427                         if (ret)
3428                                 goto free_iommu;
3429                 }
3430 #endif
3431                 ret = dmar_set_interrupt(iommu);
3432                 if (ret)
3433                         goto free_iommu;
3434
3435                 if (!translation_pre_enabled(iommu))
3436                         iommu_enable_translation(iommu);
3437
3438                 iommu_disable_protect_mem_regions(iommu);
3439         }
3440
3441         return 0;
3442
3443 free_iommu:
3444         for_each_active_iommu(iommu, drhd) {
3445                 disable_dmar_iommu(iommu);
3446                 free_dmar_iommu(iommu);
3447         }
3448
3449         kfree(g_iommus);
3450
3451 error:
3452         return ret;
3453 }
3454
3455 /* This takes a number of _MM_ pages, not VTD pages */
3456 static unsigned long intel_alloc_iova(struct device *dev,
3457                                      struct dmar_domain *domain,
3458                                      unsigned long nrpages, uint64_t dma_mask)
3459 {
3460         unsigned long iova_pfn = 0;
3461
3462         /* Restrict dma_mask to the width that the iommu can handle */
3463         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3464         /* Ensure we reserve the whole size-aligned region */
3465         nrpages = __roundup_pow_of_two(nrpages);
3466
3467         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3468                 /*
3469                  * First try to allocate an io virtual address in
3470                  * DMA_BIT_MASK(32) and if that fails then try allocating
3471                  * from higher range
3472                  */
3473                 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3474                                            IOVA_PFN(DMA_BIT_MASK(32)), false);
3475                 if (iova_pfn)
3476                         return iova_pfn;
3477         }
3478         iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3479                                    IOVA_PFN(dma_mask), true);
3480         if (unlikely(!iova_pfn)) {
3481                 pr_err("Allocating %ld-page iova for %s failed\n",
3482                        nrpages, dev_name(dev));
3483                 return 0;
3484         }
3485
3486         return iova_pfn;
3487 }
3488
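/*
 * Return the DMA domain already attached to @dev, or allocate and
 * attach one, replaying any RMRR identity mappings the device needs.
 */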
3489 static struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3490 {
3491         struct dmar_domain *domain, *tmp;
3492         struct dmar_rmrr_unit *rmrr;
3493         struct device *i_dev;
3494         int i, ret;
3495
3496         domain = find_domain(dev);
3497         if (domain)
3498                 goto out;
3499
3500         domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3501         if (!domain)
3502                 goto out;
3503
3504         /* We have a new domain - set up possible RMRRs for the device */
3505         rcu_read_lock();
3506         for_each_rmrr_units(rmrr) {
3507                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3508                                           i, i_dev) {
3509                         if (i_dev != dev)
3510                                 continue;
3511
3512                         ret = domain_prepare_identity_map(dev, domain,
3513                                                           rmrr->base_address,
3514                                                           rmrr->end_address);
3515                         if (ret)
3516                                 dev_err(dev, "Mapping reserved region failed\n");
3517                 }
3518         }
3519         rcu_read_unlock();
3520
3521         tmp = set_domain_for_dev(dev, domain);
3522         if (!tmp || domain != tmp) {
3523                 domain_exit(domain);
3524                 domain = tmp;
3525         }
3526
3527 out:
3528
3529         if (!domain)
3530                 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3531
3532
3533         return domain;
3534 }
3535
3536 /* Check if the dev needs to go through the non-identity map and unmap process. */
3537 static int iommu_no_mapping(struct device *dev)
3538 {
3539         int found;
3540
3541         if (iommu_dummy(dev))
3542                 return 1;
3543
3544         if (!iommu_identity_mapping)
3545                 return 0;
3546
3547         found = identity_mapping(dev);
3548         if (found) {
3549                 if (iommu_should_identity_map(dev, 0))
3550                         return 1;
3551                 else {
3552                         /*
3553                          * A 32 bit DMA device is removed from si_domain and
3554                          * falls back to non-identity mapping.
3555                          */
3556                         dmar_remove_one_dev_info(si_domain, dev);
3557                         pr_info("32bit %s uses non-identity mapping\n",
3558                                 dev_name(dev));
3559                         return 0;
3560                 }
3561         } else {
3562                 /*
3563                  * If a 64 bit DMA device has been detached from a VM, the
3564                  * device is put back into si_domain for identity mapping.
3565                  */
3566                 if (iommu_should_identity_map(dev, 0)) {
3567                         int ret;
3568                         ret = domain_add_dev_info(si_domain, dev);
3569                         if (!ret) {
3570                                 pr_info("64bit %s uses identity mapping\n",
3571                                         dev_name(dev));
3572                                 return 1;
3573                         }
3574                 }
3575         }
3576
3577         return 0;
3578 }
3579
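/*
 * Map @size bytes at physical address @paddr for DMA by @dev: allocate
 * an IOVA range below @dma_mask, install the page-table entries and
 * flush the IOTLB (or write buffer) as required.  Identity-mapped and
 * dummy devices get @paddr back unchanged.
 */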
3580 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3581                                      size_t size, int dir, u64 dma_mask)
3582 {
3583         struct dmar_domain *domain;
3584         phys_addr_t start_paddr;
3585         unsigned long iova_pfn;
3586         int prot = 0;
3587         int ret;
3588         struct intel_iommu *iommu;
3589         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3590
3591         BUG_ON(dir == DMA_NONE);
3592
3593         if (iommu_no_mapping(dev))
3594                 return paddr;
3595
3596         domain = get_valid_domain_for_dev(dev);
3597         if (!domain)
3598                 return 0;
3599
3600         iommu = domain_get_iommu(domain);
3601         size = aligned_nrpages(paddr, size);
3602
3603         iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3604         if (!iova_pfn)
3605                 goto error;
3606
3607         /*
3608          * Check if DMAR supports zero-length reads on write only
3609          * mappings.
3610          */
3611         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3612                         !cap_zlr(iommu->cap))
3613                 prot |= DMA_PTE_READ;
3614         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3615                 prot |= DMA_PTE_WRITE;
3616         /*
3617          * The range paddr .. paddr + size may cover only part of a page, so
3618          * map the whole page.  Note: if two parts of one page are mapped
3619          * separately, we may end up with two guest addresses mapping to the
3620          * same host paddr, but this is not a big problem.
3621          */
3622         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3623                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3624         if (ret)
3625                 goto error;
3626
3627         /* it's a non-present to present mapping. Only flush if caching mode */
3628         if (cap_caching_mode(iommu->cap))
3629                 iommu_flush_iotlb_psi(iommu, domain,
3630                                       mm_to_dma_pfn(iova_pfn),
3631                                       size, 0, 1);
3632         else
3633                 iommu_flush_write_buffer(iommu);
3634
3635         start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3636         start_paddr += paddr & ~PAGE_MASK;
3637         return start_paddr;
3638
3639 error:
3640         if (iova_pfn)
3641                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3642         pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3643                 dev_name(dev), size, (unsigned long long)paddr, dir);
3644         return 0;
3645 }
3646
3647 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3648                                  unsigned long offset, size_t size,
3649                                  enum dma_data_direction dir,
3650                                  unsigned long attrs)
3651 {
3652         return __intel_map_single(dev, page_to_phys(page) + offset, size,
3653                                   dir, *dev->dma_mask);
3654 }
3655
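/*
 * Tear down the mapping at @dev_addr.  In strict mode the IOTLB is
 * flushed and the page-table pages freed immediately; otherwise the
 * IOVA and freelist are queued for deferred (lazy) release.
 */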
3656 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3657 {
3658         struct dmar_domain *domain;
3659         unsigned long start_pfn, last_pfn;
3660         unsigned long nrpages;
3661         unsigned long iova_pfn;
3662         struct intel_iommu *iommu;
3663         struct page *freelist;
3664
3665         if (iommu_no_mapping(dev))
3666                 return;
3667
3668         domain = find_domain(dev);
3669         BUG_ON(!domain);
3670
3671         iommu = domain_get_iommu(domain);
3672
3673         iova_pfn = IOVA_PFN(dev_addr);
3674
3675         nrpages = aligned_nrpages(dev_addr, size);
3676         start_pfn = mm_to_dma_pfn(iova_pfn);
3677         last_pfn = start_pfn + nrpages - 1;
3678
3679         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3680                  dev_name(dev), start_pfn, last_pfn);
3681
3682         freelist = domain_unmap(domain, start_pfn, last_pfn);
3683
3684         if (intel_iommu_strict) {
3685                 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3686                                       nrpages, !freelist, 0);
3687                 /* free iova */
3688                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3689                 dma_free_pagelist(freelist);
3690         } else {
3691                 queue_iova(&domain->iovad, iova_pfn, nrpages,
3692                            (unsigned long)freelist);
3693                 /*
3694                  * Queue up the release of the unmap to save roughly 1/6th of
3695                  * the CPU time used up by the iotlb flush operation...
3696                  */
3697         }
3698 }
3699
3700 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3701                              size_t size, enum dma_data_direction dir,
3702                              unsigned long attrs)
3703 {
3704         intel_unmap(dev, dev_addr, size);
3705 }
3706
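/*
 * Allocate a zeroed coherent buffer, preferring the contiguous (CMA)
 * allocator when the caller may block, and map it DMA_BIDIRECTIONAL
 * through __intel_map_single().
 */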
3707 static void *intel_alloc_coherent(struct device *dev, size_t size,
3708                                   dma_addr_t *dma_handle, gfp_t flags,
3709                                   unsigned long attrs)
3710 {
3711         struct page *page = NULL;
3712         int order;
3713
3714         size = PAGE_ALIGN(size);
3715         order = get_order(size);
3716
3717         if (!iommu_no_mapping(dev))
3718                 flags &= ~(GFP_DMA | GFP_DMA32);
3719         else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3720                 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3721                         flags |= GFP_DMA;
3722                 else
3723                         flags |= GFP_DMA32;
3724         }
3725
3726         if (gfpflags_allow_blocking(flags)) {
3727                 unsigned int count = size >> PAGE_SHIFT;
3728
3729                 page = dma_alloc_from_contiguous(dev, count, order, flags);
3730                 if (page && iommu_no_mapping(dev) &&
3731                     page_to_phys(page) + size > dev->coherent_dma_mask) {
3732                         dma_release_from_contiguous(dev, page, count);
3733                         page = NULL;
3734                 }
3735         }
3736
3737         if (!page)
3738                 page = alloc_pages(flags, order);
3739         if (!page)
3740                 return NULL;
3741         memset(page_address(page), 0, size);
3742
3743         *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3744                                          DMA_BIDIRECTIONAL,
3745                                          dev->coherent_dma_mask);
3746         if (*dma_handle)
3747                 return page_address(page);
3748         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3749                 __free_pages(page, order);
3750
3751         return NULL;
3752 }
3753
3754 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3755                                 dma_addr_t dma_handle, unsigned long attrs)
3756 {
3757         int order;
3758         struct page *page = virt_to_page(vaddr);
3759
3760         size = PAGE_ALIGN(size);
3761         order = get_order(size);
3762
3763         intel_unmap(dev, dma_handle, size);
3764         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3765                 __free_pages(page, order);
3766 }
3767
3768 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3769                            int nelems, enum dma_data_direction dir,
3770                            unsigned long attrs)
3771 {
3772         dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3773         unsigned long nrpages = 0;
3774         struct scatterlist *sg;
3775         int i;
3776
3777         for_each_sg(sglist, sg, nelems, i) {
3778                 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3779         }
3780
3781         intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3782 }
3783
3784 static int intel_nontranslate_map_sg(struct device *hddev,
3785         struct scatterlist *sglist, int nelems, int dir)
3786 {
3787         int i;
3788         struct scatterlist *sg;
3789
3790         for_each_sg(sglist, sg, nelems, i) {
3791                 BUG_ON(!sg_page(sg));
3792                 sg->dma_address = sg_phys(sg);
3793                 sg->dma_length = sg->length;
3794         }
3795         return nelems;
3796 }
3797
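/*
 * Map a scatterlist using one contiguous IOVA allocation that covers
 * all segments.  Devices that bypass translation fall back to a 1:1
 * physical mapping via intel_nontranslate_map_sg().
 */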
3798 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3799                         enum dma_data_direction dir, unsigned long attrs)
3800 {
3801         int i;
3802         struct dmar_domain *domain;
3803         size_t size = 0;
3804         int prot = 0;
3805         unsigned long iova_pfn;
3806         int ret;
3807         struct scatterlist *sg;
3808         unsigned long start_vpfn;
3809         struct intel_iommu *iommu;
3810
3811         BUG_ON(dir == DMA_NONE);
3812         if (iommu_no_mapping(dev))
3813                 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3814
3815         domain = get_valid_domain_for_dev(dev);
3816         if (!domain)
3817                 return 0;
3818
3819         iommu = domain_get_iommu(domain);
3820
3821         for_each_sg(sglist, sg, nelems, i)
3822                 size += aligned_nrpages(sg->offset, sg->length);
3823
3824         iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3825                                 *dev->dma_mask);
3826         if (!iova_pfn) {
3827                 sglist->dma_length = 0;
3828                 return 0;
3829         }
3830
3831         /*
3832          * Check if DMAR supports zero-length reads on write only
3833          * mappings.
3834          */
3835         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3836                         !cap_zlr(iommu->cap))
3837                 prot |= DMA_PTE_READ;
3838         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3839                 prot |= DMA_PTE_WRITE;
3840
3841         start_vpfn = mm_to_dma_pfn(iova_pfn);
3842
3843         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3844         if (unlikely(ret)) {
3845                 dma_pte_free_pagetable(domain, start_vpfn,
3846                                        start_vpfn + size - 1,
3847                                        agaw_to_level(domain->agaw) + 1);
3848                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3849                 return 0;
3850         }
3851
3852         /* it's a non-present to present mapping. Only flush if caching mode */
3853         if (cap_caching_mode(iommu->cap))
3854                 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3855         else
3856                 iommu_flush_write_buffer(iommu);
3857
3858         return nelems;
3859 }
3860
3861 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3862 {
3863         return !dma_addr;
3864 }
3865
3866 const struct dma_map_ops intel_dma_ops = {
3867         .alloc = intel_alloc_coherent,
3868         .free = intel_free_coherent,
3869         .map_sg = intel_map_sg,
3870         .unmap_sg = intel_unmap_sg,
3871         .map_page = intel_map_page,
3872         .unmap_page = intel_unmap_page,
3873         .mapping_error = intel_mapping_error,
3874 #ifdef CONFIG_X86
3875         .dma_supported = x86_dma_supported,
3876 #endif
3877 };
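/*
 * Drivers never call these hooks directly; they are reached through the
 * generic DMA API.  A rough, illustrative sequence:
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... device performs DMA against "handle" ...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */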
3878
3879 static inline int iommu_domain_cache_init(void)
3880 {
3881         int ret = 0;
3882
3883         iommu_domain_cache = kmem_cache_create("iommu_domain",
3884                                          sizeof(struct dmar_domain),
3885                                          0,
3886                                          SLAB_HWCACHE_ALIGN,
3887                                          NULL);
3889         if (!iommu_domain_cache) {
3890                 pr_err("Couldn't create iommu_domain cache\n");
3891                 ret = -ENOMEM;
3892         }
3893
3894         return ret;
3895 }
3896
3897 static inline int iommu_devinfo_cache_init(void)
3898 {
3899         int ret = 0;
3900
3901         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3902                                          sizeof(struct device_domain_info),
3903                                          0,
3904                                          SLAB_HWCACHE_ALIGN,
3905                                          NULL);
3906         if (!iommu_devinfo_cache) {
3907                 pr_err("Couldn't create devinfo cache\n");
3908                 ret = -ENOMEM;
3909         }
3910
3911         return ret;
3912 }
3913
3914 static int __init iommu_init_mempool(void)
3915 {
3916         int ret;
3917         ret = iova_cache_get();
3918         if (ret)
3919                 return ret;
3920
3921         ret = iommu_domain_cache_init();
3922         if (ret)
3923                 goto domain_error;
3924
3925         ret = iommu_devinfo_cache_init();
3926         if (!ret)
3927                 return ret;
3928
3929         kmem_cache_destroy(iommu_domain_cache);
3930 domain_error:
3931         iova_cache_put();
3932
3933         return -ENOMEM;
3934 }
3935
3936 static void __init iommu_exit_mempool(void)
3937 {
3938         kmem_cache_destroy(iommu_devinfo_cache);
3939         kmem_cache_destroy(iommu_domain_cache);
3940         iova_cache_put();
3941 }
3942
3943 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3944 {
3945         struct dmar_drhd_unit *drhd;
3946         u32 vtbar;
3947         int rc;
3948
3949         /* We know that this device on this chipset has its own IOMMU.
3950          * If we find it under a different IOMMU, then the BIOS is lying
3951          * to us. Hope that the IOMMU for this device is actually
3952          * disabled, and it needs no translation...
3953          */
3954         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3955         if (rc) {
3956                 /* "can't" happen */
3957                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3958                 return;
3959         }
3960         vtbar &= 0xffff0000;
3961
3962         /* we know that this iommu should be at offset 0xa000 from vtbar */
3963         drhd = dmar_find_matched_drhd_unit(pdev);
3964         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3965                             TAINT_FIRMWARE_WORKAROUND,
3966                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3967                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3968 }
3969 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3970
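/*
 * Mark DRHD units that can be ignored: units whose device scope turned
 * out to be empty, and graphics-only units when dmar_map_gfx has been
 * cleared (e.g. with intel_iommu=igfx_off).
 */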
3971 static void __init init_no_remapping_devices(void)
3972 {
3973         struct dmar_drhd_unit *drhd;
3974         struct device *dev;
3975         int i;
3976
3977         for_each_drhd_unit(drhd) {
3978                 if (!drhd->include_all) {
3979                         for_each_active_dev_scope(drhd->devices,
3980                                                   drhd->devices_cnt, i, dev)
3981                                 break;
3982                         /* ignore DMAR unit if no devices exist */
3983                         if (i == drhd->devices_cnt)
3984                                 drhd->ignored = 1;
3985                 }
3986         }
3987
3988         for_each_active_drhd_unit(drhd) {
3989                 if (drhd->include_all)
3990                         continue;
3991
3992                 for_each_active_dev_scope(drhd->devices,
3993                                           drhd->devices_cnt, i, dev)
3994                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3995                                 break;
3996                 if (i < drhd->devices_cnt)
3997                         continue;
3998
3999                 /* This IOMMU has *only* gfx devices. Either bypass it or
4000                    set the gfx_mapped flag, as appropriate */
4001                 if (dmar_map_gfx) {
4002                         intel_iommu_gfx_mapped = 1;
4003                 } else {
4004                         drhd->ignored = 1;
4005                         for_each_active_dev_scope(drhd->devices,
4006                                                   drhd->devices_cnt, i, dev)
4007                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4008                 }
4009         }
4010 }
4011
4012 #ifdef CONFIG_SUSPEND
4013 static int init_iommu_hw(void)
4014 {
4015         struct dmar_drhd_unit *drhd;
4016         struct intel_iommu *iommu = NULL;
4017
4018         for_each_active_iommu(iommu, drhd)
4019                 if (iommu->qi)
4020                         dmar_reenable_qi(iommu);
4021
4022         for_each_iommu(iommu, drhd) {
4023                 if (drhd->ignored) {
4024                         /*
4025                          * we always have to disable PMRs or DMA may fail on
4026                          * this device
4027                          */
4028                         if (force_on)
4029                                 iommu_disable_protect_mem_regions(iommu);
4030                         continue;
4031                 }
4032
4033                 iommu_flush_write_buffer(iommu);
4034
4035                 iommu_set_root_entry(iommu);
4036
4037                 iommu->flush.flush_context(iommu, 0, 0, 0,
4038                                            DMA_CCMD_GLOBAL_INVL);
4039                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4040                 iommu_enable_translation(iommu);
4041                 iommu_disable_protect_mem_regions(iommu);
4042         }
4043
4044         return 0;
4045 }
4046
4047 static void iommu_flush_all(void)
4048 {
4049         struct dmar_drhd_unit *drhd;
4050         struct intel_iommu *iommu;
4051
4052         for_each_active_iommu(iommu, drhd) {
4053                 iommu->flush.flush_context(iommu, 0, 0, 0,
4054                                            DMA_CCMD_GLOBAL_INVL);
4055                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4056                                          DMA_TLB_GLOBAL_FLUSH);
4057         }
4058 }
4059
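/*
 * Suspend: save the fault-event registers of every active IOMMU and
 * disable translation.  Resume re-programs the hardware and restores
 * the saved registers.
 */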
4060 static int iommu_suspend(void)
4061 {
4062         struct dmar_drhd_unit *drhd;
4063         struct intel_iommu *iommu = NULL;
4064         unsigned long flag;
4065
4066         for_each_active_iommu(iommu, drhd) {
4067                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4068                                                  GFP_ATOMIC);
4069                 if (!iommu->iommu_state)
4070                         goto nomem;
4071         }
4072
4073         iommu_flush_all();
4074
4075         for_each_active_iommu(iommu, drhd) {
4076                 iommu_disable_translation(iommu);
4077
4078                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4079
4080                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4081                         readl(iommu->reg + DMAR_FECTL_REG);
4082                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4083                         readl(iommu->reg + DMAR_FEDATA_REG);
4084                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4085                         readl(iommu->reg + DMAR_FEADDR_REG);
4086                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4087                         readl(iommu->reg + DMAR_FEUADDR_REG);
4088
4089                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4090         }
4091         return 0;
4092
4093 nomem:
4094         for_each_active_iommu(iommu, drhd)
4095                 kfree(iommu->iommu_state);
4096
4097         return -ENOMEM;
4098 }
4099
4100 static void iommu_resume(void)
4101 {
4102         struct dmar_drhd_unit *drhd;
4103         struct intel_iommu *iommu = NULL;
4104         unsigned long flag;
4105
4106         if (init_iommu_hw()) {
4107                 if (force_on)
4108                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4109                 else
4110                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4111                 return;
4112         }
4113
4114         for_each_active_iommu(iommu, drhd) {
4115
4116                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4117
4118                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4119                         iommu->reg + DMAR_FECTL_REG);
4120                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4121                         iommu->reg + DMAR_FEDATA_REG);
4122                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4123                         iommu->reg + DMAR_FEADDR_REG);
4124                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4125                         iommu->reg + DMAR_FEUADDR_REG);
4126
4127                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4128         }
4129
4130         for_each_active_iommu(iommu, drhd)
4131                 kfree(iommu->iommu_state);
4132 }
4133
4134 static struct syscore_ops iommu_syscore_ops = {
4135         .resume         = iommu_resume,
4136         .suspend        = iommu_suspend,
4137 };
4138
4139 static void __init init_iommu_pm_ops(void)
4140 {
4141         register_syscore_ops(&iommu_syscore_ops);
4142 }
4143
4144 #else
4145 static inline void init_iommu_pm_ops(void) {}
4146 #endif  /* CONFIG_SUSPEND */
4147
4148
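/*
 * Parse one ACPI RMRR entry: record the reserved physical range,
 * pre-allocate the corresponding IOMMU reserved region and capture the
 * device scope it applies to.
 */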
4149 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4150 {
4151         struct acpi_dmar_reserved_memory *rmrr;
4152         int prot = DMA_PTE_READ|DMA_PTE_WRITE;
4153         struct dmar_rmrr_unit *rmrru;
4154         size_t length;
4155
4156         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4157         if (!rmrru)
4158                 goto out;
4159
4160         rmrru->hdr = header;
4161         rmrr = (struct acpi_dmar_reserved_memory *)header;
4162         rmrru->base_address = rmrr->base_address;
4163         rmrru->end_address = rmrr->end_address;
4164
4165         length = rmrr->end_address - rmrr->base_address + 1;
4166         rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4167                                               IOMMU_RESV_DIRECT);
4168         if (!rmrru->resv)
4169                 goto free_rmrru;
4170
4171         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4172                                 ((void *)rmrr) + rmrr->header.length,
4173                                 &rmrru->devices_cnt);
4174         if (rmrru->devices_cnt && rmrru->devices == NULL)
4175                 goto free_all;
4176
4177         list_add(&rmrru->list, &dmar_rmrr_units);
4178
4179         return 0;
4180 free_all:
4181         kfree(rmrru->resv);
4182 free_rmrru:
4183         kfree(rmrru);
4184 out:
4185         return -ENOMEM;
4186 }
4187
4188 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4189 {
4190         struct dmar_atsr_unit *atsru;
4191         struct acpi_dmar_atsr *tmp;
4192
4193         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4194                 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4195                 if (atsr->segment != tmp->segment)
4196                         continue;
4197                 if (atsr->header.length != tmp->header.length)
4198                         continue;
4199                 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4200                         return atsru;
4201         }
4202
4203         return NULL;
4204 }
4205
4206 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4207 {
4208         struct acpi_dmar_atsr *atsr;
4209         struct dmar_atsr_unit *atsru;
4210
4211         if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
4212                 return 0;
4213
4214         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4215         atsru = dmar_find_atsr(atsr);
4216         if (atsru)
4217                 return 0;
4218
4219         atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4220         if (!atsru)
4221                 return -ENOMEM;
4222
4223         /*
4224          * If memory is allocated from slab by ACPI _DSM method, we need to
4225          * copy the memory content because the memory buffer will be freed
4226          * on return.
4227          */
4228         atsru->hdr = (void *)(atsru + 1);
4229         memcpy(atsru->hdr, hdr, hdr->length);
4230         atsru->include_all = atsr->flags & 0x1;
4231         if (!atsru->include_all) {
4232                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4233                                 (void *)atsr + atsr->header.length,
4234                                 &atsru->devices_cnt);
4235                 if (atsru->devices_cnt && atsru->devices == NULL) {
4236                         kfree(atsru);
4237                         return -ENOMEM;
4238                 }
4239         }
4240
4241         list_add_rcu(&atsru->list, &dmar_atsr_units);
4242
4243         return 0;
4244 }
4245
4246 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4247 {
4248         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4249         kfree(atsru);
4250 }
4251
4252 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4253 {
4254         struct acpi_dmar_atsr *atsr;
4255         struct dmar_atsr_unit *atsru;
4256
4257         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4258         atsru = dmar_find_atsr(atsr);
4259         if (atsru) {
4260                 list_del_rcu(&atsru->list);
4261                 synchronize_rcu();
4262                 intel_iommu_free_atsr(atsru);
4263         }
4264
4265         return 0;
4266 }
4267
4268 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4269 {
4270         int i;
4271         struct device *dev;
4272         struct acpi_dmar_atsr *atsr;
4273         struct dmar_atsr_unit *atsru;
4274
4275         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4276         atsru = dmar_find_atsr(atsr);
4277         if (!atsru)
4278                 return 0;
4279
4280         if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4281                 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4282                                           i, dev)
4283                         return -EBUSY;
4284         }
4285
4286         return 0;
4287 }
4288
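/*
 * Bring a hot-added DMAR unit into the same state init_dmars() leaves
 * boot-time units in: domain IDs, root entry, invalidation, interrupts
 * and, unless the unit is ignored, enabled translation.
 */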
4289 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4290 {
4291         int sp, ret = 0;
4292         struct intel_iommu *iommu = dmaru->iommu;
4293
4294         if (g_iommus[iommu->seq_id])
4295                 return 0;
4296
4297         if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4298                 pr_warn("%s: Doesn't support hardware pass through.\n",
4299                         iommu->name);
4300                 return -ENXIO;
4301         }
4302         if (!ecap_sc_support(iommu->ecap) &&
4303             domain_update_iommu_snooping(iommu)) {
4304                 pr_warn("%s: Doesn't support snooping.\n",
4305                         iommu->name);
4306                 return -ENXIO;
4307         }
4308         sp = domain_update_iommu_superpage(iommu) - 1;
4309         if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4310                 pr_warn("%s: Doesn't support large page.\n",
4311                         iommu->name);
4312                 return -ENXIO;
4313         }
4314
4315         /*
4316          * Disable translation if already enabled prior to OS handover.
4317          */
4318         if (iommu->gcmd & DMA_GCMD_TE)
4319                 iommu_disable_translation(iommu);
4320
4321         g_iommus[iommu->seq_id] = iommu;
4322         ret = iommu_init_domains(iommu);
4323         if (ret == 0)
4324                 ret = iommu_alloc_root_entry(iommu);
4325         if (ret)
4326                 goto out;
4327
4328 #ifdef CONFIG_INTEL_IOMMU_SVM
4329         if (pasid_enabled(iommu))
4330                 intel_svm_alloc_pasid_tables(iommu);
4331 #endif
4332
4333         if (dmaru->ignored) {
4334                 /*
4335                  * we always have to disable PMRs or DMA may fail on this device
4336                  */
4337                 if (force_on)
4338                         iommu_disable_protect_mem_regions(iommu);
4339                 return 0;
4340         }
4341
4342         intel_iommu_init_qi(iommu);
4343         iommu_flush_write_buffer(iommu);
4344
4345 #ifdef CONFIG_INTEL_IOMMU_SVM
4346         if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4347                 ret = intel_svm_enable_prq(iommu);
4348                 if (ret)
4349                         goto disable_iommu;
4350         }
4351 #endif
4352         ret = dmar_set_interrupt(iommu);
4353         if (ret)
4354                 goto disable_iommu;
4355
4356         iommu_set_root_entry(iommu);
4357         iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4358         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4359         iommu_enable_translation(iommu);
4360
4361         iommu_disable_protect_mem_regions(iommu);
4362         return 0;
4363
4364 disable_iommu:
4365         disable_dmar_iommu(iommu);
4366 out:
4367         free_dmar_iommu(iommu);
4368         return ret;
4369 }
4370
4371 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4372 {
4373         int ret = 0;
4374         struct intel_iommu *iommu = dmaru->iommu;
4375
4376         if (!intel_iommu_enabled)
4377                 return 0;
4378         if (iommu == NULL)
4379                 return -EINVAL;
4380
4381         if (insert) {
4382                 ret = intel_iommu_add(dmaru);
4383         } else {
4384                 disable_dmar_iommu(iommu);
4385                 free_dmar_iommu(iommu);
4386         }
4387
4388         return ret;
4389 }
4390
4391 static void intel_iommu_free_dmars(void)
4392 {
4393         struct dmar_rmrr_unit *rmrru, *rmrr_n;
4394         struct dmar_atsr_unit *atsru, *atsr_n;
4395
4396         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4397                 list_del(&rmrru->list);
4398                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4399                 kfree(rmrru->resv);
4400                 kfree(rmrru);
4401         }
4402
4403         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4404                 list_del(&atsru->list);
4405                 intel_iommu_free_atsr(atsru);
4406         }
4407 }
4408
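/*
 * Decide whether ATS may be used for @dev by walking up to its PCIe
 * root port and looking that port up in the ACPI ATSR entries for the
 * segment.  Returns 1 if ATS is allowed, 0 otherwise.
 */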
4409 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4410 {
4411         int i, ret = 1;
4412         struct pci_bus *bus;
4413         struct pci_dev *bridge = NULL;
4414         struct device *tmp;
4415         struct acpi_dmar_atsr *atsr;
4416         struct dmar_atsr_unit *atsru;
4417
4418         dev = pci_physfn(dev);
4419         for (bus = dev->bus; bus; bus = bus->parent) {
4420                 bridge = bus->self;
4421                 /* If it's an integrated device, allow ATS */
4422                 if (!bridge)
4423                         return 1;
4424                 /* Connected via non-PCIe: no ATS */
4425                 if (!pci_is_pcie(bridge) ||
4426                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4427                         return 0;
4428                 /* If we found the root port, look it up in the ATSR */
4429                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4430                         break;
4431         }
4432
4433         rcu_read_lock();
4434         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4435                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4436                 if (atsr->segment != pci_domain_nr(dev->bus))
4437                         continue;
4438
4439                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4440                         if (tmp == &bridge->dev)
4441                                 goto out;
4442
4443                 if (atsru->include_all)
4444                         goto out;
4445         }
4446         ret = 0;
4447 out:
4448         rcu_read_unlock();
4449
4450         return ret;
4451 }
4452
4453 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4454 {
4455         int ret = 0;
4456         struct dmar_rmrr_unit *rmrru;
4457         struct dmar_atsr_unit *atsru;
4458         struct acpi_dmar_atsr *atsr;
4459         struct acpi_dmar_reserved_memory *rmrr;
4460
4461         if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
4462                 return 0;
4463
4464         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4465                 rmrr = container_of(rmrru->hdr,
4466                                     struct acpi_dmar_reserved_memory, header);
4467                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4468                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4469                                 ((void *)rmrr) + rmrr->header.length,
4470                                 rmrr->segment, rmrru->devices,
4471                                 rmrru->devices_cnt);
4472                         if (ret < 0)
4473                                 return ret;
4474                 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4475                         dmar_remove_dev_scope(info, rmrr->segment,
4476                                 rmrru->devices, rmrru->devices_cnt);
4477                 }
4478         }
4479
4480         list_for_each_entry(atsru, &dmar_atsr_units, list) {
4481                 if (atsru->include_all)
4482                         continue;
4483
4484                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4485                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4486                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4487                                         (void *)atsr + atsr->header.length,
4488                                         atsr->segment, atsru->devices,
4489                                         atsru->devices_cnt);
4490                         if (ret > 0)
4491                                 break;
4492                         else if (ret < 0)
4493                                 return ret;
4494                 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4495                         if (dmar_remove_dev_scope(info, atsr->segment,
4496                                         atsru->devices, atsru->devices_cnt))
4497                                 break;
4498                 }
4499         }
4500
4501         return 0;
4502 }
4503
4504 /*
4505  * Here we only respond to a device being removed (BUS_NOTIFY_REMOVED_DEVICE).
4506  *
4507  * A newly added device is not attached to its DMAR domain here yet; that
4508  * happens when the device is first mapped to an IOVA.
4509  */
4510 static int device_notifier(struct notifier_block *nb,
4511                                   unsigned long action, void *data)
4512 {
4513         struct device *dev = data;
4514         struct dmar_domain *domain;
4515
4516         if (iommu_dummy(dev))
4517                 return 0;
4518
4519         if (action != BUS_NOTIFY_REMOVED_DEVICE)
4520                 return 0;
4521
4522         domain = find_domain(dev);
4523         if (!domain)
4524                 return 0;
4525
4526         dmar_remove_one_dev_info(domain, dev);
4527         if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4528                 domain_exit(domain);
4529
4530         return 0;
4531 }
4532
4533 static struct notifier_block device_nb = {
4534         .notifier_call = device_notifier,
4535 };
4536
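     /*
      * Memory hotplug notifier that keeps the si_domain identity map in sync:
      * memory going online is added to the 1:1 map, while memory going offline
      * has its IOVA range split out, unmapped and flushed from the IOTLB of
      * every active IOMMU.
      */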
4537 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4538                                        unsigned long val, void *v)
4539 {
4540         struct memory_notify *mhp = v;
4541         unsigned long long start, end;
4542         unsigned long start_vpfn, last_vpfn;
4543
4544         switch (val) {
4545         case MEM_GOING_ONLINE:
4546                 start = mhp->start_pfn << PAGE_SHIFT;
4547                 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4548                 if (iommu_domain_identity_map(si_domain, start, end)) {
4549                         pr_warn("Failed to build identity map for [%llx-%llx]\n",
4550                                 start, end);
4551                         return NOTIFY_BAD;
4552                 }
4553                 break;
4554
4555         case MEM_OFFLINE:
4556         case MEM_CANCEL_ONLINE:
4557                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4558                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4559                 while (start_vpfn <= last_vpfn) {
4560                         struct iova *iova;
4561                         struct dmar_drhd_unit *drhd;
4562                         struct intel_iommu *iommu;
4563                         struct page *freelist;
4564
4565                         iova = find_iova(&si_domain->iovad, start_vpfn);
4566                         if (iova == NULL) {
4567                                 pr_debug("Failed get IOVA for PFN %lx\n",
4568                                          start_vpfn);
4569                                 break;
4570                         }
4571
4572                         iova = split_and_remove_iova(&si_domain->iovad, iova,
4573                                                      start_vpfn, last_vpfn);
4574                         if (iova == NULL) {
4575                                 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4576                                         start_vpfn, last_vpfn);
4577                                 return NOTIFY_BAD;
4578                         }
4579
4580                         freelist = domain_unmap(si_domain, iova->pfn_lo,
4581                                                iova->pfn_hi);
4582
4583                         rcu_read_lock();
4584                         for_each_active_iommu(iommu, drhd)
4585                                 iommu_flush_iotlb_psi(iommu, si_domain,
4586                                         iova->pfn_lo, iova_size(iova),
4587                                         !freelist, 0);
4588                         rcu_read_unlock();
4589                         dma_free_pagelist(freelist);
4590
4591                         start_vpfn = iova->pfn_hi + 1;
4592                         free_iova_mem(iova);
4593                 }
4594                 break;
4595         }
4596
4597         return NOTIFY_OK;
4598 }
4599
4600 static struct notifier_block intel_iommu_memory_nb = {
4601         .notifier_call = intel_iommu_memory_notifier,
4602         .priority = 0
4603 };
4604
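     /*
      * Release the per-CPU IOVA caches of every domain on every IOMMU for the
      * given CPU, so ranges cached by an offlined CPU are returned to the
      * allocator rather than stranded.
      */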
4605 static void free_all_cpu_cached_iovas(unsigned int cpu)
4606 {
4607         int i;
4608
4609         for (i = 0; i < g_num_of_iommus; i++) {
4610                 struct intel_iommu *iommu = g_iommus[i];
4611                 struct dmar_domain *domain;
4612                 int did;
4613
4614                 if (!iommu)
4615                         continue;
4616
4617                 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4618                         domain = get_iommu_domain(iommu, (u16)did);
4619
4620                         if (!domain)
4621                                 continue;
4622                         free_cpu_cached_iovas(cpu, &domain->iovad);
4623                 }
4624         }
4625 }
4626
4627 static int intel_iommu_cpu_dead(unsigned int cpu)
4628 {
4629         free_all_cpu_cached_iovas(cpu);
4630         return 0;
4631 }
4632
4633 static void intel_disable_iommus(void)
4634 {
4635         struct intel_iommu *iommu = NULL;
4636         struct dmar_drhd_unit *drhd;
4637
4638         for_each_iommu(iommu, drhd)
4639                 iommu_disable_translation(iommu);
4640 }
4641
4642 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4643 {
4644         struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4645
4646         return container_of(iommu_dev, struct intel_iommu, iommu);
4647 }
4648
4649 static ssize_t intel_iommu_show_version(struct device *dev,
4650                                         struct device_attribute *attr,
4651                                         char *buf)
4652 {
4653         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4654         u32 ver = readl(iommu->reg + DMAR_VER_REG);
4655         return sprintf(buf, "%d:%d\n",
4656                        DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4657 }
4658 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4659
4660 static ssize_t intel_iommu_show_address(struct device *dev,
4661                                         struct device_attribute *attr,
4662                                         char *buf)
4663 {
4664         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4665         return sprintf(buf, "%llx\n", iommu->reg_phys);
4666 }
4667 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4668
4669 static ssize_t intel_iommu_show_cap(struct device *dev,
4670                                     struct device_attribute *attr,
4671                                     char *buf)
4672 {
4673         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4674         return sprintf(buf, "%llx\n", iommu->cap);
4675 }
4676 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4677
4678 static ssize_t intel_iommu_show_ecap(struct device *dev,
4679                                     struct device_attribute *attr,
4680                                     char *buf)
4681 {
4682         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4683         return sprintf(buf, "%llx\n", iommu->ecap);
4684 }
4685 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4686
4687 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4688                                       struct device_attribute *attr,
4689                                       char *buf)
4690 {
4691         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4692         return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4693 }
4694 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4695
4696 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4697                                            struct device_attribute *attr,
4698                                            char *buf)
4699 {
4700         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4701         return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4702                                                   cap_ndoms(iommu->cap)));
4703 }
4704 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4705
4706 static struct attribute *intel_iommu_attrs[] = {
4707         &dev_attr_version.attr,
4708         &dev_attr_address.attr,
4709         &dev_attr_cap.attr,
4710         &dev_attr_ecap.attr,
4711         &dev_attr_domains_supported.attr,
4712         &dev_attr_domains_used.attr,
4713         NULL,
4714 };
4715
4716 static struct attribute_group intel_iommu_group = {
4717         .name = "intel-iommu",
4718         .attrs = intel_iommu_attrs,
4719 };
4720
4721 const struct attribute_group *intel_iommu_groups[] = {
4722         &intel_iommu_group,
4723         NULL,
4724 };
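
     /*
      * These attributes are registered per IOMMU via iommu_device_sysfs_add()
      * in intel_iommu_init() below; with the group name "intel-iommu" they
      * typically show up as e.g.
      * /sys/class/iommu/dmar0/intel-iommu/{version,address,cap,ecap,...}
      * (the exact device name depends on how the DRHD units were enumerated).
      */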
4725
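     /*
      * Main VT-d initialization: parse the DMAR table and device scopes, set
      * up the IOMMUs, DMA ops and reserved IOVA ranges, then register the
      * sysfs entries and the bus, memory-hotplug and CPU-hotplug notifiers.
      */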
4726 int __init intel_iommu_init(void)
4727 {
4728         int ret = -ENODEV;
4729         struct dmar_drhd_unit *drhd;
4730         struct intel_iommu *iommu;
4731
4732         /* VT-d is required for a TXT/tboot launch, so enforce that */
4733         force_on = tboot_force_iommu();
4734
4735         if (iommu_init_mempool()) {
4736                 if (force_on)
4737                         panic("tboot: Failed to initialize iommu memory\n");
4738                 return -ENOMEM;
4739         }
4740
4741         down_write(&dmar_global_lock);
4742         if (dmar_table_init()) {
4743                 if (force_on)
4744                         panic("tboot: Failed to initialize DMAR table\n");
4745                 goto out_free_dmar;
4746         }
4747
4748         if (dmar_dev_scope_init() < 0) {
4749                 if (force_on)
4750                         panic("tboot: Failed to initialize DMAR device scope\n");
4751                 goto out_free_dmar;
4752         }
4753
4754         up_write(&dmar_global_lock);
4755
4756         /*
4757          * The bus notifier takes the dmar_global_lock, so lockdep would
4758          * complain if we registered it while still holding the lock.
4759          */
4760         dmar_register_bus_notifier();
4761
4762         down_write(&dmar_global_lock);
4763
4764         if (no_iommu || dmar_disabled) {
4765                 /*
4766                  * We exit the function here to ensure the IOMMU's remapping and
4767                  * mempool aren't set up, which means that the IOMMU's PMRs
4768                  * won't be disabled via the call to init_dmars(). So disable
4769                  * them explicitly here. The PMRs were set up by tboot prior to
4770                  * calling SENTER, but the kernel is expected to reset/tear
4771                  * down the PMRs.
4772                  */
4773                 if (intel_iommu_tboot_noforce) {
4774                         for_each_iommu(iommu, drhd)
4775                                 iommu_disable_protect_mem_regions(iommu);
4776                 }
4777
4778                 /*
4779                  * Make sure the IOMMUs are switched off, even when we
4780                  * boot into a kexec kernel and the previous kernel left
4781                  * them enabled
4782                  */
4783                 intel_disable_iommus();
4784                 goto out_free_dmar;
4785         }
4786
4787         if (list_empty(&dmar_rmrr_units))
4788                 pr_info("No RMRR found\n");
4789
4790         if (list_empty(&dmar_atsr_units))
4791                 pr_info("No ATSR found\n");
4792
4793         if (dmar_init_reserved_ranges()) {
4794                 if (force_on)
4795                         panic("tboot: Failed to reserve iommu ranges\n");
4796                 goto out_free_reserved_range;
4797         }
4798
4799         init_no_remapping_devices();
4800
4801         ret = init_dmars();
4802         if (ret) {
4803                 if (force_on)
4804                         panic("tboot: Failed to initialize DMARs\n");
4805                 pr_err("Initialization failed\n");
4806                 goto out_free_reserved_range;
4807         }
4808         up_write(&dmar_global_lock);
4809         pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4810
4811 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4812         swiotlb = 0;
4813 #endif
4814         dma_ops = &intel_dma_ops;
4815
4816         init_iommu_pm_ops();
4817
4818         for_each_active_iommu(iommu, drhd) {
4819                 iommu_device_sysfs_add(&iommu->iommu, NULL,
4820                                        intel_iommu_groups,
4821                                        "%s", iommu->name);
4822                 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4823                 iommu_device_register(&iommu->iommu);
4824         }
4825
4826         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4827         bus_register_notifier(&pci_bus_type, &device_nb);
4828         if (si_domain && !hw_pass_through)
4829                 register_memory_notifier(&intel_iommu_memory_nb);
4830         cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4831                           intel_iommu_cpu_dead);
4832         intel_iommu_enabled = 1;
4833
4834         return 0;
4835
4836 out_free_reserved_range:
4837         put_iova_domain(&reserved_iova_list);
4838 out_free_dmar:
4839         intel_iommu_free_dmars();
4840         up_write(&dmar_global_lock);
4841         iommu_exit_mempool();
4842         return ret;
4843 }
4844
4845 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4846 {
4847         struct intel_iommu *iommu = opaque;
4848
4849         domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4850         return 0;
4851 }
4852
4853 /*
4854  * NB - intel-iommu lacks any sort of reference counting for the users of
4855  * dependent devices.  If multiple endpoints have intersecting dependent
4856  * devices, unbinding the driver from any one of them will possibly leave
4857  * the others unable to operate.
4858  */
4859 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4860 {
4861         if (!iommu || !dev || !dev_is_pci(dev))
4862                 return;
4863
4864         pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4865 }
4866
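     /*
      * Tear down one device's binding to its domain: disable its device-IOTLB,
      * clear its context entries, unlink the info structure and drop the
      * domain's reference on the IOMMU.  Caller must hold device_domain_lock.
      */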
4867 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4868 {
4869         struct intel_iommu *iommu;
4870         unsigned long flags;
4871
4872         assert_spin_locked(&device_domain_lock);
4873
4874         if (WARN_ON(!info))
4875                 return;
4876
4877         iommu = info->iommu;
4878
4879         if (info->dev) {
4880                 iommu_disable_dev_iotlb(info);
4881                 domain_context_clear(iommu, info->dev);
4882         }
4883
4884         unlink_domain_info(info);
4885
4886         spin_lock_irqsave(&iommu->lock, flags);
4887         domain_detach_iommu(info->domain, iommu);
4888         spin_unlock_irqrestore(&iommu->lock, flags);
4889
4890         free_devinfo_mem(info);
4891 }
4892
4893 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4894                                      struct device *dev)
4895 {
4896         struct device_domain_info *info;
4897         unsigned long flags;
4898
4899         spin_lock_irqsave(&device_domain_lock, flags);
4900         info = dev->archdata.iommu;
4901         __dmar_remove_one_dev_info(info);
4902         spin_unlock_irqrestore(&device_domain_lock, flags);
4903 }
4904
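     /*
      * Minimal setup for an externally managed domain: initialize the IOVA
      * allocator, derive gaw/agaw from the requested guest width and allocate
      * the top-level page directory.
      */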
4905 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4906 {
4907         int adjust_width;
4908
4909         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
4910         domain_reserve_special_ranges(domain);
4911
4912         /* calculate AGAW */
4913         domain->gaw = guest_width;
4914         adjust_width = guestwidth_to_adjustwidth(guest_width);
4915         domain->agaw = width_to_agaw(adjust_width);
4916
4917         domain->iommu_coherency = 0;
4918         domain->iommu_snooping = 0;
4919         domain->iommu_superpage = 0;
4920         domain->max_addr = 0;
4921
4922         /* always allocate the top pgd */
4923         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4924         if (!domain->pgd)
4925                 return -ENOMEM;
4926         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4927         return 0;
4928 }
4929
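     /*
      * iommu_ops->domain_alloc: only IOMMU_DOMAIN_UNMANAGED is supported here;
      * such domains are backed by a virtual-machine dmar_domain initialized
      * with DEFAULT_DOMAIN_ADDRESS_WIDTH.
      */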
4930 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4931 {
4932         struct dmar_domain *dmar_domain;
4933         struct iommu_domain *domain;
4934
4935         if (type != IOMMU_DOMAIN_UNMANAGED)
4936                 return NULL;
4937
4938         dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4939         if (!dmar_domain) {
4940                 pr_err("Can't allocate dmar_domain\n");
4941                 return NULL;
4942         }
4943         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4944                 pr_err("Domain initialization failed\n");
4945                 domain_exit(dmar_domain);
4946                 return NULL;
4947         }
4948         domain_update_iommu_cap(dmar_domain);
4949
4950         domain = &dmar_domain->domain;
4951         domain->geometry.aperture_start = 0;
4952         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4953         domain->geometry.force_aperture = true;
4954
4955         return domain;
4956 }
4957
4958 static void intel_iommu_domain_free(struct iommu_domain *domain)
4959 {
4960         domain_exit(to_dmar_domain(domain));
4961 }
4962
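     /*
      * iommu_ops->attach_dev: RMRR-locked devices are refused; any existing
      * mapping for the device is torn down first, and the domain's address
      * width is clamped to what this IOMMU supports (dropping surplus page
      * table levels) before the device is added to the domain.
      */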
4963 static int intel_iommu_attach_device(struct iommu_domain *domain,
4964                                      struct device *dev)
4965 {
4966         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4967         struct intel_iommu *iommu;
4968         int addr_width;
4969         u8 bus, devfn;
4970
4971         if (device_is_rmrr_locked(dev)) {
4972                 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
4973                 return -EPERM;
4974         }
4975
4976         /* normally dev is not mapped */
4977         if (unlikely(domain_context_mapped(dev))) {
4978                 struct dmar_domain *old_domain;
4979
4980                 old_domain = find_domain(dev);
4981                 if (old_domain) {
4982                         rcu_read_lock();
4983                         dmar_remove_one_dev_info(old_domain, dev);
4984                         rcu_read_unlock();
4985
4986                         if (!domain_type_is_vm_or_si(old_domain) &&
4987                              list_empty(&old_domain->devices))
4988                                 domain_exit(old_domain);
4989                 }
4990         }
4991
4992         iommu = device_to_iommu(dev, &bus, &devfn);
4993         if (!iommu)
4994                 return -ENODEV;
4995
4996         /* check if this iommu agaw is sufficient for max mapped address */
4997         addr_width = agaw_to_width(iommu->agaw);
4998         if (addr_width > cap_mgaw(iommu->cap))
4999                 addr_width = cap_mgaw(iommu->cap);
5000
5001         if (dmar_domain->max_addr > (1LL << addr_width)) {
5002                 pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
5003                        __func__, addr_width,
5004                        dmar_domain->max_addr);
5005                 return -EFAULT;
5006         }
5007         dmar_domain->gaw = addr_width;
5008
5009         /*
5010          * Knock out extra levels of page tables if necessary
5011          */
5012         while (iommu->agaw < dmar_domain->agaw) {
5013                 struct dma_pte *pte;
5014
5015                 pte = dmar_domain->pgd;
5016                 if (dma_pte_present(pte)) {
5017                         dmar_domain->pgd = (struct dma_pte *)
5018                                 phys_to_virt(dma_pte_addr(pte));
5019                         free_pgtable_page(pte);
5020                 }
5021                 dmar_domain->agaw--;
5022         }
5023
5024         return domain_add_dev_info(dmar_domain, dev);
5025 }
5026
5027 static void intel_iommu_detach_device(struct iommu_domain *domain,
5028                                       struct device *dev)
5029 {
5030         dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
5031 }
5032
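     /*
      * iommu_ops->map: translate IOMMU_READ/WRITE/CACHE into VT-d PTE bits,
      * check that the range fits within the domain's address width, then
      * install the page table entries.
      */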
5033 static int intel_iommu_map(struct iommu_domain *domain,
5034                            unsigned long iova, phys_addr_t hpa,
5035                            size_t size, int iommu_prot)
5036 {
5037         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5038         u64 max_addr;
5039         int prot = 0;
5040         int ret;
5041
5042         if (iommu_prot & IOMMU_READ)
5043                 prot |= DMA_PTE_READ;
5044         if (iommu_prot & IOMMU_WRITE)
5045                 prot |= DMA_PTE_WRITE;
5046         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5047                 prot |= DMA_PTE_SNP;
5048
5049         max_addr = iova + size;
5050         if (dmar_domain->max_addr < max_addr) {
5051                 u64 end;
5052
5053                 /* check if minimum agaw is sufficient for mapped address */
5054                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5055                 if (end < max_addr) {
5056                         pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
5057                                __func__, dmar_domain->gaw,
5058                                max_addr);
5059                         return -EFAULT;
5060                 }
5061                 dmar_domain->max_addr = max_addr;
5062         }
5063         /* Round up size to next multiple of PAGE_SIZE, if it and
5064            the low bits of hpa would take us onto the next page */
5065         size = aligned_nrpages(hpa, size);
5066         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5067                                  hpa >> VTD_PAGE_SHIFT, size, prot);
5068         return ret;
5069 }
5070
5071 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5072                                 unsigned long iova, size_t size)
5073 {
5074         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5075         struct page *freelist = NULL;
5076         struct intel_iommu *iommu;
5077         unsigned long start_pfn, last_pfn;
5078         unsigned int npages;
5079         int iommu_id, level = 0;
5080
5081         /* Cope with horrid API which requires us to unmap more than the
5082            size argument if it happens to be a large-page mapping. */
5083         BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5084
5085         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5086                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5087
5088         start_pfn = iova >> VTD_PAGE_SHIFT;
5089         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5090
5091         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5092
5093         npages = last_pfn - start_pfn + 1;
5094
5095         for_each_domain_iommu(iommu_id, dmar_domain) {
5096                 iommu = g_iommus[iommu_id];
5097
5098                 iommu_flush_iotlb_psi(iommu, dmar_domain,
5099                                       start_pfn, npages, !freelist, 0);
5100         }
5101
5102         dma_free_pagelist(freelist);
5103
5104         if (dmar_domain->max_addr == iova + size)
5105                 dmar_domain->max_addr = iova;
5106
5107         return size;
5108 }
5109
5110 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5111                                             dma_addr_t iova)
5112 {
5113         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5114         struct dma_pte *pte;
5115         int level = 0;
5116         u64 phys = 0;
5117
5118         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5119         if (pte)
5120                 phys = dma_pte_addr(pte);
5121
5122         return phys;
5123 }
5124
5125 static bool intel_iommu_capable(enum iommu_cap cap)
5126 {
5127         if (cap == IOMMU_CAP_CACHE_COHERENCY)
5128                 return domain_update_iommu_snooping(NULL) == 1;
5129         if (cap == IOMMU_CAP_INTR_REMAP)
5130                 return irq_remapping_enabled == 1;
5131
5132         return false;
5133 }
5134
5135 static int intel_iommu_add_device(struct device *dev)
5136 {
5137         struct intel_iommu *iommu;
5138         struct iommu_group *group;
5139         u8 bus, devfn;
5140
5141         iommu = device_to_iommu(dev, &bus, &devfn);
5142         if (!iommu)
5143                 return -ENODEV;
5144
5145         iommu_device_link(&iommu->iommu, dev);
5146
5147         group = iommu_group_get_for_dev(dev);
5148
5149         if (IS_ERR(group))
5150                 return PTR_ERR(group);
5151
5152         iommu_group_put(group);
5153         return 0;
5154 }
5155
5156 static void intel_iommu_remove_device(struct device *dev)
5157 {
5158         struct intel_iommu *iommu;
5159         u8 bus, devfn;
5160
5161         iommu = device_to_iommu(dev, &bus, &devfn);
5162         if (!iommu)
5163                 return;
5164
5165         iommu_group_remove_device(dev);
5166
5167         iommu_device_unlink(&iommu->iommu, dev);
5168 }
5169
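     /*
      * Report reserved regions for @device: any RMRR ranges that list it in
      * their device scope, plus the IOAPIC/MSI window, which is always
      * reserved.
      */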
5170 static void intel_iommu_get_resv_regions(struct device *device,
5171                                          struct list_head *head)
5172 {
5173         struct iommu_resv_region *reg;
5174         struct dmar_rmrr_unit *rmrr;
5175         struct device *i_dev;
5176         int i;
5177
5178         rcu_read_lock();
5179         for_each_rmrr_units(rmrr) {
5180                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5181                                           i, i_dev) {
5182                         if (i_dev != device)
5183                                 continue;
5184
5185                         list_add_tail(&rmrr->resv->list, head);
5186                 }
5187         }
5188         rcu_read_unlock();
5189
5190         reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5191                                       IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5192                                       0, IOMMU_RESV_MSI);
5193         if (!reg)
5194                 return;
5195         list_add_tail(&reg->list, head);
5196 }
5197
5198 static void intel_iommu_put_resv_regions(struct device *dev,
5199                                          struct list_head *head)
5200 {
5201         struct iommu_resv_region *entry, *next;
5202
5203         list_for_each_entry_safe(entry, next, head, list) {
5204                 if (entry->type == IOMMU_RESV_RESERVED)
5205                         kfree(entry);
5206         }
5207 }
5208
5209 #ifdef CONFIG_INTEL_IOMMU_SVM
5210 #define MAX_NR_PASID_BITS (20)
5211 static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
5212 {
5213         /*
5214          * Convert ecap_pss to the extended context entry PTS encoding, while
5215          * also respecting the soft pasid_max value set by the iommu.
5216          * - number of PASID bits = ecap_pss + 1
5217          * - number of PASID table entries = 2^(pts + 5)
5218          * Therefore, pts = ecap_pss - 4
5219          * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
5220          */
5221         if (ecap_pss(iommu->ecap) < 5)
5222                 return 0;
5223
5224         /* pasid_max is the actual number of PASID table entries, not a bit count */
5225         return find_first_bit((unsigned long *)&iommu->pasid_max,
5226                         MAX_NR_PASID_BITS) - 5;
5227 }
5228
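     /*
      * Enable PASID support for @sdev: point its extended context entry at the
      * per-IOMMU PASID (and PASID-state) tables, set PASIDE (switching
      * pass-through contexts to a type that honours requests-with-PASID) and
      * flush the context cache.  Takes device_domain_lock and iommu->lock.
      */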
5229 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
5230 {
5231         struct device_domain_info *info;
5232         struct context_entry *context;
5233         struct dmar_domain *domain;
5234         unsigned long flags;
5235         u64 ctx_lo;
5236         int ret;
5237
5238         domain = get_valid_domain_for_dev(sdev->dev);
5239         if (!domain)
5240                 return -EINVAL;
5241
5242         spin_lock_irqsave(&device_domain_lock, flags);
5243         spin_lock(&iommu->lock);
5244
5245         ret = -EINVAL;
5246         info = sdev->dev->archdata.iommu;
5247         if (!info || !info->pasid_supported)
5248                 goto out;
5249
5250         context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5251         if (WARN_ON(!context))
5252                 goto out;
5253
5254         ctx_lo = context[0].lo;
5255
5256         sdev->did = domain->iommu_did[iommu->seq_id];
5257         sdev->sid = PCI_DEVID(info->bus, info->devfn);
5258
5259         if (!(ctx_lo & CONTEXT_PASIDE)) {
5260                 if (iommu->pasid_state_table)
5261                         context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
5262                 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
5263                         intel_iommu_get_pts(iommu);
5264
5265                 wmb();
5266                 /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
5267                  * extended to permit requests-with-PASID if the PASIDE bit
5268                  * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
5269                  * however, the PASIDE bit is ignored and requests-with-PASID
5270                  * are unconditionally blocked, which makes less sense.
5271                  * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
5272                  * "guest mode" translation types depending on whether ATS
5273                  * is available or not. Annoyingly, we can't use the new
5274                  * modes *unless* PASIDE is set. */
5275                 if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
5276                         ctx_lo &= ~CONTEXT_TT_MASK;
5277                         if (info->ats_supported)
5278                                 ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
5279                         else
5280                                 ctx_lo |= CONTEXT_TT_PT_PASID << 2;
5281                 }
5282                 ctx_lo |= CONTEXT_PASIDE;
5283                 if (iommu->pasid_state_table)
5284                         ctx_lo |= CONTEXT_DINVE;
5285                 if (info->pri_supported)
5286                         ctx_lo |= CONTEXT_PRS;
5287                 context[0].lo = ctx_lo;
5288                 wmb();
5289                 iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
5290                                            DMA_CCMD_MASK_NOBIT,
5291                                            DMA_CCMD_DEVICE_INVL);
5292         }
5293
5294         /* Enable PASID support in the device, if it wasn't already */
5295         if (!info->pasid_enabled)
5296                 iommu_enable_dev_iotlb(info);
5297
5298         if (info->ats_enabled) {
5299                 sdev->dev_iotlb = 1;
5300                 sdev->qdep = info->ats_qdep;
5301                 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
5302                         sdev->qdep = 0;
5303         }
5304         ret = 0;
5305
5306  out:
5307         spin_unlock(&iommu->lock);
5308         spin_unlock_irqrestore(&device_domain_lock, flags);
5309
5310         return ret;
5311 }
5312
5313 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5314 {
5315         struct intel_iommu *iommu;
5316         u8 bus, devfn;
5317
5318         if (iommu_dummy(dev)) {
5319                 dev_warn(dev,
5320                          "No IOMMU translation for device; cannot enable SVM\n");
5321                 return NULL;
5322         }
5323
5324         iommu = device_to_iommu(dev, &bus, &devfn);
5325         if (!iommu) {
5326                 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
5327                 return NULL;
5328         }
5329
5330         if (!iommu->pasid_table) {
5331                 dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
5332                 return NULL;
5333         }
5334
5335         return iommu;
5336 }
5337 #endif /* CONFIG_INTEL_IOMMU_SVM */
5338
5339 const struct iommu_ops intel_iommu_ops = {
5340         .capable                = intel_iommu_capable,
5341         .domain_alloc           = intel_iommu_domain_alloc,
5342         .domain_free            = intel_iommu_domain_free,
5343         .attach_dev             = intel_iommu_attach_device,
5344         .detach_dev             = intel_iommu_detach_device,
5345         .map                    = intel_iommu_map,
5346         .unmap                  = intel_iommu_unmap,
5347         .map_sg                 = default_iommu_map_sg,
5348         .iova_to_phys           = intel_iommu_iova_to_phys,
5349         .add_device             = intel_iommu_add_device,
5350         .remove_device          = intel_iommu_remove_device,
5351         .get_resv_regions       = intel_iommu_get_resv_regions,
5352         .put_resv_regions       = intel_iommu_put_resv_regions,
5353         .device_group           = pci_device_group,
5354         .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
5355 };
5356
5357 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5358 {
5359         /* G4x/GM45 integrated gfx dmar support is totally busted. */
5360         pr_info("Disabling IOMMU for graphics on this chipset\n");
5361         dmar_map_gfx = 0;
5362 }
5363
5364 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5365 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5366 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5367 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5368 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5369 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5370 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5371
5372 static void quirk_iommu_rwbf(struct pci_dev *dev)
5373 {
5374         /*
5375          * Mobile 4 Series Chipset neglects to set RWBF capability,
5376          * but needs it. Same seems to hold for the desktop versions.
5377          */
5378         pr_info("Forcing write-buffer flush capability\n");
5379         rwbf_quirk = 1;
5380 }
5381
5382 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5383 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5384 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5385 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5386 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5387 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5388 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5389
5390 #define GGC 0x52
5391 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
5392 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
5393 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
5394 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
5395 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
5396 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
5397 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
5398 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
5399
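     /*
      * Ironlake/Calpella graphics: if the BIOS allocated no stolen memory for
      * the shadow GTT, graphics DMA remapping cannot work, so disable it;
      * otherwise force strict (unbatched) IOTLB flushing, since the GPU must
      * be idle before we flush.
      */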
5400 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5401 {
5402         unsigned short ggc;
5403
5404         if (pci_read_config_word(dev, GGC, &ggc))
5405                 return;
5406
5407         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5408                 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5409                 dmar_map_gfx = 0;
5410         } else if (dmar_map_gfx) {
5411                 /* we have to ensure the gfx device is idle before we flush */
5412                 pr_info("Disabling batched IOTLB flush on Ironlake\n");
5413                 intel_iommu_strict = 1;
5414         }
5415 }
5416 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5417 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5418 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5419 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5420
5421 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5422    ISOCH DMAR unit for the Azalia sound device, but not give it any
5423    TLB entries, which causes it to deadlock. Check for that.  We do
5424    this in a function called from init_dmars(), instead of in a PCI
5425    quirk, because we don't want to print the obnoxious "BIOS broken"
5426    message if VT-d is actually disabled.
5427 */
5428 static void __init check_tylersburg_isoch(void)
5429 {
5430         struct pci_dev *pdev;
5431         uint32_t vtisochctrl;
5432
5433         /* If there's no Azalia in the system anyway, forget it. */
5434         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5435         if (!pdev)
5436                 return;
5437         pci_dev_put(pdev);
5438
5439         /* System Management Registers. Might be hidden, in which case
5440            we can't do the sanity check. But that's OK, because the
5441            known-broken BIOSes _don't_ actually hide it, so far. */
5442         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5443         if (!pdev)
5444                 return;
5445
5446         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5447                 pci_dev_put(pdev);
5448                 return;
5449         }
5450
5451         pci_dev_put(pdev);
5452
5453         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5454         if (vtisochctrl & 1)
5455                 return;
5456
5457         /* Drop all bits other than the number of TLB entries */
5458         vtisochctrl &= 0x1c;
5459
5460         /* If we have the recommended number of TLB entries (16), fine. */
5461         if (vtisochctrl == 0x10)
5462                 return;
5463
5464         /* Zero TLB entries? You get to ride the short bus to school. */
5465         if (!vtisochctrl) {
5466                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5467                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5468                      dmi_get_system_info(DMI_BIOS_VENDOR),
5469                      dmi_get_system_info(DMI_BIOS_VERSION),
5470                      dmi_get_system_info(DMI_PRODUCT_VERSION));
5471                 iommu_identity_mapping |= IDENTMAP_AZALIA;
5472                 return;
5473         }
5474
5475         pr_warn("Recommended number of TLB entries for ISOCH unit is 16; your BIOS set %d\n",
5476                vtisochctrl);
5477 }