1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  *          Joerg Roedel <jroedel@suse.de>
19  */
20
21 #define pr_fmt(fmt)     "DMAR: " fmt
22
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/cpu.h>
37 #include <linux/timer.h>
38 #include <linux/io.h>
39 #include <linux/iova.h>
40 #include <linux/iommu.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/syscore_ops.h>
43 #include <linux/tboot.h>
44 #include <linux/dmi.h>
45 #include <linux/pci-ats.h>
46 #include <linux/memblock.h>
47 #include <linux/dma-contiguous.h>
48 #include <linux/crash_dump.h>
49 #include <asm/irq_remapping.h>
50 #include <asm/cacheflush.h>
51 #include <asm/iommu.h>
52
53 #include "irq_remapping.h"
54
55 #define ROOT_SIZE               VTD_PAGE_SIZE
56 #define CONTEXT_SIZE            VTD_PAGE_SIZE
57
58 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
59 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
60 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
61 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
62
63 #define IOAPIC_RANGE_START      (0xfee00000)
64 #define IOAPIC_RANGE_END        (0xfeefffff)
65 #define IOVA_START_ADDR         (0x1000)
66
67 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
68
69 #define MAX_AGAW_WIDTH 64
70 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
71
72 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
73 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
74
75 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
76    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
77 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
78                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
79 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
80
81 /* IO virtual address start page frame number */
82 #define IOVA_START_PFN          (1)
83
84 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
85 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
86 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
87
88 /* page table handling */
89 #define LEVEL_STRIDE            (9)
90 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
91
92 /*
93  * This bitmap is used to advertise the page sizes our hardware supports
94  * to the IOMMU core, which will then use this information to split
95  * physically contiguous memory regions it is mapping into page sizes
96  * that we support.
97  *
98  * Traditionally the IOMMU core just handed us the mappings directly,
99  * after making sure the size is an order of a 4KiB page and that the
100  * mapping has natural alignment.
101  *
102  * To retain this behavior, we currently advertise that we support
103  * all page sizes that are an order of 4KiB.
104  *
105  * If at some point we'd like to utilize the IOMMU core's new behavior,
106  * we could change this to advertise the real page sizes we support.
107  */
108 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
109
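/*
 * For illustration: ~0xFFFUL has every bit from 12 upwards set, so the IOMMU
 * core sees 4KiB, 8KiB, ..., 2MiB, 1GiB and every other power-of-two size
 * >= 4KiB as "supported", and keeps handing us naturally aligned,
 * power-of-two-of-4KiB mappings exactly as it always has.
 */
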
110 static inline int agaw_to_level(int agaw)
111 {
112         return agaw + 2;
113 }
114
115 static inline int agaw_to_width(int agaw)
116 {
117         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
118 }
119
120 static inline int width_to_agaw(int width)
121 {
122         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
123 }
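
/*
 * For illustration: with the default 48-bit domain address width,
 * width_to_agaw(48) = DIV_ROUND_UP(48 - 30, 9) = 2, agaw_to_level(2) = 4
 * (a four-level page table), and agaw_to_width(2) = 30 + 2 * 9 = 48 again.
 */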
124
125 static inline unsigned int level_to_offset_bits(int level)
126 {
127         return (level - 1) * LEVEL_STRIDE;
128 }
129
130 static inline int pfn_level_offset(unsigned long pfn, int level)
131 {
132         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
133 }
134
135 static inline unsigned long level_mask(int level)
136 {
137         return -1UL << level_to_offset_bits(level);
138 }
139
140 static inline unsigned long level_size(int level)
141 {
142         return 1UL << level_to_offset_bits(level);
143 }
144
145 static inline unsigned long align_to_level(unsigned long pfn, int level)
146 {
147         return (pfn + level_size(level) - 1) & level_mask(level);
148 }
149
150 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
151 {
152         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
153 }
154
155 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
156    are never going to work. */
157 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
158 {
159         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
160 }
161
162 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
163 {
164         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
165 }
166 static inline unsigned long page_to_dma_pfn(struct page *pg)
167 {
168         return mm_to_dma_pfn(page_to_pfn(pg));
169 }
170 static inline unsigned long virt_to_dma_pfn(void *p)
171 {
172         return page_to_dma_pfn(virt_to_page(p));
173 }
174
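/*
 * For illustration: with 4KiB kernel pages, PAGE_SHIFT == VTD_PAGE_SHIFT == 12
 * and both conversions are the identity; on a hypothetical 64KiB-page kernel
 * each mm pfn would span 16 VT-d pfns, e.g. mm_to_dma_pfn(1) == 16.
 */
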
175 /* global iommu list, set NULL for ignored DMAR units */
176 static struct intel_iommu **g_iommus;
177
178 static void __init check_tylersburg_isoch(void);
179 static int rwbf_quirk;
180
181 /*
182  * set to 1 to panic kernel if can't successfully enable VT-d
183  * (used when kernel is launched w/ TXT)
184  */
185 static int force_on = 0;
186 int intel_iommu_tboot_noforce;
187
188 /*
189  * 0: Present
190  * 1-11: Reserved
191  * 12-63: Context Ptr (12 - (haw-1))
192  * 64-127: Reserved
193  */
194 struct root_entry {
195         u64     lo;
196         u64     hi;
197 };
198 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
199
200 /*
201  * Take a root_entry and return the Lower Context Table Pointer (LCTP)
202  * if marked present.
203  */
204 static phys_addr_t root_entry_lctp(struct root_entry *re)
205 {
206         if (!(re->lo & 1))
207                 return 0;
208
209         return re->lo & VTD_PAGE_MASK;
210 }
211
212 /*
213  * Take a root_entry and return the Upper Context Table Pointer (UCTP)
214  * if marked present.
215  */
216 static phys_addr_t root_entry_uctp(struct root_entry *re)
217 {
218         if (!(re->hi & 1))
219                 return 0;
220
221         return re->hi & VTD_PAGE_MASK;
222 }
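
/*
 * For illustration: a root entry whose lo field reads 0x3fe01001 is present
 * (bit 0 set), and root_entry_lctp() returns 0x3fe01000, the physical address
 * of its lower context table.
 */
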
223 /*
224  * low 64 bits:
225  * 0: present
226  * 1: fault processing disable
227  * 2-3: translation type
228  * 12-63: address space root
229  * high 64 bits:
230  * 0-2: address width
231  * 3-6: aval
232  * 8-23: domain id
233  */
234 struct context_entry {
235         u64 lo;
236         u64 hi;
237 };
238
239 static inline void context_clear_pasid_enable(struct context_entry *context)
240 {
241         context->lo &= ~(1ULL << 11);
242 }
243
244 static inline bool context_pasid_enabled(struct context_entry *context)
245 {
246         return !!(context->lo & (1ULL << 11));
247 }
248
249 static inline void context_set_copied(struct context_entry *context)
250 {
251         context->hi |= (1ull << 3);
252 }
253
254 static inline bool context_copied(struct context_entry *context)
255 {
256         return !!(context->hi & (1ULL << 3));
257 }
258
259 static inline bool __context_present(struct context_entry *context)
260 {
261         return (context->lo & 1);
262 }
263
264 static inline bool context_present(struct context_entry *context)
265 {
266         return context_pasid_enabled(context) ?
267              __context_present(context) :
268              __context_present(context) && !context_copied(context);
269 }
270
271 static inline void context_set_present(struct context_entry *context)
272 {
273         context->lo |= 1;
274 }
275
276 static inline void context_set_fault_enable(struct context_entry *context)
277 {
278         context->lo &= (((u64)-1) << 2) | 1;
279 }
280
281 static inline void context_set_translation_type(struct context_entry *context,
282                                                 unsigned long value)
283 {
284         context->lo &= (((u64)-1) << 4) | 3;
285         context->lo |= (value & 3) << 2;
286 }
287
288 static inline void context_set_address_root(struct context_entry *context,
289                                             unsigned long value)
290 {
291         context->lo &= ~VTD_PAGE_MASK;
292         context->lo |= value & VTD_PAGE_MASK;
293 }
294
295 static inline void context_set_address_width(struct context_entry *context,
296                                              unsigned long value)
297 {
298         context->hi |= value & 7;
299 }
300
301 static inline void context_set_domain_id(struct context_entry *context,
302                                          unsigned long value)
303 {
304         context->hi |= (value & ((1 << 16) - 1)) << 8;
305 }
306
307 static inline int context_domain_id(struct context_entry *c)
308 {
309         return((c->hi >> 8) & 0xffff);
310 }
311
312 static inline void context_clear_entry(struct context_entry *context)
313 {
314         context->lo = 0;
315         context->hi = 0;
316 }
317
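/*
 * Illustrative sketch only (not the driver's actual mapping path, which lives
 * in domain_context_mapping_one()): how a context entry could be composed
 * with the helpers above for ordinary multi-level translation.
 */
static inline void example_compose_context_entry(struct context_entry *ce,
						 unsigned long pgd_phys,
						 u16 did, int agaw)
{
	context_clear_entry(ce);
	context_set_domain_id(ce, did);
	context_set_address_width(ce, agaw);
	context_set_address_root(ce, pgd_phys);
	context_set_translation_type(ce, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(ce);
	context_set_present(ce);
}
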
318 /*
319  * 0: readable
320  * 1: writable
321  * 2-6: reserved
322  * 7: super page
323  * 8-10: available
324  * 11: snoop behavior
325  * 12-63: Host physical address
326  */
327 struct dma_pte {
328         u64 val;
329 };
330
331 static inline void dma_clear_pte(struct dma_pte *pte)
332 {
333         pte->val = 0;
334 }
335
336 static inline u64 dma_pte_addr(struct dma_pte *pte)
337 {
338 #ifdef CONFIG_64BIT
339         return pte->val & VTD_PAGE_MASK;
340 #else
341         /* Must have a full atomic 64-bit read */
342         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
343 #endif
344 }
345
346 static inline bool dma_pte_present(struct dma_pte *pte)
347 {
348         return (pte->val & 3) != 0;
349 }
350
351 static inline bool dma_pte_superpage(struct dma_pte *pte)
352 {
353         return (pte->val & DMA_PTE_LARGE_PAGE);
354 }
355
356 static inline int first_pte_in_page(struct dma_pte *pte)
357 {
358         return !((unsigned long)pte & ~VTD_PAGE_MASK);
359 }
360
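/*
 * For illustration: a PTE whose val is 0x234567003 has DMA_PTE_READ and
 * DMA_PTE_WRITE set, so dma_pte_present() is true, and dma_pte_addr()
 * returns host physical address 0x234567000.
 */
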
361 /*
362  * This domain is a static identity-mapping domain.
363  *      1. This domain creates a static 1:1 mapping to all usable memory.
364  *      2. It maps to each iommu if successful.
365  *      3. Each iommu maps to this domain if successful.
366  */
367 static struct dmar_domain *si_domain;
368 static int hw_pass_through = 1;
369
370 /*
371  * Domain represents a virtual machine; more than one device
372  * across iommus may be owned by one domain, e.g. a kvm guest.
373  */
374 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)
375
376 /* si_domain contains multiple devices */
377 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)
378
379 #define for_each_domain_iommu(idx, domain)                      \
380         for (idx = 0; idx < g_num_of_iommus; idx++)             \
381                 if (domain->iommu_refcnt[idx])
382
383 struct dmar_domain {
384         int     nid;                    /* node id */
385
386         unsigned        iommu_refcnt[DMAR_UNITS_SUPPORTED];
387                                         /* Refcount of devices per iommu */
388
389
390         u16             iommu_did[DMAR_UNITS_SUPPORTED];
391                                         /* Domain ids per IOMMU. Use u16 since
392                                          * domain ids are 16 bit wide according
393                                          * to VT-d spec, section 9.3 */
394
395         bool has_iotlb_device;
396         struct list_head devices;       /* all devices' list */
397         struct iova_domain iovad;       /* iova's that belong to this domain */
398
399         struct dma_pte  *pgd;           /* virtual address */
400         int             gaw;            /* max guest address width */
401
402         /* adjusted guest address width, 0 is level 2 30-bit */
403         int             agaw;
404
405         int             flags;          /* flags to find out type of domain */
406
407         int             iommu_coherency;/* indicate coherency of iommu access */
408         int             iommu_snooping; /* indicate snooping control feature*/
409         int             iommu_count;    /* reference count of iommu */
410         int             iommu_superpage;/* Level of superpages supported:
411                                            0 == 4KiB (no superpages), 1 == 2MiB,
412                                            2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
413         u64             max_addr;       /* maximum mapped address */
414
415         struct iommu_domain domain;     /* generic domain data structure for
416                                            iommu core */
417 };
418
419 /* PCI domain-device relationship */
420 struct device_domain_info {
421         struct list_head link;  /* link to domain siblings */
422         struct list_head global; /* link to global list */
423         u8 bus;                 /* PCI bus number */
424         u8 devfn;               /* PCI devfn number */
425         u8 pasid_supported:3;
426         u8 pasid_enabled:1;
427         u8 pri_supported:1;
428         u8 pri_enabled:1;
429         u8 ats_supported:1;
430         u8 ats_enabled:1;
431         u8 ats_qdep;
432         struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
433         struct intel_iommu *iommu; /* IOMMU used by this device */
434         struct dmar_domain *domain; /* pointer to domain */
435 };
436
437 struct dmar_rmrr_unit {
438         struct list_head list;          /* list of rmrr units   */
439         struct acpi_dmar_header *hdr;   /* ACPI header          */
440         u64     base_address;           /* reserved base address*/
441         u64     end_address;            /* reserved end address */
442         struct dmar_dev_scope *devices; /* target devices */
443         int     devices_cnt;            /* target device count */
444         struct iommu_resv_region *resv; /* reserved region handle */
445 };
446
447 struct dmar_atsr_unit {
448         struct list_head list;          /* list of ATSR units */
449         struct acpi_dmar_header *hdr;   /* ACPI header */
450         struct dmar_dev_scope *devices; /* target devices */
451         int devices_cnt;                /* target device count */
452         u8 include_all:1;               /* include all ports */
453 };
454
455 static LIST_HEAD(dmar_atsr_units);
456 static LIST_HEAD(dmar_rmrr_units);
457
458 #define for_each_rmrr_units(rmrr) \
459         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
460
461 static void flush_unmaps_timeout(unsigned long data);
462
463 struct deferred_flush_entry {
464         unsigned long iova_pfn;
465         unsigned long nrpages;
466         struct dmar_domain *domain;
467         struct page *freelist;
468 };
469
470 #define HIGH_WATER_MARK 250
471 struct deferred_flush_table {
472         int next;
473         struct deferred_flush_entry entries[HIGH_WATER_MARK];
474 };
475
476 struct deferred_flush_data {
477         spinlock_t lock;
478         int timer_on;
479         struct timer_list timer;
480         long size;
481         struct deferred_flush_table *tables;
482 };
483
484 DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
485
486 /* number of registered intel_iommus; bounds indexing of g_iommus */
487 static int g_num_of_iommus;
488
489 static void domain_exit(struct dmar_domain *domain);
490 static void domain_remove_dev_info(struct dmar_domain *domain);
491 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
492                                      struct device *dev);
493 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
494 static void domain_context_clear(struct intel_iommu *iommu,
495                                  struct device *dev);
496 static int domain_detach_iommu(struct dmar_domain *domain,
497                                struct intel_iommu *iommu);
498
499 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
500 int dmar_disabled = 0;
501 #else
502 int dmar_disabled = 1;
503 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
504
505 int intel_iommu_enabled = 0;
506 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
507
508 static int dmar_map_gfx = 1;
509 static int dmar_forcedac;
510 static int intel_iommu_strict;
511 static int intel_iommu_superpage = 1;
512 static int intel_iommu_ecs = 1;
513 static int intel_iommu_pasid28;
514 static int iommu_identity_mapping;
515
516 #define IDENTMAP_ALL            1
517 #define IDENTMAP_GFX            2
518 #define IDENTMAP_AZALIA         4
519
520 /* Broadwell and Skylake have broken ECS support — normal so-called "second
521  * level" translation of DMA requests-without-PASID doesn't actually happen
522  * unless you also set the NESTE bit in an extended context-entry. Which of
523  * course means that SVM doesn't work because it's trying to do nested
524  * translation of the physical addresses it finds in the process page tables,
525  * through the IOVA->phys mapping found in the "second level" page tables.
526  *
527  * The VT-d specification was retroactively changed to change the definition
528  * of the capability bits and pretend that Broadwell/Skylake never happened...
529  * but unfortunately the wrong bit was changed. It's ECS which is broken, but
530  * for some reason it was the PASID capability bit which was redefined (from
531  * bit 28 on BDW/SKL to bit 40 in future).
532  *
533  * So our test for ECS needs to eschew those implementations which set the old
534  * PASID capability bit 28, since those are the ones on which ECS is broken.
535  * Unless we are working around the 'pasid28' limitations, that is, by putting
536  * the device into passthrough mode for normal DMA and thus masking the bug.
537  */
538 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
539                             (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
540 /* PASID support is thus enabled if ECS is enabled and *either* of the old
541  * or new capability bits are set. */
542 #define pasid_enabled(iommu) (ecs_enabled(iommu) &&                     \
543                               (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
544
545 int intel_iommu_gfx_mapped;
546 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
547
548 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
549 static DEFINE_SPINLOCK(device_domain_lock);
550 static LIST_HEAD(device_domain_list);
551
552 const struct iommu_ops intel_iommu_ops;
553
554 static bool translation_pre_enabled(struct intel_iommu *iommu)
555 {
556         return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
557 }
558
559 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
560 {
561         iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
562 }
563
564 static void init_translation_status(struct intel_iommu *iommu)
565 {
566         u32 gsts;
567
568         gsts = readl(iommu->reg + DMAR_GSTS_REG);
569         if (gsts & DMA_GSTS_TES)
570                 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
571 }
572
573 /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
574 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
575 {
576         return container_of(dom, struct dmar_domain, domain);
577 }
578
579 static int __init intel_iommu_setup(char *str)
580 {
581         if (!str)
582                 return -EINVAL;
583         while (*str) {
584                 if (!strncmp(str, "on", 2)) {
585                         dmar_disabled = 0;
586                         pr_info("IOMMU enabled\n");
587                 } else if (!strncmp(str, "off", 3)) {
588                         dmar_disabled = 1;
589                         pr_info("IOMMU disabled\n");
590                 } else if (!strncmp(str, "igfx_off", 8)) {
591                         dmar_map_gfx = 0;
592                         pr_info("Disable GFX device mapping\n");
593                 } else if (!strncmp(str, "forcedac", 8)) {
594                         pr_info("Forcing DAC for PCI devices\n");
595                         dmar_forcedac = 1;
596                 } else if (!strncmp(str, "strict", 6)) {
597                         pr_info("Disable batched IOTLB flush\n");
598                         intel_iommu_strict = 1;
599                 } else if (!strncmp(str, "sp_off", 6)) {
600                         pr_info("Disable supported super page\n");
601                         intel_iommu_superpage = 0;
602                 } else if (!strncmp(str, "ecs_off", 7)) {
603                         printk(KERN_INFO
604                                 "Intel-IOMMU: disable extended context table support\n");
605                         intel_iommu_ecs = 0;
606                 } else if (!strncmp(str, "pasid28", 7)) {
607                         printk(KERN_INFO
608                                 "Intel-IOMMU: enable pre-production PASID support\n");
609                         intel_iommu_pasid28 = 1;
610                         iommu_identity_mapping |= IDENTMAP_GFX;
611                 } else if (!strncmp(str, "tboot_noforce", 13)) {
612                         printk(KERN_INFO
613                                 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
614                         intel_iommu_tboot_noforce = 1;
615                 }
616
617                 str += strcspn(str, ",");
618                 while (*str == ',')
619                         str++;
620         }
621         return 0;
622 }
623 __setup("intel_iommu=", intel_iommu_setup);
624
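/*
 * Example usage (illustrative): booting with
 *
 *	intel_iommu=on,strict,sp_off
 *
 * enables the IOMMU, disables batched IOTLB flushing and disables superpages;
 * the options are comma-separated and parsed in order by intel_iommu_setup()
 * above.
 */
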
625 static struct kmem_cache *iommu_domain_cache;
626 static struct kmem_cache *iommu_devinfo_cache;
627
628 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
629 {
630         struct dmar_domain **domains;
631         int idx = did >> 8;
632
633         domains = iommu->domains[idx];
634         if (!domains)
635                 return NULL;
636
637         return domains[did & 0xff];
638 }
639
640 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
641                              struct dmar_domain *domain)
642 {
643         struct dmar_domain **domains;
644         int idx = did >> 8;
645
646         if (!iommu->domains[idx]) {
647                 size_t size = 256 * sizeof(struct dmar_domain *);
648                 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
649         }
650
651         domains = iommu->domains[idx];
652         if (WARN_ON(!domains))
653                 return;
654         else
655                 domains[did & 0xff] = domain;
656 }
657
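/*
 * For illustration: domain ids are split into a two-level table of 256-entry
 * pages, so did 0x1234 is stored at iommu->domains[0x12][0x34]; the second
 * level is allocated lazily by set_iommu_domain().
 */
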
658 static inline void *alloc_pgtable_page(int node)
659 {
660         struct page *page;
661         void *vaddr = NULL;
662
663         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
664         if (page)
665                 vaddr = page_address(page);
666         return vaddr;
667 }
668
669 static inline void free_pgtable_page(void *vaddr)
670 {
671         free_page((unsigned long)vaddr);
672 }
673
674 static inline void *alloc_domain_mem(void)
675 {
676         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
677 }
678
679 static void free_domain_mem(void *vaddr)
680 {
681         kmem_cache_free(iommu_domain_cache, vaddr);
682 }
683
684 static inline void * alloc_devinfo_mem(void)
685 {
686         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
687 }
688
689 static inline void free_devinfo_mem(void *vaddr)
690 {
691         kmem_cache_free(iommu_devinfo_cache, vaddr);
692 }
693
694 static inline int domain_type_is_vm(struct dmar_domain *domain)
695 {
696         return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
697 }
698
699 static inline int domain_type_is_si(struct dmar_domain *domain)
700 {
701         return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
702 }
703
704 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
705 {
706         return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
707                                 DOMAIN_FLAG_STATIC_IDENTITY);
708 }
709
710 static inline int domain_pfn_supported(struct dmar_domain *domain,
711                                        unsigned long pfn)
712 {
713         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
714
715         return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
716 }
717
718 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
719 {
720         unsigned long sagaw;
721         int agaw = -1;
722
723         sagaw = cap_sagaw(iommu->cap);
724         for (agaw = width_to_agaw(max_gaw);
725              agaw >= 0; agaw--) {
726                 if (test_bit(agaw, &sagaw))
727                         break;
728         }
729
730         return agaw;
731 }
732
733 /*
734  * Calculate max SAGAW for each iommu.
735  */
736 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
737 {
738         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
739 }
740
741 /*
742  * Calculate agaw for each iommu.
743  * "SAGAW" may be different across iommus; use a default agaw, and fall
744  * back to a smaller supported agaw for iommus that don't support it.
745  */
746 int iommu_calculate_agaw(struct intel_iommu *iommu)
747 {
748         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
749 }
750
751 /* This function only returns a single iommu in a domain */
752 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
753 {
754         int iommu_id;
755
756         /* si_domain and vm domain should not get here. */
757         BUG_ON(domain_type_is_vm_or_si(domain));
758         for_each_domain_iommu(iommu_id, domain)
759                 break;
760
761         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
762                 return NULL;
763
764         return g_iommus[iommu_id];
765 }
766
767 static void domain_update_iommu_coherency(struct dmar_domain *domain)
768 {
769         struct dmar_drhd_unit *drhd;
770         struct intel_iommu *iommu;
771         bool found = false;
772         int i;
773
774         domain->iommu_coherency = 1;
775
776         for_each_domain_iommu(i, domain) {
777                 found = true;
778                 if (!ecap_coherent(g_iommus[i]->ecap)) {
779                         domain->iommu_coherency = 0;
780                         break;
781                 }
782         }
783         if (found)
784                 return;
785
786         /* No hardware attached; use lowest common denominator */
787         rcu_read_lock();
788         for_each_active_iommu(iommu, drhd) {
789                 if (!ecap_coherent(iommu->ecap)) {
790                         domain->iommu_coherency = 0;
791                         break;
792                 }
793         }
794         rcu_read_unlock();
795 }
796
797 static int domain_update_iommu_snooping(struct intel_iommu *skip)
798 {
799         struct dmar_drhd_unit *drhd;
800         struct intel_iommu *iommu;
801         int ret = 1;
802
803         rcu_read_lock();
804         for_each_active_iommu(iommu, drhd) {
805                 if (iommu != skip) {
806                         if (!ecap_sc_support(iommu->ecap)) {
807                                 ret = 0;
808                                 break;
809                         }
810                 }
811         }
812         rcu_read_unlock();
813
814         return ret;
815 }
816
817 static int domain_update_iommu_superpage(struct intel_iommu *skip)
818 {
819         struct dmar_drhd_unit *drhd;
820         struct intel_iommu *iommu;
821         int mask = 0xf;
822
823         if (!intel_iommu_superpage) {
824                 return 0;
825         }
826
827         /* set iommu_superpage to the smallest common denominator */
828         rcu_read_lock();
829         for_each_active_iommu(iommu, drhd) {
830                 if (iommu != skip) {
831                         mask &= cap_super_page_val(iommu->cap);
832                         if (!mask)
833                                 break;
834                 }
835         }
836         rcu_read_unlock();
837
838         return fls(mask);
839 }
840
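/*
 * For illustration: if every active iommu reports 2MiB and 1GiB superpage
 * support, the mask stays 0x3 and fls(0x3) = 2, i.e. iommu_superpage == 2
 * (1GiB) per the dmar_domain comment above; a single iommu without 1GiB
 * support drops the mask to 0x1 and the result to 1 (2MiB).
 */
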
841 /* Some capabilities may be different across iommus */
842 static void domain_update_iommu_cap(struct dmar_domain *domain)
843 {
844         domain_update_iommu_coherency(domain);
845         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
846         domain->iommu_superpage = domain_update_iommu_superpage(NULL);
847 }
848
849 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
850                                                        u8 bus, u8 devfn, int alloc)
851 {
852         struct root_entry *root = &iommu->root_entry[bus];
853         struct context_entry *context;
854         u64 *entry;
855
856         entry = &root->lo;
857         if (ecs_enabled(iommu)) {
858                 if (devfn >= 0x80) {
859                         devfn -= 0x80;
860                         entry = &root->hi;
861                 }
862                 devfn *= 2;
863         }
864         if (*entry & 1)
865                 context = phys_to_virt(*entry & VTD_PAGE_MASK);
866         else {
867                 unsigned long phy_addr;
868                 if (!alloc)
869                         return NULL;
870
871                 context = alloc_pgtable_page(iommu->node);
872                 if (!context)
873                         return NULL;
874
875                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
876                 phy_addr = virt_to_phys((void *)context);
877                 *entry = phy_addr | 1;
878                 __iommu_flush_cache(iommu, entry, sizeof(*entry));
879         }
880         return &context[devfn];
881 }
882
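/*
 * For illustration: in extended-context (ECS) mode each root entry covers
 * only half a bus, so devfn 0x85 selects the upper half (root->hi) and,
 * because extended context entries are twice the size, lands at index
 * (0x85 - 0x80) * 2 = 0x0a in that table.
 */
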
883 static int iommu_dummy(struct device *dev)
884 {
885         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
886 }
887
888 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
889 {
890         struct dmar_drhd_unit *drhd = NULL;
891         struct intel_iommu *iommu;
892         struct device *tmp;
893         struct pci_dev *ptmp, *pdev = NULL;
894         u16 segment = 0;
895         int i;
896
897         if (iommu_dummy(dev))
898                 return NULL;
899
900         if (dev_is_pci(dev)) {
901                 struct pci_dev *pf_pdev;
902
903                 pdev = to_pci_dev(dev);
904                 /* VFs aren't listed in scope tables; we need to look up
905                  * the PF instead to find the IOMMU. */
906                 pf_pdev = pci_physfn(pdev);
907                 dev = &pf_pdev->dev;
908                 segment = pci_domain_nr(pdev->bus);
909         } else if (has_acpi_companion(dev))
910                 dev = &ACPI_COMPANION(dev)->dev;
911
912         rcu_read_lock();
913         for_each_active_iommu(iommu, drhd) {
914                 if (pdev && segment != drhd->segment)
915                         continue;
916
917                 for_each_active_dev_scope(drhd->devices,
918                                           drhd->devices_cnt, i, tmp) {
919                         if (tmp == dev) {
920                                 /* For a VF use its original BDF# not that of the PF
921                                  * which we used for the IOMMU lookup. Strictly speaking
922                                  * we could do this for all PCI devices; we only need to
923                                  * get the BDF# from the scope table for ACPI matches. */
924                                 if (pdev && pdev->is_virtfn)
925                                         goto got_pdev;
926
927                                 *bus = drhd->devices[i].bus;
928                                 *devfn = drhd->devices[i].devfn;
929                                 goto out;
930                         }
931
932                         if (!pdev || !dev_is_pci(tmp))
933                                 continue;
934
935                         ptmp = to_pci_dev(tmp);
936                         if (ptmp->subordinate &&
937                             ptmp->subordinate->number <= pdev->bus->number &&
938                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
939                                 goto got_pdev;
940                 }
941
942                 if (pdev && drhd->include_all) {
943                 got_pdev:
944                         *bus = pdev->bus->number;
945                         *devfn = pdev->devfn;
946                         goto out;
947                 }
948         }
949         iommu = NULL;
950  out:
951         rcu_read_unlock();
952
953         return iommu;
954 }
955
956 static void domain_flush_cache(struct dmar_domain *domain,
957                                void *addr, int size)
958 {
959         if (!domain->iommu_coherency)
960                 clflush_cache_range(addr, size);
961 }
962
963 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
964 {
965         struct context_entry *context;
966         int ret = 0;
967         unsigned long flags;
968
969         spin_lock_irqsave(&iommu->lock, flags);
970         context = iommu_context_addr(iommu, bus, devfn, 0);
971         if (context)
972                 ret = context_present(context);
973         spin_unlock_irqrestore(&iommu->lock, flags);
974         return ret;
975 }
976
977 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
978 {
979         struct context_entry *context;
980         unsigned long flags;
981
982         spin_lock_irqsave(&iommu->lock, flags);
983         context = iommu_context_addr(iommu, bus, devfn, 0);
984         if (context) {
985                 context_clear_entry(context);
986                 __iommu_flush_cache(iommu, context, sizeof(*context));
987         }
988         spin_unlock_irqrestore(&iommu->lock, flags);
989 }
990
991 static void free_context_table(struct intel_iommu *iommu)
992 {
993         int i;
994         unsigned long flags;
995         struct context_entry *context;
996
997         spin_lock_irqsave(&iommu->lock, flags);
998         if (!iommu->root_entry) {
999                 goto out;
1000         }
1001         for (i = 0; i < ROOT_ENTRY_NR; i++) {
1002                 context = iommu_context_addr(iommu, i, 0, 0);
1003                 if (context)
1004                         free_pgtable_page(context);
1005
1006                 if (!ecs_enabled(iommu))
1007                         continue;
1008
1009                 context = iommu_context_addr(iommu, i, 0x80, 0);
1010                 if (context)
1011                         free_pgtable_page(context);
1012
1013         }
1014         free_pgtable_page(iommu->root_entry);
1015         iommu->root_entry = NULL;
1016 out:
1017         spin_unlock_irqrestore(&iommu->lock, flags);
1018 }
1019
1020 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
1021                                       unsigned long pfn, int *target_level)
1022 {
1023         struct dma_pte *parent, *pte = NULL;
1024         int level = agaw_to_level(domain->agaw);
1025         int offset;
1026
1027         BUG_ON(!domain->pgd);
1028
1029         if (!domain_pfn_supported(domain, pfn))
1030                 /* Address beyond IOMMU's addressing capabilities. */
1031                 return NULL;
1032
1033         parent = domain->pgd;
1034
1035         while (1) {
1036                 void *tmp_page;
1037
1038                 offset = pfn_level_offset(pfn, level);
1039                 pte = &parent[offset];
1040                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
1041                         break;
1042                 if (level == *target_level)
1043                         break;
1044
1045                 if (!dma_pte_present(pte)) {
1046                         uint64_t pteval;
1047
1048                         tmp_page = alloc_pgtable_page(domain->nid);
1049
1050                         if (!tmp_page)
1051                                 return NULL;
1052
1053                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1054                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1055                         if (cmpxchg64(&pte->val, 0ULL, pteval))
1056                                 /* Someone else set it while we were thinking; use theirs. */
1057                                 free_pgtable_page(tmp_page);
1058                         else
1059                                 domain_flush_cache(domain, pte, sizeof(*pte));
1060                 }
1061                 if (level == 1)
1062                         break;
1063
1064                 parent = phys_to_virt(dma_pte_addr(pte));
1065                 level--;
1066         }
1067
1068         if (!*target_level)
1069                 *target_level = level;
1070
1071         return pte;
1072 }
1073
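/*
 * Illustrative usage sketch only (no locking or mapping logic shown): ask
 * pfn_to_dma_pte() for the 4KiB leaf entry of an IOVA page frame, allocating
 * intermediate tables as needed.
 */
static inline struct dma_pte *example_get_leaf_pte(struct dmar_domain *domain,
						   unsigned long iov_pfn)
{
	int level = 1;	/* 1 == 4KiB leaf; 2 would request a 2MiB-capable slot */

	/* Returns NULL if the pfn is out of range or allocation fails. */
	return pfn_to_dma_pte(domain, iov_pfn, &level);
}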
1074
1075 /* return address's pte at specific level */
1076 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1077                                          unsigned long pfn,
1078                                          int level, int *large_page)
1079 {
1080         struct dma_pte *parent, *pte = NULL;
1081         int total = agaw_to_level(domain->agaw);
1082         int offset;
1083
1084         parent = domain->pgd;
1085         while (level <= total) {
1086                 offset = pfn_level_offset(pfn, total);
1087                 pte = &parent[offset];
1088                 if (level == total)
1089                         return pte;
1090
1091                 if (!dma_pte_present(pte)) {
1092                         *large_page = total;
1093                         break;
1094                 }
1095
1096                 if (dma_pte_superpage(pte)) {
1097                         *large_page = total;
1098                         return pte;
1099                 }
1100
1101                 parent = phys_to_virt(dma_pte_addr(pte));
1102                 total--;
1103         }
1104         return NULL;
1105 }
1106
1107 /* clear last level pte; a tlb flush should follow */
1108 static void dma_pte_clear_range(struct dmar_domain *domain,
1109                                 unsigned long start_pfn,
1110                                 unsigned long last_pfn)
1111 {
1112         unsigned int large_page = 1;
1113         struct dma_pte *first_pte, *pte;
1114
1115         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1116         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1117         BUG_ON(start_pfn > last_pfn);
1118
1119         /* we don't need lock here; nobody else touches the iova range */
1120         do {
1121                 large_page = 1;
1122                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1123                 if (!pte) {
1124                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1125                         continue;
1126                 }
1127                 do {
1128                         dma_clear_pte(pte);
1129                         start_pfn += lvl_to_nr_pages(large_page);
1130                         pte++;
1131                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1132
1133                 domain_flush_cache(domain, first_pte,
1134                                    (void *)pte - (void *)first_pte);
1135
1136         } while (start_pfn && start_pfn <= last_pfn);
1137 }
1138
1139 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1140                                struct dma_pte *pte, unsigned long pfn,
1141                                unsigned long start_pfn, unsigned long last_pfn)
1142 {
1143         pfn = max(start_pfn, pfn);
1144         pte = &pte[pfn_level_offset(pfn, level)];
1145
1146         do {
1147                 unsigned long level_pfn;
1148                 struct dma_pte *level_pte;
1149
1150                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1151                         goto next;
1152
1153                 level_pfn = pfn & level_mask(level);
1154                 level_pte = phys_to_virt(dma_pte_addr(pte));
1155
1156                 if (level > 2)
1157                         dma_pte_free_level(domain, level - 1, level_pte,
1158                                            level_pfn, start_pfn, last_pfn);
1159
1160                 /* If range covers entire pagetable, free it */
1161                 if (!(start_pfn > level_pfn ||
1162                       last_pfn < level_pfn + level_size(level) - 1)) {
1163                         dma_clear_pte(pte);
1164                         domain_flush_cache(domain, pte, sizeof(*pte));
1165                         free_pgtable_page(level_pte);
1166                 }
1167 next:
1168                 pfn += level_size(level);
1169         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1170 }
1171
1172 /* clear last level (leaf) ptes and free page table pages. */
1173 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1174                                    unsigned long start_pfn,
1175                                    unsigned long last_pfn)
1176 {
1177         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1178         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1179         BUG_ON(start_pfn > last_pfn);
1180
1181         dma_pte_clear_range(domain, start_pfn, last_pfn);
1182
1183         /* We don't need lock here; nobody else touches the iova range */
1184         dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1185                            domain->pgd, 0, start_pfn, last_pfn);
1186
1187         /* free pgd */
1188         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1189                 free_pgtable_page(domain->pgd);
1190                 domain->pgd = NULL;
1191         }
1192 }
1193
1194 /* When a page at a given level is being unlinked from its parent, we don't
1195    need to *modify* it at all. All we need to do is make a list of all the
1196    pages which can be freed just as soon as we've flushed the IOTLB and we
1197    know the hardware page-walk will no longer touch them.
1198    The 'pte' argument is the *parent* PTE, pointing to the page that is to
1199    be freed. */
1200 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1201                                             int level, struct dma_pte *pte,
1202                                             struct page *freelist)
1203 {
1204         struct page *pg;
1205
1206         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1207         pg->freelist = freelist;
1208         freelist = pg;
1209
1210         if (level == 1)
1211                 return freelist;
1212
1213         pte = page_address(pg);
1214         do {
1215                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1216                         freelist = dma_pte_list_pagetables(domain, level - 1,
1217                                                            pte, freelist);
1218                 pte++;
1219         } while (!first_pte_in_page(pte));
1220
1221         return freelist;
1222 }
1223
1224 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1225                                         struct dma_pte *pte, unsigned long pfn,
1226                                         unsigned long start_pfn,
1227                                         unsigned long last_pfn,
1228                                         struct page *freelist)
1229 {
1230         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1231
1232         pfn = max(start_pfn, pfn);
1233         pte = &pte[pfn_level_offset(pfn, level)];
1234
1235         do {
1236                 unsigned long level_pfn;
1237
1238                 if (!dma_pte_present(pte))
1239                         goto next;
1240
1241                 level_pfn = pfn & level_mask(level);
1242
1243                 /* If range covers entire pagetable, free it */
1244                 if (start_pfn <= level_pfn &&
1245                     last_pfn >= level_pfn + level_size(level) - 1) {
1246                         /* These subordinate page tables are going away entirely. Don't
1247                            bother to clear them; we're just going to *free* them. */
1248                         if (level > 1 && !dma_pte_superpage(pte))
1249                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1250
1251                         dma_clear_pte(pte);
1252                         if (!first_pte)
1253                                 first_pte = pte;
1254                         last_pte = pte;
1255                 } else if (level > 1) {
1256                         /* Recurse down into a level that isn't *entirely* obsolete */
1257                         freelist = dma_pte_clear_level(domain, level - 1,
1258                                                        phys_to_virt(dma_pte_addr(pte)),
1259                                                        level_pfn, start_pfn, last_pfn,
1260                                                        freelist);
1261                 }
1262 next:
1263                 pfn += level_size(level);
1264         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1265
1266         if (first_pte)
1267                 domain_flush_cache(domain, first_pte,
1268                                    (void *)++last_pte - (void *)first_pte);
1269
1270         return freelist;
1271 }
1272
1273 /* We can't just free the pages because the IOMMU may still be walking
1274    the page tables, and may have cached the intermediate levels. The
1275    pages can only be freed after the IOTLB flush has been done. */
1276 static struct page *domain_unmap(struct dmar_domain *domain,
1277                                  unsigned long start_pfn,
1278                                  unsigned long last_pfn)
1279 {
1280         struct page *freelist = NULL;
1281
1282         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1283         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1284         BUG_ON(start_pfn > last_pfn);
1285
1286         /* we don't need lock here; nobody else touches the iova range */
1287         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1288                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1289
1290         /* free pgd */
1291         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1292                 struct page *pgd_page = virt_to_page(domain->pgd);
1293                 pgd_page->freelist = freelist;
1294                 freelist = pgd_page;
1295
1296                 domain->pgd = NULL;
1297         }
1298
1299         return freelist;
1300 }
1301
1302 static void dma_free_pagelist(struct page *freelist)
1303 {
1304         struct page *pg;
1305
1306         while ((pg = freelist)) {
1307                 freelist = pg->freelist;
1308                 free_pgtable_page(page_address(pg));
1309         }
1310 }
1311
1312 /* iommu handling */
1313 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1314 {
1315         struct root_entry *root;
1316         unsigned long flags;
1317
1318         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1319         if (!root) {
1320                 pr_err("Allocating root entry for %s failed\n",
1321                         iommu->name);
1322                 return -ENOMEM;
1323         }
1324
1325         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1326
1327         spin_lock_irqsave(&iommu->lock, flags);
1328         iommu->root_entry = root;
1329         spin_unlock_irqrestore(&iommu->lock, flags);
1330
1331         return 0;
1332 }
1333
1334 static void iommu_set_root_entry(struct intel_iommu *iommu)
1335 {
1336         u64 addr;
1337         u32 sts;
1338         unsigned long flag;
1339
1340         addr = virt_to_phys(iommu->root_entry);
1341         if (ecs_enabled(iommu))
1342                 addr |= DMA_RTADDR_RTT;
1343
1344         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1345         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1346
1347         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1348
1349         /* Make sure hardware complete it */
1350         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1351                       readl, (sts & DMA_GSTS_RTPS), sts);
1352
1353         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1354 }
1355
1356 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1357 {
1358         u32 val;
1359         unsigned long flag;
1360
1361         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1362                 return;
1363
1364         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1365         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1366
1367         /* Make sure hardware complete it */
1368         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1369                       readl, (!(val & DMA_GSTS_WBFS)), val);
1370
1371         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1372 }
1373
1374 /* return value determines if we need a write buffer flush */
1375 static void __iommu_flush_context(struct intel_iommu *iommu,
1376                                   u16 did, u16 source_id, u8 function_mask,
1377                                   u64 type)
1378 {
1379         u64 val = 0;
1380         unsigned long flag;
1381
1382         switch (type) {
1383         case DMA_CCMD_GLOBAL_INVL:
1384                 val = DMA_CCMD_GLOBAL_INVL;
1385                 break;
1386         case DMA_CCMD_DOMAIN_INVL:
1387                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1388                 break;
1389         case DMA_CCMD_DEVICE_INVL:
1390                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1391                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1392                 break;
1393         default:
1394                 BUG();
1395         }
1396         val |= DMA_CCMD_ICC;
1397
1398         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1399         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1400
1401         /* Make sure hardware complete it */
1402         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1403                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1404
1405         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1406 }
1407
1408 /* return value determines if we need a write buffer flush */
1409 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1410                                 u64 addr, unsigned int size_order, u64 type)
1411 {
1412         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1413         u64 val = 0, val_iva = 0;
1414         unsigned long flag;
1415
1416         switch (type) {
1417         case DMA_TLB_GLOBAL_FLUSH:
1418                 /* global flush doesn't need to set IVA_REG */
1419                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1420                 break;
1421         case DMA_TLB_DSI_FLUSH:
1422                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1423                 break;
1424         case DMA_TLB_PSI_FLUSH:
1425                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1426                 /* IH bit is passed in as part of address */
1427                 val_iva = size_order | addr;
1428                 break;
1429         default:
1430                 BUG();
1431         }
1432         /* Note: set drain read/write */
1433 #if 0
1434         /*
1435          * This is probably to be super secure.. Looks like we can
1436          * This is probably meant to be extra safe. It looks like we can
1437          * ignore it without any impact.
1438         if (cap_read_drain(iommu->cap))
1439                 val |= DMA_TLB_READ_DRAIN;
1440 #endif
1441         if (cap_write_drain(iommu->cap))
1442                 val |= DMA_TLB_WRITE_DRAIN;
1443
1444         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1445         /* Note: Only uses first TLB reg currently */
1446         if (val_iva)
1447                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1448         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1449
1450         /* Make sure hardware complete it */
1451         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1452                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1453
1454         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1455
1456         /* check IOTLB invalidation granularity */
1457         if (DMA_TLB_IAIG(val) == 0)
1458                 pr_err("Flush IOTLB failed\n");
1459         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1460                 pr_debug("TLB flush request %Lx, actual %Lx\n",
1461                         (unsigned long long)DMA_TLB_IIRG(type),
1462                         (unsigned long long)DMA_TLB_IAIG(val));
1463 }
1464
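/*
 * For illustration: a page-selective flush of 2^3 = 8 pages starting at IOVA
 * 0x12340000 writes val_iva = 0x12340003 (address | size_order) to the IVA
 * register and then sets DMA_TLB_IVT in the command register; the wait loop
 * above returns once hardware clears the IVT bit.
 */
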
1465 static struct device_domain_info *
1466 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1467                          u8 bus, u8 devfn)
1468 {
1469         struct device_domain_info *info;
1470
1471         assert_spin_locked(&device_domain_lock);
1472
1473         if (!iommu->qi)
1474                 return NULL;
1475
1476         list_for_each_entry(info, &domain->devices, link)
1477                 if (info->iommu == iommu && info->bus == bus &&
1478                     info->devfn == devfn) {
1479                         if (info->ats_supported && info->dev)
1480                                 return info;
1481                         break;
1482                 }
1483
1484         return NULL;
1485 }
1486
1487 static void domain_update_iotlb(struct dmar_domain *domain)
1488 {
1489         struct device_domain_info *info;
1490         bool has_iotlb_device = false;
1491
1492         assert_spin_locked(&device_domain_lock);
1493
1494         list_for_each_entry(info, &domain->devices, link) {
1495                 struct pci_dev *pdev;
1496
1497                 if (!info->dev || !dev_is_pci(info->dev))
1498                         continue;
1499
1500                 pdev = to_pci_dev(info->dev);
1501                 if (pdev->ats_enabled) {
1502                         has_iotlb_device = true;
1503                         break;
1504                 }
1505         }
1506
1507         domain->has_iotlb_device = has_iotlb_device;
1508 }
1509
1510 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1511 {
1512         struct pci_dev *pdev;
1513
1514         assert_spin_locked(&device_domain_lock);
1515
1516         if (!info || !dev_is_pci(info->dev))
1517                 return;
1518
1519         pdev = to_pci_dev(info->dev);
1520
1521 #ifdef CONFIG_INTEL_IOMMU_SVM
1522         /* The PCIe spec, in its wisdom, declares that the behaviour of
1523            the device if you enable PASID support after ATS support is
1524            undefined. So always enable PASID support on devices which
1525            have it, even if we can't yet know if we're ever going to
1526            use it. */
1527         if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1528                 info->pasid_enabled = 1;
1529
1530         if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1531                 info->pri_enabled = 1;
1532 #endif
1533         if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1534                 info->ats_enabled = 1;
1535                 domain_update_iotlb(info->domain);
1536                 info->ats_qdep = pci_ats_queue_depth(pdev);
1537         }
1538 }
1539
1540 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1541 {
1542         struct pci_dev *pdev;
1543
1544         assert_spin_locked(&device_domain_lock);
1545
1546         if (!dev_is_pci(info->dev))
1547                 return;
1548
1549         pdev = to_pci_dev(info->dev);
1550
1551         if (info->ats_enabled) {
1552                 pci_disable_ats(pdev);
1553                 info->ats_enabled = 0;
1554                 domain_update_iotlb(info->domain);
1555         }
1556 #ifdef CONFIG_INTEL_IOMMU_SVM
1557         if (info->pri_enabled) {
1558                 pci_disable_pri(pdev);
1559                 info->pri_enabled = 0;
1560         }
1561         if (info->pasid_enabled) {
1562                 pci_disable_pasid(pdev);
1563                 info->pasid_enabled = 0;
1564         }
1565 #endif
1566 }
1567
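/*
 * Issue a device-IOTLB (ATS) invalidation for the given address range to
 * every ATS-enabled device in the domain.  Skipped entirely when no device
 * in the domain has ATS enabled.
 */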
1568 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1569                                   u64 addr, unsigned mask)
1570 {
1571         u16 sid, qdep;
1572         unsigned long flags;
1573         struct device_domain_info *info;
1574
1575         if (!domain->has_iotlb_device)
1576                 return;
1577
1578         spin_lock_irqsave(&device_domain_lock, flags);
1579         list_for_each_entry(info, &domain->devices, link) {
1580                 if (!info->ats_enabled)
1581                         continue;
1582
1583                 sid = info->bus << 8 | info->devfn;
1584                 qdep = info->ats_qdep;
1585                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1586         }
1587         spin_unlock_irqrestore(&device_domain_lock, flags);
1588 }
1589
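/*
 * Page-selective IOTLB invalidation for @pages pages starting at @pfn.  The
 * range is rounded up to a power of two and encoded as an address mask; for
 * example, a 3-page flush is widened to mask 2 (4 pages).  Falls back to a
 * domain-selective flush when PSI is unsupported or the mask is too large.
 */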
1590 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1591                                   struct dmar_domain *domain,
1592                                   unsigned long pfn, unsigned int pages,
1593                                   int ih, int map)
1594 {
1595         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1596         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1597         u16 did = domain->iommu_did[iommu->seq_id];
1598
1599         BUG_ON(pages == 0);
1600
1601         if (ih)
1602                 ih = 1 << 6;
1603         /*
1604          * Fall back to a domain-selective flush if there is no PSI support or
1605          * the size is too big.
1606          * PSI requires the page size to be 2^x and the base address to be
1607          * naturally aligned to that size.
1608          */
1609         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1610                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1611                                                 DMA_TLB_DSI_FLUSH);
1612         else
1613                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1614                                                 DMA_TLB_PSI_FLUSH);
1615
1616         /*
1617          * In caching mode, changes of pages from non-present to present require
1618          * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1619          */
1620         if (!cap_caching_mode(iommu->cap) || !map)
1621                 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1622                                       addr, mask);
1623 }
1624
1625 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1626 {
1627         u32 pmen;
1628         unsigned long flags;
1629
1630         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1631         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1632         pmen &= ~DMA_PMEN_EPM;
1633         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1634
1635         /* wait for the protected region status bit to clear */
1636         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1637                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1638
1639         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1640 }
1641
1642 static void iommu_enable_translation(struct intel_iommu *iommu)
1643 {
1644         u32 sts;
1645         unsigned long flags;
1646
1647         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1648         iommu->gcmd |= DMA_GCMD_TE;
1649         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1650
1651         /* Make sure hardware completes it */
1652         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1653                       readl, (sts & DMA_GSTS_TES), sts);
1654
1655         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1656 }
1657
1658 static void iommu_disable_translation(struct intel_iommu *iommu)
1659 {
1660         u32 sts;
1661         unsigned long flag;
1662
1663         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1664         iommu->gcmd &= ~DMA_GCMD_TE;
1665         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1666
1667         /* Make sure hardware completes it */
1668         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1669                       readl, (!(sts & DMA_GSTS_TES)), sts);
1670
1671         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1672 }
1673
1674
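/*
 * Allocate the per-IOMMU domain-id bitmap and the two-level domains[] array
 * (one 256-entry chunk of struct dmar_domain pointers per 256 domain ids;
 * only the first chunk is allocated up front).  Domain id 0 is reserved.
 */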
1675 static int iommu_init_domains(struct intel_iommu *iommu)
1676 {
1677         u32 ndomains, nlongs;
1678         size_t size;
1679
1680         ndomains = cap_ndoms(iommu->cap);
1681         pr_debug("%s: Number of Domains supported <%d>\n",
1682                  iommu->name, ndomains);
1683         nlongs = BITS_TO_LONGS(ndomains);
1684
1685         spin_lock_init(&iommu->lock);
1686
1687         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1688         if (!iommu->domain_ids) {
1689                 pr_err("%s: Allocating domain id array failed\n",
1690                        iommu->name);
1691                 return -ENOMEM;
1692         }
1693
1694         size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1695         iommu->domains = kzalloc(size, GFP_KERNEL);
1696
1697         if (iommu->domains) {
1698                 size = 256 * sizeof(struct dmar_domain *);
1699                 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1700         }
1701
1702         if (!iommu->domains || !iommu->domains[0]) {
1703                 pr_err("%s: Allocating domain array failed\n",
1704                        iommu->name);
1705                 kfree(iommu->domain_ids);
1706                 kfree(iommu->domains);
1707                 iommu->domain_ids = NULL;
1708                 iommu->domains    = NULL;
1709                 return -ENOMEM;
1710         }
1711
1714         /*
1715          * If Caching mode is set, then invalid translations are tagged
1716          * with domain-id 0, hence we need to pre-allocate it. We also
1717          * use domain-id 0 as a marker for non-allocated domain-id, so
1718          * make sure it is not used for a real domain.
1719          */
1720         set_bit(0, iommu->domain_ids);
1721
1722         return 0;
1723 }
1724
1725 static void disable_dmar_iommu(struct intel_iommu *iommu)
1726 {
1727         struct device_domain_info *info, *tmp;
1728         unsigned long flags;
1729
1730         if (!iommu->domains || !iommu->domain_ids)
1731                 return;
1732
1733 again:
1734         spin_lock_irqsave(&device_domain_lock, flags);
1735         list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1736                 struct dmar_domain *domain;
1737
1738                 if (info->iommu != iommu)
1739                         continue;
1740
1741                 if (!info->dev || !info->domain)
1742                         continue;
1743
1744                 domain = info->domain;
1745
1746                 __dmar_remove_one_dev_info(info);
1747
1748                 if (!domain_type_is_vm_or_si(domain)) {
1749                         /*
1750                          * The domain_exit() function can't be called under
1751                          * device_domain_lock, as it takes this lock itself.
1752                          * So release the lock here and re-run the loop
1753                          * afterwards.
1754                          */
1755                         spin_unlock_irqrestore(&device_domain_lock, flags);
1756                         domain_exit(domain);
1757                         goto again;
1758                 }
1759         }
1760         spin_unlock_irqrestore(&device_domain_lock, flags);
1761
1762         if (iommu->gcmd & DMA_GCMD_TE)
1763                 iommu_disable_translation(iommu);
1764 }
1765
1766 static void free_dmar_iommu(struct intel_iommu *iommu)
1767 {
1768         if ((iommu->domains) && (iommu->domain_ids)) {
1769                 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1770                 int i;
1771
1772                 for (i = 0; i < elems; i++)
1773                         kfree(iommu->domains[i]);
1774                 kfree(iommu->domains);
1775                 kfree(iommu->domain_ids);
1776                 iommu->domains = NULL;
1777                 iommu->domain_ids = NULL;
1778         }
1779
1780         g_iommus[iommu->seq_id] = NULL;
1781
1782         /* free context mapping */
1783         free_context_table(iommu);
1784
1785 #ifdef CONFIG_INTEL_IOMMU_SVM
1786         if (pasid_enabled(iommu)) {
1787                 if (ecap_prs(iommu->ecap))
1788                         intel_svm_finish_prq(iommu);
1789                 intel_svm_free_pasid_tables(iommu);
1790         }
1791 #endif
1792 }
1793
1794 static struct dmar_domain *alloc_domain(int flags)
1795 {
1796         struct dmar_domain *domain;
1797
1798         domain = alloc_domain_mem();
1799         if (!domain)
1800                 return NULL;
1801
1802         memset(domain, 0, sizeof(*domain));
1803         domain->nid = -1;
1804         domain->flags = flags;
1805         domain->has_iotlb_device = false;
1806         INIT_LIST_HEAD(&domain->devices);
1807
1808         return domain;
1809 }
1810
1811 /* Must be called with device_domain_lock and iommu->lock held */
1812 static int domain_attach_iommu(struct dmar_domain *domain,
1813                                struct intel_iommu *iommu)
1814 {
1815         unsigned long ndomains;
1816         int num;
1817
1818         assert_spin_locked(&device_domain_lock);
1819         assert_spin_locked(&iommu->lock);
1820
1821         domain->iommu_refcnt[iommu->seq_id] += 1;
1822         domain->iommu_count += 1;
1823         if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1824                 ndomains = cap_ndoms(iommu->cap);
1825                 num      = find_first_zero_bit(iommu->domain_ids, ndomains);
1826
1827                 if (num >= ndomains) {
1828                         pr_err("%s: No free domain ids\n", iommu->name);
1829                         domain->iommu_refcnt[iommu->seq_id] -= 1;
1830                         domain->iommu_count -= 1;
1831                         return -ENOSPC;
1832                 }
1833
1834                 set_bit(num, iommu->domain_ids);
1835                 set_iommu_domain(iommu, num, domain);
1836
1837                 domain->iommu_did[iommu->seq_id] = num;
1838                 domain->nid                      = iommu->node;
1839
1840                 domain_update_iommu_cap(domain);
1841         }
1842
1843         return 0;
1844 }
1845
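/*
 * Drop the domain's reference on @iommu; when the last reference for this
 * IOMMU goes away, release its domain id.  Returns the domain's remaining
 * overall reference count across all IOMMUs.
 */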
1846 static int domain_detach_iommu(struct dmar_domain *domain,
1847                                struct intel_iommu *iommu)
1848 {
1849         int num, count = INT_MAX;
1850
1851         assert_spin_locked(&device_domain_lock);
1852         assert_spin_locked(&iommu->lock);
1853
1854         domain->iommu_refcnt[iommu->seq_id] -= 1;
1855         count = --domain->iommu_count;
1856         if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1857                 num = domain->iommu_did[iommu->seq_id];
1858                 clear_bit(num, iommu->domain_ids);
1859                 set_iommu_domain(iommu, num, NULL);
1860
1861                 domain_update_iommu_cap(domain);
1862                 domain->iommu_did[iommu->seq_id] = 0;
1863         }
1864
1865         return count;
1866 }
1867
1868 static struct iova_domain reserved_iova_list;
1869 static struct lock_class_key reserved_rbtree_key;
1870
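/*
 * Build the global list of IOVA ranges that must never be handed out for
 * DMA: the IOAPIC MMIO window and every PCI device's MMIO resources (so
 * peer-to-peer addresses are never remapped).
 */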
1871 static int dmar_init_reserved_ranges(void)
1872 {
1873         struct pci_dev *pdev = NULL;
1874         struct iova *iova;
1875         int i;
1876
1877         init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1878                         DMA_32BIT_PFN);
1879
1880         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1881                 &reserved_rbtree_key);
1882
1883         /* IOAPIC ranges shouldn't be accessed by DMA */
1884         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1885                 IOVA_PFN(IOAPIC_RANGE_END));
1886         if (!iova) {
1887                 pr_err("Reserve IOAPIC range failed\n");
1888                 return -ENODEV;
1889         }
1890
1891         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1892         for_each_pci_dev(pdev) {
1893                 struct resource *r;
1894
1895                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1896                         r = &pdev->resource[i];
1897                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1898                                 continue;
1899                         iova = reserve_iova(&reserved_iova_list,
1900                                             IOVA_PFN(r->start),
1901                                             IOVA_PFN(r->end));
1902                         if (!iova) {
1903                                 pr_err("Reserve iova failed\n");
1904                                 return -ENODEV;
1905                         }
1906                 }
1907         }
1908         return 0;
1909 }
1910
1911 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1912 {
1913         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1914 }
1915
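/*
 * Round a guest address width up to the nearest width the page-table layout
 * can express (12 plus a multiple of the 9-bit stride), capped at 64.  For
 * example, gaw 39 or 48 is returned unchanged, while gaw 40 becomes 48.
 */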
1916 static inline int guestwidth_to_adjustwidth(int gaw)
1917 {
1918         int agaw;
1919         int r = (gaw - 12) % 9;
1920
1921         if (r == 0)
1922                 agaw = gaw;
1923         else
1924                 agaw = gaw + 9 - r;
1925         if (agaw > 64)
1926                 agaw = 64;
1927         return agaw;
1928 }
1929
1930 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1931                        int guest_width)
1932 {
1933         int adjust_width, agaw;
1934         unsigned long sagaw;
1935
1936         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1937                         DMA_32BIT_PFN);
1938         domain_reserve_special_ranges(domain);
1939
1940         /* calculate AGAW */
1941         if (guest_width > cap_mgaw(iommu->cap))
1942                 guest_width = cap_mgaw(iommu->cap);
1943         domain->gaw = guest_width;
1944         adjust_width = guestwidth_to_adjustwidth(guest_width);
1945         agaw = width_to_agaw(adjust_width);
1946         sagaw = cap_sagaw(iommu->cap);
1947         if (!test_bit(agaw, &sagaw)) {
1948                 /* hardware doesn't support it, choose a bigger one */
1949                 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1950                 agaw = find_next_bit(&sagaw, 5, agaw);
1951                 if (agaw >= 5)
1952                         return -ENODEV;
1953         }
1954         domain->agaw = agaw;
1955
1956         if (ecap_coherent(iommu->ecap))
1957                 domain->iommu_coherency = 1;
1958         else
1959                 domain->iommu_coherency = 0;
1960
1961         if (ecap_sc_support(iommu->ecap))
1962                 domain->iommu_snooping = 1;
1963         else
1964                 domain->iommu_snooping = 0;
1965
1966         if (intel_iommu_superpage)
1967                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1968         else
1969                 domain->iommu_superpage = 0;
1970
1971         domain->nid = iommu->node;
1972
1973         /* always allocate the top pgd */
1974         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1975         if (!domain->pgd)
1976                 return -ENOMEM;
1977         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1978         return 0;
1979 }
1980
1981 static void domain_exit(struct dmar_domain *domain)
1982 {
1983         struct page *freelist = NULL;
1984
1985         /* Domain 0 is reserved, so don't process it */
1986         if (!domain)
1987                 return;
1988
1989         /* Flush any lazy unmaps that may reference this domain */
1990         if (!intel_iommu_strict) {
1991                 int cpu;
1992
1993                 for_each_possible_cpu(cpu)
1994                         flush_unmaps_timeout(cpu);
1995         }
1996
1997         /* Remove associated devices and clear attached or cached domains */
1998         rcu_read_lock();
1999         domain_remove_dev_info(domain);
2000         rcu_read_unlock();
2001
2002         /* destroy iovas */
2003         put_iova_domain(&domain->iovad);
2004
2005         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
2006
2007         dma_free_pagelist(freelist);
2008
2009         free_domain_mem(domain);
2010 }
2011
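/*
 * Install the context entry for (bus, devfn) on @iommu so that DMA from the
 * device is translated through @domain's page tables (or passed through for
 * the static identity domain when hardware pass-through is in use), then
 * perform the flushes required for caching mode or kdump-copied entries.
 */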
2012 static int domain_context_mapping_one(struct dmar_domain *domain,
2013                                       struct intel_iommu *iommu,
2014                                       u8 bus, u8 devfn)
2015 {
2016         u16 did = domain->iommu_did[iommu->seq_id];
2017         int translation = CONTEXT_TT_MULTI_LEVEL;
2018         struct device_domain_info *info = NULL;
2019         struct context_entry *context;
2020         unsigned long flags;
2021         struct dma_pte *pgd;
2022         int ret, agaw;
2023
2024         WARN_ON(did == 0);
2025
2026         if (hw_pass_through && domain_type_is_si(domain))
2027                 translation = CONTEXT_TT_PASS_THROUGH;
2028
2029         pr_debug("Set context mapping for %02x:%02x.%d\n",
2030                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2031
2032         BUG_ON(!domain->pgd);
2033
2034         spin_lock_irqsave(&device_domain_lock, flags);
2035         spin_lock(&iommu->lock);
2036
2037         ret = -ENOMEM;
2038         context = iommu_context_addr(iommu, bus, devfn, 1);
2039         if (!context)
2040                 goto out_unlock;
2041
2042         ret = 0;
2043         if (context_present(context))
2044                 goto out_unlock;
2045
2046         /*
2047          * For kdump cases, old valid entries may be cached due to the
2048          * in-flight DMA and copied pgtable, but there is no unmapping
2049          * behaviour for them, thus we need an explicit cache flush for
2050          * the newly-mapped device. For kdump, at this point, the device
2051          * is supposed to finish reset at its driver probe stage, so no
2052          * in-flight DMA will exist, and we don't need to worry anymore
2053          * in-flight DMA will exist, and we don't need to worry about it
2054          * hereafter.
2055         if (context_copied(context)) {
2056                 u16 did_old = context_domain_id(context);
2057
2058                 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
2059                         iommu->flush.flush_context(iommu, did_old,
2060                                                    (((u16)bus) << 8) | devfn,
2061                                                    DMA_CCMD_MASK_NOBIT,
2062                                                    DMA_CCMD_DEVICE_INVL);
2063                         iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2064                                                  DMA_TLB_DSI_FLUSH);
2065                 }
2066         }
2067
2068         pgd = domain->pgd;
2069
2070         context_clear_entry(context);
2071         context_set_domain_id(context, did);
2072
2073         /*
2074          * Skip top levels of page tables for IOMMUs whose agaw is smaller
2075          * than the default.  Unnecessary for pass-through (PT) mode.
2076          */
2077         if (translation != CONTEXT_TT_PASS_THROUGH) {
2078                 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
2079                         ret = -ENOMEM;
2080                         pgd = phys_to_virt(dma_pte_addr(pgd));
2081                         if (!dma_pte_present(pgd))
2082                                 goto out_unlock;
2083                 }
2084
2085                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2086                 if (info && info->ats_supported)
2087                         translation = CONTEXT_TT_DEV_IOTLB;
2088                 else
2089                         translation = CONTEXT_TT_MULTI_LEVEL;
2090
2091                 context_set_address_root(context, virt_to_phys(pgd));
2092                 context_set_address_width(context, iommu->agaw);
2093         } else {
2094                 /*
2095                  * In pass-through mode, AW must be programmed to
2096                  * indicate the largest AGAW value supported by the
2097                  * hardware, and ASR is ignored by the hardware.
2098                  */
2099                 context_set_address_width(context, iommu->msagaw);
2100         }
2101
2102         context_set_translation_type(context, translation);
2103         context_set_fault_enable(context);
2104         context_set_present(context);
2105         domain_flush_cache(domain, context, sizeof(*context));
2106
2107         /*
2108          * It's a non-present to present mapping. If hardware doesn't cache
2109          * non-present entries, we only need to flush the write buffer. If it
2110          * _does_ cache non-present entries, then it does so in the special
2111          * domain #0, which we have to flush:
2112          */
2113         if (cap_caching_mode(iommu->cap)) {
2114                 iommu->flush.flush_context(iommu, 0,
2115                                            (((u16)bus) << 8) | devfn,
2116                                            DMA_CCMD_MASK_NOBIT,
2117                                            DMA_CCMD_DEVICE_INVL);
2118                 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2119         } else {
2120                 iommu_flush_write_buffer(iommu);
2121         }
2122         iommu_enable_dev_iotlb(info);
2123
2124         ret = 0;
2125
2126 out_unlock:
2127         spin_unlock(&iommu->lock);
2128         spin_unlock_irqrestore(&device_domain_lock, flags);
2129
2130         return ret;
2131 }
2132
2133 struct domain_context_mapping_data {
2134         struct dmar_domain *domain;
2135         struct intel_iommu *iommu;
2136 };
2137
2138 static int domain_context_mapping_cb(struct pci_dev *pdev,
2139                                      u16 alias, void *opaque)
2140 {
2141         struct domain_context_mapping_data *data = opaque;
2142
2143         return domain_context_mapping_one(data->domain, data->iommu,
2144                                           PCI_BUS_NUM(alias), alias & 0xff);
2145 }
2146
2147 static int
2148 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2149 {
2150         struct intel_iommu *iommu;
2151         u8 bus, devfn;
2152         struct domain_context_mapping_data data;
2153
2154         iommu = device_to_iommu(dev, &bus, &devfn);
2155         if (!iommu)
2156                 return -ENODEV;
2157
2158         if (!dev_is_pci(dev))
2159                 return domain_context_mapping_one(domain, iommu, bus, devfn);
2160
2161         data.domain = domain;
2162         data.iommu = iommu;
2163
2164         return pci_for_each_dma_alias(to_pci_dev(dev),
2165                                       &domain_context_mapping_cb, &data);
2166 }
2167
2168 static int domain_context_mapped_cb(struct pci_dev *pdev,
2169                                     u16 alias, void *opaque)
2170 {
2171         struct intel_iommu *iommu = opaque;
2172
2173         return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2174 }
2175
2176 static int domain_context_mapped(struct device *dev)
2177 {
2178         struct intel_iommu *iommu;
2179         u8 bus, devfn;
2180
2181         iommu = device_to_iommu(dev, &bus, &devfn);
2182         if (!iommu)
2183                 return -ENODEV;
2184
2185         if (!dev_is_pci(dev))
2186                 return device_context_mapped(iommu, bus, devfn);
2187
2188         return !pci_for_each_dma_alias(to_pci_dev(dev),
2189                                        domain_context_mapped_cb, iommu);
2190 }
2191
2192 /* Return the number of VT-d pages, aligned to the MM page size */
2193 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2194                                             size_t size)
2195 {
2196         host_addr &= ~PAGE_MASK;
2197         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2198 }
2199
2200 /* Return largest possible superpage level for a given mapping */
2201 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2202                                           unsigned long iov_pfn,
2203                                           unsigned long phy_pfn,
2204                                           unsigned long pages)
2205 {
2206         int support, level = 1;
2207         unsigned long pfnmerge;
2208
2209         support = domain->iommu_superpage;
2210
2211         /* To use a large page, the virtual *and* physical addresses
2212            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2213            of them will mean we have to use smaller pages. So just
2214            merge them and check both at once. */
2215         pfnmerge = iov_pfn | phy_pfn;
2216
2217         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2218                 pages >>= VTD_STRIDE_SHIFT;
2219                 if (!pages)
2220                         break;
2221                 pfnmerge >>= VTD_STRIDE_SHIFT;
2222                 level++;
2223                 support--;
2224         }
2225         return level;
2226 }
2227
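/*
 * Core mapping routine: populate @domain's page tables for @nr_pages IOVA
 * pages starting at @iov_pfn, taking the physical pages either from the
 * scatterlist @sg or, when @sg is NULL, from the contiguous range starting
 * at @phys_pfn.  Superpages are used where alignment and size allow.
 */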
2228 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2229                             struct scatterlist *sg, unsigned long phys_pfn,
2230                             unsigned long nr_pages, int prot)
2231 {
2232         struct dma_pte *first_pte = NULL, *pte = NULL;
2233         phys_addr_t uninitialized_var(pteval);
2234         unsigned long sg_res = 0;
2235         unsigned int largepage_lvl = 0;
2236         unsigned long lvl_pages = 0;
2237
2238         BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2239
2240         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2241                 return -EINVAL;
2242
2243         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2244
2245         if (!sg) {
2246                 sg_res = nr_pages;
2247                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2248         }
2249
2250         while (nr_pages > 0) {
2251                 uint64_t tmp;
2252
2253                 if (!sg_res) {
2254                         sg_res = aligned_nrpages(sg->offset, sg->length);
2255                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2256                         sg->dma_length = sg->length;
2257                         pteval = page_to_phys(sg_page(sg)) | prot;
2258                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
2259                 }
2260
2261                 if (!pte) {
2262                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2263
2264                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2265                         if (!pte)
2266                                 return -ENOMEM;
2267                         /* It is a large page */
2268                         if (largepage_lvl > 1) {
2269                                 unsigned long nr_superpages, end_pfn;
2270
2271                                 pteval |= DMA_PTE_LARGE_PAGE;
2272                                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2273
2274                                 nr_superpages = sg_res / lvl_pages;
2275                                 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2276
2277                                 /*
2278                                  * Ensure that old small page tables are
2279                                  * removed to make room for superpage(s).
2280                                  */
2281                                 dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
2282                         } else {
2283                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2284                         }
2285
2286                 }
2287                 /* We don't need a lock here; nobody else
2288                  * touches this iova range.
2289                  */
2290                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2291                 if (tmp) {
2292                         static int dumps = 5;
2293                         pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2294                                 iov_pfn, tmp, (unsigned long long)pteval);
2295                         if (dumps) {
2296                                 dumps--;
2297                                 debug_dma_dump_mappings(NULL);
2298                         }
2299                         WARN_ON(1);
2300                 }
2301
2302                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2303
2304                 BUG_ON(nr_pages < lvl_pages);
2305                 BUG_ON(sg_res < lvl_pages);
2306
2307                 nr_pages -= lvl_pages;
2308                 iov_pfn += lvl_pages;
2309                 phys_pfn += lvl_pages;
2310                 pteval += lvl_pages * VTD_PAGE_SIZE;
2311                 sg_res -= lvl_pages;
2312
2313                 /* If the next PTE would be the first in a new page, then we
2314                    need to flush the cache on the entries we've just written.
2315                    And then we'll need to recalculate 'pte', so clear it and
2316                    let it get set again in the if (!pte) block above.
2317
2318                    If we're done (!nr_pages) we need to flush the cache too.
2319
2320                    Also if we've been setting superpages, we may need to
2321                    recalculate 'pte' and switch back to smaller pages for the
2322                    end of the mapping, if the trailing size is not enough to
2323                    use another superpage (i.e. sg_res < lvl_pages). */
2324                 pte++;
2325                 if (!nr_pages || first_pte_in_page(pte) ||
2326                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2327                         domain_flush_cache(domain, first_pte,
2328                                            (void *)pte - (void *)first_pte);
2329                         pte = NULL;
2330                 }
2331
2332                 if (!sg_res && nr_pages)
2333                         sg = sg_next(sg);
2334         }
2335         return 0;
2336 }
2337
2338 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2339                                     struct scatterlist *sg, unsigned long nr_pages,
2340                                     int prot)
2341 {
2342         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2343 }
2344
2345 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2346                                      unsigned long phys_pfn, unsigned long nr_pages,
2347                                      int prot)
2348 {
2349         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2350 }
2351
2352 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2353 {
2354         if (!iommu)
2355                 return;
2356
2357         clear_context_table(iommu, bus, devfn);
2358         iommu->flush.flush_context(iommu, 0, 0, 0,
2359                                            DMA_CCMD_GLOBAL_INVL);
2360         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2361 }
2362
2363 static inline void unlink_domain_info(struct device_domain_info *info)
2364 {
2365         assert_spin_locked(&device_domain_lock);
2366         list_del(&info->link);
2367         list_del(&info->global);
2368         if (info->dev)
2369                 info->dev->archdata.iommu = NULL;
2370 }
2371
2372 static void domain_remove_dev_info(struct dmar_domain *domain)
2373 {
2374         struct device_domain_info *info, *tmp;
2375         unsigned long flags;
2376
2377         spin_lock_irqsave(&device_domain_lock, flags);
2378         list_for_each_entry_safe(info, tmp, &domain->devices, link)
2379                 __dmar_remove_one_dev_info(info);
2380         spin_unlock_irqrestore(&device_domain_lock, flags);
2381 }
2382
2383 /*
2384  * find_domain
2385  * Note: struct device->archdata.iommu stores the device_domain_info
2386  */
2387 static struct dmar_domain *find_domain(struct device *dev)
2388 {
2389         struct device_domain_info *info;
2390
2391         /* No lock here, assumes no domain exit in normal case */
2392         info = dev->archdata.iommu;
2393         if (info)
2394                 return info->domain;
2395         return NULL;
2396 }
2397
2398 static inline struct device_domain_info *
2399 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2400 {
2401         struct device_domain_info *info;
2402
2403         list_for_each_entry(info, &device_domain_list, global)
2404                 if (info->iommu->segment == segment && info->bus == bus &&
2405                     info->devfn == devfn)
2406                         return info;
2407
2408         return NULL;
2409 }
2410
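/*
 * Bind a device (or a bus/devfn alias with a NULL @dev) to @domain: allocate
 * and fill its device_domain_info, attach the domain to @iommu, and set up
 * the context mapping.  If the device or its alias already has a domain,
 * that existing domain is returned instead and the caller must free its own.
 */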
2411 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2412                                                     int bus, int devfn,
2413                                                     struct device *dev,
2414                                                     struct dmar_domain *domain)
2415 {
2416         struct dmar_domain *found = NULL;
2417         struct device_domain_info *info;
2418         unsigned long flags;
2419         int ret;
2420
2421         info = alloc_devinfo_mem();
2422         if (!info)
2423                 return NULL;
2424
2425         info->bus = bus;
2426         info->devfn = devfn;
2427         info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2428         info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2429         info->ats_qdep = 0;
2430         info->dev = dev;
2431         info->domain = domain;
2432         info->iommu = iommu;
2433
2434         if (dev && dev_is_pci(dev)) {
2435                 struct pci_dev *pdev = to_pci_dev(info->dev);
2436
2437                 if (ecap_dev_iotlb_support(iommu->ecap) &&
2438                     pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2439                     dmar_find_matched_atsr_unit(pdev))
2440                         info->ats_supported = 1;
2441
2442                 if (ecs_enabled(iommu)) {
2443                         if (pasid_enabled(iommu)) {
2444                                 int features = pci_pasid_features(pdev);
2445                                 if (features >= 0)
2446                                         info->pasid_supported = features | 1;
2447                         }
2448
2449                         if (info->ats_supported && ecap_prs(iommu->ecap) &&
2450                             pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2451                                 info->pri_supported = 1;
2452                 }
2453         }
2454
2455         spin_lock_irqsave(&device_domain_lock, flags);
2456         if (dev)
2457                 found = find_domain(dev);
2458
2459         if (!found) {
2460                 struct device_domain_info *info2;
2461                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2462                 if (info2) {
2463                         found      = info2->domain;
2464                         info2->dev = dev;
2465                 }
2466         }
2467
2468         if (found) {
2469                 spin_unlock_irqrestore(&device_domain_lock, flags);
2470                 free_devinfo_mem(info);
2471                 /* Caller must free the original domain */
2472                 return found;
2473         }
2474
2475         spin_lock(&iommu->lock);
2476         ret = domain_attach_iommu(domain, iommu);
2477         spin_unlock(&iommu->lock);
2478
2479         if (ret) {
2480                 spin_unlock_irqrestore(&device_domain_lock, flags);
2481                 free_devinfo_mem(info);
2482                 return NULL;
2483         }
2484
2485         list_add(&info->link, &domain->devices);
2486         list_add(&info->global, &device_domain_list);
2487         if (dev)
2488                 dev->archdata.iommu = info;
2489         spin_unlock_irqrestore(&device_domain_lock, flags);
2490
2491         if (dev && domain_context_mapping(domain, dev)) {
2492                 pr_err("Domain context map for %s failed\n", dev_name(dev));
2493                 dmar_remove_one_dev_info(domain, dev);
2494                 return NULL;
2495         }
2496
2497         return domain;
2498 }
2499
2500 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2501 {
2502         *(u16 *)opaque = alias;
2503         return 0;
2504 }
2505
2506 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2507 {
2508         struct device_domain_info *info = NULL;
2509         struct dmar_domain *domain = NULL;
2510         struct intel_iommu *iommu;
2511         u16 req_id, dma_alias;
2512         unsigned long flags;
2513         u8 bus, devfn;
2514
2515         iommu = device_to_iommu(dev, &bus, &devfn);
2516         if (!iommu)
2517                 return NULL;
2518
2519         req_id = ((u16)bus << 8) | devfn;
2520
2521         if (dev_is_pci(dev)) {
2522                 struct pci_dev *pdev = to_pci_dev(dev);
2523
2524                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2525
2526                 spin_lock_irqsave(&device_domain_lock, flags);
2527                 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2528                                                       PCI_BUS_NUM(dma_alias),
2529                                                       dma_alias & 0xff);
2530                 if (info) {
2531                         iommu = info->iommu;
2532                         domain = info->domain;
2533                 }
2534                 spin_unlock_irqrestore(&device_domain_lock, flags);
2535
2536                 /* DMA alias already has a domain, use it */
2537                 if (info)
2538                         goto out;
2539         }
2540
2541         /* Allocate and initialize new domain for the device */
2542         domain = alloc_domain(0);
2543         if (!domain)
2544                 return NULL;
2545         if (domain_init(domain, iommu, gaw)) {
2546                 domain_exit(domain);
2547                 return NULL;
2548         }
2549
2550 out:
2551
2552         return domain;
2553 }
2554
2555 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2556                                               struct dmar_domain *domain)
2557 {
2558         struct intel_iommu *iommu;
2559         struct dmar_domain *tmp;
2560         u16 req_id, dma_alias;
2561         u8 bus, devfn;
2562
2563         iommu = device_to_iommu(dev, &bus, &devfn);
2564         if (!iommu)
2565                 return NULL;
2566
2567         req_id = ((u16)bus << 8) | devfn;
2568
2569         if (dev_is_pci(dev)) {
2570                 struct pci_dev *pdev = to_pci_dev(dev);
2571
2572                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2573
2574                 /* register PCI DMA alias device */
2575                 if (req_id != dma_alias) {
2576                         tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2577                                         dma_alias & 0xff, NULL, domain);
2578
2579                         if (!tmp || tmp != domain)
2580                                 return tmp;
2581                 }
2582         }
2583
2584         tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2585         if (!tmp || tmp != domain)
2586                 return tmp;
2587
2588         return domain;
2589 }
2590
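/*
 * Return the DMA domain for @dev, allocating one (and registering it for the
 * device and its DMA aliases) if the device does not have one yet.
 */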
2591 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2592 {
2593         struct dmar_domain *domain, *tmp;
2594
2595         domain = find_domain(dev);
2596         if (domain)
2597                 goto out;
2598
2599         domain = find_or_alloc_domain(dev, gaw);
2600         if (!domain)
2601                 goto out;
2602
2603         tmp = set_domain_for_dev(dev, domain);
2604         if (!tmp || domain != tmp) {
2605                 domain_exit(domain);
2606                 domain = tmp;
2607         }
2608
2609 out:
2610
2611         return domain;
2612 }
2613
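/*
 * Create a 1:1 mapping of [start, end] in @domain: reserve the IOVA range,
 * clear any existing PTEs (an RMRR may overlap ordinary memory), then map
 * each page onto itself with read/write permission.
 */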
2614 static int iommu_domain_identity_map(struct dmar_domain *domain,
2615                                      unsigned long long start,
2616                                      unsigned long long end)
2617 {
2618         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2619         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2620
2621         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2622                           dma_to_mm_pfn(last_vpfn))) {
2623                 pr_err("Reserving iova failed\n");
2624                 return -ENOMEM;
2625         }
2626
2627         pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2628         /*
2629          * The RMRR range might overlap a physical memory range, so clear
2630          * it first.
2631          */
2632         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2633
2634         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2635                                   last_vpfn - first_vpfn + 1,
2636                                   DMA_PTE_READ|DMA_PTE_WRITE);
2637 }
2638
2639 static int domain_prepare_identity_map(struct device *dev,
2640                                        struct dmar_domain *domain,
2641                                        unsigned long long start,
2642                                        unsigned long long end)
2643 {
2644         /* For _hardware_ passthrough, don't bother. But for software
2645            passthrough, we do it anyway -- it may indicate a memory
2646            range which is reserved in E820, and so didn't get set up in
2647            si_domain to start with */
2648         if (domain == si_domain && hw_pass_through) {
2649                 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2650                         dev_name(dev), start, end);
2651                 return 0;
2652         }
2653
2654         pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2655                 dev_name(dev), start, end);
2656
2657         if (end < start) {
2658                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2659                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2660                         dmi_get_system_info(DMI_BIOS_VENDOR),
2661                         dmi_get_system_info(DMI_BIOS_VERSION),
2662                         dmi_get_system_info(DMI_PRODUCT_VERSION));
2663                 return -EIO;
2664         }
2665
2666         if (end >> agaw_to_width(domain->agaw)) {
2667                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2668                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2669                      agaw_to_width(domain->agaw),
2670                      dmi_get_system_info(DMI_BIOS_VENDOR),
2671                      dmi_get_system_info(DMI_BIOS_VERSION),
2672                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2673                 return -EIO;
2674         }
2675
2676         return iommu_domain_identity_map(domain, start, end);
2677 }
2678
2679 static int iommu_prepare_identity_map(struct device *dev,
2680                                       unsigned long long start,
2681                                       unsigned long long end)
2682 {
2683         struct dmar_domain *domain;
2684         int ret;
2685
2686         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2687         if (!domain)
2688                 return -ENOMEM;
2689
2690         ret = domain_prepare_identity_map(dev, domain, start, end);
2691         if (ret)
2692                 domain_exit(domain);
2693
2694         return ret;
2695 }
2696
2697 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2698                                          struct device *dev)
2699 {
2700         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2701                 return 0;
2702         return iommu_prepare_identity_map(dev, rmrr->base_address,
2703                                           rmrr->end_address);
2704 }
2705
2706 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2707 static inline void iommu_prepare_isa(void)
2708 {
2709         struct pci_dev *pdev;
2710         int ret;
2711
2712         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2713         if (!pdev)
2714                 return;
2715
2716         pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2717         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2718
2719         if (ret)
2720                 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2721
2722         pci_dev_put(pdev);
2723 }
2724 #else
2725 static inline void iommu_prepare_isa(void)
2726 {
2727         return;
2728 }
2729 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2730
2731 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2732
2733 static int __init si_domain_init(int hw)
2734 {
2735         int nid, ret = 0;
2736
2737         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2738         if (!si_domain)
2739                 return -EFAULT;
2740
2741         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2742                 domain_exit(si_domain);
2743                 return -EFAULT;
2744         }
2745
2746         pr_debug("Identity mapping domain allocated\n");
2747
2748         if (hw)
2749                 return 0;
2750
2751         for_each_online_node(nid) {
2752                 unsigned long start_pfn, end_pfn;
2753                 int i;
2754
2755                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2756                         ret = iommu_domain_identity_map(si_domain,
2757                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2758                         if (ret)
2759                                 return ret;
2760                 }
2761         }
2762
2763         return 0;
2764 }
2765
2766 static int identity_mapping(struct device *dev)
2767 {
2768         struct device_domain_info *info;
2769
2770         if (likely(!iommu_identity_mapping))
2771                 return 0;
2772
2773         info = dev->archdata.iommu;
2774         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2775                 return (info->domain == si_domain);
2776
2777         return 0;
2778 }
2779
2780 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2781 {
2782         struct dmar_domain *ndomain;
2783         struct intel_iommu *iommu;
2784         u8 bus, devfn;
2785
2786         iommu = device_to_iommu(dev, &bus, &devfn);
2787         if (!iommu)
2788                 return -ENODEV;
2789
2790         ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2791         if (ndomain != domain)
2792                 return -EBUSY;
2793
2794         return 0;
2795 }
2796
2797 static bool device_has_rmrr(struct device *dev)
2798 {
2799         struct dmar_rmrr_unit *rmrr;
2800         struct device *tmp;
2801         int i;
2802
2803         rcu_read_lock();
2804         for_each_rmrr_units(rmrr) {
2805                 /*
2806                  * Return TRUE if this RMRR contains the device that
2807                  * is passed in.
2808                  */
2809                 for_each_active_dev_scope(rmrr->devices,
2810                                           rmrr->devices_cnt, i, tmp)
2811                         if (tmp == dev) {
2812                                 rcu_read_unlock();
2813                                 return true;
2814                         }
2815         }
2816         rcu_read_unlock();
2817         return false;
2818 }
2819
2820 /*
2821  * There are a couple cases where we need to restrict the functionality of
2822  * devices associated with RMRRs.  The first is when evaluating a device for
2823  * identity mapping because problems exist when devices are moved in and out
2824  * of domains and their respective RMRR information is lost.  This means that
2825  * a device with associated RMRRs will never be in a "passthrough" domain.
2826  * The second is use of the device through the IOMMU API.  This interface
2827  * expects to have full control of the IOVA space for the device.  We cannot
2828  * satisfy both the requirement that RMRR access is maintained and have an
2829  * unencumbered IOVA space.  We also have no ability to quiesce the device's
2830  * use of the RMRR space or even inform the IOMMU API user of the restriction.
2831  * We therefore prevent devices associated with an RMRR from participating in
2832  * the IOMMU API, which eliminates them from device assignment.
2833  *
2834  * In both cases we assume that PCI USB devices with RMRRs have them largely
2835  * for historical reasons and that the RMRR space is not actively used post
2836  * boot.  This exclusion may change if vendors begin to abuse it.
2837  *
2838  * The same exception is made for graphics devices, with the requirement that
2839  * any use of the RMRR regions will be torn down before assigning the device
2840  * to a guest.
2841  */
2842 static bool device_is_rmrr_locked(struct device *dev)
2843 {
2844         if (!device_has_rmrr(dev))
2845                 return false;
2846
2847         if (dev_is_pci(dev)) {
2848                 struct pci_dev *pdev = to_pci_dev(dev);
2849
2850                 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2851                         return false;
2852         }
2853
2854         return true;
2855 }
2856
2857 static int iommu_should_identity_map(struct device *dev, int startup)
2858 {
2859
2860         if (dev_is_pci(dev)) {
2861                 struct pci_dev *pdev = to_pci_dev(dev);
2862
2863                 if (device_is_rmrr_locked(dev))
2864                         return 0;
2865
2866                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2867                         return 1;
2868
2869                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2870                         return 1;
2871
2872                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2873                         return 0;
2874
2875                 /*
2876                  * We want to start off with all devices in the 1:1 domain, and
2877                  * take them out later if we find they can't access all of memory.
2878                  *
2879                  * However, we can't do this for PCI devices behind bridges,
2880                  * because all PCI devices behind the same bridge will end up
2881                  * with the same source-id on their transactions.
2882                  *
2883                  * Practically speaking, we can't change things around for these
2884                  * devices at run-time, because we can't be sure there'll be no
2885                  * DMA transactions in flight for any of their siblings.
2886                  *
2887                  * So PCI devices (unless they're on the root bus) as well as
2888                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2889                  * the 1:1 domain, just in _case_ one of their siblings turns out
2890                  * not to be able to map all of memory.
2891                  */
2892                 if (!pci_is_pcie(pdev)) {
2893                         if (!pci_is_root_bus(pdev->bus))
2894                                 return 0;
2895                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2896                                 return 0;
2897                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2898                         return 0;
2899         } else {
2900                 if (device_has_rmrr(dev))
2901                         return 0;
2902         }
2903
2904         /*
2905          * At boot time, we don't yet know if devices will be 64-bit capable.
2906          * Assume that they will — if they turn out not to be, then we can
2907          * take them out of the 1:1 domain later.
2908          */
2909         if (!startup) {
2910                 /*
2911                  * If the device's dma_mask is less than the system's memory
2912                  * size then this is not a candidate for identity mapping.
2913                  */
2914                 u64 dma_mask = *dev->dma_mask;
2915
2916                 if (dev->coherent_dma_mask &&
2917                     dev->coherent_dma_mask < dma_mask)
2918                         dma_mask = dev->coherent_dma_mask;
2919
2920                 return dma_mask >= dma_get_required_mask(dev);
2921         }
2922
2923         return 1;
2924 }
2925
2926 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2927 {
2928         int ret;
2929
2930         if (!iommu_should_identity_map(dev, 1))
2931                 return 0;
2932
2933         ret = domain_add_dev_info(si_domain, dev);
2934         if (!ret)
2935                 pr_info("%s identity mapping for device %s\n",
2936                         hw ? "Hardware" : "Software", dev_name(dev));
2937         else if (ret == -ENODEV)
2938                 /* device not associated with an iommu */
2939                 ret = 0;
2940
2941         return ret;
2942 }
2943
2944
2945 static int __init iommu_prepare_static_identity_mapping(int hw)
2946 {
2947         struct pci_dev *pdev = NULL;
2948         struct dmar_drhd_unit *drhd;
2949         struct intel_iommu *iommu;
2950         struct device *dev;
2951         int i;
2952         int ret = 0;
2953
2954         for_each_pci_dev(pdev) {
2955                 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2956                 if (ret)
2957                         return ret;
2958         }
2959
2960         for_each_active_iommu(iommu, drhd)
2961                 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2962                         struct acpi_device_physical_node *pn;
2963                         struct acpi_device *adev;
2964
2965                         if (dev->bus != &acpi_bus_type)
2966                                 continue;
2967
2968                         adev = to_acpi_device(dev);
2969                         mutex_lock(&adev->physical_node_lock);
2970                         list_for_each_entry(pn, &adev->physical_node_list, node) {
2971                                 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2972                                 if (ret)
2973                                         break;
2974                         }
2975                         mutex_unlock(&adev->physical_node_lock);
2976                         if (ret)
2977                                 return ret;
2978                 }
2979
2980         return 0;
2981 }
2982
2983 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2984 {
2985         /*
2986          * Start from a sane iommu hardware state.
2987          * If queued invalidation was already initialized by us
2988          * (for example, while enabling interrupt remapping), then
2989          * things are already rolling from a sane state.
2990          */
2991         if (!iommu->qi) {
2992                 /*
2993                  * Clear any previous faults.
2994                  */
2995                 dmar_fault(-1, iommu);
2996                 /*
2997                  * Disable queued invalidation if supported and already enabled
2998                  * before OS handover.
2999                  */
3000                 dmar_disable_qi(iommu);
3001         }
3002
3003         if (dmar_enable_qi(iommu)) {
3004                 /*
3005                  * Queued invalidation is not enabled, so use register-based invalidation
3006                  */
3007                 iommu->flush.flush_context = __iommu_flush_context;
3008                 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
3009                 pr_info("%s: Using Register based invalidation\n",
3010                         iommu->name);
3011         } else {
3012                 iommu->flush.flush_context = qi_flush_context;
3013                 iommu->flush.flush_iotlb = qi_flush_iotlb;
3014                 pr_info("%s: Using Queued invalidation\n", iommu->name);
3015         }
3016 }
3017
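/*
 * Copy the context table(s) of one bus from the old kernel's root entry.
 * Present entries are copied into freshly allocated pages, flushed with
 * __iommu_flush_cache() and stored in tbl[] so that copy_translation_tables()
 * can hook them into the new root entry table.  With extended root entries
 * each bus has a lower and an upper context table, hence the "* 2" indexing.
 */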
3018 static int copy_context_table(struct intel_iommu *iommu,
3019                               struct root_entry *old_re,
3020                               struct context_entry **tbl,
3021                               int bus, bool ext)
3022 {
3023         int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
3024         struct context_entry *new_ce = NULL, ce;
3025         struct context_entry *old_ce = NULL;
3026         struct root_entry re;
3027         phys_addr_t old_ce_phys;
3028
3029         tbl_idx = ext ? bus * 2 : bus;
3030         memcpy(&re, old_re, sizeof(re));
3031
3032         for (devfn = 0; devfn < 256; devfn++) {
3033                 /* First calculate the correct index */
3034                 idx = (ext ? devfn * 2 : devfn) % 256;
3035
3036                 if (idx == 0) {
3037                         /* First save what we may have and clean up */
3038                         if (new_ce) {
3039                                 tbl[tbl_idx] = new_ce;
3040                                 __iommu_flush_cache(iommu, new_ce,
3041                                                     VTD_PAGE_SIZE);
3042                                 pos = 1;
3043                         }
3044
3045                         if (old_ce)
3046                                 iounmap(old_ce);
3047
3048                         ret = 0;
3049                         if (devfn < 0x80)
3050                                 old_ce_phys = root_entry_lctp(&re);
3051                         else
3052                                 old_ce_phys = root_entry_uctp(&re);
3053
3054                         if (!old_ce_phys) {
3055                                 if (ext && devfn == 0) {
3056                                         /* No LCTP, try UCTP */
3057                                         devfn = 0x7f;
3058                                         continue;
3059                                 } else {
3060                                         goto out;
3061                                 }
3062                         }
3063
3064                         ret = -ENOMEM;
3065                         old_ce = memremap(old_ce_phys, PAGE_SIZE,
3066                                         MEMREMAP_WB);
3067                         if (!old_ce)
3068                                 goto out;
3069
3070                         new_ce = alloc_pgtable_page(iommu->node);
3071                         if (!new_ce)
3072                                 goto out_unmap;
3073
3074                         ret = 0;
3075                 }
3076
3077                 /* Now copy the context entry */
3078                 memcpy(&ce, old_ce + idx, sizeof(ce));
3079
3080                 if (!__context_present(&ce))
3081                         continue;
3082
3083                 did = context_domain_id(&ce);
3084                 if (did >= 0 && did < cap_ndoms(iommu->cap))
3085                         set_bit(did, iommu->domain_ids);
3086
3087                 /*
3088                  * We need a marker for copied context entries. This
3089                  * marker needs to work for the old format as well as
3090                  * for extended context entries.
3091                  *
3092                  * Bit 67 of the context entry is used. In the old
3093                  * format this bit is available to software, in the
3094                  * extended format it is the PGE bit, but PGE is ignored
3095                  * by HW if PASIDs are disabled (and thus still
3096                  * available).
3097                  *
3098                  * So disable PASIDs first and then mark the entry
3099                  * copied. This means that we don't copy PASID
3100                  * translations from the old kernel, but this is fine as
3101                  * faults there are not fatal.
3102                  */
3103                 context_clear_pasid_enable(&ce);
3104                 context_set_copied(&ce);
3105
3106                 new_ce[idx] = ce;
3107         }
3108
3109         tbl[tbl_idx + pos] = new_ce;
3110
3111         __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3112
3113 out_unmap:
3114         memunmap(old_ce);
3115
3116 out:
3117         return ret;
3118 }
3119
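/*
 * Re-use the translation structures left behind by the previous (crashed)
 * kernel: remap its root entry table, copy every per-bus context table and
 * hook the copies into this kernel's root entry table.  This keeps DMA that
 * was set up by the old kernel working while the kdump kernel boots.  Bail
 * out if the extended-root-entry (RTT) setting would have to change, since
 * that would require disabling translation.
 */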
3120 static int copy_translation_tables(struct intel_iommu *iommu)
3121 {
3122         struct context_entry **ctxt_tbls;
3123         struct root_entry *old_rt;
3124         phys_addr_t old_rt_phys;
3125         int ctxt_table_entries;
3126         unsigned long flags;
3127         u64 rtaddr_reg;
3128         int bus, ret;
3129         bool new_ext, ext;
3130
3131         rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3132         ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
3133         new_ext    = !!ecap_ecs(iommu->ecap);
3134
3135         /*
3136          * The RTT bit can only be changed when translation is disabled,
3137          * but disabling translation would open a window for data
3138          * corruption. So bail out and don't copy anything if we would
3139          * have to change the bit.
3140          */
3141         if (new_ext != ext)
3142                 return -EINVAL;
3143
3144         old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3145         if (!old_rt_phys)
3146                 return -EINVAL;
3147
3148         old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3149         if (!old_rt)
3150                 return -ENOMEM;
3151
3152         /* This is too big for the stack - allocate it from slab */
3153         ctxt_table_entries = ext ? 512 : 256;
3154         ret = -ENOMEM;
3155         ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3156         if (!ctxt_tbls)
3157                 goto out_unmap;
3158
3159         for (bus = 0; bus < 256; bus++) {
3160                 ret = copy_context_table(iommu, &old_rt[bus],
3161                                          ctxt_tbls, bus, ext);
3162                 if (ret) {
3163                         pr_err("%s: Failed to copy context table for bus %d\n",
3164                                 iommu->name, bus);
3165                         continue;
3166                 }
3167         }
3168
3169         spin_lock_irqsave(&iommu->lock, flags);
3170
3171         /* Context tables are copied, now write them to the root_entry table */
3172         for (bus = 0; bus < 256; bus++) {
3173                 int idx = ext ? bus * 2 : bus;
3174                 u64 val;
3175
3176                 if (ctxt_tbls[idx]) {
3177                         val = virt_to_phys(ctxt_tbls[idx]) | 1;
3178                         iommu->root_entry[bus].lo = val;
3179                 }
3180
3181                 if (!ext || !ctxt_tbls[idx + 1])
3182                         continue;
3183
3184                 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3185                 iommu->root_entry[bus].hi = val;
3186         }
3187
3188         spin_unlock_irqrestore(&iommu->lock, flags);
3189
3190         kfree(ctxt_tbls);
3191
3192         __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3193
3194         ret = 0;
3195
3196 out_unmap:
3197         memunmap(old_rt);
3198
3199         return ret;
3200 }
3201
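/*
 * Boot-time initialization of all DMAR units: allocate the global IOMMU
 * array and the per-CPU deferred-flush state, set up queued invalidation,
 * domain IDs and root entries per IOMMU (copying the old kernel's tables in
 * the kdump case), establish static identity, RMRR and ISA mappings where
 * required, and finally enable translation.
 */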
3202 static int __init init_dmars(void)
3203 {
3204         struct dmar_drhd_unit *drhd;
3205         struct dmar_rmrr_unit *rmrr;
3206         bool copied_tables = false;
3207         struct device *dev;
3208         struct intel_iommu *iommu;
3209         int i, ret, cpu;
3210
3211         /*
3212          * for each drhd
3213          *    allocate root
3214          *    initialize and program root entry to not present
3215          * endfor
3216          */
3217         for_each_drhd_unit(drhd) {
3218                 /*
3219                  * Lock not needed as this is only incremented in the
3220                  * single-threaded kernel __init code path; all other
3221                  * accesses are read-only.
3222                  */
3223                 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3224                         g_num_of_iommus++;
3225                         continue;
3226                 }
3227                 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3228         }
3229
3230         /* Preallocate enough resources for IOMMU hot-addition */
3231         if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3232                 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3233
3234         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3235                         GFP_KERNEL);
3236         if (!g_iommus) {
3237                 pr_err("Allocating global iommu array failed\n");
3238                 ret = -ENOMEM;
3239                 goto error;
3240         }
3241
3242         for_each_possible_cpu(cpu) {
3243                 struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
3244                                                               cpu);
3245
3246                 dfd->tables = kzalloc(g_num_of_iommus *
3247                                       sizeof(struct deferred_flush_table),
3248                                       GFP_KERNEL);
3249                 if (!dfd->tables) {
3250                         ret = -ENOMEM;
3251                         goto free_g_iommus;
3252                 }
3253
3254                 spin_lock_init(&dfd->lock);
3255                 setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
3256         }
3257
3258         for_each_active_iommu(iommu, drhd) {
3259                 g_iommus[iommu->seq_id] = iommu;
3260
3261                 intel_iommu_init_qi(iommu);
3262
3263                 ret = iommu_init_domains(iommu);
3264                 if (ret)
3265                         goto free_iommu;
3266
3267                 init_translation_status(iommu);
3268
3269                 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3270                         iommu_disable_translation(iommu);
3271                         clear_translation_pre_enabled(iommu);
3272                         pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3273                                 iommu->name);
3274                 }
3275
3276                 /*
3277                  * TBD:
3278                  * we could share the same root & context tables
3279                  * among all IOMMUs. Need to split it later.
3280                  */
3281                 ret = iommu_alloc_root_entry(iommu);
3282                 if (ret)
3283                         goto free_iommu;
3284
3285                 if (translation_pre_enabled(iommu)) {
3286                         pr_info("Translation already enabled - trying to copy translation structures\n");
3287
3288                         ret = copy_translation_tables(iommu);
3289                         if (ret) {
3290                                 /*
3291                                  * We found the IOMMU with translation
3292                                  * enabled - but failed to copy over the
3293                                  * old root-entry table. Try to proceed
3294                                  * by disabling translation now and
3295                                  * allocating a clean root-entry table.
3296                                  * This might cause DMAR faults, but
3297                                  * probably the dump will still succeed.
3298                                  */
3299                                 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3300                                        iommu->name);
3301                                 iommu_disable_translation(iommu);
3302                                 clear_translation_pre_enabled(iommu);
3303                         } else {
3304                                 pr_info("Copied translation tables from previous kernel for %s\n",
3305                                         iommu->name);
3306                                 copied_tables = true;
3307                         }
3308                 }
3309
3310                 if (!ecap_pass_through(iommu->ecap))
3311                         hw_pass_through = 0;
3312 #ifdef CONFIG_INTEL_IOMMU_SVM
3313                 if (pasid_enabled(iommu))
3314                         intel_svm_alloc_pasid_tables(iommu);
3315 #endif
3316         }
3317
3318         /*
3319          * Now that qi is enabled on all iommus, set the root entry and flush
3320          * caches. This is required on some Intel X58 chipsets, otherwise the
3321          * flush_context function will loop forever and the boot hangs.
3322          */
3323         for_each_active_iommu(iommu, drhd) {
3324                 iommu_flush_write_buffer(iommu);
3325                 iommu_set_root_entry(iommu);
3326                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3327                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3328         }
3329
3330         if (iommu_pass_through)
3331                 iommu_identity_mapping |= IDENTMAP_ALL;
3332
3333 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3334         iommu_identity_mapping |= IDENTMAP_GFX;
3335 #endif
3336
3337         check_tylersburg_isoch();
3338
3339         if (iommu_identity_mapping) {
3340                 ret = si_domain_init(hw_pass_through);
3341                 if (ret)
3342                         goto free_iommu;
3343         }
3344
3345
3346         /*
3347          * If we copied translations from a previous kernel in the kdump
3348          * case, we cannot assign the devices to domains now, as that
3349          * would eliminate the old mappings. So skip this part and defer
3350          * the assignment to device driver initialization time.
3351          */
3352         if (copied_tables)
3353                 goto domains_done;
3354
3355         /*
3356          * If pass through is not set or not enabled, set up context entries for
3357          * identity mappings for rmrr, gfx, and isa, and possibly fall back to static
3358          * identity mapping if iommu_identity_mapping is set.
3359          */
3360         if (iommu_identity_mapping) {
3361                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3362                 if (ret) {
3363                         pr_crit("Failed to setup IOMMU pass-through\n");
3364                         goto free_iommu;
3365                 }
3366         }
3367         /*
3368          * For each rmrr
3369          *   for each dev attached to rmrr
3370          *   do
3371          *     locate drhd for dev, alloc domain for dev
3372          *     allocate free domain
3373          *     allocate page table entries for rmrr
3374          *     if context not allocated for bus
3375          *           allocate and init context
3376          *           set present in root table for this bus
3377          *     init context with domain, translation etc
3378          *    endfor
3379          * endfor
3380          */
3381         pr_info("Setting RMRR:\n");
3382         for_each_rmrr_units(rmrr) {
3383                 /* some BIOSes list non-existent devices in the DMAR table. */
3384                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3385                                           i, dev) {
3386                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
3387                         if (ret)
3388                                 pr_err("Mapping reserved region failed\n");
3389                 }
3390         }
3391
3392         iommu_prepare_isa();
3393
3394 domains_done:
3395
3396         /*
3397          * for each drhd
3398          *   enable fault log
3399          *   global invalidate context cache
3400          *   global invalidate iotlb
3401          *   enable translation
3402          */
3403         for_each_iommu(iommu, drhd) {
3404                 if (drhd->ignored) {
3405                         /*
3406                          * we always have to disable PMRs or DMA may fail on
3407                          * this device
3408                          */
3409                         if (force_on)
3410                                 iommu_disable_protect_mem_regions(iommu);
3411                         continue;
3412                 }
3413
3414                 iommu_flush_write_buffer(iommu);
3415
3416 #ifdef CONFIG_INTEL_IOMMU_SVM
3417                 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3418                         ret = intel_svm_enable_prq(iommu);
3419                         if (ret)
3420                                 goto free_iommu;
3421                 }
3422 #endif
3423                 ret = dmar_set_interrupt(iommu);
3424                 if (ret)
3425                         goto free_iommu;
3426
3427                 if (!translation_pre_enabled(iommu))
3428                         iommu_enable_translation(iommu);
3429
3430                 iommu_disable_protect_mem_regions(iommu);
3431         }
3432
3433         return 0;
3434
3435 free_iommu:
3436         for_each_active_iommu(iommu, drhd) {
3437                 disable_dmar_iommu(iommu);
3438                 free_dmar_iommu(iommu);
3439         }
3440 free_g_iommus:
3441         for_each_possible_cpu(cpu)
3442                 kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
3443         kfree(g_iommus);
3444 error:
3445         return ret;
3446 }
3447
3448 /* This takes a number of _MM_ pages, not VTD pages */
3449 static unsigned long intel_alloc_iova(struct device *dev,
3450                                      struct dmar_domain *domain,
3451                                      unsigned long nrpages, uint64_t dma_mask)
3452 {
3453         unsigned long iova_pfn = 0;
3454
3455         /* Restrict dma_mask to the width that the iommu can handle */
3456         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3457         /* Ensure we reserve the whole size-aligned region */
3458         nrpages = __roundup_pow_of_two(nrpages);
3459
3460         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3461                 /*
3462                  * First try to allocate an io virtual address in
3463                  * DMA_BIT_MASK(32) and if that fails then try allocating
3464                  * from higher range
3465                  */
3466                 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3467                                            IOVA_PFN(DMA_BIT_MASK(32)));
3468                 if (iova_pfn)
3469                         return iova_pfn;
3470         }
3471         iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
3472         if (unlikely(!iova_pfn)) {
3473                 pr_err("Allocating %ld-page iova for %s failed\n",
3474                        nrpages, dev_name(dev));
3475                 return 0;
3476         }
3477
3478         return iova_pfn;
3479 }
3480
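/*
 * Slow path of get_valid_domain_for_dev(): find or allocate a DMA domain for
 * the device, pre-map any RMRR regions that name it, and attach the device
 * to the domain.  If another thread attached the device first, the freshly
 * built domain is torn down and the existing one is returned.
 */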
3481 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
3482 {
3483         struct dmar_domain *domain, *tmp;
3484         struct dmar_rmrr_unit *rmrr;
3485         struct device *i_dev;
3486         int i, ret;
3487
3488         domain = find_domain(dev);
3489         if (domain)
3490                 goto out;
3491
3492         domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3493         if (!domain)
3494                 goto out;
3495
3496         /* We have a new domain - setup possible RMRRs for the device */
3497         rcu_read_lock();
3498         for_each_rmrr_units(rmrr) {
3499                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3500                                           i, i_dev) {
3501                         if (i_dev != dev)
3502                                 continue;
3503
3504                         ret = domain_prepare_identity_map(dev, domain,
3505                                                           rmrr->base_address,
3506                                                           rmrr->end_address);
3507                         if (ret)
3508                                 dev_err(dev, "Mapping reserved region failed\n");
3509                 }
3510         }
3511         rcu_read_unlock();
3512
3513         tmp = set_domain_for_dev(dev, domain);
3514         if (!tmp || domain != tmp) {
3515                 domain_exit(domain);
3516                 domain = tmp;
3517         }
3518
3519 out:
3520
3521         if (!domain)
3522                 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3523
3524
3525         return domain;
3526 }
3527
3528 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3529 {
3530         struct device_domain_info *info;
3531
3532         /* No lock here, assumes no domain exit in normal case */
3533         info = dev->archdata.iommu;
3534         if (likely(info))
3535                 return info->domain;
3536
3537         return __get_valid_domain_for_dev(dev);
3538 }
3539
3540 /* Check if the dev needs to go through the non-identity map and unmap process. */
3541 static int iommu_no_mapping(struct device *dev)
3542 {
3543         int found;
3544
3545         if (iommu_dummy(dev))
3546                 return 1;
3547
3548         if (!iommu_identity_mapping)
3549                 return 0;
3550
3551         found = identity_mapping(dev);
3552         if (found) {
3553                 if (iommu_should_identity_map(dev, 0))
3554                         return 1;
3555                 else {
3556                         /*
3557                          * The 32 bit DMA device is removed from si_domain and
3558                          * falls back to non-identity mapping.
3559                          */
3560                         dmar_remove_one_dev_info(si_domain, dev);
3561                         pr_info("32bit %s uses non-identity mapping\n",
3562                                 dev_name(dev));
3563                         return 0;
3564                 }
3565         } else {
3566                 /*
3567                  * In case a 64 bit DMA device is detached from a VM, the device
3568                  * is put into si_domain for identity mapping.
3569                  */
3570                 if (iommu_should_identity_map(dev, 0)) {
3571                         int ret;
3572                         ret = domain_add_dev_info(si_domain, dev);
3573                         if (!ret) {
3574                                 pr_info("64bit %s uses identity mapping\n",
3575                                         dev_name(dev));
3576                                 return 1;
3577                         }
3578                 }
3579         }
3580
3581         return 0;
3582 }
3583
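/*
 * Map one physically contiguous buffer for DMA.  Identity-mapped devices
 * simply get the physical address back.  Otherwise an IOVA range covering
 * whole VT-d pages is allocated, page-table entries are installed with the
 * permissions implied by the DMA direction, and either the IOTLB (caching
 * mode) or the write buffer is flushed.  Returns the DMA address, or 0 on
 * failure.
 */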
3584 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3585                                      size_t size, int dir, u64 dma_mask)
3586 {
3587         struct dmar_domain *domain;
3588         phys_addr_t start_paddr;
3589         unsigned long iova_pfn;
3590         int prot = 0;
3591         int ret;
3592         struct intel_iommu *iommu;
3593         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3594
3595         BUG_ON(dir == DMA_NONE);
3596
3597         if (iommu_no_mapping(dev))
3598                 return paddr;
3599
3600         domain = get_valid_domain_for_dev(dev);
3601         if (!domain)
3602                 return 0;
3603
3604         iommu = domain_get_iommu(domain);
3605         size = aligned_nrpages(paddr, size);
3606
3607         iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3608         if (!iova_pfn)
3609                 goto error;
3610
3611         /*
3612          * Check if DMAR supports zero-length reads on write-only
3613          * mappings.
3614          */
3615         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3616                         !cap_zlr(iommu->cap))
3617                 prot |= DMA_PTE_READ;
3618         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3619                 prot |= DMA_PTE_WRITE;
3620         /*
3621          * paddr .. (paddr + size) might cover a partial page, so we should map the
3622          * whole page.  Note: if two parts of one page are mapped separately, we
3623          * might have two guest_addr mappings to the same host paddr, but this
3624          * is not a big problem
3625          */
3626         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3627                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3628         if (ret)
3629                 goto error;
3630
3631         /* it's a non-present to present mapping. Only flush if caching mode */
3632         if (cap_caching_mode(iommu->cap))
3633                 iommu_flush_iotlb_psi(iommu, domain,
3634                                       mm_to_dma_pfn(iova_pfn),
3635                                       size, 0, 1);
3636         else
3637                 iommu_flush_write_buffer(iommu);
3638
3639         start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3640         start_paddr += paddr & ~PAGE_MASK;
3641         return start_paddr;
3642
3643 error:
3644         if (iova_pfn)
3645                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3646         pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3647                 dev_name(dev), size, (unsigned long long)paddr, dir);
3648         return 0;
3649 }
3650
3651 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3652                                  unsigned long offset, size_t size,
3653                                  enum dma_data_direction dir,
3654                                  unsigned long attrs)
3655 {
3656         return __intel_map_single(dev, page_to_phys(page) + offset, size,
3657                                   dir, *dev->dma_mask);
3658 }
3659
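/*
 * Drain the deferred-unmap queue of one CPU.  On real hardware a single
 * global IOTLB flush per IOMMU (plus per-entry device-IOTLB flushes) is
 * cheaper than many selective flushes; under a caching-mode (emulated)
 * IOMMU the opposite holds, so page-selective flushes are issued per entry
 * instead.  The queued IOVAs and page freelists are then released.
 * Called with flush_data->lock held.
 */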
3660 static void flush_unmaps(struct deferred_flush_data *flush_data)
3661 {
3662         int i, j;
3663
3664         flush_data->timer_on = 0;
3665
3666         /* just flush them all */
3667         for (i = 0; i < g_num_of_iommus; i++) {
3668                 struct intel_iommu *iommu = g_iommus[i];
3669                 struct deferred_flush_table *flush_table =
3670                                 &flush_data->tables[i];
3671                 if (!iommu)
3672                         continue;
3673
3674                 if (!flush_table->next)
3675                         continue;
3676
3677                 /* In caching mode, global flushes make emulation expensive */
3678                 if (!cap_caching_mode(iommu->cap))
3679                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3680                                          DMA_TLB_GLOBAL_FLUSH);
3681                 for (j = 0; j < flush_table->next; j++) {
3682                         unsigned long mask;
3683                         struct deferred_flush_entry *entry =
3684                                                 &flush_table->entries[j];
3685                         unsigned long iova_pfn = entry->iova_pfn;
3686                         unsigned long nrpages = entry->nrpages;
3687                         struct dmar_domain *domain = entry->domain;
3688                         struct page *freelist = entry->freelist;
3689
3690                         /* On real hardware multiple invalidations are expensive */
3691                         if (cap_caching_mode(iommu->cap))
3692                                 iommu_flush_iotlb_psi(iommu, domain,
3693                                         mm_to_dma_pfn(iova_pfn),
3694                                         nrpages, !freelist, 0);
3695                         else {
3696                                 mask = ilog2(nrpages);
3697                                 iommu_flush_dev_iotlb(domain,
3698                                                 (uint64_t)iova_pfn << PAGE_SHIFT, mask);
3699                         }
3700                         free_iova_fast(&domain->iovad, iova_pfn, nrpages);
3701                         if (freelist)
3702                                 dma_free_pagelist(freelist);
3703                 }
3704                 flush_table->next = 0;
3705         }
3706
3707         flush_data->size = 0;
3708 }
3709
3710 static void flush_unmaps_timeout(unsigned long cpuid)
3711 {
3712         struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
3713         unsigned long flags;
3714
3715         spin_lock_irqsave(&flush_data->lock, flags);
3716         flush_unmaps(flush_data);
3717         spin_unlock_irqrestore(&flush_data->lock, flags);
3718 }
3719
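/*
 * Queue an unmapped IOVA range for deferred (lazy) IOTLB invalidation on the
 * current CPU.  The queue is drained either by the 10ms timer or, once
 * HIGH_WATER_MARK entries have accumulated, by flushing all CPUs' queues
 * synchronously.
 */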
3720 static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
3721                       unsigned long nrpages, struct page *freelist)
3722 {
3723         unsigned long flags;
3724         int entry_id, iommu_id;
3725         struct intel_iommu *iommu;
3726         struct deferred_flush_entry *entry;
3727         struct deferred_flush_data *flush_data;
3728         unsigned int cpuid;
3729
3730         cpuid = get_cpu();
3731         flush_data = per_cpu_ptr(&deferred_flush, cpuid);
3732
3733         /* Flush all CPUs' entries to avoid deferring too much.  If
3734          * this becomes a bottleneck, we can flush just this CPU's entries and
3735          * rely on the flush timer for the rest.
3736          */
3737         if (flush_data->size == HIGH_WATER_MARK) {
3738                 int cpu;
3739
3740                 for_each_online_cpu(cpu)
3741                         flush_unmaps_timeout(cpu);
3742         }
3743
3744         spin_lock_irqsave(&flush_data->lock, flags);
3745
3746         iommu = domain_get_iommu(dom);
3747         iommu_id = iommu->seq_id;
3748
3749         entry_id = flush_data->tables[iommu_id].next;
3750         ++(flush_data->tables[iommu_id].next);
3751
3752         entry = &flush_data->tables[iommu_id].entries[entry_id];
3753         entry->domain = dom;
3754         entry->iova_pfn = iova_pfn;
3755         entry->nrpages = nrpages;
3756         entry->freelist = freelist;
3757
3758         if (!flush_data->timer_on) {
3759                 mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
3760                 flush_data->timer_on = 1;
3761         }
3762         flush_data->size++;
3763         spin_unlock_irqrestore(&flush_data->lock, flags);
3764
3765         put_cpu();
3766 }
3767
3768 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3769 {
3770         struct dmar_domain *domain;
3771         unsigned long start_pfn, last_pfn;
3772         unsigned long nrpages;
3773         unsigned long iova_pfn;
3774         struct intel_iommu *iommu;
3775         struct page *freelist;
3776
3777         if (iommu_no_mapping(dev))
3778                 return;
3779
3780         domain = find_domain(dev);
3781         BUG_ON(!domain);
3782
3783         iommu = domain_get_iommu(domain);
3784
3785         iova_pfn = IOVA_PFN(dev_addr);
3786
3787         nrpages = aligned_nrpages(dev_addr, size);
3788         start_pfn = mm_to_dma_pfn(iova_pfn);
3789         last_pfn = start_pfn + nrpages - 1;
3790
3791         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3792                  dev_name(dev), start_pfn, last_pfn);
3793
3794         freelist = domain_unmap(domain, start_pfn, last_pfn);
3795
3796         if (intel_iommu_strict) {
3797                 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3798                                       nrpages, !freelist, 0);
3799                 /* free iova */
3800                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3801                 dma_free_pagelist(freelist);
3802         } else {
3803                 add_unmap(domain, iova_pfn, nrpages, freelist);
3804                 /*
3805                  * queue up the release of the unmap to save the roughly 1/6th of
3806                  * the cpu time used up by the iotlb flush operation...
3807                  */
3808         }
3809 }
3810
3811 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3812                              size_t size, enum dma_data_direction dir,
3813                              unsigned long attrs)
3814 {
3815         intel_unmap(dev, dev_addr, size);
3816 }
3817
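/*
 * Allocate and map a coherent DMA buffer.  For devices translated by the
 * IOMMU any page will do, since the IOMMU remaps it below the coherent mask,
 * so the GFP_DMA/GFP_DMA32 restrictions are dropped; identity-mapped devices
 * keep the zone restrictions derived from their coherent mask.
 */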
3818 static void *intel_alloc_coherent(struct device *dev, size_t size,
3819                                   dma_addr_t *dma_handle, gfp_t flags,
3820                                   unsigned long attrs)
3821 {
3822         struct page *page = NULL;
3823         int order;
3824
3825         size = PAGE_ALIGN(size);
3826         order = get_order(size);
3827
3828         if (!iommu_no_mapping(dev))
3829                 flags &= ~(GFP_DMA | GFP_DMA32);
3830         else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3831                 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3832                         flags |= GFP_DMA;
3833                 else
3834                         flags |= GFP_DMA32;
3835         }
3836
3837         if (gfpflags_allow_blocking(flags)) {
3838                 unsigned int count = size >> PAGE_SHIFT;
3839
3840                 page = dma_alloc_from_contiguous(dev, count, order, flags);
3841                 if (page && iommu_no_mapping(dev) &&
3842                     page_to_phys(page) + size > dev->coherent_dma_mask) {
3843                         dma_release_from_contiguous(dev, page, count);
3844                         page = NULL;
3845                 }
3846         }
3847
3848         if (!page)
3849                 page = alloc_pages(flags, order);
3850         if (!page)
3851                 return NULL;
3852         memset(page_address(page), 0, size);
3853
3854         *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3855                                          DMA_BIDIRECTIONAL,
3856                                          dev->coherent_dma_mask);
3857         if (*dma_handle)
3858                 return page_address(page);
3859         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3860                 __free_pages(page, order);
3861
3862         return NULL;
3863 }
3864
3865 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3866                                 dma_addr_t dma_handle, unsigned long attrs)
3867 {
3868         int order;
3869         struct page *page = virt_to_page(vaddr);
3870
3871         size = PAGE_ALIGN(size);
3872         order = get_order(size);
3873
3874         intel_unmap(dev, dma_handle, size);
3875         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3876                 __free_pages(page, order);
3877 }
3878
3879 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3880                            int nelems, enum dma_data_direction dir,
3881                            unsigned long attrs)
3882 {
3883         dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3884         unsigned long nrpages = 0;
3885         struct scatterlist *sg;
3886         int i;
3887
3888         for_each_sg(sglist, sg, nelems, i) {
3889                 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3890         }
3891
3892         intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3893 }
3894
3895 static int intel_nontranslate_map_sg(struct device *hddev,
3896         struct scatterlist *sglist, int nelems, int dir)
3897 {
3898         int i;
3899         struct scatterlist *sg;
3900
3901         for_each_sg(sglist, sg, nelems, i) {
3902                 BUG_ON(!sg_page(sg));
3903                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3904                 sg->dma_length = sg->length;
3905         }
3906         return nelems;
3907 }
3908
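/*
 * Map a scatter-gather list: one IOVA range large enough for all segments is
 * allocated and the segments are mapped back-to-back into it by
 * domain_sg_mapping().  Identity-mapped devices bypass translation via
 * intel_nontranslate_map_sg().
 */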
3909 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3910                         enum dma_data_direction dir, unsigned long attrs)
3911 {
3912         int i;
3913         struct dmar_domain *domain;
3914         size_t size = 0;
3915         int prot = 0;
3916         unsigned long iova_pfn;
3917         int ret;
3918         struct scatterlist *sg;
3919         unsigned long start_vpfn;
3920         struct intel_iommu *iommu;
3921
3922         BUG_ON(dir == DMA_NONE);
3923         if (iommu_no_mapping(dev))
3924                 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3925
3926         domain = get_valid_domain_for_dev(dev);
3927         if (!domain)
3928                 return 0;
3929
3930         iommu = domain_get_iommu(domain);
3931
3932         for_each_sg(sglist, sg, nelems, i)
3933                 size += aligned_nrpages(sg->offset, sg->length);
3934
3935         iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3936                                 *dev->dma_mask);
3937         if (!iova_pfn) {
3938                 sglist->dma_length = 0;
3939                 return 0;
3940         }
3941
3942         /*
3943          * Check if DMAR supports zero-length reads on write-only
3944          * mappings.
3945          */
3946         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3947                         !cap_zlr(iommu->cap))
3948                 prot |= DMA_PTE_READ;
3949         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3950                 prot |= DMA_PTE_WRITE;
3951
3952         start_vpfn = mm_to_dma_pfn(iova_pfn);
3953
3954         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3955         if (unlikely(ret)) {
3956                 dma_pte_free_pagetable(domain, start_vpfn,
3957                                        start_vpfn + size - 1);
3958                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3959                 return 0;
3960         }
3961
3962         /* it's a non-present to present mapping. Only flush if caching mode */
3963         if (cap_caching_mode(iommu->cap))
3964                 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3965         else
3966                 iommu_flush_write_buffer(iommu);
3967
3968         return nelems;
3969 }
3970
3971 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3972 {
3973         return !dma_addr;
3974 }
3975
3976 struct dma_map_ops intel_dma_ops = {
3977         .alloc = intel_alloc_coherent,
3978         .free = intel_free_coherent,
3979         .map_sg = intel_map_sg,
3980         .unmap_sg = intel_unmap_sg,
3981         .map_page = intel_map_page,
3982         .unmap_page = intel_unmap_page,
3983         .mapping_error = intel_mapping_error,
3984 };
3985
3986 static inline int iommu_domain_cache_init(void)
3987 {
3988         int ret = 0;
3989
3990         iommu_domain_cache = kmem_cache_create("iommu_domain",
3991                                          sizeof(struct dmar_domain),
3992                                          0,
3993                                          SLAB_HWCACHE_ALIGN,
3994
3995                                          NULL);
3996         if (!iommu_domain_cache) {
3997                 pr_err("Couldn't create iommu_domain cache\n");
3998                 ret = -ENOMEM;
3999         }
4000
4001         return ret;
4002 }
4003
4004 static inline int iommu_devinfo_cache_init(void)
4005 {
4006         int ret = 0;
4007
4008         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
4009                                          sizeof(struct device_domain_info),
4010                                          0,
4011                                          SLAB_HWCACHE_ALIGN,
4012                                          NULL);
4013         if (!iommu_devinfo_cache) {
4014                 pr_err("Couldn't create devinfo cache\n");
4015                 ret = -ENOMEM;
4016         }
4017
4018         return ret;
4019 }
4020
4021 static int __init iommu_init_mempool(void)
4022 {
4023         int ret;
4024         ret = iova_cache_get();
4025         if (ret)
4026                 return ret;
4027
4028         ret = iommu_domain_cache_init();
4029         if (ret)
4030                 goto domain_error;
4031
4032         ret = iommu_devinfo_cache_init();
4033         if (!ret)
4034                 return ret;
4035
4036         kmem_cache_destroy(iommu_domain_cache);
4037 domain_error:
4038         iova_cache_put();
4039
4040         return -ENOMEM;
4041 }
4042
4043 static void __init iommu_exit_mempool(void)
4044 {
4045         kmem_cache_destroy(iommu_devinfo_cache);
4046         kmem_cache_destroy(iommu_domain_cache);
4047         iova_cache_put();
4048 }
4049
4050 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
4051 {
4052         struct dmar_drhd_unit *drhd;
4053         u32 vtbar;
4054         int rc;
4055
4056         /* We know that this device on this chipset has its own IOMMU.
4057          * If we find it under a different IOMMU, then the BIOS is lying
4058          * to us. Hope that the IOMMU for this device is actually
4059          * disabled, and it needs no translation...
4060          */
4061         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
4062         if (rc) {
4063                 /* "can't" happen */
4064                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
4065                 return;
4066         }
4067         vtbar &= 0xffff0000;
4068
4069         /* we know that this iommu should be at offset 0xa000 from vtbar */
4070         drhd = dmar_find_matched_drhd_unit(pdev);
4071         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
4072                             TAINT_FIRMWARE_WORKAROUND,
4073                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
4074                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4075 }
4076 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
4077
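/*
 * Mark DMAR units that can be ignored: units whose device scope contains no
 * devices, and units that cover nothing but graphics devices when
 * dmar_map_gfx is disabled, in which case those devices get the dummy
 * identity domain and bypass translation entirely.
 */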
4078 static void __init init_no_remapping_devices(void)
4079 {
4080         struct dmar_drhd_unit *drhd;
4081         struct device *dev;
4082         int i;
4083
4084         for_each_drhd_unit(drhd) {
4085                 if (!drhd->include_all) {
4086                         for_each_active_dev_scope(drhd->devices,
4087                                                   drhd->devices_cnt, i, dev)
4088                                 break;
4089                         /* ignore DMAR unit if no devices exist */
4090                         if (i == drhd->devices_cnt)
4091                                 drhd->ignored = 1;
4092                 }
4093         }
4094
4095         for_each_active_drhd_unit(drhd) {
4096                 if (drhd->include_all)
4097                         continue;
4098
4099                 for_each_active_dev_scope(drhd->devices,
4100                                           drhd->devices_cnt, i, dev)
4101                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
4102                                 break;
4103                 if (i < drhd->devices_cnt)
4104                         continue;
4105
4106                 /* This IOMMU has *only* gfx devices. Either bypass it or
4107                    set the gfx_mapped flag, as appropriate */
4108                 if (dmar_map_gfx) {
4109                         intel_iommu_gfx_mapped = 1;
4110                 } else {
4111                         drhd->ignored = 1;
4112                         for_each_active_dev_scope(drhd->devices,
4113                                                   drhd->devices_cnt, i, dev)
4114                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4115                 }
4116         }
4117 }
4118
4119 #ifdef CONFIG_SUSPEND
4120 static int init_iommu_hw(void)
4121 {
4122         struct dmar_drhd_unit *drhd;
4123         struct intel_iommu *iommu = NULL;
4124
4125         for_each_active_iommu(iommu, drhd)
4126                 if (iommu->qi)
4127                         dmar_reenable_qi(iommu);
4128
4129         for_each_iommu(iommu, drhd) {
4130                 if (drhd->ignored) {
4131                         /*
4132                          * we always have to disable PMRs or DMA may fail on
4133                          * this device
4134                          */
4135                         if (force_on)
4136                                 iommu_disable_protect_mem_regions(iommu);
4137                         continue;
4138                 }
4139
4140                 iommu_flush_write_buffer(iommu);
4141
4142                 iommu_set_root_entry(iommu);
4143
4144                 iommu->flush.flush_context(iommu, 0, 0, 0,
4145                                            DMA_CCMD_GLOBAL_INVL);
4146                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4147                 iommu_enable_translation(iommu);
4148                 iommu_disable_protect_mem_regions(iommu);
4149         }
4150
4151         return 0;
4152 }
4153
4154 static void iommu_flush_all(void)
4155 {
4156         struct dmar_drhd_unit *drhd;
4157         struct intel_iommu *iommu;
4158
4159         for_each_active_iommu(iommu, drhd) {
4160                 iommu->flush.flush_context(iommu, 0, 0, 0,
4161                                            DMA_CCMD_GLOBAL_INVL);
4162                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4163                                          DMA_TLB_GLOBAL_FLUSH);
4164         }
4165 }
4166
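/*
 * On suspend: flush all caches, disable translation and save the fault event
 * registers (FECTL/FEDATA/FEADDR/FEUADDR) of every active IOMMU so that
 * iommu_resume() can restore them after init_iommu_hw() has brought
 * translation back up.
 */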
4167 static int iommu_suspend(void)
4168 {
4169         struct dmar_drhd_unit *drhd;
4170         struct intel_iommu *iommu = NULL;
4171         unsigned long flag;
4172
4173         for_each_active_iommu(iommu, drhd) {
4174                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4175                                                  GFP_ATOMIC);
4176                 if (!iommu->iommu_state)
4177                         goto nomem;
4178         }
4179
4180         iommu_flush_all();
4181
4182         for_each_active_iommu(iommu, drhd) {
4183                 iommu_disable_translation(iommu);
4184
4185                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4186
4187                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4188                         readl(iommu->reg + DMAR_FECTL_REG);
4189                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4190                         readl(iommu->reg + DMAR_FEDATA_REG);
4191                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4192                         readl(iommu->reg + DMAR_FEADDR_REG);
4193                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4194                         readl(iommu->reg + DMAR_FEUADDR_REG);
4195
4196                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4197         }
4198         return 0;
4199
4200 nomem:
4201         for_each_active_iommu(iommu, drhd)
4202                 kfree(iommu->iommu_state);
4203
4204         return -ENOMEM;
4205 }
4206
4207 static void iommu_resume(void)
4208 {
4209         struct dmar_drhd_unit *drhd;
4210         struct intel_iommu *iommu = NULL;
4211         unsigned long flag;
4212
4213         if (init_iommu_hw()) {
4214                 if (force_on)
4215                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4216                 else
4217                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4218                 return;
4219         }
4220
4221         for_each_active_iommu(iommu, drhd) {
4222
4223                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4224
4225                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4226                         iommu->reg + DMAR_FECTL_REG);
4227                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4228                         iommu->reg + DMAR_FEDATA_REG);
4229                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4230                         iommu->reg + DMAR_FEADDR_REG);
4231                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4232                         iommu->reg + DMAR_FEUADDR_REG);
4233
4234                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4235         }
4236
4237         for_each_active_iommu(iommu, drhd)
4238                 kfree(iommu->iommu_state);
4239 }
4240
4241 static struct syscore_ops iommu_syscore_ops = {
4242         .resume         = iommu_resume,
4243         .suspend        = iommu_suspend,
4244 };
4245
4246 static void __init init_iommu_pm_ops(void)
4247 {
4248         register_syscore_ops(&iommu_syscore_ops);
4249 }
4250
4251 #else
4252 static inline void init_iommu_pm_ops(void) {}
4253 #endif  /* CONFIG_SUSPEND */
4254
4255
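/*
 * Parse one RMRR (Reserved Memory Region Reporting) structure from the DMAR
 * table: record its address range, register it as a direct-mapped reserved
 * IOMMU region and remember the devices it applies to.
 */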
4256 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4257 {
4258         struct acpi_dmar_reserved_memory *rmrr;
4259         int prot = DMA_PTE_READ|DMA_PTE_WRITE;
4260         struct dmar_rmrr_unit *rmrru;
4261         size_t length;
4262
4263         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4264         if (!rmrru)
4265                 goto out;
4266
4267         rmrru->hdr = header;
4268         rmrr = (struct acpi_dmar_reserved_memory *)header;
4269         rmrru->base_address = rmrr->base_address;
4270         rmrru->end_address = rmrr->end_address;
4271
4272         length = rmrr->end_address - rmrr->base_address + 1;
4273         rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
4274                                               IOMMU_RESV_DIRECT);
4275         if (!rmrru->resv)
4276                 goto free_rmrru;
4277
4278         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4279                                 ((void *)rmrr) + rmrr->header.length,
4280                                 &rmrru->devices_cnt);
4281         if (rmrru->devices_cnt && rmrru->devices == NULL)
4282                 goto free_all;
4283
4284         list_add(&rmrru->list, &dmar_rmrr_units);
4285
4286         return 0;
4287 free_all:
4288         kfree(rmrru->resv);
4289 free_rmrru:
4290         kfree(rmrru);
4291 out:
4292         return -ENOMEM;
4293 }
4294
4295 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4296 {
4297         struct dmar_atsr_unit *atsru;
4298         struct acpi_dmar_atsr *tmp;
4299
4300         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4301                 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4302                 if (atsr->segment != tmp->segment)
4303                         continue;
4304                 if (atsr->header.length != tmp->header.length)
4305                         continue;
4306                 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4307                         return atsru;
4308         }
4309
4310         return NULL;
4311 }
4312
4313 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4314 {
4315         struct acpi_dmar_atsr *atsr;
4316         struct dmar_atsr_unit *atsru;
4317
4318         if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4319                 return 0;
4320
4321         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4322         atsru = dmar_find_atsr(atsr);
4323         if (atsru)
4324                 return 0;
4325
4326         atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4327         if (!atsru)
4328                 return -ENOMEM;
4329
4330         /*
4331          * If memory is allocated from slab by the ACPI _DSM method, we need to
4332          * copy the memory content because the memory buffer will be freed
4333          * on return.
4334          */
4335         atsru->hdr = (void *)(atsru + 1);
4336         memcpy(atsru->hdr, hdr, hdr->length);
4337         atsru->include_all = atsr->flags & 0x1;
4338         if (!atsru->include_all) {
4339                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4340                                 (void *)atsr + atsr->header.length,
4341                                 &atsru->devices_cnt);
4342                 if (atsru->devices_cnt && atsru->devices == NULL) {
4343                         kfree(atsru);
4344                         return -ENOMEM;
4345                 }
4346         }
4347
4348         list_add_rcu(&atsru->list, &dmar_atsr_units);
4349
4350         return 0;
4351 }
4352
4353 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4354 {
4355         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4356         kfree(atsru);
4357 }
4358
4359 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4360 {
4361         struct acpi_dmar_atsr *atsr;
4362         struct dmar_atsr_unit *atsru;
4363
4364         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4365         atsru = dmar_find_atsr(atsr);
4366         if (atsru) {
4367                 list_del_rcu(&atsru->list);
4368                 synchronize_rcu();
4369                 intel_iommu_free_atsr(atsru);
4370         }
4371
4372         return 0;
4373 }
4374
4375 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4376 {
4377         int i;
4378         struct device *dev;
4379         struct acpi_dmar_atsr *atsr;
4380         struct dmar_atsr_unit *atsru;
4381
4382         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4383         atsru = dmar_find_atsr(atsr);
4384         if (!atsru)
4385                 return 0;
4386
4387         if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4388                 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4389                                           i, dev)
4390                         return -EBUSY;
4391         }
4392
4393         return 0;
4394 }
4395
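/*
 * Bring up a hot-added DMAR unit: check that it supports the capabilities
 * already relied upon globally (pass-through, snooping, super pages), then
 * initialize its domain IDs, root entry, queued invalidation and fault
 * interrupt, and finally enable translation on it.
 */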
4396 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4397 {
4398         int sp, ret = 0;
4399         struct intel_iommu *iommu = dmaru->iommu;
4400
4401         if (g_iommus[iommu->seq_id])
4402                 return 0;
4403
4404         if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4405                 pr_warn("%s: Doesn't support hardware pass through.\n",
4406                         iommu->name);
4407                 return -ENXIO;
4408         }
4409         if (!ecap_sc_support(iommu->ecap) &&
4410             domain_update_iommu_snooping(iommu)) {
4411                 pr_warn("%s: Doesn't support snooping.\n",
4412                         iommu->name);
4413                 return -ENXIO;
4414         }
4415         sp = domain_update_iommu_superpage(iommu) - 1;
4416         if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4417                 pr_warn("%s: Doesn't support large page.\n",
4418                         iommu->name);
4419                 return -ENXIO;
4420         }
4421
4422         /*
4423          * Disable translation if already enabled prior to OS handover.
4424          */
4425         if (iommu->gcmd & DMA_GCMD_TE)
4426                 iommu_disable_translation(iommu);
4427
4428         g_iommus[iommu->seq_id] = iommu;
4429         ret = iommu_init_domains(iommu);
4430         if (ret == 0)
4431                 ret = iommu_alloc_root_entry(iommu);
4432         if (ret)
4433                 goto out;
4434
4435 #ifdef CONFIG_INTEL_IOMMU_SVM
4436         if (pasid_enabled(iommu))
4437                 intel_svm_alloc_pasid_tables(iommu);
4438 #endif
4439
4440         if (dmaru->ignored) {
4441                 /*
4442                  * we always have to disable PMRs or DMA may fail on this device
4443                  */
4444                 if (force_on)
4445                         iommu_disable_protect_mem_regions(iommu);
4446                 return 0;
4447         }
4448
4449         intel_iommu_init_qi(iommu);
4450         iommu_flush_write_buffer(iommu);
4451
4452 #ifdef CONFIG_INTEL_IOMMU_SVM
4453         if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4454                 ret = intel_svm_enable_prq(iommu);
4455                 if (ret)
4456                         goto disable_iommu;
4457         }
4458 #endif
4459         ret = dmar_set_interrupt(iommu);
4460         if (ret)
4461                 goto disable_iommu;
4462
4463         iommu_set_root_entry(iommu);
4464         iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4465         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4466         iommu_enable_translation(iommu);
4467
4468         iommu_disable_protect_mem_regions(iommu);
4469         return 0;
4470
4471 disable_iommu:
4472         disable_dmar_iommu(iommu);
4473 out:
4474         free_dmar_iommu(iommu);
4475         return ret;
4476 }
4477
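/*
 * DMAR unit hot-plug entry point: bring the IOMMU online on insertion,
 * otherwise disable it and release its resources.
 */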
4478 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4479 {
4480         int ret = 0;
4481         struct intel_iommu *iommu = dmaru->iommu;
4482
4483         if (!intel_iommu_enabled)
4484                 return 0;
4485         if (iommu == NULL)
4486                 return -EINVAL;
4487
4488         if (insert) {
4489                 ret = intel_iommu_add(dmaru);
4490         } else {
4491                 disable_dmar_iommu(iommu);
4492                 free_dmar_iommu(iommu);
4493         }
4494
4495         return ret;
4496 }
4497
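/* Release all RMRR and ATSR units, e.g. when initialization fails. */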
4498 static void intel_iommu_free_dmars(void)
4499 {
4500         struct dmar_rmrr_unit *rmrru, *rmrr_n;
4501         struct dmar_atsr_unit *atsru, *atsr_n;
4502
4503         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4504                 list_del(&rmrru->list);
4505                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4506                 kfree(rmrru->resv);
4507                 kfree(rmrru);
4508         }
4509
4510         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4511                 list_del(&atsru->list);
4512                 intel_iommu_free_atsr(atsru);
4513         }
4514 }
4515
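/*
 * Decide whether ATS may be used for @dev: integrated devices (no upstream
 * bridge) are allowed, devices behind a conventional PCI bridge are not,
 * and anything else is looked up via its root port in the ATSR units.
 */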
4516 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4517 {
4518         int i, ret = 1;
4519         struct pci_bus *bus;
4520         struct pci_dev *bridge = NULL;
4521         struct device *tmp;
4522         struct acpi_dmar_atsr *atsr;
4523         struct dmar_atsr_unit *atsru;
4524
4525         dev = pci_physfn(dev);
4526         for (bus = dev->bus; bus; bus = bus->parent) {
4527                 bridge = bus->self;
4528                 /* If it's an integrated device, allow ATS */
4529                 if (!bridge)
4530                         return 1;
4531                 /* Connected via non-PCIe: no ATS */
4532                 if (!pci_is_pcie(bridge) ||
4533                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4534                         return 0;
4535                 /* If we found the root port, look it up in the ATSR */
4536                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4537                         break;
4538         }
4539
4540         rcu_read_lock();
4541         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4542                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4543                 if (atsr->segment != pci_domain_nr(dev->bus))
4544                         continue;
4545
4546                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4547                         if (tmp == &bridge->dev)
4548                                 goto out;
4549
4550                 if (atsru->include_all)
4551                         goto out;
4552         }
4553         ret = 0;
4554 out:
4555         rcu_read_unlock();
4556
4557         return ret;
4558 }
4559
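/*
 * Keep the RMRR and ATSR device scope lists in sync with PCI device
 * add/remove notifications.
 */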
4560 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4561 {
4562         int ret = 0;
4563         struct dmar_rmrr_unit *rmrru;
4564         struct dmar_atsr_unit *atsru;
4565         struct acpi_dmar_atsr *atsr;
4566         struct acpi_dmar_reserved_memory *rmrr;
4567
4568         if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4569                 return 0;
4570
4571         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4572                 rmrr = container_of(rmrru->hdr,
4573                                     struct acpi_dmar_reserved_memory, header);
4574                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4575                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4576                                 ((void *)rmrr) + rmrr->header.length,
4577                                 rmrr->segment, rmrru->devices,
4578                                 rmrru->devices_cnt);
4579                         if (ret < 0)
4580                                 return ret;
4581                 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4582                         dmar_remove_dev_scope(info, rmrr->segment,
4583                                 rmrru->devices, rmrru->devices_cnt);
4584                 }
4585         }
4586
4587         list_for_each_entry(atsru, &dmar_atsr_units, list) {
4588                 if (atsru->include_all)
4589                         continue;
4590
4591                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4592                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4593                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4594                                         (void *)atsr + atsr->header.length,
4595                                         atsr->segment, atsru->devices,
4596                                         atsru->devices_cnt);
4597                         if (ret > 0)
4598                                 break;
4599                         else if (ret < 0)
4600                                 return ret;
4601                 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4602                         if (dmar_remove_dev_scope(info, atsr->segment,
4603                                         atsru->devices, atsru->devices_cnt))
4604                                 break;
4605                 }
4606         }
4607
4608         return 0;
4609 }
4610
4611 /*
4612  * Here we only respond to the action of a device being unbound from its driver.
4613  *
4614  * A newly added device is not attached to its DMAR domain here yet; that
4615  * happens when the device is first mapped to an iova.
4616  */
4617 static int device_notifier(struct notifier_block *nb,
4618                                   unsigned long action, void *data)
4619 {
4620         struct device *dev = data;
4621         struct dmar_domain *domain;
4622
4623         if (iommu_dummy(dev))
4624                 return 0;
4625
4626         if (action != BUS_NOTIFY_REMOVED_DEVICE)
4627                 return 0;
4628
4629         domain = find_domain(dev);
4630         if (!domain)
4631                 return 0;
4632
4633         dmar_remove_one_dev_info(domain, dev);
4634         if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4635                 domain_exit(domain);
4636
4637         return 0;
4638 }
4639
4640 static struct notifier_block device_nb = {
4641         .notifier_call = device_notifier,
4642 };
4643
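/*
 * Memory hot-plug notifier: extend the static identity map when memory goes
 * online, and tear down the corresponding IOVAs and mappings when it is
 * taken offline again.
 */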
4644 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4645                                        unsigned long val, void *v)
4646 {
4647         struct memory_notify *mhp = v;
4648         unsigned long long start, end;
4649         unsigned long start_vpfn, last_vpfn;
4650
4651         switch (val) {
4652         case MEM_GOING_ONLINE:
4653                 start = mhp->start_pfn << PAGE_SHIFT;
4654                 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4655                 if (iommu_domain_identity_map(si_domain, start, end)) {
4656                         pr_warn("Failed to build identity map for [%llx-%llx]\n",
4657                                 start, end);
4658                         return NOTIFY_BAD;
4659                 }
4660                 break;
4661
4662         case MEM_OFFLINE:
4663         case MEM_CANCEL_ONLINE:
4664                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4665                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4666                 while (start_vpfn <= last_vpfn) {
4667                         struct iova *iova;
4668                         struct dmar_drhd_unit *drhd;
4669                         struct intel_iommu *iommu;
4670                         struct page *freelist;
4671
4672                         iova = find_iova(&si_domain->iovad, start_vpfn);
4673                         if (iova == NULL) {
4674                                 pr_debug("Failed to get IOVA for PFN %lx\n",
4675                                          start_vpfn);
4676                                 break;
4677                         }
4678
4679                         iova = split_and_remove_iova(&si_domain->iovad, iova,
4680                                                      start_vpfn, last_vpfn);
4681                         if (iova == NULL) {
4682                                 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4683                                         start_vpfn, last_vpfn);
4684                                 return NOTIFY_BAD;
4685                         }
4686
4687                         freelist = domain_unmap(si_domain, iova->pfn_lo,
4688                                                iova->pfn_hi);
4689
4690                         rcu_read_lock();
4691                         for_each_active_iommu(iommu, drhd)
4692                                 iommu_flush_iotlb_psi(iommu, si_domain,
4693                                         iova->pfn_lo, iova_size(iova),
4694                                         !freelist, 0);
4695                         rcu_read_unlock();
4696                         dma_free_pagelist(freelist);
4697
4698                         start_vpfn = iova->pfn_hi + 1;
4699                         free_iova_mem(iova);
4700                 }
4701                 break;
4702         }
4703
4704         return NOTIFY_OK;
4705 }
4706
4707 static struct notifier_block intel_iommu_memory_nb = {
4708         .notifier_call = intel_iommu_memory_notifier,
4709         .priority = 0
4710 };
4711
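/* Free the given CPU's cached IOVAs in every domain on every IOMMU. */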
4712 static void free_all_cpu_cached_iovas(unsigned int cpu)
4713 {
4714         int i;
4715
4716         for (i = 0; i < g_num_of_iommus; i++) {
4717                 struct intel_iommu *iommu = g_iommus[i];
4718                 struct dmar_domain *domain;
4719                 int did;
4720
4721                 if (!iommu)
4722                         continue;
4723
4724                 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4725                         domain = get_iommu_domain(iommu, (u16)did);
4726
4727                         if (!domain)
4728                                 continue;
4729                         free_cpu_cached_iovas(cpu, &domain->iovad);
4730                 }
4731         }
4732 }
4733
4734 static int intel_iommu_cpu_dead(unsigned int cpu)
4735 {
4736         free_all_cpu_cached_iovas(cpu);
4737         flush_unmaps_timeout(cpu);
4738         return 0;
4739 }
4740
4741 static void intel_disable_iommus(void)
4742 {
4743         struct intel_iommu *iommu = NULL;
4744         struct dmar_drhd_unit *drhd;
4745
4746         for_each_iommu(iommu, drhd)
4747                 iommu_disable_translation(iommu);
4748 }
4749
4750 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4751 {
4752         return container_of(dev, struct intel_iommu, iommu.dev);
4753 }
4754
4755 static ssize_t intel_iommu_show_version(struct device *dev,
4756                                         struct device_attribute *attr,
4757                                         char *buf)
4758 {
4759         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4760         u32 ver = readl(iommu->reg + DMAR_VER_REG);
4761         return sprintf(buf, "%d:%d\n",
4762                        DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4763 }
4764 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4765
4766 static ssize_t intel_iommu_show_address(struct device *dev,
4767                                         struct device_attribute *attr,
4768                                         char *buf)
4769 {
4770         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4771         return sprintf(buf, "%llx\n", iommu->reg_phys);
4772 }
4773 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4774
4775 static ssize_t intel_iommu_show_cap(struct device *dev,
4776                                     struct device_attribute *attr,
4777                                     char *buf)
4778 {
4779         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4780         return sprintf(buf, "%llx\n", iommu->cap);
4781 }
4782 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4783
4784 static ssize_t intel_iommu_show_ecap(struct device *dev,
4785                                     struct device_attribute *attr,
4786                                     char *buf)
4787 {
4788         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4789         return sprintf(buf, "%llx\n", iommu->ecap);
4790 }
4791 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4792
4793 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4794                                       struct device_attribute *attr,
4795                                       char *buf)
4796 {
4797         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4798         return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4799 }
4800 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4801
4802 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4803                                            struct device_attribute *attr,
4804                                            char *buf)
4805 {
4806         struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4807         return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4808                                                   cap_ndoms(iommu->cap)));
4809 }
4810 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4811
4812 static struct attribute *intel_iommu_attrs[] = {
4813         &dev_attr_version.attr,
4814         &dev_attr_address.attr,
4815         &dev_attr_cap.attr,
4816         &dev_attr_ecap.attr,
4817         &dev_attr_domains_supported.attr,
4818         &dev_attr_domains_used.attr,
4819         NULL,
4820 };
4821
4822 static struct attribute_group intel_iommu_group = {
4823         .name = "intel-iommu",
4824         .attrs = intel_iommu_attrs,
4825 };
4826
4827 const struct attribute_group *intel_iommu_groups[] = {
4828         &intel_iommu_group,
4829         NULL,
4830 };
4831
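/*
 * Main VT-d initialization: parse the DMAR table and device scopes, set up
 * all IOMMUs via init_dmars(), install the DMA and IOMMU ops, and register
 * the notifiers used for device, memory and CPU hot-plug.
 */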
4832 int __init intel_iommu_init(void)
4833 {
4834         int ret = -ENODEV;
4835         struct dmar_drhd_unit *drhd;
4836         struct intel_iommu *iommu;
4837
4838         /* VT-d is required for a TXT/tboot launch, so enforce that */
4839         force_on = tboot_force_iommu();
4840
4841         if (iommu_init_mempool()) {
4842                 if (force_on)
4843                         panic("tboot: Failed to initialize iommu memory\n");
4844                 return -ENOMEM;
4845         }
4846
4847         down_write(&dmar_global_lock);
4848         if (dmar_table_init()) {
4849                 if (force_on)
4850                         panic("tboot: Failed to initialize DMAR table\n");
4851                 goto out_free_dmar;
4852         }
4853
4854         if (dmar_dev_scope_init() < 0) {
4855                 if (force_on)
4856                         panic("tboot: Failed to initialize DMAR device scope\n");
4857                 goto out_free_dmar;
4858         }
4859
4860         if (no_iommu || dmar_disabled) {
4861                 /*
4862                  * We exit the function here to ensure the IOMMU's remapping and
4863                  * mempool aren't set up, which means that the IOMMU's PMRs
4864                  * won't be disabled via the call to init_dmars(). So disable
4865                  * them explicitly here. The PMRs were set up by tboot prior to
4866                  * calling SENTER, but the kernel is expected to reset/tear
4867                  * down the PMRs.
4868                  */
4869                 if (intel_iommu_tboot_noforce) {
4870                         for_each_iommu(iommu, drhd)
4871                                 iommu_disable_protect_mem_regions(iommu);
4872                 }
4873
4874                 /*
4875                  * Make sure the IOMMUs are switched off, even when we
4876                  * boot into a kexec kernel and the previous kernel left
4877                  * them enabled
4878                  */
4879                 intel_disable_iommus();
4880                 goto out_free_dmar;
4881         }
4882
4883         if (list_empty(&dmar_rmrr_units))
4884                 pr_info("No RMRR found\n");
4885
4886         if (list_empty(&dmar_atsr_units))
4887                 pr_info("No ATSR found\n");
4888
4889         if (dmar_init_reserved_ranges()) {
4890                 if (force_on)
4891                         panic("tboot: Failed to reserve iommu ranges\n");
4892                 goto out_free_reserved_range;
4893         }
4894
4895         init_no_remapping_devices();
4896
4897         ret = init_dmars();
4898         if (ret) {
4899                 if (force_on)
4900                         panic("tboot: Failed to initialize DMARs\n");
4901                 pr_err("Initialization failed\n");
4902                 goto out_free_reserved_range;
4903         }
4904         up_write(&dmar_global_lock);
4905         pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4906
4907 #ifdef CONFIG_SWIOTLB
4908         swiotlb = 0;
4909 #endif
4910         dma_ops = &intel_dma_ops;
4911
4912         init_iommu_pm_ops();
4913
4914         for_each_active_iommu(iommu, drhd) {
4915                 iommu_device_sysfs_add(&iommu->iommu, NULL,
4916                                        intel_iommu_groups,
4917                                        "%s", iommu->name);
4918                 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
4919                 iommu_device_register(&iommu->iommu);
4920         }
4921
4922         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4923         bus_register_notifier(&pci_bus_type, &device_nb);
4924         if (si_domain && !hw_pass_through)
4925                 register_memory_notifier(&intel_iommu_memory_nb);
4926         cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
4927                           intel_iommu_cpu_dead);
4928         intel_iommu_enabled = 1;
4929
4930         return 0;
4931
4932 out_free_reserved_range:
4933         put_iova_domain(&reserved_iova_list);
4934 out_free_dmar:
4935         intel_iommu_free_dmars();
4936         up_write(&dmar_global_lock);
4937         iommu_exit_mempool();
4938         return ret;
4939 }
4940
4941 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4942 {
4943         struct intel_iommu *iommu = opaque;
4944
4945         domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4946         return 0;
4947 }
4948
4949 /*
4950  * NB - intel-iommu lacks any sort of reference counting for the users of
4951  * dependent devices.  If multiple endpoints have intersecting dependent
4952  * devices, unbinding the driver from any one of them will possibly leave
4953  * the others unable to operate.
4954  */
4955 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4956 {
4957         if (!iommu || !dev || !dev_is_pci(dev))
4958                 return;
4959
4960         pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4961 }
4962
4963 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4964 {
4965         struct intel_iommu *iommu;
4966         unsigned long flags;
4967
4968         assert_spin_locked(&device_domain_lock);
4969
4970         if (WARN_ON(!info))
4971                 return;
4972
4973         iommu = info->iommu;
4974
4975         if (info->dev) {
4976                 iommu_disable_dev_iotlb(info);
4977                 domain_context_clear(iommu, info->dev);
4978         }
4979
4980         unlink_domain_info(info);
4981
4982         spin_lock_irqsave(&iommu->lock, flags);
4983         domain_detach_iommu(info->domain, iommu);
4984         spin_unlock_irqrestore(&iommu->lock, flags);
4985
4986         free_devinfo_mem(info);
4987 }
4988
4989 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4990                                      struct device *dev)
4991 {
4992         struct device_domain_info *info;
4993         unsigned long flags;
4994
4995         spin_lock_irqsave(&device_domain_lock, flags);
4996         info = dev->archdata.iommu;
4997         __dmar_remove_one_dev_info(info);
4998         spin_unlock_irqrestore(&device_domain_lock, flags);
4999 }
5000
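/*
 * Initialize a domain allocated through the IOMMU API: set up its IOVA
 * space, derive the address widths from @guest_width and allocate the
 * top-level page table.
 */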
5001 static int md_domain_init(struct dmar_domain *domain, int guest_width)
5002 {
5003         int adjust_width;
5004
5005         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
5006                         DMA_32BIT_PFN);
5007         domain_reserve_special_ranges(domain);
5008
5009         /* calculate AGAW */
5010         domain->gaw = guest_width;
5011         adjust_width = guestwidth_to_adjustwidth(guest_width);
5012         domain->agaw = width_to_agaw(adjust_width);
5013
5014         domain->iommu_coherency = 0;
5015         domain->iommu_snooping = 0;
5016         domain->iommu_superpage = 0;
5017         domain->max_addr = 0;
5018
5019         /* always allocate the top pgd */
5020         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
5021         if (!domain->pgd)
5022                 return -ENOMEM;
5023         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
5024         return 0;
5025 }
5026
5027 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
5028 {
5029         struct dmar_domain *dmar_domain;
5030         struct iommu_domain *domain;
5031
5032         if (type != IOMMU_DOMAIN_UNMANAGED)
5033                 return NULL;
5034
5035         dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
5036         if (!dmar_domain) {
5037                 pr_err("Can't allocate dmar_domain\n");
5038                 return NULL;
5039         }
5040         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
5041                 pr_err("Domain initialization failed\n");
5042                 domain_exit(dmar_domain);
5043                 return NULL;
5044         }
5045         domain_update_iommu_cap(dmar_domain);
5046
5047         domain = &dmar_domain->domain;
5048         domain->geometry.aperture_start = 0;
5049         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
5050         domain->geometry.force_aperture = true;
5051
5052         return domain;
5053 }
5054
5055 static void intel_iommu_domain_free(struct iommu_domain *domain)
5056 {
5057         domain_exit(to_dmar_domain(domain));
5058 }
5059
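/*
 * Attach @dev to an IOMMU API domain: refuse devices locked to an RMRR,
 * detach any existing DMA API mapping, make sure the IOMMU can address
 * everything already mapped in the domain, and then add the device.
 */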
5060 static int intel_iommu_attach_device(struct iommu_domain *domain,
5061                                      struct device *dev)
5062 {
5063         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5064         struct intel_iommu *iommu;
5065         int addr_width;
5066         u8 bus, devfn;
5067
5068         if (device_is_rmrr_locked(dev)) {
5069                 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
5070                 return -EPERM;
5071         }
5072
5073         /* normally dev is not mapped */
5074         if (unlikely(domain_context_mapped(dev))) {
5075                 struct dmar_domain *old_domain;
5076
5077                 old_domain = find_domain(dev);
5078                 if (old_domain) {
5079                         rcu_read_lock();
5080                         dmar_remove_one_dev_info(old_domain, dev);
5081                         rcu_read_unlock();
5082
5083                         if (!domain_type_is_vm_or_si(old_domain) &&
5084                              list_empty(&old_domain->devices))
5085                                 domain_exit(old_domain);
5086                 }
5087         }
5088
5089         iommu = device_to_iommu(dev, &bus, &devfn);
5090         if (!iommu)
5091                 return -ENODEV;
5092
5093         /* check if this iommu agaw is sufficient for max mapped address */
5094         addr_width = agaw_to_width(iommu->agaw);
5095         if (addr_width > cap_mgaw(iommu->cap))
5096                 addr_width = cap_mgaw(iommu->cap);
5097
5098         if (dmar_domain->max_addr > (1LL << addr_width)) {
5099                 pr_err("%s: iommu width (%d) is not "
5100                        "sufficient for the mapped address (%llx)\n",
5101                        __func__, addr_width, dmar_domain->max_addr);
5102                 return -EFAULT;
5103         }
5104         dmar_domain->gaw = addr_width;
5105
5106         /*
5107          * Knock out extra levels of page tables if necessary
5108          */
5109         while (iommu->agaw < dmar_domain->agaw) {
5110                 struct dma_pte *pte;
5111
5112                 pte = dmar_domain->pgd;
5113                 if (dma_pte_present(pte)) {
5114                         dmar_domain->pgd = (struct dma_pte *)
5115                                 phys_to_virt(dma_pte_addr(pte));
5116                         free_pgtable_page(pte);
5117                 }
5118                 dmar_domain->agaw--;
5119         }
5120
5121         return domain_add_dev_info(dmar_domain, dev);
5122 }
5123
5124 static void intel_iommu_detach_device(struct iommu_domain *domain,
5125                                       struct device *dev)
5126 {
5127         dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
5128 }
5129
5130 static int intel_iommu_map(struct iommu_domain *domain,
5131                            unsigned long iova, phys_addr_t hpa,
5132                            size_t size, int iommu_prot)
5133 {
5134         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5135         u64 max_addr;
5136         int prot = 0;
5137         int ret;
5138
5139         if (iommu_prot & IOMMU_READ)
5140                 prot |= DMA_PTE_READ;
5141         if (iommu_prot & IOMMU_WRITE)
5142                 prot |= DMA_PTE_WRITE;
5143         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5144                 prot |= DMA_PTE_SNP;
5145
5146         max_addr = iova + size;
5147         if (dmar_domain->max_addr < max_addr) {
5148                 u64 end;
5149
5150                 /* check if minimum agaw is sufficient for mapped address */
5151                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5152                 if (end < max_addr) {
5153                         pr_err("%s: iommu width (%d) is not "
5154                                "sufficient for the mapped address (%llx)\n",
5155                                __func__, dmar_domain->gaw, max_addr);
5156                         return -EFAULT;
5157                 }
5158                 dmar_domain->max_addr = max_addr;
5159         }
5160         /* Round up size to next multiple of PAGE_SIZE, if it and
5161            the low bits of hpa would take us onto the next page */
5162         size = aligned_nrpages(hpa, size);
5163         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5164                                  hpa >> VTD_PAGE_SHIFT, size, prot);
5165         return ret;
5166 }
5167
5168 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5169                                 unsigned long iova, size_t size)
5170 {
5171         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5172         struct page *freelist = NULL;
5173         struct intel_iommu *iommu;
5174         unsigned long start_pfn, last_pfn;
5175         unsigned int npages;
5176         int iommu_id, level = 0;
5177
5178         /* Cope with horrid API which requires us to unmap more than the
5179            size argument if it happens to be a large-page mapping. */
5180         BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5181
5182         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5183                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5184
5185         start_pfn = iova >> VTD_PAGE_SHIFT;
5186         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5187
5188         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5189
5190         npages = last_pfn - start_pfn + 1;
5191
5192         for_each_domain_iommu(iommu_id, dmar_domain) {
5193                 iommu = g_iommus[iommu_id];
5194
5195                 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5196                                       start_pfn, npages, !freelist, 0);
5197         }
5198
5199         dma_free_pagelist(freelist);
5200
5201         if (dmar_domain->max_addr == iova + size)
5202                 dmar_domain->max_addr = iova;
5203
5204         return size;
5205 }
5206
5207 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5208                                             dma_addr_t iova)
5209 {
5210         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5211         struct dma_pte *pte;
5212         int level = 0;
5213         u64 phys = 0;
5214
5215         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5216         if (pte)
5217                 phys = dma_pte_addr(pte);
5218
5219         return phys;
5220 }
5221
5222 static bool intel_iommu_capable(enum iommu_cap cap)
5223 {
5224         if (cap == IOMMU_CAP_CACHE_COHERENCY)
5225                 return domain_update_iommu_snooping(NULL) == 1;
5226         if (cap == IOMMU_CAP_INTR_REMAP)
5227                 return irq_remapping_enabled == 1;
5228
5229         return false;
5230 }
5231
5232 static int intel_iommu_add_device(struct device *dev)
5233 {
5234         struct intel_iommu *iommu;
5235         struct iommu_group *group;
5236         u8 bus, devfn;
5237
5238         iommu = device_to_iommu(dev, &bus, &devfn);
5239         if (!iommu)
5240                 return -ENODEV;
5241
5242         iommu_device_link(&iommu->iommu, dev);
5243
5244         group = iommu_group_get_for_dev(dev);
5245
5246         if (IS_ERR(group))
5247                 return PTR_ERR(group);
5248
5249         iommu_group_put(group);
5250         return 0;
5251 }
5252
5253 static void intel_iommu_remove_device(struct device *dev)
5254 {
5255         struct intel_iommu *iommu;
5256         u8 bus, devfn;
5257
5258         iommu = device_to_iommu(dev, &bus, &devfn);
5259         if (!iommu)
5260                 return;
5261
5262         iommu_group_remove_device(dev);
5263
5264         iommu_device_unlink(&iommu->iommu, dev);
5265 }
5266
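/*
 * Report the reserved regions for @device: any RMRRs whose scope includes
 * it, plus the IOAPIC/MSI range which is always reserved.
 */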
5267 static void intel_iommu_get_resv_regions(struct device *device,
5268                                          struct list_head *head)
5269 {
5270         struct iommu_resv_region *reg;
5271         struct dmar_rmrr_unit *rmrr;
5272         struct device *i_dev;
5273         int i;
5274
5275         rcu_read_lock();
5276         for_each_rmrr_units(rmrr) {
5277                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5278                                           i, i_dev) {
5279                         if (i_dev != device)
5280                                 continue;
5281
5282                         list_add_tail(&rmrr->resv->list, head);
5283                 }
5284         }
5285         rcu_read_unlock();
5286
5287         reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5288                                       IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5289                                       0, IOMMU_RESV_MSI);
5290         if (!reg)
5291                 return;
5292         list_add_tail(&reg->list, head);
5293 }
5294
5295 static void intel_iommu_put_resv_regions(struct device *dev,
5296                                          struct list_head *head)
5297 {
5298         struct iommu_resv_region *entry, *next;
5299
5300         list_for_each_entry_safe(entry, next, head, list) {
5301                 if (entry->type == IOMMU_RESV_RESERVED)
5302                         kfree(entry);
5303         }
5304 }
5305
5306 #ifdef CONFIG_INTEL_IOMMU_SVM
5307 #define MAX_NR_PASID_BITS (20)
5308 static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
5309 {
5310         /*
5311          * Convert ecap_pss to the extended context entry pts encoding, also
5312          * respect the soft pasid_max value set by the iommu.
5313          * - number of PASID bits = ecap_pss + 1
5314          * - number of PASID table entries = 2^(pts + 5)
5315          * Therefore, pts = ecap_pss - 4
5316          * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
5317          */
5318         if (ecap_pss(iommu->ecap) < 5)
5319                 return 0;
5320
5321         /* pasid_max is the actual number of entries, not the number of bits */
5322         return find_first_bit((unsigned long *)&iommu->pasid_max,
5323                         MAX_NR_PASID_BITS) - 5;
5324 }
5325
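/*
 * Enable PASID support for @sdev: point its extended context entry at the
 * IOMMU's PASID (state) tables, convert a pass-through entry to a
 * PASID-capable translation type, and record the ATS parameters used for
 * device-IOTLB invalidations.
 */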
5326 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
5327 {
5328         struct device_domain_info *info;
5329         struct context_entry *context;
5330         struct dmar_domain *domain;
5331         unsigned long flags;
5332         u64 ctx_lo;
5333         int ret;
5334
5335         domain = get_valid_domain_for_dev(sdev->dev);
5336         if (!domain)
5337                 return -EINVAL;
5338
5339         spin_lock_irqsave(&device_domain_lock, flags);
5340         spin_lock(&iommu->lock);
5341
5342         ret = -EINVAL;
5343         info = sdev->dev->archdata.iommu;
5344         if (!info || !info->pasid_supported)
5345                 goto out;
5346
5347         context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5348         if (WARN_ON(!context))
5349                 goto out;
5350
5351         ctx_lo = context[0].lo;
5352
5353         sdev->did = domain->iommu_did[iommu->seq_id];
5354         sdev->sid = PCI_DEVID(info->bus, info->devfn);
5355
5356         if (!(ctx_lo & CONTEXT_PASIDE)) {
5357                 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
5358                 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
5359                         intel_iommu_get_pts(iommu);
5360
5361                 wmb();
5362                 /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
5363                  * extended to permit requests-with-PASID if the PASIDE bit
5364                  * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
5365                  * however, the PASIDE bit is ignored and requests-with-PASID
5366                  * are unconditionally blocked, which makes less sense.
5367                  * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
5368                  * "guest mode" translation types depending on whether ATS
5369                  * is available or not. Annoyingly, we can't use the new
5370                  * modes *unless* PASIDE is set. */
5371                 if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
5372                         ctx_lo &= ~CONTEXT_TT_MASK;
5373                         if (info->ats_supported)
5374                                 ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
5375                         else
5376                                 ctx_lo |= CONTEXT_TT_PT_PASID << 2;
5377                 }
5378                 ctx_lo |= CONTEXT_PASIDE;
5379                 if (iommu->pasid_state_table)
5380                         ctx_lo |= CONTEXT_DINVE;
5381                 if (info->pri_supported)
5382                         ctx_lo |= CONTEXT_PRS;
5383                 context[0].lo = ctx_lo;
5384                 wmb();
5385                 iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
5386                                            DMA_CCMD_MASK_NOBIT,
5387                                            DMA_CCMD_DEVICE_INVL);
5388         }
5389
5390         /* Enable PASID support in the device, if it wasn't already */
5391         if (!info->pasid_enabled)
5392                 iommu_enable_dev_iotlb(info);
5393
5394         if (info->ats_enabled) {
5395                 sdev->dev_iotlb = 1;
5396                 sdev->qdep = info->ats_qdep;
5397                 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
5398                         sdev->qdep = 0;
5399         }
5400         ret = 0;
5401
5402  out:
5403         spin_unlock(&iommu->lock);
5404         spin_unlock_irqrestore(&device_domain_lock, flags);
5405
5406         return ret;
5407 }
5408
5409 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5410 {
5411         struct intel_iommu *iommu;
5412         u8 bus, devfn;
5413
5414         if (iommu_dummy(dev)) {
5415                 dev_warn(dev,
5416                          "No IOMMU translation for device; cannot enable SVM\n");
5417                 return NULL;
5418         }
5419
5420         iommu = device_to_iommu(dev, &bus, &devfn);
5421         if (!iommu) {
5422                 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
5423                 return NULL;
5424         }
5425
5426         if (!iommu->pasid_table) {
5427                 dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
5428                 return NULL;
5429         }
5430
5431         return iommu;
5432 }
5433 #endif /* CONFIG_INTEL_IOMMU_SVM */
5434
5435 const struct iommu_ops intel_iommu_ops = {
5436         .capable                = intel_iommu_capable,
5437         .domain_alloc           = intel_iommu_domain_alloc,
5438         .domain_free            = intel_iommu_domain_free,
5439         .attach_dev             = intel_iommu_attach_device,
5440         .detach_dev             = intel_iommu_detach_device,
5441         .map                    = intel_iommu_map,
5442         .unmap                  = intel_iommu_unmap,
5443         .map_sg                 = default_iommu_map_sg,
5444         .iova_to_phys           = intel_iommu_iova_to_phys,
5445         .add_device             = intel_iommu_add_device,
5446         .remove_device          = intel_iommu_remove_device,
5447         .get_resv_regions       = intel_iommu_get_resv_regions,
5448         .put_resv_regions       = intel_iommu_put_resv_regions,
5449         .device_group           = pci_device_group,
5450         .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
5451 };
5452
5453 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5454 {
5455         /* G4x/GM45 integrated gfx dmar support is totally busted. */
5456         pr_info("Disabling IOMMU for graphics on this chipset\n");
5457         dmar_map_gfx = 0;
5458 }
5459
5460 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5461 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5462 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5463 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5464 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5465 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5466 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5467
5468 static void quirk_iommu_rwbf(struct pci_dev *dev)
5469 {
5470         /*
5471          * Mobile 4 Series Chipset neglects to set RWBF capability,
5472          * but needs it. Same seems to hold for the desktop versions.
5473          */
5474         pr_info("Forcing write-buffer flush capability\n");
5475         rwbf_quirk = 1;
5476 }
5477
5478 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5479 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5480 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5481 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5482 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5483 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5484 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5485
5486 #define GGC 0x52
5487 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
5488 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
5489 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
5490 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
5491 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
5492 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
5493 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
5494 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
5495
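/*
 * Ironlake/Calpella: if the BIOS allocated no VT-d enabled graphics stolen
 * memory (no shadow GTT), graphics DMAR cannot work, so disable it;
 * otherwise batched IOTLB flushing is unsafe, so force strict flushing.
 */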
5496 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5497 {
5498         unsigned short ggc;
5499
5500         if (pci_read_config_word(dev, GGC, &ggc))
5501                 return;
5502
5503         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5504                 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5505                 dmar_map_gfx = 0;
5506         } else if (dmar_map_gfx) {
5507                 /* we have to ensure the gfx device is idle before we flush */
5508                 pr_info("Disabling batched IOTLB flush on Ironlake\n");
5509                 intel_iommu_strict = 1;
5510         }
5511 }
5512 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5513 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5514 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5515 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5516
5517 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5518    ISOCH DMAR unit for the Azalia sound device, but not give it any
5519    TLB entries, which causes it to deadlock. Check for that.  We do
5520    this in a function called from init_dmars(), instead of in a PCI
5521    quirk, because we don't want to print the obnoxious "BIOS broken"
5522    message if VT-d is actually disabled.
5523 */
5524 static void __init check_tylersburg_isoch(void)
5525 {
5526         struct pci_dev *pdev;
5527         uint32_t vtisochctrl;
5528
5529         /* If there's no Azalia in the system anyway, forget it. */
5530         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5531         if (!pdev)
5532                 return;
5533         pci_dev_put(pdev);
5534
5535         /* System Management Registers. Might be hidden, in which case
5536            we can't do the sanity check. But that's OK, because the
5537            known-broken BIOSes _don't_ actually hide it, so far. */
5538         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5539         if (!pdev)
5540                 return;
5541
5542         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5543                 pci_dev_put(pdev);
5544                 return;
5545         }
5546
5547         pci_dev_put(pdev);
5548
5549         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5550         if (vtisochctrl & 1)
5551                 return;
5552
5553         /* Drop all bits other than the number of TLB entries */
5554         vtisochctrl &= 0x1c;
5555
5556         /* If we have the recommended number of TLB entries (16), fine. */
5557         if (vtisochctrl == 0x10)
5558                 return;
5559
5560         /* Zero TLB entries? You get to ride the short bus to school. */
5561         if (!vtisochctrl) {
5562                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5563                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5564                      dmi_get_system_info(DMI_BIOS_VENDOR),
5565                      dmi_get_system_info(DMI_BIOS_VERSION),
5566                      dmi_get_system_info(DMI_PRODUCT_VERSION));
5567                 iommu_identity_mapping |= IDENTMAP_AZALIA;
5568                 return;
5569         }
5570
5571         pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
5572                vtisochctrl);
5573 }