1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  */
19
20 #include <linux/init.h>
21 #include <linux/bitmap.h>
22 #include <linux/debugfs.h>
23 #include <linux/export.h>
24 #include <linux/slab.h>
25 #include <linux/irq.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/pci.h>
29 #include <linux/dmar.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/mempool.h>
32 #include <linux/memory.h>
33 #include <linux/timer.h>
34 #include <linux/iova.h>
35 #include <linux/iommu.h>
36 #include <linux/intel-iommu.h>
37 #include <linux/syscore_ops.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/pci-ats.h>
41 #include <linux/memblock.h>
42 #include <linux/dma-contiguous.h>
43 #include <asm/irq_remapping.h>
44 #include <asm/cacheflush.h>
45 #include <asm/iommu.h>
46
47 #include "irq_remapping.h"
48
49 #define ROOT_SIZE               VTD_PAGE_SIZE
50 #define CONTEXT_SIZE            VTD_PAGE_SIZE
51
52 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
54 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
55
56 #define IOAPIC_RANGE_START      (0xfee00000)
57 #define IOAPIC_RANGE_END        (0xfeefffff)
58 #define IOVA_START_ADDR         (0x1000)
59
60 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
61
62 #define MAX_AGAW_WIDTH 64
63 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
64
65 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
66 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
67
68 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
69    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
70 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
71                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
72 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
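/*
 * For illustration (assuming VTD_PAGE_SHIFT == 12): with the default
 * 48-bit guest address width,
 *
 *      __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1
 *      DOMAIN_MAX_ADDR(48)  == ((1ULL << 36) - 1) << 12
 *
 * On a 64-bit kernel DOMAIN_MAX_PFN(48) is that same 2^36 - 1; on a
 * 32-bit kernel it is clamped to ULONG_MAX so PFNs still fit in an
 * unsigned long.
 */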
73
74 /* IO virtual address start page frame number */
75 #define IOVA_START_PFN          (1)
76
77 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
78 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
79 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
80
81 /* page table handling */
82 #define LEVEL_STRIDE            (9)
83 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
84
85 /*
86  * This bitmap is used to advertise the page sizes our hardware supports
87  * to the IOMMU core, which will then use this information to split
88  * physically contiguous memory regions it is mapping into page sizes
89  * that we support.
90  *
91  * Traditionally the IOMMU core just handed us the mappings directly,
92  * after making sure the size is a power-of-two multiple of 4KiB and that the
93  * mapping has natural alignment.
94  *
95  * To retain this behavior, we currently advertise that we support
96  * all page sizes that are power-of-two multiples of 4KiB.
97  *
98  * If at some point we'd like to utilize the IOMMU core's new behavior,
99  * we could change this to advertise the real page sizes we support.
100  */
101 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
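/*
 * For illustration: the IOMMU core treats bit N of this bitmap as
 * "page size 2^N is supported", so ~0xFFFUL advertises every
 * power-of-two size from 4KiB upwards, e.g.
 *
 *      INTEL_IOMMU_PGSIZES & (1UL << 12)       4KiB
 *      INTEL_IOMMU_PGSIZES & (1UL << 21)       2MiB
 *      INTEL_IOMMU_PGSIZES & (1UL << 30)       1GiB
 *
 * are all non-zero.
 */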
102
103 static inline int agaw_to_level(int agaw)
104 {
105         return agaw + 2;
106 }
107
108 static inline int agaw_to_width(int agaw)
109 {
110         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
111 }
112
113 static inline int width_to_agaw(int width)
114 {
115         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
116 }
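/*
 * For illustration, a quick check of the AGAW conversions above:
 *
 *      width_to_agaw(48) == DIV_ROUND_UP(48 - 30, 9) == 2
 *      agaw_to_level(2)  == 4          (4-level page table)
 *      agaw_to_width(2)  == min(30 + 2 * 9, 64) == 48
 *
 * and likewise width_to_agaw(39) == 1, i.e. a 3-level table.
 */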
117
118 static inline unsigned int level_to_offset_bits(int level)
119 {
120         return (level - 1) * LEVEL_STRIDE;
121 }
122
123 static inline int pfn_level_offset(unsigned long pfn, int level)
124 {
125         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
126 }
127
128 static inline unsigned long level_mask(int level)
129 {
130         return -1UL << level_to_offset_bits(level);
131 }
132
133 static inline unsigned long level_size(int level)
134 {
135         return 1UL << level_to_offset_bits(level);
136 }
137
138 static inline unsigned long align_to_level(unsigned long pfn, int level)
139 {
140         return (pfn + level_size(level) - 1) & level_mask(level);
141 }
142
143 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
144 {
145         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
146 }
147
148 /* VT-d pages must never be _larger_ than MM pages. Otherwise things
149    are never going to work. */
150 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
151 {
152         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
153 }
154
155 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
156 {
157         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
158 }
159 static inline unsigned long page_to_dma_pfn(struct page *pg)
160 {
161         return mm_to_dma_pfn(page_to_pfn(pg));
162 }
163 static inline unsigned long virt_to_dma_pfn(void *p)
164 {
165         return page_to_dma_pfn(virt_to_page(p));
166 }
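/*
 * For illustration: on x86 both PAGE_SHIFT and VTD_PAGE_SHIFT are 12,
 * so the shift above is 0 and mm_to_dma_pfn()/dma_to_mm_pfn() are
 * identity conversions. If MM pages were larger, say 64KiB
 * (PAGE_SHIFT == 16), each MM pfn would cover 16 VT-d pfns:
 *
 *      mm_to_dma_pfn(pfn) == pfn << 4
 */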
167
168 /* global iommu list, set NULL for ignored DMAR units */
169 static struct intel_iommu **g_iommus;
170
171 static void __init check_tylersburg_isoch(void);
172 static int rwbf_quirk;
173
174 /*
175  * set to 1 to panic the kernel if VT-d cannot be successfully enabled
176  * (used when the kernel is launched with TXT)
177  */
178 static int force_on = 0;
179
180 /*
181  * 0: Present
182  * 1-11: Reserved
183  * 12-63: Context Ptr (12 - (haw-1))
184  * 64-127: Reserved
185  */
186 struct root_entry {
187         u64     val;
188         u64     rsvd1;
189 };
190 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
191 static inline bool root_present(struct root_entry *root)
192 {
193         return (root->val & 1);
194 }
195 static inline void set_root_present(struct root_entry *root)
196 {
197         root->val |= 1;
198 }
199 static inline void set_root_value(struct root_entry *root, unsigned long value)
200 {
201         root->val &= ~VTD_PAGE_MASK;
202         root->val |= value & VTD_PAGE_MASK;
203 }
204
205 static inline struct context_entry *
206 get_context_addr_from_root(struct root_entry *root)
207 {
208         return (struct context_entry *)
209                 (root_present(root)?phys_to_virt(
210                 root->val & VTD_PAGE_MASK) :
211                 NULL);
212 }
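/*
 * For illustration, a populated root entry for a given bus looks like
 * (this is what device_to_context_entry() below builds):
 *
 *      root->val == (virt_to_phys(context_table_for_bus) & VTD_PAGE_MASK) | 1
 *
 * i.e. the 4KiB-aligned physical address of that bus's 256-entry
 * context table, with the present bit set. context_table_for_bus is
 * just an illustrative name here.
 */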
213
214 /*
215  * low 64 bits:
216  * 0: present
217  * 1: fault processing disable
218  * 2-3: translation type
219  * 12-63: address space root
220  * high 64 bits:
221  * 0-2: address width
222  * 3-6: avail
223  * 8-23: domain id
224  */
225 struct context_entry {
226         u64 lo;
227         u64 hi;
228 };
229
230 static inline bool context_present(struct context_entry *context)
231 {
232         return (context->lo & 1);
233 }
234 static inline void context_set_present(struct context_entry *context)
235 {
236         context->lo |= 1;
237 }
238
239 static inline void context_set_fault_enable(struct context_entry *context)
240 {
241         context->lo &= (((u64)-1) << 2) | 1;
242 }
243
244 static inline void context_set_translation_type(struct context_entry *context,
245                                                 unsigned long value)
246 {
247         context->lo &= (((u64)-1) << 4) | 3;
248         context->lo |= (value & 3) << 2;
249 }
250
251 static inline void context_set_address_root(struct context_entry *context,
252                                             unsigned long value)
253 {
254         context->lo &= ~VTD_PAGE_MASK;
255         context->lo |= value & VTD_PAGE_MASK;
256 }
257
258 static inline void context_set_address_width(struct context_entry *context,
259                                              unsigned long value)
260 {
261         context->hi |= value & 7;
262 }
263
264 static inline void context_set_domain_id(struct context_entry *context,
265                                          unsigned long value)
266 {
267         context->hi |= (value & ((1 << 16) - 1)) << 8;
268 }
269
270 static inline void context_clear_entry(struct context_entry *context)
271 {
272         context->lo = 0;
273         context->hi = 0;
274 }
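/*
 * For illustration, a sketch of the order in which the helpers above
 * are typically used when a device's context entry is programmed
 * (assuming the CONTEXT_TT_* constants from <linux/intel-iommu.h>):
 *
 *      context_set_domain_id(context, id);
 *      context_set_address_width(context, domain->agaw);
 *      context_set_address_root(context, virt_to_phys(domain->pgd));
 *      context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *      context_set_fault_enable(context);
 *      context_set_present(context);
 */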
275
276 /*
277  * 0: readable
278  * 1: writable
279  * 2-6: reserved
280  * 7: super page
281  * 8-10: available
282  * 11: snoop behavior
283  * 12-63: Host physical address
284  */
285 struct dma_pte {
286         u64 val;
287 };
288
289 static inline void dma_clear_pte(struct dma_pte *pte)
290 {
291         pte->val = 0;
292 }
293
294 static inline u64 dma_pte_addr(struct dma_pte *pte)
295 {
296 #ifdef CONFIG_64BIT
297         return pte->val & VTD_PAGE_MASK;
298 #else
299         /* Must have a full atomic 64-bit read */
300         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
301 #endif
302 }
303
304 static inline bool dma_pte_present(struct dma_pte *pte)
305 {
306         return (pte->val & 3) != 0;
307 }
308
309 static inline bool dma_pte_superpage(struct dma_pte *pte)
310 {
311         return (pte->val & DMA_PTE_LARGE_PAGE);
312 }
313
314 static inline int first_pte_in_page(struct dma_pte *pte)
315 {
316         return !((unsigned long)pte & ~VTD_PAGE_MASK);
317 }
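/*
 * For illustration: a present, writable last-level PTE looks like
 *
 *      pte->val == (host_phys_addr & VTD_PAGE_MASK) | DMA_PTE_READ | DMA_PTE_WRITE
 *
 * A page-table page holds VTD_PAGE_SIZE / sizeof(struct dma_pte) == 512
 * entries, which is what first_pte_in_page() relies on to detect that a
 * walk has stepped onto the first entry of the next table.
 */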
318
319 /*
320  * This domain is a static identity-mapping domain.
321  *      1. This domain creates a static 1:1 mapping of all usable memory.
322  *      2. It maps to each iommu if successful.
323  *      3. Each iommu maps to this domain if successful.
324  */
325 static struct dmar_domain *si_domain;
326 static int hw_pass_through = 1;
327
328 /* domain represents a virtual machine; more than one device
329  * across iommus may be owned by one domain, e.g. a kvm guest.
330  */
331 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)
332
333 /* si_domain contains multiple devices */
334 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)
335
336 struct dmar_domain {
337         int     id;                     /* domain id */
338         int     nid;                    /* node id */
339         DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
340                                         /* bitmap of iommus this domain uses*/
341
342         struct list_head devices;       /* all devices' list */
343         struct iova_domain iovad;       /* iova's that belong to this domain */
344
345         struct dma_pte  *pgd;           /* virtual address */
346         int             gaw;            /* max guest address width */
347
348         /* adjusted guest address width, 0 is level 2 30-bit */
349         int             agaw;
350
351         int             flags;          /* flags to find out type of domain */
352
353         int             iommu_coherency;/* indicate coherency of iommu access */
354         int             iommu_snooping; /* indicate snooping control feature*/
355         int             iommu_count;    /* reference count of iommu */
356         int             iommu_superpage;/* Level of superpages supported:
357                                            0 == 4KiB (no superpages), 1 == 2MiB,
358                                            2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
359         spinlock_t      iommu_lock;     /* protect iommu set in domain */
360         u64             max_addr;       /* maximum mapped address */
361 };
362
363 /* PCI domain-device relationship */
364 struct device_domain_info {
365         struct list_head link;  /* link to domain siblings */
366         struct list_head global; /* link to global list */
367         u8 bus;                 /* PCI bus number */
368         u8 devfn;               /* PCI devfn number */
369         struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
370         struct intel_iommu *iommu; /* IOMMU used by this device */
371         struct dmar_domain *domain; /* pointer to domain */
372 };
373
374 struct dmar_rmrr_unit {
375         struct list_head list;          /* list of rmrr units   */
376         struct acpi_dmar_header *hdr;   /* ACPI header          */
377         u64     base_address;           /* reserved base address*/
378         u64     end_address;            /* reserved end address */
379         struct dmar_dev_scope *devices; /* target devices */
380         int     devices_cnt;            /* target device count */
381 };
382
383 struct dmar_atsr_unit {
384         struct list_head list;          /* list of ATSR units */
385         struct acpi_dmar_header *hdr;   /* ACPI header */
386         struct dmar_dev_scope *devices; /* target devices */
387         int devices_cnt;                /* target device count */
388         u8 include_all:1;               /* include all ports */
389 };
390
391 static LIST_HEAD(dmar_atsr_units);
392 static LIST_HEAD(dmar_rmrr_units);
393
394 #define for_each_rmrr_units(rmrr) \
395         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
396
397 static void flush_unmaps_timeout(unsigned long data);
398
399 static DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
400
401 #define HIGH_WATER_MARK 250
402 struct deferred_flush_tables {
403         int next;
404         struct iova *iova[HIGH_WATER_MARK];
405         struct dmar_domain *domain[HIGH_WATER_MARK];
406         struct page *freelist[HIGH_WATER_MARK];
407 };
408
409 static struct deferred_flush_tables *deferred_flush;
410
411 /* number of entries in g_iommus[] */
412 static int g_num_of_iommus;
413
414 static DEFINE_SPINLOCK(async_umap_flush_lock);
415 static LIST_HEAD(unmaps_to_do);
416
417 static int timer_on;
418 static long list_size;
419
420 static void domain_exit(struct dmar_domain *domain);
421 static void domain_remove_dev_info(struct dmar_domain *domain);
422 static void domain_remove_one_dev_info(struct dmar_domain *domain,
423                                        struct device *dev);
424 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
425                                            struct device *dev);
426 static int domain_detach_iommu(struct dmar_domain *domain,
427                                struct intel_iommu *iommu);
428
429 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
430 int dmar_disabled = 0;
431 #else
432 int dmar_disabled = 1;
433 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
434
435 int intel_iommu_enabled = 0;
436 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
437
438 static int dmar_map_gfx = 1;
439 static int dmar_forcedac;
440 static int intel_iommu_strict;
441 static int intel_iommu_superpage = 1;
442
443 int intel_iommu_gfx_mapped;
444 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
445
446 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
447 static DEFINE_SPINLOCK(device_domain_lock);
448 static LIST_HEAD(device_domain_list);
449
450 static const struct iommu_ops intel_iommu_ops;
451
452 static int __init intel_iommu_setup(char *str)
453 {
454         if (!str)
455                 return -EINVAL;
456         while (*str) {
457                 if (!strncmp(str, "on", 2)) {
458                         dmar_disabled = 0;
459                         printk(KERN_INFO "Intel-IOMMU: enabled\n");
460                 } else if (!strncmp(str, "off", 3)) {
461                         dmar_disabled = 1;
462                         printk(KERN_INFO "Intel-IOMMU: disabled\n");
463                 } else if (!strncmp(str, "igfx_off", 8)) {
464                         dmar_map_gfx = 0;
465                         printk(KERN_INFO
466                                 "Intel-IOMMU: disable GFX device mapping\n");
467                 } else if (!strncmp(str, "forcedac", 8)) {
468                         printk(KERN_INFO
469                                 "Intel-IOMMU: Forcing DAC for PCI devices\n");
470                         dmar_forcedac = 1;
471                 } else if (!strncmp(str, "strict", 6)) {
472                         printk(KERN_INFO
473                                 "Intel-IOMMU: disable batched IOTLB flush\n");
474                         intel_iommu_strict = 1;
475                 } else if (!strncmp(str, "sp_off", 6)) {
476                         printk(KERN_INFO
477                                 "Intel-IOMMU: disable supported super page\n");
478                         intel_iommu_superpage = 0;
479                 }
480
481                 str += strcspn(str, ",");
482                 while (*str == ',')
483                         str++;
484         }
485         return 0;
486 }
487 __setup("intel_iommu=", intel_iommu_setup);
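/*
 * For illustration, a boot command line of
 *
 *      intel_iommu=on,igfx_off,strict
 *
 * is parsed above into dmar_disabled == 0, dmar_map_gfx == 0 and
 * intel_iommu_strict == 1; unrecognised tokens are silently skipped.
 */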
488
489 static struct kmem_cache *iommu_domain_cache;
490 static struct kmem_cache *iommu_devinfo_cache;
491
492 static inline void *alloc_pgtable_page(int node)
493 {
494         struct page *page;
495         void *vaddr = NULL;
496
497         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
498         if (page)
499                 vaddr = page_address(page);
500         return vaddr;
501 }
502
503 static inline void free_pgtable_page(void *vaddr)
504 {
505         free_page((unsigned long)vaddr);
506 }
507
508 static inline void *alloc_domain_mem(void)
509 {
510         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
511 }
512
513 static void free_domain_mem(void *vaddr)
514 {
515         kmem_cache_free(iommu_domain_cache, vaddr);
516 }
517
518 static inline void * alloc_devinfo_mem(void)
519 {
520         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
521 }
522
523 static inline void free_devinfo_mem(void *vaddr)
524 {
525         kmem_cache_free(iommu_devinfo_cache, vaddr);
526 }
527
528 static inline int domain_type_is_vm(struct dmar_domain *domain)
529 {
530         return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
531 }
532
533 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
534 {
535         return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
536                                 DOMAIN_FLAG_STATIC_IDENTITY);
537 }
538
539 static inline int domain_pfn_supported(struct dmar_domain *domain,
540                                        unsigned long pfn)
541 {
542         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
543
544         return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
545 }
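/*
 * For illustration: with agaw == 2 (48-bit guest address width) the
 * check above computes addr_width == 48 - 12 == 36, so any pfn at or
 * above 1UL << 36 is rejected. If addr_width were >= BITS_PER_LONG,
 * every representable pfn fits and the check is skipped.
 */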
546
547 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
548 {
549         unsigned long sagaw;
550         int agaw = -1;
551
552         sagaw = cap_sagaw(iommu->cap);
553         for (agaw = width_to_agaw(max_gaw);
554              agaw >= 0; agaw--) {
555                 if (test_bit(agaw, &sagaw))
556                         break;
557         }
558
559         return agaw;
560 }
561
562 /*
563  * Calculate max SAGAW for each iommu.
564  */
565 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
566 {
567         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
568 }
569
570 /*
571  * calculate agaw for each iommu.
572  * "SAGAW" may be different across iommus, use a default agaw, and
573  * get a supported less agaw for iommus that don't support the default agaw.
574  */
575 int iommu_calculate_agaw(struct intel_iommu *iommu)
576 {
577         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
578 }
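/*
 * For illustration: SAGAW is a bitmap of supported page-table depths
 * (roughly bit 1 == 39-bit/3-level, bit 2 == 48-bit/4-level, ...).
 * Starting from width_to_agaw(48) == 2 for the default domain width,
 * __iommu_calculate_agaw() walks downwards until it finds a set bit, e.g.
 *
 *      sagaw == 0x4  ->  agaw == 2   (4-level tables)
 *      sagaw == 0x2  ->  agaw == 1   (fall back to 3-level, 39-bit)
 */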
579
580 /* This function only returns a single iommu in a domain */
581 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
582 {
583         int iommu_id;
584
585         /* si_domain and vm domain should not get here. */
586         BUG_ON(domain_type_is_vm_or_si(domain));
587         iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
588         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
589                 return NULL;
590
591         return g_iommus[iommu_id];
592 }
593
594 static void domain_update_iommu_coherency(struct dmar_domain *domain)
595 {
596         struct dmar_drhd_unit *drhd;
597         struct intel_iommu *iommu;
598         int i, found = 0;
599
600         domain->iommu_coherency = 1;
601
602         for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
603                 found = 1;
604                 if (!ecap_coherent(g_iommus[i]->ecap)) {
605                         domain->iommu_coherency = 0;
606                         break;
607                 }
608         }
609         if (found)
610                 return;
611
612         /* No hardware attached; use lowest common denominator */
613         rcu_read_lock();
614         for_each_active_iommu(iommu, drhd) {
615                 if (!ecap_coherent(iommu->ecap)) {
616                         domain->iommu_coherency = 0;
617                         break;
618                 }
619         }
620         rcu_read_unlock();
621 }
622
623 static int domain_update_iommu_snooping(struct intel_iommu *skip)
624 {
625         struct dmar_drhd_unit *drhd;
626         struct intel_iommu *iommu;
627         int ret = 1;
628
629         rcu_read_lock();
630         for_each_active_iommu(iommu, drhd) {
631                 if (iommu != skip) {
632                         if (!ecap_sc_support(iommu->ecap)) {
633                                 ret = 0;
634                                 break;
635                         }
636                 }
637         }
638         rcu_read_unlock();
639
640         return ret;
641 }
642
643 static int domain_update_iommu_superpage(struct intel_iommu *skip)
644 {
645         struct dmar_drhd_unit *drhd;
646         struct intel_iommu *iommu;
647         int mask = 0xf;
648
649         if (!intel_iommu_superpage) {
650                 return 0;
651         }
652
653         /* set iommu_superpage to the smallest common denominator */
654         rcu_read_lock();
655         for_each_active_iommu(iommu, drhd) {
656                 if (iommu != skip) {
657                         mask &= cap_super_page_val(iommu->cap);
658                         if (!mask)
659                                 break;
660                 }
661         }
662         rcu_read_unlock();
663
664         return fls(mask);
665 }
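/*
 * For illustration: cap_super_page_val() is a 4-bit field (bit 0 ==
 * 2MiB, bit 1 == 1GiB, ...), so after intersecting it across the
 * active iommus:
 *
 *      mask == 0x0  ->  fls(mask) == 0   (4KiB pages only)
 *      mask == 0x1  ->  fls(mask) == 1   (up to 2MiB superpages)
 *      mask == 0x3  ->  fls(mask) == 2   (up to 1GiB superpages)
 *
 * matching the iommu_superpage encoding documented in struct dmar_domain.
 */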
666
667 /* Some capabilities may be different across iommus */
668 static void domain_update_iommu_cap(struct dmar_domain *domain)
669 {
670         domain_update_iommu_coherency(domain);
671         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
672         domain->iommu_superpage = domain_update_iommu_superpage(NULL);
673 }
674
675 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
676 {
677         struct dmar_drhd_unit *drhd = NULL;
678         struct intel_iommu *iommu;
679         struct device *tmp;
680         struct pci_dev *ptmp, *pdev = NULL;
681         u16 segment = 0;
682         int i;
683
684         if (dev_is_pci(dev)) {
685                 pdev = to_pci_dev(dev);
686                 segment = pci_domain_nr(pdev->bus);
687         } else if (ACPI_COMPANION(dev))
688                 dev = &ACPI_COMPANION(dev)->dev;
689
690         rcu_read_lock();
691         for_each_active_iommu(iommu, drhd) {
692                 if (pdev && segment != drhd->segment)
693                         continue;
694
695                 for_each_active_dev_scope(drhd->devices,
696                                           drhd->devices_cnt, i, tmp) {
697                         if (tmp == dev) {
698                                 *bus = drhd->devices[i].bus;
699                                 *devfn = drhd->devices[i].devfn;
700                                 goto out;
701                         }
702
703                         if (!pdev || !dev_is_pci(tmp))
704                                 continue;
705
706                         ptmp = to_pci_dev(tmp);
707                         if (ptmp->subordinate &&
708                             ptmp->subordinate->number <= pdev->bus->number &&
709                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
710                                 goto got_pdev;
711                 }
712
713                 if (pdev && drhd->include_all) {
714                 got_pdev:
715                         *bus = pdev->bus->number;
716                         *devfn = pdev->devfn;
717                         goto out;
718                 }
719         }
720         iommu = NULL;
721  out:
722         rcu_read_unlock();
723
724         return iommu;
725 }
726
727 static void domain_flush_cache(struct dmar_domain *domain,
728                                void *addr, int size)
729 {
730         if (!domain->iommu_coherency)
731                 clflush_cache_range(addr, size);
732 }
733
734 /* Gets context entry for a given bus and devfn */
735 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
736                 u8 bus, u8 devfn)
737 {
738         struct root_entry *root;
739         struct context_entry *context;
740         unsigned long phy_addr;
741         unsigned long flags;
742
743         spin_lock_irqsave(&iommu->lock, flags);
744         root = &iommu->root_entry[bus];
745         context = get_context_addr_from_root(root);
746         if (!context) {
747                 context = (struct context_entry *)
748                                 alloc_pgtable_page(iommu->node);
749                 if (!context) {
750                         spin_unlock_irqrestore(&iommu->lock, flags);
751                         return NULL;
752                 }
753                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
754                 phy_addr = virt_to_phys((void *)context);
755                 set_root_value(root, phy_addr);
756                 set_root_present(root);
757                 __iommu_flush_cache(iommu, root, sizeof(*root));
758         }
759         spin_unlock_irqrestore(&iommu->lock, flags);
760         return &context[devfn];
761 }
762
763 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
764 {
765         struct root_entry *root;
766         struct context_entry *context;
767         int ret;
768         unsigned long flags;
769
770         spin_lock_irqsave(&iommu->lock, flags);
771         root = &iommu->root_entry[bus];
772         context = get_context_addr_from_root(root);
773         if (!context) {
774                 ret = 0;
775                 goto out;
776         }
777         ret = context_present(&context[devfn]);
778 out:
779         spin_unlock_irqrestore(&iommu->lock, flags);
780         return ret;
781 }
782
783 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
784 {
785         struct root_entry *root;
786         struct context_entry *context;
787         unsigned long flags;
788
789         spin_lock_irqsave(&iommu->lock, flags);
790         root = &iommu->root_entry[bus];
791         context = get_context_addr_from_root(root);
792         if (context) {
793                 context_clear_entry(&context[devfn]);
794                 __iommu_flush_cache(iommu, &context[devfn], \
795                         sizeof(*context));
796         }
797         spin_unlock_irqrestore(&iommu->lock, flags);
798 }
799
800 static void free_context_table(struct intel_iommu *iommu)
801 {
802         struct root_entry *root;
803         int i;
804         unsigned long flags;
805         struct context_entry *context;
806
807         spin_lock_irqsave(&iommu->lock, flags);
808         if (!iommu->root_entry) {
809                 goto out;
810         }
811         for (i = 0; i < ROOT_ENTRY_NR; i++) {
812                 root = &iommu->root_entry[i];
813                 context = get_context_addr_from_root(root);
814                 if (context)
815                         free_pgtable_page(context);
816         }
817         free_pgtable_page(iommu->root_entry);
818         iommu->root_entry = NULL;
819 out:
820         spin_unlock_irqrestore(&iommu->lock, flags);
821 }
822
823 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
824                                       unsigned long pfn, int *target_level)
825 {
826         struct dma_pte *parent, *pte = NULL;
827         int level = agaw_to_level(domain->agaw);
828         int offset;
829
830         BUG_ON(!domain->pgd);
831
832         if (!domain_pfn_supported(domain, pfn))
833                 /* Address beyond IOMMU's addressing capabilities. */
834                 return NULL;
835
836         parent = domain->pgd;
837
838         while (1) {
839                 void *tmp_page;
840
841                 offset = pfn_level_offset(pfn, level);
842                 pte = &parent[offset];
843                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
844                         break;
845                 if (level == *target_level)
846                         break;
847
848                 if (!dma_pte_present(pte)) {
849                         uint64_t pteval;
850
851                         tmp_page = alloc_pgtable_page(domain->nid);
852
853                         if (!tmp_page)
854                                 return NULL;
855
856                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
857                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
858                         if (cmpxchg64(&pte->val, 0ULL, pteval))
859                                 /* Someone else set it while we were thinking; use theirs. */
860                                 free_pgtable_page(tmp_page);
861                         else
862                                 domain_flush_cache(domain, pte, sizeof(*pte));
863                 }
864                 if (level == 1)
865                         break;
866
867                 parent = phys_to_virt(dma_pte_addr(pte));
868                 level--;
869         }
870
871         if (!*target_level)
872                 *target_level = level;
873
874         return pte;
875 }
876
877
878 /* return the pte for an address at a specific level */
879 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
880                                          unsigned long pfn,
881                                          int level, int *large_page)
882 {
883         struct dma_pte *parent, *pte = NULL;
884         int total = agaw_to_level(domain->agaw);
885         int offset;
886
887         parent = domain->pgd;
888         while (level <= total) {
889                 offset = pfn_level_offset(pfn, total);
890                 pte = &parent[offset];
891                 if (level == total)
892                         return pte;
893
894                 if (!dma_pte_present(pte)) {
895                         *large_page = total;
896                         break;
897                 }
898
899                 if (dma_pte_superpage(pte)) {
900                         *large_page = total;
901                         return pte;
902                 }
903
904                 parent = phys_to_virt(dma_pte_addr(pte));
905                 total--;
906         }
907         return NULL;
908 }
909
910 /* clear last level pte; a tlb flush should follow */
911 static void dma_pte_clear_range(struct dmar_domain *domain,
912                                 unsigned long start_pfn,
913                                 unsigned long last_pfn)
914 {
915         unsigned int large_page = 1;
916         struct dma_pte *first_pte, *pte;
917
918         BUG_ON(!domain_pfn_supported(domain, start_pfn));
919         BUG_ON(!domain_pfn_supported(domain, last_pfn));
920         BUG_ON(start_pfn > last_pfn);
921
922         /* we don't need lock here; nobody else touches the iova range */
923         do {
924                 large_page = 1;
925                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
926                 if (!pte) {
927                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
928                         continue;
929                 }
930                 do {
931                         dma_clear_pte(pte);
932                         start_pfn += lvl_to_nr_pages(large_page);
933                         pte++;
934                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
935
936                 domain_flush_cache(domain, first_pte,
937                                    (void *)pte - (void *)first_pte);
938
939         } while (start_pfn && start_pfn <= last_pfn);
940 }
941
942 static void dma_pte_free_level(struct dmar_domain *domain, int level,
943                                struct dma_pte *pte, unsigned long pfn,
944                                unsigned long start_pfn, unsigned long last_pfn)
945 {
946         pfn = max(start_pfn, pfn);
947         pte = &pte[pfn_level_offset(pfn, level)];
948
949         do {
950                 unsigned long level_pfn;
951                 struct dma_pte *level_pte;
952
953                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
954                         goto next;
955
956                 level_pfn = pfn & level_mask(level - 1);
957                 level_pte = phys_to_virt(dma_pte_addr(pte));
958
959                 if (level > 2)
960                         dma_pte_free_level(domain, level - 1, level_pte,
961                                            level_pfn, start_pfn, last_pfn);
962
963                 /* If range covers entire pagetable, free it */
964                 if (!(start_pfn > level_pfn ||
965                       last_pfn < level_pfn + level_size(level) - 1)) {
966                         dma_clear_pte(pte);
967                         domain_flush_cache(domain, pte, sizeof(*pte));
968                         free_pgtable_page(level_pte);
969                 }
970 next:
971                 pfn += level_size(level);
972         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
973 }
974
975 /* free page table pages. last level pte should already be cleared */
976 static void dma_pte_free_pagetable(struct dmar_domain *domain,
977                                    unsigned long start_pfn,
978                                    unsigned long last_pfn)
979 {
980         BUG_ON(!domain_pfn_supported(domain, start_pfn));
981         BUG_ON(!domain_pfn_supported(domain, last_pfn));
982         BUG_ON(start_pfn > last_pfn);
983
984         dma_pte_clear_range(domain, start_pfn, last_pfn);
985
986         /* We don't need lock here; nobody else touches the iova range */
987         dma_pte_free_level(domain, agaw_to_level(domain->agaw),
988                            domain->pgd, 0, start_pfn, last_pfn);
989
990         /* free pgd */
991         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
992                 free_pgtable_page(domain->pgd);
993                 domain->pgd = NULL;
994         }
995 }
996
997 /* When a page at a given level is being unlinked from its parent, we don't
998    need to *modify* it at all. All we need to do is make a list of all the
999    pages which can be freed just as soon as we've flushed the IOTLB and we
1000    know the hardware page-walk will no longer touch them.
1001    The 'pte' argument is the *parent* PTE, pointing to the page that is to
1002    be freed. */
1003 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1004                                             int level, struct dma_pte *pte,
1005                                             struct page *freelist)
1006 {
1007         struct page *pg;
1008
1009         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1010         pg->freelist = freelist;
1011         freelist = pg;
1012
1013         if (level == 1)
1014                 return freelist;
1015
1016         pte = page_address(pg);
1017         do {
1018                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1019                         freelist = dma_pte_list_pagetables(domain, level - 1,
1020                                                            pte, freelist);
1021                 pte++;
1022         } while (!first_pte_in_page(pte));
1023
1024         return freelist;
1025 }
1026
1027 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1028                                         struct dma_pte *pte, unsigned long pfn,
1029                                         unsigned long start_pfn,
1030                                         unsigned long last_pfn,
1031                                         struct page *freelist)
1032 {
1033         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1034
1035         pfn = max(start_pfn, pfn);
1036         pte = &pte[pfn_level_offset(pfn, level)];
1037
1038         do {
1039                 unsigned long level_pfn;
1040
1041                 if (!dma_pte_present(pte))
1042                         goto next;
1043
1044                 level_pfn = pfn & level_mask(level);
1045
1046                 /* If range covers entire pagetable, free it */
1047                 if (start_pfn <= level_pfn &&
1048                     last_pfn >= level_pfn + level_size(level) - 1) {
1049                         /* These subordinate page tables are going away entirely. Don't
1050                            bother to clear them; we're just going to *free* them. */
1051                         if (level > 1 && !dma_pte_superpage(pte))
1052                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1053
1054                         dma_clear_pte(pte);
1055                         if (!first_pte)
1056                                 first_pte = pte;
1057                         last_pte = pte;
1058                 } else if (level > 1) {
1059                         /* Recurse down into a level that isn't *entirely* obsolete */
1060                         freelist = dma_pte_clear_level(domain, level - 1,
1061                                                        phys_to_virt(dma_pte_addr(pte)),
1062                                                        level_pfn, start_pfn, last_pfn,
1063                                                        freelist);
1064                 }
1065 next:
1066                 pfn += level_size(level);
1067         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1068
1069         if (first_pte)
1070                 domain_flush_cache(domain, first_pte,
1071                                    (void *)++last_pte - (void *)first_pte);
1072
1073         return freelist;
1074 }
1075
1076 /* We can't just free the pages because the IOMMU may still be walking
1077    the page tables, and may have cached the intermediate levels. The
1078    pages can only be freed after the IOTLB flush has been done. */
1079 struct page *domain_unmap(struct dmar_domain *domain,
1080                           unsigned long start_pfn,
1081                           unsigned long last_pfn)
1082 {
1083         struct page *freelist = NULL;
1084
1085         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1086         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1087         BUG_ON(start_pfn > last_pfn);
1088
1089         /* we don't need lock here; nobody else touches the iova range */
1090         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1091                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1092
1093         /* free pgd */
1094         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1095                 struct page *pgd_page = virt_to_page(domain->pgd);
1096                 pgd_page->freelist = freelist;
1097                 freelist = pgd_page;
1098
1099                 domain->pgd = NULL;
1100         }
1101
1102         return freelist;
1103 }
1104
1105 void dma_free_pagelist(struct page *freelist)
1106 {
1107         struct page *pg;
1108
1109         while ((pg = freelist)) {
1110                 freelist = pg->freelist;
1111                 free_pgtable_page(page_address(pg));
1112         }
1113 }
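/*
 * For illustration, a rough sketch of the expected calling sequence for
 * the two helpers above (the variable names are illustrative only):
 *
 *      struct page *freelist = domain_unmap(domain, start_pfn, last_pfn);
 *
 *      ... flush the IOTLB for the range, e.g. with iommu_flush_iotlb_psi() ...
 *
 *      dma_free_pagelist(freelist);
 *
 * so the page-table pages are only handed back to the allocator once
 * the hardware can no longer walk them.
 */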
1114
1115 /* iommu handling */
1116 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1117 {
1118         struct root_entry *root;
1119         unsigned long flags;
1120
1121         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1122         if (!root) {
1123                 pr_err("IOMMU: allocating root entry for %s failed\n",
1124                         iommu->name);
1125                 return -ENOMEM;
1126         }
1127
1128         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1129
1130         spin_lock_irqsave(&iommu->lock, flags);
1131         iommu->root_entry = root;
1132         spin_unlock_irqrestore(&iommu->lock, flags);
1133
1134         return 0;
1135 }
1136
1137 static void iommu_set_root_entry(struct intel_iommu *iommu)
1138 {
1139         void *addr;
1140         u32 sts;
1141         unsigned long flag;
1142
1143         addr = iommu->root_entry;
1144
1145         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1146         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
1147
1148         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1149
1150         /* Make sure hardware completes it */
1151         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1152                       readl, (sts & DMA_GSTS_RTPS), sts);
1153
1154         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1155 }
1156
1157 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1158 {
1159         u32 val;
1160         unsigned long flag;
1161
1162         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1163                 return;
1164
1165         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1166         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1167
1168         /* Make sure hardware completes it */
1169         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1170                       readl, (!(val & DMA_GSTS_WBFS)), val);
1171
1172         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1173 }
1174
1175 /* return value determine if we need a write buffer flush */
1176 static void __iommu_flush_context(struct intel_iommu *iommu,
1177                                   u16 did, u16 source_id, u8 function_mask,
1178                                   u64 type)
1179 {
1180         u64 val = 0;
1181         unsigned long flag;
1182
1183         switch (type) {
1184         case DMA_CCMD_GLOBAL_INVL:
1185                 val = DMA_CCMD_GLOBAL_INVL;
1186                 break;
1187         case DMA_CCMD_DOMAIN_INVL:
1188                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1189                 break;
1190         case DMA_CCMD_DEVICE_INVL:
1191                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1192                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1193                 break;
1194         default:
1195                 BUG();
1196         }
1197         val |= DMA_CCMD_ICC;
1198
1199         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1200         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1201
1202         /* Make sure hardware completes it */
1203         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1204                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1205
1206         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1207 }
1208
1209 /* return value determine if we need a write buffer flush */
1210 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1211                                 u64 addr, unsigned int size_order, u64 type)
1212 {
1213         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1214         u64 val = 0, val_iva = 0;
1215         unsigned long flag;
1216
1217         switch (type) {
1218         case DMA_TLB_GLOBAL_FLUSH:
1219                 /* global flush doesn't need to set IVA_REG */
1220                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1221                 break;
1222         case DMA_TLB_DSI_FLUSH:
1223                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1224                 break;
1225         case DMA_TLB_PSI_FLUSH:
1226                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1227                 /* IH bit is passed in as part of address */
1228                 val_iva = size_order | addr;
1229                 break;
1230         default:
1231                 BUG();
1232         }
1233         /* Note: set drain read/write */
1234 #if 0
1235         /*
1236          * This is probably meant to be extra safe. It looks like we can
1237          * ignore it without any impact.
1238          */
1239         if (cap_read_drain(iommu->cap))
1240                 val |= DMA_TLB_READ_DRAIN;
1241 #endif
1242         if (cap_write_drain(iommu->cap))
1243                 val |= DMA_TLB_WRITE_DRAIN;
1244
1245         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1246         /* Note: Only uses first TLB reg currently */
1247         if (val_iva)
1248                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1249         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1250
1251         /* Make sure hardware completes it */
1252         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1253                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1254
1255         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1256
1257         /* check IOTLB invalidation granularity */
1258         if (DMA_TLB_IAIG(val) == 0)
1259                 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1260         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1261                 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1262                         (unsigned long long)DMA_TLB_IIRG(type),
1263                         (unsigned long long)DMA_TLB_IAIG(val));
1264 }
1265
1266 static struct device_domain_info *
1267 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1268                          u8 bus, u8 devfn)
1269 {
1270         int found = 0;
1271         unsigned long flags;
1272         struct device_domain_info *info;
1273         struct pci_dev *pdev;
1274
1275         if (!ecap_dev_iotlb_support(iommu->ecap))
1276                 return NULL;
1277
1278         if (!iommu->qi)
1279                 return NULL;
1280
1281         spin_lock_irqsave(&device_domain_lock, flags);
1282         list_for_each_entry(info, &domain->devices, link)
1283                 if (info->iommu == iommu && info->bus == bus &&
1284                     info->devfn == devfn) {
1285                         found = 1;
1286                         break;
1287                 }
1288         spin_unlock_irqrestore(&device_domain_lock, flags);
1289
1290         if (!found || !info->dev || !dev_is_pci(info->dev))
1291                 return NULL;
1292
1293         pdev = to_pci_dev(info->dev);
1294
1295         if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS))
1296                 return NULL;
1297
1298         if (!dmar_find_matched_atsr_unit(pdev))
1299                 return NULL;
1300
1301         return info;
1302 }
1303
1304 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1305 {
1306         if (!info || !dev_is_pci(info->dev))
1307                 return;
1308
1309         pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
1310 }
1311
1312 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1313 {
1314         if (!info->dev || !dev_is_pci(info->dev) ||
1315             !pci_ats_enabled(to_pci_dev(info->dev)))
1316                 return;
1317
1318         pci_disable_ats(to_pci_dev(info->dev));
1319 }
1320
1321 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1322                                   u64 addr, unsigned mask)
1323 {
1324         u16 sid, qdep;
1325         unsigned long flags;
1326         struct device_domain_info *info;
1327
1328         spin_lock_irqsave(&device_domain_lock, flags);
1329         list_for_each_entry(info, &domain->devices, link) {
1330                 struct pci_dev *pdev;
1331                 if (!info->dev || !dev_is_pci(info->dev))
1332                         continue;
1333
1334                 pdev = to_pci_dev(info->dev);
1335                 if (!pci_ats_enabled(pdev))
1336                         continue;
1337
1338                 sid = info->bus << 8 | info->devfn;
1339                 qdep = pci_ats_queue_depth(pdev);
1340                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1341         }
1342         spin_unlock_irqrestore(&device_domain_lock, flags);
1343 }
1344
1345 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1346                                   unsigned long pfn, unsigned int pages, int ih, int map)
1347 {
1348         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1349         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1350
1351         BUG_ON(pages == 0);
1352
1353         if (ih)
1354                 ih = 1 << 6;
1355         /*
1356          * Fall back to domain-selective flush if there is no PSI support or
1357          * the size is too big.
1358          * PSI requires the page size to be 2^x and the base address to be
1359          * naturally aligned to that size.
1360          */
1361         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1362                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1363                                                 DMA_TLB_DSI_FLUSH);
1364         else
1365                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1366                                                 DMA_TLB_PSI_FLUSH);
1367
1368         /*
1369          * In caching mode, changes of pages from non-present to present require
1370          * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1371          */
1372         if (!cap_caching_mode(iommu->cap) || !map)
1373                 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1374 }
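/*
 * For illustration: mask above is log2 of the number of VT-d pages
 * covered by the invalidation, e.g. for pages == 5,
 *
 *      __roundup_pow_of_two(5) == 8,  mask == ilog2(8) == 3
 *
 * so the PSI covers 2^3 == 8 pages starting at the naturally aligned
 * base address; if mask exceeds cap_max_amask_val() we fall back to a
 * domain-selective flush instead.
 */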
1375
1376 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1377 {
1378         u32 pmen;
1379         unsigned long flags;
1380
1381         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1382         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1383         pmen &= ~DMA_PMEN_EPM;
1384         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1385
1386         /* wait for the protected region status bit to clear */
1387         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1388                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1389
1390         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1391 }
1392
1393 static void iommu_enable_translation(struct intel_iommu *iommu)
1394 {
1395         u32 sts;
1396         unsigned long flags;
1397
1398         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1399         iommu->gcmd |= DMA_GCMD_TE;
1400         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1401
1402         /* Make sure hardware completes it */
1403         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1404                       readl, (sts & DMA_GSTS_TES), sts);
1405
1406         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1407 }
1408
1409 static void iommu_disable_translation(struct intel_iommu *iommu)
1410 {
1411         u32 sts;
1412         unsigned long flag;
1413
1414         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1415         iommu->gcmd &= ~DMA_GCMD_TE;
1416         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1417
1418         /* Make sure hardware completes it */
1419         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1420                       readl, (!(sts & DMA_GSTS_TES)), sts);
1421
1422         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1423 }
1424
1425
1426 static int iommu_init_domains(struct intel_iommu *iommu)
1427 {
1428         unsigned long ndomains;
1429         unsigned long nlongs;
1430
1431         ndomains = cap_ndoms(iommu->cap);
1432         pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
1433                  iommu->seq_id, ndomains);
1434         nlongs = BITS_TO_LONGS(ndomains);
1435
1436         spin_lock_init(&iommu->lock);
1437
1438         /* TBD: there might be 64K domains;
1439          * consider a different allocation scheme for future chips.
1440          */
1441         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1442         if (!iommu->domain_ids) {
1443                 pr_err("IOMMU%d: allocating domain id array failed\n",
1444                        iommu->seq_id);
1445                 return -ENOMEM;
1446         }
1447         iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1448                         GFP_KERNEL);
1449         if (!iommu->domains) {
1450                 pr_err("IOMMU%d: allocating domain array failed\n",
1451                        iommu->seq_id);
1452                 kfree(iommu->domain_ids);
1453                 iommu->domain_ids = NULL;
1454                 return -ENOMEM;
1455         }
1456
1457         /*
1458          * If caching mode is set, then invalid translations are tagged
1459          * with domain id 0. Hence we need to pre-allocate it.
1460          */
1461         if (cap_caching_mode(iommu->cap))
1462                 set_bit(0, iommu->domain_ids);
1463         return 0;
1464 }
1465
1466 static void disable_dmar_iommu(struct intel_iommu *iommu)
1467 {
1468         struct dmar_domain *domain;
1469         int i;
1470
1471         if ((iommu->domains) && (iommu->domain_ids)) {
1472                 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1473                         /*
1474                          * Domain id 0 is reserved for invalid translation
1475                          * if hardware supports caching mode.
1476                          */
1477                         if (cap_caching_mode(iommu->cap) && i == 0)
1478                                 continue;
1479
1480                         domain = iommu->domains[i];
1481                         clear_bit(i, iommu->domain_ids);
1482                         if (domain_detach_iommu(domain, iommu) == 0 &&
1483                             !domain_type_is_vm(domain))
1484                                 domain_exit(domain);
1485                 }
1486         }
1487
1488         if (iommu->gcmd & DMA_GCMD_TE)
1489                 iommu_disable_translation(iommu);
1490 }
1491
1492 static void free_dmar_iommu(struct intel_iommu *iommu)
1493 {
1494         if ((iommu->domains) && (iommu->domain_ids)) {
1495                 kfree(iommu->domains);
1496                 kfree(iommu->domain_ids);
1497                 iommu->domains = NULL;
1498                 iommu->domain_ids = NULL;
1499         }
1500
1501         g_iommus[iommu->seq_id] = NULL;
1502
1503         /* free context mapping */
1504         free_context_table(iommu);
1505 }
1506
1507 static struct dmar_domain *alloc_domain(int flags)
1508 {
1509         /* domain id for a virtual machine; it won't be set in the context entry */
1510         static atomic_t vm_domid = ATOMIC_INIT(0);
1511         struct dmar_domain *domain;
1512
1513         domain = alloc_domain_mem();
1514         if (!domain)
1515                 return NULL;
1516
1517         memset(domain, 0, sizeof(*domain));
1518         domain->nid = -1;
1519         domain->flags = flags;
1520         spin_lock_init(&domain->iommu_lock);
1521         INIT_LIST_HEAD(&domain->devices);
1522         if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1523                 domain->id = atomic_inc_return(&vm_domid);
1524
1525         return domain;
1526 }
1527
1528 static int __iommu_attach_domain(struct dmar_domain *domain,
1529                                  struct intel_iommu *iommu)
1530 {
1531         int num;
1532         unsigned long ndomains;
1533
1534         ndomains = cap_ndoms(iommu->cap);
1535         num = find_first_zero_bit(iommu->domain_ids, ndomains);
1536         if (num < ndomains) {
1537                 set_bit(num, iommu->domain_ids);
1538                 iommu->domains[num] = domain;
1539         } else {
1540                 num = -ENOSPC;
1541         }
1542
1543         return num;
1544 }
1545
1546 static int iommu_attach_domain(struct dmar_domain *domain,
1547                                struct intel_iommu *iommu)
1548 {
1549         int num;
1550         unsigned long flags;
1551
1552         spin_lock_irqsave(&iommu->lock, flags);
1553         num = __iommu_attach_domain(domain, iommu);
1554         spin_unlock_irqrestore(&iommu->lock, flags);
1555         if (num < 0)
1556                 pr_err("IOMMU: no free domain ids\n");
1557
1558         return num;
1559 }
1560
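     /*
      * A VM domain may be attached to several iommus and gets a separate
      * domain id on each of them.  Reuse the id if this domain is already
      * known to @iommu, otherwise allocate a new one.  Called with
      * iommu->lock held.
      */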
1561 static int iommu_attach_vm_domain(struct dmar_domain *domain,
1562                                   struct intel_iommu *iommu)
1563 {
1564         int num;
1565         unsigned long ndomains;
1566
1567         ndomains = cap_ndoms(iommu->cap);
1568         for_each_set_bit(num, iommu->domain_ids, ndomains)
1569                 if (iommu->domains[num] == domain)
1570                         return num;
1571
1572         return __iommu_attach_domain(domain, iommu);
1573 }
1574
1575 static void iommu_detach_domain(struct dmar_domain *domain,
1576                                 struct intel_iommu *iommu)
1577 {
1578         unsigned long flags;
1579         int num, ndomains;
1580
1581         spin_lock_irqsave(&iommu->lock, flags);
1582         if (domain_type_is_vm_or_si(domain)) {
1583                 ndomains = cap_ndoms(iommu->cap);
1584                 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1585                         if (iommu->domains[num] == domain) {
1586                                 clear_bit(num, iommu->domain_ids);
1587                                 iommu->domains[num] = NULL;
1588                                 break;
1589                         }
1590                 }
1591         } else {
1592                 clear_bit(domain->id, iommu->domain_ids);
1593                 iommu->domains[domain->id] = NULL;
1594         }
1595         spin_unlock_irqrestore(&iommu->lock, flags);
1596 }
1597
1598 static void domain_attach_iommu(struct dmar_domain *domain,
1599                                struct intel_iommu *iommu)
1600 {
1601         unsigned long flags;
1602
1603         spin_lock_irqsave(&domain->iommu_lock, flags);
1604         if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1605                 domain->iommu_count++;
1606                 if (domain->iommu_count == 1)
1607                         domain->nid = iommu->node;
1608                 domain_update_iommu_cap(domain);
1609         }
1610         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1611 }
1612
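     /*
      * Returns the number of iommus the domain is still attached to after
      * the detach, or INT_MAX if this iommu was not attached at all.
      */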
1613 static int domain_detach_iommu(struct dmar_domain *domain,
1614                                struct intel_iommu *iommu)
1615 {
1616         unsigned long flags;
1617         int count = INT_MAX;
1618
1619         spin_lock_irqsave(&domain->iommu_lock, flags);
1620         if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
1621                 count = --domain->iommu_count;
1622                 domain_update_iommu_cap(domain);
1623         }
1624         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1625
1626         return count;
1627 }
1628
1629 static struct iova_domain reserved_iova_list;
1630 static struct lock_class_key reserved_rbtree_key;
1631
1632 static int dmar_init_reserved_ranges(void)
1633 {
1634         struct pci_dev *pdev = NULL;
1635         struct iova *iova;
1636         int i;
1637
1638         init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1639                         DMA_32BIT_PFN);
1640
1641         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1642                 &reserved_rbtree_key);
1643
1644         /* IOAPIC ranges shouldn't be accessed by DMA */
1645         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1646                 IOVA_PFN(IOAPIC_RANGE_END));
1647         if (!iova) {
1648                 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1649                 return -ENODEV;
1650         }
1651
1652         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1653         for_each_pci_dev(pdev) {
1654                 struct resource *r;
1655
1656                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1657                         r = &pdev->resource[i];
1658                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1659                                 continue;
1660                         iova = reserve_iova(&reserved_iova_list,
1661                                             IOVA_PFN(r->start),
1662                                             IOVA_PFN(r->end));
1663                         if (!iova) {
1664                                 printk(KERN_ERR "Reserve iova failed\n");
1665                                 return -ENODEV;
1666                         }
1667                 }
1668         }
1669         return 0;
1670 }
1671
1672 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1673 {
1674         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1675 }
1676
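     /*
      * Round the guest address width up to the next width the page table
      * can represent: 12 offset bits plus a multiple of 9 bits per level,
      * capped at 64.  E.g. gaw 36 becomes 39, gaw 48 stays 48.
      */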
1677 static inline int guestwidth_to_adjustwidth(int gaw)
1678 {
1679         int agaw;
1680         int r = (gaw - 12) % 9;
1681
1682         if (r == 0)
1683                 agaw = gaw;
1684         else
1685                 agaw = gaw + 9 - r;
1686         if (agaw > 64)
1687                 agaw = 64;
1688         return agaw;
1689 }
1690
1691 static int domain_init(struct dmar_domain *domain, int guest_width)
1692 {
1693         struct intel_iommu *iommu;
1694         int adjust_width, agaw;
1695         unsigned long sagaw;
1696
1697         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1698                         DMA_32BIT_PFN);
1699         domain_reserve_special_ranges(domain);
1700
1701         /* calculate AGAW */
1702         iommu = domain_get_iommu(domain);
1703         if (guest_width > cap_mgaw(iommu->cap))
1704                 guest_width = cap_mgaw(iommu->cap);
1705         domain->gaw = guest_width;
1706         adjust_width = guestwidth_to_adjustwidth(guest_width);
1707         agaw = width_to_agaw(adjust_width);
1708         sagaw = cap_sagaw(iommu->cap);
1709         if (!test_bit(agaw, &sagaw)) {
1710                 /* hardware doesn't support it, choose a bigger one */
1711                 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1712                 agaw = find_next_bit(&sagaw, 5, agaw);
1713                 if (agaw >= 5)
1714                         return -ENODEV;
1715         }
1716         domain->agaw = agaw;
1717
1718         if (ecap_coherent(iommu->ecap))
1719                 domain->iommu_coherency = 1;
1720         else
1721                 domain->iommu_coherency = 0;
1722
1723         if (ecap_sc_support(iommu->ecap))
1724                 domain->iommu_snooping = 1;
1725         else
1726                 domain->iommu_snooping = 0;
1727
1728         if (intel_iommu_superpage)
1729                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1730         else
1731                 domain->iommu_superpage = 0;
1732
1733         domain->nid = iommu->node;
1734
1735         /* always allocate the top pgd */
1736         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1737         if (!domain->pgd)
1738                 return -ENOMEM;
1739         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1740         return 0;
1741 }
1742
1743 static void domain_exit(struct dmar_domain *domain)
1744 {
1745         struct page *freelist = NULL;
1746         int i;
1747
1748         /* Domain 0 is reserved, so don't process it */
1749         if (!domain)
1750                 return;
1751
1752         /* Flush any lazy unmaps that may reference this domain */
1753         if (!intel_iommu_strict)
1754                 flush_unmaps_timeout(0);
1755
1756         /* remove associated devices */
1757         domain_remove_dev_info(domain);
1758
1759         /* destroy iovas */
1760         put_iova_domain(&domain->iovad);
1761
1762         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1763
1764         /* clear attached or cached domains */
1765         rcu_read_lock();
1766         for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
1767                 iommu_detach_domain(domain, g_iommus[i]);
1768         rcu_read_unlock();
1769
1770         dma_free_pagelist(freelist);
1771
1772         free_domain_mem(domain);
1773 }
1774
1775 static int domain_context_mapping_one(struct dmar_domain *domain,
1776                                       struct intel_iommu *iommu,
1777                                       u8 bus, u8 devfn, int translation)
1778 {
1779         struct context_entry *context;
1780         unsigned long flags;
1781         struct dma_pte *pgd;
1782         int id;
1783         int agaw;
1784         struct device_domain_info *info = NULL;
1785
1786         pr_debug("Set context mapping for %02x:%02x.%d\n",
1787                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1788
1789         BUG_ON(!domain->pgd);
1790         BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1791                translation != CONTEXT_TT_MULTI_LEVEL);
1792
1793         context = device_to_context_entry(iommu, bus, devfn);
1794         if (!context)
1795                 return -ENOMEM;
1796         spin_lock_irqsave(&iommu->lock, flags);
1797         if (context_present(context)) {
1798                 spin_unlock_irqrestore(&iommu->lock, flags);
1799                 return 0;
1800         }
1801
1802         id = domain->id;
1803         pgd = domain->pgd;
1804
1805         if (domain_type_is_vm_or_si(domain)) {
1806                 if (domain_type_is_vm(domain)) {
1807                         id = iommu_attach_vm_domain(domain, iommu);
1808                         if (id < 0) {
1809                                 spin_unlock_irqrestore(&iommu->lock, flags);
1810                                 pr_err("IOMMU: no free domain ids\n");
1811                                 return -EFAULT;
1812                         }
1813                 }
1814
1815                 /* Skip top levels of page tables for
1816                  * an iommu which has a smaller agaw than the default.
1817                  * Unnecessary for PT mode.
1818                  */
1819                 if (translation != CONTEXT_TT_PASS_THROUGH) {
1820                         for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1821                                 pgd = phys_to_virt(dma_pte_addr(pgd));
1822                                 if (!dma_pte_present(pgd)) {
1823                                         spin_unlock_irqrestore(&iommu->lock, flags);
1824                                         return -ENOMEM;
1825                                 }
1826                         }
1827                 }
1828         }
1829
1830         context_set_domain_id(context, id);
1831
1832         if (translation != CONTEXT_TT_PASS_THROUGH) {
1833                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
1834                 translation = info ? CONTEXT_TT_DEV_IOTLB :
1835                                      CONTEXT_TT_MULTI_LEVEL;
1836         }
1837         /*
1838          * In pass through mode, AW must be programmed to indicate the largest
1839          * AGAW value supported by hardware. And ASR is ignored by hardware.
1840          */
1841         if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1842                 context_set_address_width(context, iommu->msagaw);
1843         else {
1844                 context_set_address_root(context, virt_to_phys(pgd));
1845                 context_set_address_width(context, iommu->agaw);
1846         }
1847
1848         context_set_translation_type(context, translation);
1849         context_set_fault_enable(context);
1850         context_set_present(context);
1851         domain_flush_cache(domain, context, sizeof(*context));
1852
1853         /*
1854          * It's a non-present to present mapping. If hardware doesn't cache
1855          * non-present entries we only need to flush the write-buffer. If it
1856          * _does_ cache non-present entries, then it does so in the special
1857          * domain #0, which we have to flush:
1858          */
1859         if (cap_caching_mode(iommu->cap)) {
1860                 iommu->flush.flush_context(iommu, 0,
1861                                            (((u16)bus) << 8) | devfn,
1862                                            DMA_CCMD_MASK_NOBIT,
1863                                            DMA_CCMD_DEVICE_INVL);
1864                 iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
1865         } else {
1866                 iommu_flush_write_buffer(iommu);
1867         }
1868         iommu_enable_dev_iotlb(info);
1869         spin_unlock_irqrestore(&iommu->lock, flags);
1870
1871         domain_attach_iommu(domain, iommu);
1872
1873         return 0;
1874 }
1875
1876 struct domain_context_mapping_data {
1877         struct dmar_domain *domain;
1878         struct intel_iommu *iommu;
1879         int translation;
1880 };
1881
1882 static int domain_context_mapping_cb(struct pci_dev *pdev,
1883                                      u16 alias, void *opaque)
1884 {
1885         struct domain_context_mapping_data *data = opaque;
1886
1887         return domain_context_mapping_one(data->domain, data->iommu,
1888                                           PCI_BUS_NUM(alias), alias & 0xff,
1889                                           data->translation);
1890 }
1891
1892 static int
1893 domain_context_mapping(struct dmar_domain *domain, struct device *dev,
1894                        int translation)
1895 {
1896         struct intel_iommu *iommu;
1897         u8 bus, devfn;
1898         struct domain_context_mapping_data data;
1899
1900         iommu = device_to_iommu(dev, &bus, &devfn);
1901         if (!iommu)
1902                 return -ENODEV;
1903
1904         if (!dev_is_pci(dev))
1905                 return domain_context_mapping_one(domain, iommu, bus, devfn,
1906                                                   translation);
1907
1908         data.domain = domain;
1909         data.iommu = iommu;
1910         data.translation = translation;
1911
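             /*
              * Program a context entry for every DMA alias of the device
              * (e.g. requester IDs generated by PCIe-to-PCI bridges), so
              * that all aliases end up in the same domain.
              */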
1912         return pci_for_each_dma_alias(to_pci_dev(dev),
1913                                       &domain_context_mapping_cb, &data);
1914 }
1915
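     /*
      * pci_for_each_dma_alias() stops as soon as the callback returns
      * non-zero, so returning !device_context_mapped() makes the walk
      * fail fast on the first alias that is not yet mapped.
      */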
1916 static int domain_context_mapped_cb(struct pci_dev *pdev,
1917                                     u16 alias, void *opaque)
1918 {
1919         struct intel_iommu *iommu = opaque;
1920
1921         return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
1922 }
1923
1924 static int domain_context_mapped(struct device *dev)
1925 {
1926         struct intel_iommu *iommu;
1927         u8 bus, devfn;
1928
1929         iommu = device_to_iommu(dev, &bus, &devfn);
1930         if (!iommu)
1931                 return -ENODEV;
1932
1933         if (!dev_is_pci(dev))
1934                 return device_context_mapped(iommu, bus, devfn);
1935
1936         return !pci_for_each_dma_alias(to_pci_dev(dev),
1937                                        domain_context_mapped_cb, iommu);
1938 }
1939
1940 /* Returns a number of VTD pages, but aligned to MM page size */
1941 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1942                                             size_t size)
1943 {
1944         host_addr &= ~PAGE_MASK;
1945         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1946 }
1947
1948 /* Return largest possible superpage level for a given mapping */
1949 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1950                                           unsigned long iov_pfn,
1951                                           unsigned long phy_pfn,
1952                                           unsigned long pages)
1953 {
1954         int support, level = 1;
1955         unsigned long pfnmerge;
1956
1957         support = domain->iommu_superpage;
1958
1959         /* To use a large page, the virtual *and* physical addresses
1960            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1961            of them will mean we have to use smaller pages. So just
1962            merge them and check both at once. */
1963         pfnmerge = iov_pfn | phy_pfn;
1964
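             /*
              * Each additional level covers VTD_STRIDE_SHIFT more address
              * bits; keep climbing while the hardware supports it, both
              * addresses stay aligned and the mapping is still big enough.
              */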
1965         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1966                 pages >>= VTD_STRIDE_SHIFT;
1967                 if (!pages)
1968                         break;
1969                 pfnmerge >>= VTD_STRIDE_SHIFT;
1970                 level++;
1971                 support--;
1972         }
1973         return level;
1974 }
1975
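     /*
      * Map @nr_pages at @iov_pfn, either from a scatterlist (@sg != NULL)
      * or from the contiguous range starting at @phys_pfn, using
      * superpages where alignment and size allow.
      */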
1976 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1977                             struct scatterlist *sg, unsigned long phys_pfn,
1978                             unsigned long nr_pages, int prot)
1979 {
1980         struct dma_pte *first_pte = NULL, *pte = NULL;
1981         phys_addr_t uninitialized_var(pteval);
1982         unsigned long sg_res = 0;
1983         unsigned int largepage_lvl = 0;
1984         unsigned long lvl_pages = 0;
1985
1986         BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
1987
1988         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1989                 return -EINVAL;
1990
1991         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1992
1993         if (!sg) {
1994                 sg_res = nr_pages;
1995                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1996         }
1997
1998         while (nr_pages > 0) {
1999                 uint64_t tmp;
2000
2001                 if (!sg_res) {
2002                         sg_res = aligned_nrpages(sg->offset, sg->length);
2003                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2004                         sg->dma_length = sg->length;
2005                         pteval = page_to_phys(sg_page(sg)) | prot;
2006                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
2007                 }
2008
2009                 if (!pte) {
2010                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2011
2012                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2013                         if (!pte)
2014                                 return -ENOMEM;
2015                         /* It is a large page */
2016                         if (largepage_lvl > 1) {
2017                                 pteval |= DMA_PTE_LARGE_PAGE;
2018                                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2019                                 /*
2020                                  * Ensure that old small page tables are
2021                                  * removed to make room for superpage,
2022                                  * if they exist.
2023                                  */
2024                                 dma_pte_free_pagetable(domain, iov_pfn,
2025                                                        iov_pfn + lvl_pages - 1);
2026                         } else {
2027                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2028                         }
2029
2030                 }
2031                 /* We don't need a lock here; nobody else
2032                  * touches the iova range
2033                  */
2034                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2035                 if (tmp) {
2036                         static int dumps = 5;
2037                         printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2038                                iov_pfn, tmp, (unsigned long long)pteval);
2039                         if (dumps) {
2040                                 dumps--;
2041                                 debug_dma_dump_mappings(NULL);
2042                         }
2043                         WARN_ON(1);
2044                 }
2045
2046                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2047
2048                 BUG_ON(nr_pages < lvl_pages);
2049                 BUG_ON(sg_res < lvl_pages);
2050
2051                 nr_pages -= lvl_pages;
2052                 iov_pfn += lvl_pages;
2053                 phys_pfn += lvl_pages;
2054                 pteval += lvl_pages * VTD_PAGE_SIZE;
2055                 sg_res -= lvl_pages;
2056
2057                 /* If the next PTE would be the first in a new page, then we
2058                    need to flush the cache on the entries we've just written.
2059                    And then we'll need to recalculate 'pte', so clear it and
2060                    let it get set again in the if (!pte) block above.
2061
2062                    If we're done (!nr_pages) we need to flush the cache too.
2063
2064                    Also if we've been setting superpages, we may need to
2065                    recalculate 'pte' and switch back to smaller pages for the
2066                    end of the mapping, if the trailing size is not enough to
2067                    use another superpage (i.e. sg_res < lvl_pages). */
2068                 pte++;
2069                 if (!nr_pages || first_pte_in_page(pte) ||
2070                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2071                         domain_flush_cache(domain, first_pte,
2072                                            (void *)pte - (void *)first_pte);
2073                         pte = NULL;
2074                 }
2075
2076                 if (!sg_res && nr_pages)
2077                         sg = sg_next(sg);
2078         }
2079         return 0;
2080 }
2081
2082 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2083                                     struct scatterlist *sg, unsigned long nr_pages,
2084                                     int prot)
2085 {
2086         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2087 }
2088
2089 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2090                                      unsigned long phys_pfn, unsigned long nr_pages,
2091                                      int prot)
2092 {
2093         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2094 }
2095
2096 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
2097 {
2098         if (!iommu)
2099                 return;
2100
2101         clear_context_table(iommu, bus, devfn);
2102         iommu->flush.flush_context(iommu, 0, 0, 0,
2103                                            DMA_CCMD_GLOBAL_INVL);
2104         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2105 }
2106
2107 static inline void unlink_domain_info(struct device_domain_info *info)
2108 {
2109         assert_spin_locked(&device_domain_lock);
2110         list_del(&info->link);
2111         list_del(&info->global);
2112         if (info->dev)
2113                 info->dev->archdata.iommu = NULL;
2114 }
2115
2116 static void domain_remove_dev_info(struct dmar_domain *domain)
2117 {
2118         struct device_domain_info *info, *tmp;
2119         unsigned long flags;
2120
2121         spin_lock_irqsave(&device_domain_lock, flags);
2122         list_for_each_entry_safe(info, tmp, &domain->devices, link) {
2123                 unlink_domain_info(info);
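                     /*
                      * Drop the list lock while the context entry is torn
                      * down; it is re-taken before continuing the safe
                      * list walk.
                      */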
2124                 spin_unlock_irqrestore(&device_domain_lock, flags);
2125
2126                 iommu_disable_dev_iotlb(info);
2127                 iommu_detach_dev(info->iommu, info->bus, info->devfn);
2128
2129                 if (domain_type_is_vm(domain)) {
2130                         iommu_detach_dependent_devices(info->iommu, info->dev);
2131                         domain_detach_iommu(domain, info->iommu);
2132                 }
2133
2134                 free_devinfo_mem(info);
2135                 spin_lock_irqsave(&device_domain_lock, flags);
2136         }
2137         spin_unlock_irqrestore(&device_domain_lock, flags);
2138 }
2139
2140 /*
2141  * find_domain
2142  * Note: we use struct device->archdata.iommu to store the info
2143  */
2144 static struct dmar_domain *find_domain(struct device *dev)
2145 {
2146         struct device_domain_info *info;
2147
2148         /* No lock here, assumes no domain exit in normal case */
2149         info = dev->archdata.iommu;
2150         if (info)
2151                 return info->domain;
2152         return NULL;
2153 }
2154
2155 static inline struct device_domain_info *
2156 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2157 {
2158         struct device_domain_info *info;
2159
2160         list_for_each_entry(info, &device_domain_list, global)
2161                 if (info->iommu->segment == segment && info->bus == bus &&
2162                     info->devfn == devfn)
2163                         return info;
2164
2165         return NULL;
2166 }
2167
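     /*
      * Allocate a device_domain_info for @dev (or for a PCI alias when
      * @dev is NULL) and link it to @domain.  If somebody else attached a
      * domain to the device first, free the new info and return the
      * existing domain; the caller must then free the domain it passed in.
      */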
2168 static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
2169                                                 int bus, int devfn,
2170                                                 struct device *dev,
2171                                                 struct dmar_domain *domain)
2172 {
2173         struct dmar_domain *found = NULL;
2174         struct device_domain_info *info;
2175         unsigned long flags;
2176
2177         info = alloc_devinfo_mem();
2178         if (!info)
2179                 return NULL;
2180
2181         info->bus = bus;
2182         info->devfn = devfn;
2183         info->dev = dev;
2184         info->domain = domain;
2185         info->iommu = iommu;
2186
2187         spin_lock_irqsave(&device_domain_lock, flags);
2188         if (dev)
2189                 found = find_domain(dev);
2190         else {
2191                 struct device_domain_info *info2;
2192                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2193                 if (info2)
2194                         found = info2->domain;
2195         }
2196         if (found) {
2197                 spin_unlock_irqrestore(&device_domain_lock, flags);
2198                 free_devinfo_mem(info);
2199                 /* Caller must free the original domain */
2200                 return found;
2201         }
2202
2203         list_add(&info->link, &domain->devices);
2204         list_add(&info->global, &device_domain_list);
2205         if (dev)
2206                 dev->archdata.iommu = info;
2207         spin_unlock_irqrestore(&device_domain_lock, flags);
2208
2209         return domain;
2210 }
2211
2212 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2213 {
2214         *(u16 *)opaque = alias;
2215         return 0;
2216 }
2217
2218 /* domain is initialized */
2219 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2220 {
2221         struct dmar_domain *domain, *tmp;
2222         struct intel_iommu *iommu;
2223         struct device_domain_info *info;
2224         u16 dma_alias;
2225         unsigned long flags;
2226         u8 bus, devfn;
2227
2228         domain = find_domain(dev);
2229         if (domain)
2230                 return domain;
2231
2232         iommu = device_to_iommu(dev, &bus, &devfn);
2233         if (!iommu)
2234                 return NULL;
2235
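             /*
              * For PCI devices, check whether the device's DMA alias
              * already has a domain and, if so, reuse it so that a device
              * and all of its aliases share the same domain.
              */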
2236         if (dev_is_pci(dev)) {
2237                 struct pci_dev *pdev = to_pci_dev(dev);
2238
2239                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2240
2241                 spin_lock_irqsave(&device_domain_lock, flags);
2242                 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2243                                                       PCI_BUS_NUM(dma_alias),
2244                                                       dma_alias & 0xff);
2245                 if (info) {
2246                         iommu = info->iommu;
2247                         domain = info->domain;
2248                 }
2249                 spin_unlock_irqrestore(&device_domain_lock, flags);
2250
2251                 /* DMA alias already has a domain, use it */
2252                 if (info)
2253                         goto found_domain;
2254         }
2255
2256         /* Allocate and initialize new domain for the device */
2257         domain = alloc_domain(0);
2258         if (!domain)
2259                 return NULL;
2260         domain->id = iommu_attach_domain(domain, iommu);
2261         if (domain->id < 0) {
2262                 free_domain_mem(domain);
2263                 return NULL;
2264         }
2265         domain_attach_iommu(domain, iommu);
2266         if (domain_init(domain, gaw)) {
2267                 domain_exit(domain);
2268                 return NULL;
2269         }
2270
2271         /* register PCI DMA alias device */
2272         if (dev_is_pci(dev)) {
2273                 tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2274                                            dma_alias & 0xff, NULL, domain);
2275
2276                 if (!tmp || tmp != domain) {
2277                         domain_exit(domain);
2278                         domain = tmp;
2279                 }
2280
2281                 if (!domain)
2282                         return NULL;
2283         }
2284
2285 found_domain:
2286         tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2287
2288         if (!tmp || tmp != domain) {
2289                 domain_exit(domain);
2290                 domain = tmp;
2291         }
2292
2293         return domain;
2294 }
2295
2296 static int iommu_identity_mapping;
2297 #define IDENTMAP_ALL            1
2298 #define IDENTMAP_GFX            2
2299 #define IDENTMAP_AZALIA         4
2300
2301 static int iommu_domain_identity_map(struct dmar_domain *domain,
2302                                      unsigned long long start,
2303                                      unsigned long long end)
2304 {
2305         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2306         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2307
2308         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2309                           dma_to_mm_pfn(last_vpfn))) {
2310                 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2311                 return -ENOMEM;
2312         }
2313
2314         pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2315                  start, end, domain->id);
2316         /*
2317          * RMRR range might have overlap with physical memory range,
2318          * clear it first
2319          */
2320         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2321
2322         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2323                                   last_vpfn - first_vpfn + 1,
2324                                   DMA_PTE_READ|DMA_PTE_WRITE);
2325 }
2326
2327 static int iommu_prepare_identity_map(struct device *dev,
2328                                       unsigned long long start,
2329                                       unsigned long long end)
2330 {
2331         struct dmar_domain *domain;
2332         int ret;
2333
2334         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2335         if (!domain)
2336                 return -ENOMEM;
2337
2338         /* For _hardware_ passthrough, don't bother. But for software
2339            passthrough, we do it anyway -- it may indicate a memory
2340            range which is reserved in E820 and so didn't get set
2341            up in si_domain to start with */
2342         if (domain == si_domain && hw_pass_through) {
2343                 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2344                        dev_name(dev), start, end);
2345                 return 0;
2346         }
2347
2348         printk(KERN_INFO
2349                "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2350                dev_name(dev), start, end);
2351
2352         if (end < start) {
2353                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2354                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2355                         dmi_get_system_info(DMI_BIOS_VENDOR),
2356                         dmi_get_system_info(DMI_BIOS_VERSION),
2357                         dmi_get_system_info(DMI_PRODUCT_VERSION));
2358                 ret = -EIO;
2359                 goto error;
2360         }
2361
2362         if (end >> agaw_to_width(domain->agaw)) {
2363                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2364                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2365                      agaw_to_width(domain->agaw),
2366                      dmi_get_system_info(DMI_BIOS_VENDOR),
2367                      dmi_get_system_info(DMI_BIOS_VERSION),
2368                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2369                 ret = -EIO;
2370                 goto error;
2371         }
2372
2373         ret = iommu_domain_identity_map(domain, start, end);
2374         if (ret)
2375                 goto error;
2376
2377         /* context entry init */
2378         ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2379         if (ret)
2380                 goto error;
2381
2382         return 0;
2383
2384  error:
2385         domain_exit(domain);
2386         return ret;
2387 }
2388
2389 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2390                                          struct device *dev)
2391 {
2392         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2393                 return 0;
2394         return iommu_prepare_identity_map(dev, rmrr->base_address,
2395                                           rmrr->end_address);
2396 }
2397
2398 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2399 static inline void iommu_prepare_isa(void)
2400 {
2401         struct pci_dev *pdev;
2402         int ret;
2403
2404         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2405         if (!pdev)
2406                 return;
2407
2408         printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2409         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2410
2411         if (ret)
2412                 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2413                        "floppy might not work\n");
2414
2415         pci_dev_put(pdev);
2416 }
2417 #else
2418 static inline void iommu_prepare_isa(void)
2419 {
2420         return;
2421 }
2422 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2423
2424 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2425
2426 static int __init si_domain_init(int hw)
2427 {
2428         struct dmar_drhd_unit *drhd;
2429         struct intel_iommu *iommu;
2430         int nid, ret = 0;
2431         bool first = true;
2432
2433         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2434         if (!si_domain)
2435                 return -EFAULT;
2436
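             /*
              * The static identity domain must get the same domain id on
              * every iommu; bail out if any iommu hands back a different id.
              */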
2437         for_each_active_iommu(iommu, drhd) {
2438                 ret = iommu_attach_domain(si_domain, iommu);
2439                 if (ret < 0) {
2440                         domain_exit(si_domain);
2441                         return -EFAULT;
2442                 } else if (first) {
2443                         si_domain->id = ret;
2444                         first = false;
2445                 } else if (si_domain->id != ret) {
2446                         domain_exit(si_domain);
2447                         return -EFAULT;
2448                 }
2449                 domain_attach_iommu(si_domain, iommu);
2450         }
2451
2452         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2453                 domain_exit(si_domain);
2454                 return -EFAULT;
2455         }
2456
2457         pr_debug("IOMMU: identity mapping domain is domain %d\n",
2458                  si_domain->id);
2459
2460         if (hw)
2461                 return 0;
2462
2463         for_each_online_node(nid) {
2464                 unsigned long start_pfn, end_pfn;
2465                 int i;
2466
2467                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2468                         ret = iommu_domain_identity_map(si_domain,
2469                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2470                         if (ret)
2471                                 return ret;
2472                 }
2473         }
2474
2475         return 0;
2476 }
2477
2478 static int identity_mapping(struct device *dev)
2479 {
2480         struct device_domain_info *info;
2481
2482         if (likely(!iommu_identity_mapping))
2483                 return 0;
2484
2485         info = dev->archdata.iommu;
2486         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2487                 return (info->domain == si_domain);
2488
2489         return 0;
2490 }
2491
2492 static int domain_add_dev_info(struct dmar_domain *domain,
2493                                struct device *dev, int translation)
2494 {
2495         struct dmar_domain *ndomain;
2496         struct intel_iommu *iommu;
2497         u8 bus, devfn;
2498         int ret;
2499
2500         iommu = device_to_iommu(dev, &bus, &devfn);
2501         if (!iommu)
2502                 return -ENODEV;
2503
2504         ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
2505         if (ndomain != domain)
2506                 return -EBUSY;
2507
2508         ret = domain_context_mapping(domain, dev, translation);
2509         if (ret) {
2510                 domain_remove_one_dev_info(domain, dev);
2511                 return ret;
2512         }
2513
2514         return 0;
2515 }
2516
2517 static bool device_has_rmrr(struct device *dev)
2518 {
2519         struct dmar_rmrr_unit *rmrr;
2520         struct device *tmp;
2521         int i;
2522
2523         rcu_read_lock();
2524         for_each_rmrr_units(rmrr) {
2525                 /*
2526                  * Return TRUE if this RMRR contains the device that
2527                  * is passed in.
2528                  */
2529                 for_each_active_dev_scope(rmrr->devices,
2530                                           rmrr->devices_cnt, i, tmp)
2531                         if (tmp == dev) {
2532                                 rcu_read_unlock();
2533                                 return true;
2534                         }
2535         }
2536         rcu_read_unlock();
2537         return false;
2538 }
2539
2540 /*
2541  * There are a couple cases where we need to restrict the functionality of
2542  * devices associated with RMRRs.  The first is when evaluating a device for
2543  * identity mapping because problems exist when devices are moved in and out
2544  * of domains and their respective RMRR information is lost.  This means that
2545  * a device with associated RMRRs will never be in a "passthrough" domain.
2546  * The second is use of the device through the IOMMU API.  This interface
2547  * expects to have full control of the IOVA space for the device.  We cannot
2548  * satisfy both the requirement that RMRR access is maintained and have an
2549  * unencumbered IOVA space.  We also have no ability to quiesce the device's
2550  * use of the RMRR space or even inform the IOMMU API user of the restriction.
2551  * We therefore prevent devices associated with an RMRR from participating in
2552  * the IOMMU API, which eliminates them from device assignment.
2553  *
2554  * In both cases we assume that PCI USB devices with RMRRs have them largely
2555  * for historical reasons and that the RMRR space is not actively used post
2556  * boot.  This exclusion may change if vendors begin to abuse it.
2557  */
2558 static bool device_is_rmrr_locked(struct device *dev)
2559 {
2560         if (!device_has_rmrr(dev))
2561                 return false;
2562
2563         if (dev_is_pci(dev)) {
2564                 struct pci_dev *pdev = to_pci_dev(dev);
2565
2566                 if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
2567                         return false;
2568         }
2569
2570         return true;
2571 }
2572
2573 static int iommu_should_identity_map(struct device *dev, int startup)
2574 {
2575
2576         if (dev_is_pci(dev)) {
2577                 struct pci_dev *pdev = to_pci_dev(dev);
2578
2579                 if (device_is_rmrr_locked(dev))
2580                         return 0;
2581
2582                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2583                         return 1;
2584
2585                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2586                         return 1;
2587
2588                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2589                         return 0;
2590
2591                 /*
2592                  * We want to start off with all devices in the 1:1 domain, and
2593                  * take them out later if we find they can't access all of memory.
2594                  *
2595                  * However, we can't do this for PCI devices behind bridges,
2596                  * because all PCI devices behind the same bridge will end up
2597                  * with the same source-id on their transactions.
2598                  *
2599                  * Practically speaking, we can't change things around for these
2600                  * devices at run-time, because we can't be sure there'll be no
2601                  * DMA transactions in flight for any of their siblings.
2602                  *
2603                  * So PCI devices (unless they're on the root bus) as well as
2604                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2605                  * the 1:1 domain, just in _case_ one of their siblings turns out
2606                  * not to be able to map all of memory.
2607                  */
2608                 if (!pci_is_pcie(pdev)) {
2609                         if (!pci_is_root_bus(pdev->bus))
2610                                 return 0;
2611                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2612                                 return 0;
2613                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2614                         return 0;
2615         } else {
2616                 if (device_has_rmrr(dev))
2617                         return 0;
2618         }
2619
2620         /*
2621          * At boot time, we don't yet know if devices will be 64-bit capable.
2622          * Assume that they will -- if they turn out not to be, then we can
2623          * take them out of the 1:1 domain later.
2624          */
2625         if (!startup) {
2626                 /*
2627                  * If the device's dma_mask is less than the system's memory
2628                  * size then this is not a candidate for identity mapping.
2629                  */
2630                 u64 dma_mask = *dev->dma_mask;
2631
2632                 if (dev->coherent_dma_mask &&
2633                     dev->coherent_dma_mask < dma_mask)
2634                         dma_mask = dev->coherent_dma_mask;
2635
2636                 return dma_mask >= dma_get_required_mask(dev);
2637         }
2638
2639         return 1;
2640 }
2641
2642 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2643 {
2644         int ret;
2645
2646         if (!iommu_should_identity_map(dev, 1))
2647                 return 0;
2648
2649         ret = domain_add_dev_info(si_domain, dev,
2650                                   hw ? CONTEXT_TT_PASS_THROUGH :
2651                                        CONTEXT_TT_MULTI_LEVEL);
2652         if (!ret)
2653                 pr_info("IOMMU: %s identity mapping for device %s\n",
2654                         hw ? "hardware" : "software", dev_name(dev));
2655         else if (ret == -ENODEV)
2656                 /* device not associated with an iommu */
2657                 ret = 0;
2658
2659         return ret;
2660 }
2661
2662
2663 static int __init iommu_prepare_static_identity_mapping(int hw)
2664 {
2665         struct pci_dev *pdev = NULL;
2666         struct dmar_drhd_unit *drhd;
2667         struct intel_iommu *iommu;
2668         struct device *dev;
2669         int i;
2670         int ret = 0;
2671
2672         ret = si_domain_init(hw);
2673         if (ret)
2674                 return -EFAULT;
2675
2676         for_each_pci_dev(pdev) {
2677                 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2678                 if (ret)
2679                         return ret;
2680         }
2681
2682         for_each_active_iommu(iommu, drhd)
2683                 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2684                         struct acpi_device_physical_node *pn;
2685                         struct acpi_device *adev;
2686
2687                         if (dev->bus != &acpi_bus_type)
2688                                 continue;
2689
2690                         adev = to_acpi_device(dev);
2691                         mutex_lock(&adev->physical_node_lock);
2692                         list_for_each_entry(pn, &adev->physical_node_list, node) {
2693                                 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2694                                 if (ret)
2695                                         break;
2696                         }
2697                         mutex_unlock(&adev->physical_node_lock);
2698                         if (ret)
2699                                 return ret;
2700                 }
2701
2702         return 0;
2703 }
2704
2705 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2706 {
2707         /*
2708          * Start from a sane iommu hardware state.
2709          * If the queued invalidation is already initialized by us
2710          * (for example, while enabling interrupt-remapping) then
2711          * we got the things already rolling from a sane state.
2712          */
2713         if (!iommu->qi) {
2714                 /*
2715                  * Clear any previous faults.
2716                  */
2717                 dmar_fault(-1, iommu);
2718                 /*
2719                  * Disable queued invalidation if supported and already enabled
2720                  * before OS handover.
2721                  */
2722                 dmar_disable_qi(iommu);
2723         }
2724
2725         if (dmar_enable_qi(iommu)) {
2726                 /*
2727                  * Queued Invalidate not enabled, use Register Based Invalidate
2728                  */
2729                 iommu->flush.flush_context = __iommu_flush_context;
2730                 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2731                 pr_info("IOMMU: %s using Register based invalidation\n",
2732                         iommu->name);
2733         } else {
2734                 iommu->flush.flush_context = qi_flush_context;
2735                 iommu->flush.flush_iotlb = qi_flush_iotlb;
2736                 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2737         }
2738 }
2739
2740 static int __init init_dmars(void)
2741 {
2742         struct dmar_drhd_unit *drhd;
2743         struct dmar_rmrr_unit *rmrr;
2744         struct device *dev;
2745         struct intel_iommu *iommu;
2746         int i, ret;
2747
2748         /*
2749          * for each drhd
2750          *    allocate root
2751          *    initialize and program root entry to not present
2752          * endfor
2753          */
2754         for_each_drhd_unit(drhd) {
2755                 /*
2756                  * lock not needed as this is only incremented in the single-
2757                  * threaded kernel __init code path; all other accesses are
2758                  * read only
2759                  */
2760                 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
2761                         g_num_of_iommus++;
2762                         continue;
2763                 }
2764                 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2765                           DMAR_UNITS_SUPPORTED);
2766         }
2767
2768         /* Preallocate enough resources for IOMMU hot-addition */
2769         if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2770                 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2771
2772         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2773                         GFP_KERNEL);
2774         if (!g_iommus) {
2775                 printk(KERN_ERR "Allocating global iommu array failed\n");
2776                 ret = -ENOMEM;
2777                 goto error;
2778         }
2779
2780         deferred_flush = kzalloc(g_num_of_iommus *
2781                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2782         if (!deferred_flush) {
2783                 ret = -ENOMEM;
2784                 goto free_g_iommus;
2785         }
2786
2787         for_each_active_iommu(iommu, drhd) {
2788                 g_iommus[iommu->seq_id] = iommu;
2789
2790                 ret = iommu_init_domains(iommu);
2791                 if (ret)
2792                         goto free_iommu;
2793
2794                 /*
2795                  * TBD:
2796                  * we could share the same root & context tables
2797                  * among all IOMMU's. Need to Split it later.
2798                  */
2799                 ret = iommu_alloc_root_entry(iommu);
2800                 if (ret)
2801                         goto free_iommu;
2802                 if (!ecap_pass_through(iommu->ecap))
2803                         hw_pass_through = 0;
2804         }
2805
2806         for_each_active_iommu(iommu, drhd)
2807                 intel_iommu_init_qi(iommu);
2808
2809         if (iommu_pass_through)
2810                 iommu_identity_mapping |= IDENTMAP_ALL;
2811
2812 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2813         iommu_identity_mapping |= IDENTMAP_GFX;
2814 #endif
2815
2816         check_tylersburg_isoch();
2817
2818         /*
2819          * If pass through is not set or not enabled, setup context entries for
2820          * identity mappings for rmrr, gfx, and isa and may fall back to static
2821          * identity mapping if iommu_identity_mapping is set.
2822          */
2823         if (iommu_identity_mapping) {
2824                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2825                 if (ret) {
2826                         printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2827                         goto free_iommu;
2828                 }
2829         }
2830         /*
2831          * For each rmrr
2832          *   for each dev attached to rmrr
2833          *   do
2834          *     locate drhd for dev, alloc domain for dev
2835          *     allocate free domain
2836          *     allocate page table entries for rmrr
2837          *     if context not allocated for bus
2838          *           allocate and init context
2839          *           set present in root table for this bus
2840          *     init context with domain, translation etc
2841          *    endfor
2842          * endfor
2843          */
2844         printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2845         for_each_rmrr_units(rmrr) {
2846                 /* some BIOSes list non-existent devices in the DMAR table. */
2847                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2848                                           i, dev) {
2849                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
2850                         if (ret)
2851                                 printk(KERN_ERR
2852                                        "IOMMU: mapping reserved region failed\n");
2853                 }
2854         }
2855
2856         iommu_prepare_isa();
2857
2858         /*
2859          * for each drhd
2860          *   enable fault log
2861          *   global invalidate context cache
2862          *   global invalidate iotlb
2863          *   enable translation
2864          */
2865         for_each_iommu(iommu, drhd) {
2866                 if (drhd->ignored) {
2867                         /*
2868                          * we always have to disable PMRs or DMA may fail on
2869                          * this device
2870                          */
2871                         if (force_on)
2872                                 iommu_disable_protect_mem_regions(iommu);
2873                         continue;
2874                 }
2875
2876                 iommu_flush_write_buffer(iommu);
2877
2878                 ret = dmar_set_interrupt(iommu);
2879                 if (ret)
2880                         goto free_iommu;
2881
2882                 iommu_set_root_entry(iommu);
2883
2884                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2885                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2886                 iommu_enable_translation(iommu);
2887                 iommu_disable_protect_mem_regions(iommu);
2888         }
2889
2890         return 0;
2891
2892 free_iommu:
2893         for_each_active_iommu(iommu, drhd) {
2894                 disable_dmar_iommu(iommu);
2895                 free_dmar_iommu(iommu);
2896         }
2897         kfree(deferred_flush);
2898 free_g_iommus:
2899         kfree(g_iommus);
2900 error:
2901         return ret;
2902 }
2903
2904 /* This takes a number of _MM_ pages, not VTD pages */
2905 static struct iova *intel_alloc_iova(struct device *dev,
2906                                      struct dmar_domain *domain,
2907                                      unsigned long nrpages, uint64_t dma_mask)
2908 {
2909         struct iova *iova = NULL;
2910
2911         /* Restrict dma_mask to the width that the iommu can handle */
2912         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2913
2914         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2915                 /*
2916                  * First try to allocate an io virtual address in
2917                  * DMA_BIT_MASK(32) and if that fails then try allocating
2918                  * from the higher range
2919                  */
2920                 iova = alloc_iova(&domain->iovad, nrpages,
2921                                   IOVA_PFN(DMA_BIT_MASK(32)), 1);
2922                 if (iova)
2923                         return iova;
2924         }
2925         iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2926         if (unlikely(!iova)) {
2927                 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2928                        nrpages, dev_name(dev));
2929                 return NULL;
2930         }
2931
2932         return iova;
2933 }
2934
2935 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
2936 {
2937         struct dmar_domain *domain;
2938         int ret;
2939
2940         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2941         if (!domain) {
2942                 printk(KERN_ERR "Allocating domain for %s failed\n",
2943                        dev_name(dev));
2944                 return NULL;
2945         }
2946
2947         /* make sure context mapping is ok */
2948         if (unlikely(!domain_context_mapped(dev))) {
2949                 ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
2950                 if (ret) {
2951                         printk(KERN_ERR "Domain context map for %s failed\n",
2952                                dev_name(dev));
2953                         return NULL;
2954                 }
2955         }
2956
2957         return domain;
2958 }
2959
2960 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
2961 {
2962         struct device_domain_info *info;
2963
2964         /* No lock here, assumes no domain exit in normal case */
2965         info = dev->archdata.iommu;
2966         if (likely(info))
2967                 return info->domain;
2968
2969         return __get_valid_domain_for_dev(dev);
2970 }
2971
2972 static int iommu_dummy(struct device *dev)
2973 {
2974         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2975 }
2976
2977 /* Check if the dev needs to go through the non-identity map and unmap process. */
2978 static int iommu_no_mapping(struct device *dev)
2979 {
2980         int found;
2981
2982         if (iommu_dummy(dev))
2983                 return 1;
2984
2985         if (!iommu_identity_mapping)
2986                 return 0;
2987
2988         found = identity_mapping(dev);
2989         if (found) {
2990                 if (iommu_should_identity_map(dev, 0))
2991                         return 1;
2992                 else {
2993                         /*
2994                          * Device only supports 32 bit DMA; remove it from
2995                          * si_domain and fall back to non-identity mapping.
2996                          */
2997                         domain_remove_one_dev_info(si_domain, dev);
2998                         printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2999                                dev_name(dev));
3000                         return 0;
3001                 }
3002         } else {
3003                 /*
3004                  * A 64 bit DMA capable device detached from a VM is put
3005                  * back into si_domain for identity mapping.
3006                  */
3007                 if (iommu_should_identity_map(dev, 0)) {
3008                         int ret;
3009                         ret = domain_add_dev_info(si_domain, dev,
3010                                                   hw_pass_through ?
3011                                                   CONTEXT_TT_PASS_THROUGH :
3012                                                   CONTEXT_TT_MULTI_LEVEL);
3013                         if (!ret) {
3014                                 printk(KERN_INFO "64bit %s uses identity mapping\n",
3015                                        dev_name(dev));
3016                                 return 1;
3017                         }
3018                 }
3019         }
3020
3021         return 0;
3022 }
3023
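/*
 * Core of the map_page path: allocate a suitably sized, size-aligned IOVA,
 * build the page-table entries covering it, and make the new translations
 * visible (a page-selective IOTLB flush in caching mode, or just a
 * write-buffer flush on real hardware, where non-present -> present
 * transitions are never cached).
 */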
3024 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3025                                      size_t size, int dir, u64 dma_mask)
3026 {
3027         struct dmar_domain *domain;
3028         phys_addr_t start_paddr;
3029         struct iova *iova;
3030         int prot = 0;
3031         int ret;
3032         struct intel_iommu *iommu;
3033         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3034
3035         BUG_ON(dir == DMA_NONE);
3036
3037         if (iommu_no_mapping(dev))
3038                 return paddr;
3039
3040         domain = get_valid_domain_for_dev(dev);
3041         if (!domain)
3042                 return 0;
3043
3044         iommu = domain_get_iommu(domain);
3045         size = aligned_nrpages(paddr, size);
3046
3047         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3048         if (!iova)
3049                 goto error;
3050
3051         /*
3052          * Check if DMAR supports zero-length reads on write only
3053          * mappings.
3054          */
3055         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3056                         !cap_zlr(iommu->cap))
3057                 prot |= DMA_PTE_READ;
3058         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3059                 prot |= DMA_PTE_WRITE;
3060         /*
3061          * paddr .. paddr + size may cover only part of a page, but we must map
3062          * the whole page.  Note: if two parts of one page are mapped separately,
3063          * we might end up with two guest addresses mapping to the same host
3064          * paddr; this is not a big problem.
3065          */
3066         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
3067                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3068         if (ret)
3069                 goto error;
3070
3071         /* it's a non-present to present mapping. Only flush if caching mode */
3072         if (cap_caching_mode(iommu->cap))
3073                 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
3074         else
3075                 iommu_flush_write_buffer(iommu);
3076
3077         start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
3078         start_paddr += paddr & ~PAGE_MASK;
3079         return start_paddr;
3080
3081 error:
3082         if (iova)
3083                 __free_iova(&domain->iovad, iova);
3084         printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
3085                 dev_name(dev), size, (unsigned long long)paddr, dir);
3086         return 0;
3087 }
3088
3089 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3090                                  unsigned long offset, size_t size,
3091                                  enum dma_data_direction dir,
3092                                  struct dma_attrs *attrs)
3093 {
3094         return __intel_map_single(dev, page_to_phys(page) + offset, size,
3095                                   dir, *dev->dma_mask);
3096 }
3097
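/*
 * Deferred (lazy) unmap support.  Instead of flushing the IOTLB on every
 * unmap, freed ranges are queued per-IOMMU in deferred_flush[] and released
 * in batches, either once HIGH_WATER_MARK entries have accumulated (see
 * add_unmap() below) or when the 10ms unmap_timer fires.  This trades a
 * short window in which stale translations remain usable for much less
 * invalidation traffic; booting with intel_iommu=strict bypasses the queue.
 */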
3098 static void flush_unmaps(void)
3099 {
3100         int i, j;
3101
3102         timer_on = 0;
3103
3104         /* just flush them all */
3105         for (i = 0; i < g_num_of_iommus; i++) {
3106                 struct intel_iommu *iommu = g_iommus[i];
3107                 if (!iommu)
3108                         continue;
3109
3110                 if (!deferred_flush[i].next)
3111                         continue;
3112
3113                 /* In caching mode, global flushes make emulation expensive */
3114                 if (!cap_caching_mode(iommu->cap))
3115                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3116                                          DMA_TLB_GLOBAL_FLUSH);
3117                 for (j = 0; j < deferred_flush[i].next; j++) {
3118                         unsigned long mask;
3119                         struct iova *iova = deferred_flush[i].iova[j];
3120                         struct dmar_domain *domain = deferred_flush[i].domain[j];
3121
3122                         /* On real hardware multiple invalidations are expensive */
3123                         if (cap_caching_mode(iommu->cap))
3124                                 iommu_flush_iotlb_psi(iommu, domain->id,
3125                                         iova->pfn_lo, iova_size(iova),
3126                                         !deferred_flush[i].freelist[j], 0);
3127                         else {
3128                                 mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
3129                                 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
3130                                                 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
3131                         }
3132                         __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
3133                         if (deferred_flush[i].freelist[j])
3134                                 dma_free_pagelist(deferred_flush[i].freelist[j]);
3135                 }
3136                 deferred_flush[i].next = 0;
3137         }
3138
3139         list_size = 0;
3140 }
3141
3142 static void flush_unmaps_timeout(unsigned long data)
3143 {
3144         unsigned long flags;
3145
3146         spin_lock_irqsave(&async_umap_flush_lock, flags);
3147         flush_unmaps();
3148         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3149 }
3150
3151 static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
3152 {
3153         unsigned long flags;
3154         int next, iommu_id;
3155         struct intel_iommu *iommu;
3156
3157         spin_lock_irqsave(&async_umap_flush_lock, flags);
3158         if (list_size == HIGH_WATER_MARK)
3159                 flush_unmaps();
3160
3161         iommu = domain_get_iommu(dom);
3162         iommu_id = iommu->seq_id;
3163
3164         next = deferred_flush[iommu_id].next;
3165         deferred_flush[iommu_id].domain[next] = dom;
3166         deferred_flush[iommu_id].iova[next] = iova;
3167         deferred_flush[iommu_id].freelist[next] = freelist;
3168         deferred_flush[iommu_id].next++;
3169
3170         if (!timer_on) {
3171                 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
3172                 timer_on = 1;
3173         }
3174         list_size++;
3175         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
3176 }
3177
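/*
 * Tear down the translation that __intel_map_single()/intel_map_sg() set up
 * for dev_addr.  In strict mode the IOTLB is flushed and the IOVA freed
 * synchronously; otherwise the range is handed to the deferred flush queue
 * above.
 */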
3178 static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
3179 {
3180         struct dmar_domain *domain;
3181         unsigned long start_pfn, last_pfn;
3182         struct iova *iova;
3183         struct intel_iommu *iommu;
3184         struct page *freelist;
3185
3186         if (iommu_no_mapping(dev))
3187                 return;
3188
3189         domain = find_domain(dev);
3190         BUG_ON(!domain);
3191
3192         iommu = domain_get_iommu(domain);
3193
3194         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
3195         if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
3196                       (unsigned long long)dev_addr))
3197                 return;
3198
3199         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3200         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3201
3202         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3203                  dev_name(dev), start_pfn, last_pfn);
3204
3205         freelist = domain_unmap(domain, start_pfn, last_pfn);
3206
3207         if (intel_iommu_strict) {
3208                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3209                                       last_pfn - start_pfn + 1, !freelist, 0);
3210                 /* free iova */
3211                 __free_iova(&domain->iovad, iova);
3212                 dma_free_pagelist(freelist);
3213         } else {
3214                 add_unmap(domain, iova, freelist);
3215                 /*
3216                  * queue up the release of the unmap to save the roughly 1/6th of
3217                  * the cpu time otherwise used up by the iotlb flush operation...
3218                  */
3219         }
3220 }
3221
3222 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3223                              size_t size, enum dma_data_direction dir,
3224                              struct dma_attrs *attrs)
3225 {
3226         intel_unmap(dev, dev_addr);
3227 }
3228
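/*
 * Coherent allocations: if the caller may sleep (__GFP_WAIT), try the
 * contiguous (CMA) allocator first, then fall back to alloc_pages(), and
 * finally run the buffer through __intel_map_single() with
 * DMA_BIDIRECTIONAL so the returned handle is an IOVA (or the physical
 * address for devices that bypass translation).
 */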
3229 static void *intel_alloc_coherent(struct device *dev, size_t size,
3230                                   dma_addr_t *dma_handle, gfp_t flags,
3231                                   struct dma_attrs *attrs)
3232 {
3233         struct page *page = NULL;
3234         int order;
3235
3236         size = PAGE_ALIGN(size);
3237         order = get_order(size);
3238
3239         if (!iommu_no_mapping(dev))
3240                 flags &= ~(GFP_DMA | GFP_DMA32);
3241         else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3242                 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3243                         flags |= GFP_DMA;
3244                 else
3245                         flags |= GFP_DMA32;
3246         }
3247
3248         if (flags & __GFP_WAIT) {
3249                 unsigned int count = size >> PAGE_SHIFT;
3250
3251                 page = dma_alloc_from_contiguous(dev, count, order);
3252                 if (page && iommu_no_mapping(dev) &&
3253                     page_to_phys(page) + size > dev->coherent_dma_mask) {
3254                         dma_release_from_contiguous(dev, page, count);
3255                         page = NULL;
3256                 }
3257         }
3258
3259         if (!page)
3260                 page = alloc_pages(flags, order);
3261         if (!page)
3262                 return NULL;
3263         memset(page_address(page), 0, size);
3264
3265         *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3266                                          DMA_BIDIRECTIONAL,
3267                                          dev->coherent_dma_mask);
3268         if (*dma_handle)
3269                 return page_address(page);
3270         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3271                 __free_pages(page, order);
3272
3273         return NULL;
3274 }
3275
3276 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3277                                 dma_addr_t dma_handle, struct dma_attrs *attrs)
3278 {
3279         int order;
3280         struct page *page = virt_to_page(vaddr);
3281
3282         size = PAGE_ALIGN(size);
3283         order = get_order(size);
3284
3285         intel_unmap(dev, dma_handle);
3286         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3287                 __free_pages(page, order);
3288 }
3289
3290 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3291                            int nelems, enum dma_data_direction dir,
3292                            struct dma_attrs *attrs)
3293 {
3294         intel_unmap(dev, sglist[0].dma_address);
3295 }
3296
3297 static int intel_nontranslate_map_sg(struct device *hddev,
3298         struct scatterlist *sglist, int nelems, int dir)
3299 {
3300         int i;
3301         struct scatterlist *sg;
3302
3303         for_each_sg(sglist, sg, nelems, i) {
3304                 BUG_ON(!sg_page(sg));
3305                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3306                 sg->dma_length = sg->length;
3307         }
3308         return nelems;
3309 }
3310
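/*
 * Map a scatterlist: one IOVA range big enough for every segment is
 * allocated up front and the segments are laid out back to back inside it
 * by domain_sg_mapping().  Devices that bypass translation get a 1:1
 * physical mapping via intel_nontranslate_map_sg() instead.
 */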
3311 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3312                         enum dma_data_direction dir, struct dma_attrs *attrs)
3313 {
3314         int i;
3315         struct dmar_domain *domain;
3316         size_t size = 0;
3317         int prot = 0;
3318         struct iova *iova = NULL;
3319         int ret;
3320         struct scatterlist *sg;
3321         unsigned long start_vpfn;
3322         struct intel_iommu *iommu;
3323
3324         BUG_ON(dir == DMA_NONE);
3325         if (iommu_no_mapping(dev))
3326                 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3327
3328         domain = get_valid_domain_for_dev(dev);
3329         if (!domain)
3330                 return 0;
3331
3332         iommu = domain_get_iommu(domain);
3333
3334         for_each_sg(sglist, sg, nelems, i)
3335                 size += aligned_nrpages(sg->offset, sg->length);
3336
3337         iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3338                                 *dev->dma_mask);
3339         if (!iova) {
3340                 sglist->dma_length = 0;
3341                 return 0;
3342         }
3343
3344         /*
3345          * Check if DMAR supports zero-length reads on write only
3346          * mappings.
3347          */
3348         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3349                         !cap_zlr(iommu->cap))
3350                 prot |= DMA_PTE_READ;
3351         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3352                 prot |= DMA_PTE_WRITE;
3353
3354         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3355
3356         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3357         if (unlikely(ret)) {
3358                 dma_pte_free_pagetable(domain, start_vpfn,
3359                                        start_vpfn + size - 1);
3360                 __free_iova(&domain->iovad, iova);
3361                 return 0;
3362         }
3363
3364         /* it's a non-present to present mapping. Only flush if caching mode */
3365         if (cap_caching_mode(iommu->cap))
3366                 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
3367         else
3368                 iommu_flush_write_buffer(iommu);
3369
3370         return nelems;
3371 }
3372
3373 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3374 {
3375         return !dma_addr;
3376 }
3377
3378 struct dma_map_ops intel_dma_ops = {
3379         .alloc = intel_alloc_coherent,
3380         .free = intel_free_coherent,
3381         .map_sg = intel_map_sg,
3382         .unmap_sg = intel_unmap_sg,
3383         .map_page = intel_map_page,
3384         .unmap_page = intel_unmap_page,
3385         .mapping_error = intel_mapping_error,
3386 };
3387
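/*
 * Illustrative example only (not part of this driver): once
 * intel_iommu_init() has installed intel_dma_ops as the global dma_ops, an
 * ordinary PCI driver reaches the functions above through the generic DMA
 * API, e.g.:
 *
 *        dma_addr_t dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *        if (dma_mapping_error(&pdev->dev, dma))    // -> intel_mapping_error()
 *                return -ENOMEM;
 *        ...
 *        dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
 *
 * dma_map_single() resolves to intel_map_page() and dma_unmap_single() to
 * intel_unmap_page() via the struct dma_map_ops indirection.
 */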
3388 static inline int iommu_domain_cache_init(void)
3389 {
3390         int ret = 0;
3391
3392         iommu_domain_cache = kmem_cache_create("iommu_domain",
3393                                          sizeof(struct dmar_domain),
3394                                          0,
3395                                          SLAB_HWCACHE_ALIGN,
3396
3397                                          NULL);
3398         if (!iommu_domain_cache) {
3399                 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3400                 ret = -ENOMEM;
3401         }
3402
3403         return ret;
3404 }
3405
3406 static inline int iommu_devinfo_cache_init(void)
3407 {
3408         int ret = 0;
3409
3410         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3411                                          sizeof(struct device_domain_info),
3412                                          0,
3413                                          SLAB_HWCACHE_ALIGN,
3414                                          NULL);
3415         if (!iommu_devinfo_cache) {
3416                 printk(KERN_ERR "Couldn't create devinfo cache\n");
3417                 ret = -ENOMEM;
3418         }
3419
3420         return ret;
3421 }
3422
3423 static int __init iommu_init_mempool(void)
3424 {
3425         int ret;
3426         ret = iommu_iova_cache_init();
3427         if (ret)
3428                 return ret;
3429
3430         ret = iommu_domain_cache_init();
3431         if (ret)
3432                 goto domain_error;
3433
3434         ret = iommu_devinfo_cache_init();
3435         if (!ret)
3436                 return ret;
3437
3438         kmem_cache_destroy(iommu_domain_cache);
3439 domain_error:
3440         iommu_iova_cache_destroy();
3441
3442         return -ENOMEM;
3443 }
3444
3445 static void __init iommu_exit_mempool(void)
3446 {
3447         kmem_cache_destroy(iommu_devinfo_cache);
3448         kmem_cache_destroy(iommu_domain_cache);
3449         iommu_iova_cache_destroy();
3450 }
3451
3452 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3453 {
3454         struct dmar_drhd_unit *drhd;
3455         u32 vtbar;
3456         int rc;
3457
3458         /* We know that this device on this chipset has its own IOMMU.
3459          * If we find it under a different IOMMU, then the BIOS is lying
3460          * to us. Hope that the IOMMU for this device is actually
3461          * disabled, and it needs no translation...
3462          */
3463         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3464         if (rc) {
3465                 /* "can't" happen */
3466                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3467                 return;
3468         }
3469         vtbar &= 0xffff0000;
3470
3471         /* we know that this iommu should be at offset 0xa000 from vtbar */
3472         drhd = dmar_find_matched_drhd_unit(pdev);
3473         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3474                             TAINT_FIRMWARE_WORKAROUND,
3475                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3476                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3477 }
3478 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3479
3480 static void __init init_no_remapping_devices(void)
3481 {
3482         struct dmar_drhd_unit *drhd;
3483         struct device *dev;
3484         int i;
3485
3486         for_each_drhd_unit(drhd) {
3487                 if (!drhd->include_all) {
3488                         for_each_active_dev_scope(drhd->devices,
3489                                                   drhd->devices_cnt, i, dev)
3490                                 break;
3491                         /* ignore DMAR unit if no devices exist */
3492                         if (i == drhd->devices_cnt)
3493                                 drhd->ignored = 1;
3494                 }
3495         }
3496
3497         for_each_active_drhd_unit(drhd) {
3498                 if (drhd->include_all)
3499                         continue;
3500
3501                 for_each_active_dev_scope(drhd->devices,
3502                                           drhd->devices_cnt, i, dev)
3503                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3504                                 break;
3505                 if (i < drhd->devices_cnt)
3506                         continue;
3507
3508                 /* This IOMMU has *only* gfx devices. Either bypass it or
3509                    set the gfx_mapped flag, as appropriate */
3510                 if (dmar_map_gfx) {
3511                         intel_iommu_gfx_mapped = 1;
3512                 } else {
3513                         drhd->ignored = 1;
3514                         for_each_active_dev_scope(drhd->devices,
3515                                                   drhd->devices_cnt, i, dev)
3516                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3517                 }
3518         }
3519 }
3520
3521 #ifdef CONFIG_SUSPEND
3522 static int init_iommu_hw(void)
3523 {
3524         struct dmar_drhd_unit *drhd;
3525         struct intel_iommu *iommu = NULL;
3526
3527         for_each_active_iommu(iommu, drhd)
3528                 if (iommu->qi)
3529                         dmar_reenable_qi(iommu);
3530
3531         for_each_iommu(iommu, drhd) {
3532                 if (drhd->ignored) {
3533                         /*
3534                          * we always have to disable PMRs or DMA may fail on
3535                          * this device
3536                          */
3537                         if (force_on)
3538                                 iommu_disable_protect_mem_regions(iommu);
3539                         continue;
3540                 }
3541
3542                 iommu_flush_write_buffer(iommu);
3543
3544                 iommu_set_root_entry(iommu);
3545
3546                 iommu->flush.flush_context(iommu, 0, 0, 0,
3547                                            DMA_CCMD_GLOBAL_INVL);
3548                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3549                 iommu_enable_translation(iommu);
3550                 iommu_disable_protect_mem_regions(iommu);
3551         }
3552
3553         return 0;
3554 }
3555
3556 static void iommu_flush_all(void)
3557 {
3558         struct dmar_drhd_unit *drhd;
3559         struct intel_iommu *iommu;
3560
3561         for_each_active_iommu(iommu, drhd) {
3562                 iommu->flush.flush_context(iommu, 0, 0, 0,
3563                                            DMA_CCMD_GLOBAL_INVL);
3564                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3565                                          DMA_TLB_GLOBAL_FLUSH);
3566         }
3567 }
3568
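/*
 * Suspend: flush everything, disable translation, and save each IOMMU's
 * fault-event registers (FECTL/FEDATA/FEADDR/FEUADDR).  iommu_resume()
 * below re-initialises the hardware via init_iommu_hw() and writes the
 * saved register values back.
 */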
3569 static int iommu_suspend(void)
3570 {
3571         struct dmar_drhd_unit *drhd;
3572         struct intel_iommu *iommu = NULL;
3573         unsigned long flag;
3574
3575         for_each_active_iommu(iommu, drhd) {
3576                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3577                                                  GFP_ATOMIC);
3578                 if (!iommu->iommu_state)
3579                         goto nomem;
3580         }
3581
3582         iommu_flush_all();
3583
3584         for_each_active_iommu(iommu, drhd) {
3585                 iommu_disable_translation(iommu);
3586
3587                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3588
3589                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3590                         readl(iommu->reg + DMAR_FECTL_REG);
3591                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3592                         readl(iommu->reg + DMAR_FEDATA_REG);
3593                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3594                         readl(iommu->reg + DMAR_FEADDR_REG);
3595                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3596                         readl(iommu->reg + DMAR_FEUADDR_REG);
3597
3598                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3599         }
3600         return 0;
3601
3602 nomem:
3603         for_each_active_iommu(iommu, drhd)
3604                 kfree(iommu->iommu_state);
3605
3606         return -ENOMEM;
3607 }
3608
3609 static void iommu_resume(void)
3610 {
3611         struct dmar_drhd_unit *drhd;
3612         struct intel_iommu *iommu = NULL;
3613         unsigned long flag;
3614
3615         if (init_iommu_hw()) {
3616                 if (force_on)
3617                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3618                 else
3619                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3620                 return;
3621         }
3622
3623         for_each_active_iommu(iommu, drhd) {
3624
3625                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3626
3627                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3628                         iommu->reg + DMAR_FECTL_REG);
3629                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3630                         iommu->reg + DMAR_FEDATA_REG);
3631                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3632                         iommu->reg + DMAR_FEADDR_REG);
3633                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3634                         iommu->reg + DMAR_FEUADDR_REG);
3635
3636                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3637         }
3638
3639         for_each_active_iommu(iommu, drhd)
3640                 kfree(iommu->iommu_state);
3641 }
3642
3643 static struct syscore_ops iommu_syscore_ops = {
3644         .resume         = iommu_resume,
3645         .suspend        = iommu_suspend,
3646 };
3647
3648 static void __init init_iommu_pm_ops(void)
3649 {
3650         register_syscore_ops(&iommu_syscore_ops);
3651 }
3652
3653 #else
3654 static inline void init_iommu_pm_ops(void) {}
3655 #endif  /* CONFIG_SUSPEND */
3656
3657
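/*
 * ACPI DMAR sub-table handling: the helpers below record the Reserved
 * Memory Region Reporting (RMRR) and ATS reporting (ATSR) structures so
 * that later device-scope updates and hotplug notifications can be matched
 * against them.
 */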
3658 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3659 {
3660         struct acpi_dmar_reserved_memory *rmrr;
3661         struct dmar_rmrr_unit *rmrru;
3662
3663         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3664         if (!rmrru)
3665                 return -ENOMEM;
3666
3667         rmrru->hdr = header;
3668         rmrr = (struct acpi_dmar_reserved_memory *)header;
3669         rmrru->base_address = rmrr->base_address;
3670         rmrru->end_address = rmrr->end_address;
3671         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3672                                 ((void *)rmrr) + rmrr->header.length,
3673                                 &rmrru->devices_cnt);
3674         if (rmrru->devices_cnt && rmrru->devices == NULL) {
3675                 kfree(rmrru);
3676                 return -ENOMEM;
3677         }
3678
3679         list_add(&rmrru->list, &dmar_rmrr_units);
3680
3681         return 0;
3682 }
3683
3684 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3685 {
3686         struct dmar_atsr_unit *atsru;
3687         struct acpi_dmar_atsr *tmp;
3688
3689         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3690                 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3691                 if (atsr->segment != tmp->segment)
3692                         continue;
3693                 if (atsr->header.length != tmp->header.length)
3694                         continue;
3695                 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3696                         return atsru;
3697         }
3698
3699         return NULL;
3700 }
3701
3702 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3703 {
3704         struct acpi_dmar_atsr *atsr;
3705         struct dmar_atsr_unit *atsru;
3706
3707         if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3708                 return 0;
3709
3710         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3711         atsru = dmar_find_atsr(atsr);
3712         if (atsru)
3713                 return 0;
3714
3715         atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
3716         if (!atsru)
3717                 return -ENOMEM;
3718
3719         /*
3720          * If the header was allocated from slab by the ACPI _DSM method, we
3721          * need to copy its contents because the buffer will be freed on
3722          * return.
3723          */
3724         atsru->hdr = (void *)(atsru + 1);
3725         memcpy(atsru->hdr, hdr, hdr->length);
3726         atsru->include_all = atsr->flags & 0x1;
3727         if (!atsru->include_all) {
3728                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3729                                 (void *)atsr + atsr->header.length,
3730                                 &atsru->devices_cnt);
3731                 if (atsru->devices_cnt && atsru->devices == NULL) {
3732                         kfree(atsru);
3733                         return -ENOMEM;
3734                 }
3735         }
3736
3737         list_add_rcu(&atsru->list, &dmar_atsr_units);
3738
3739         return 0;
3740 }
3741
3742 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3743 {
3744         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3745         kfree(atsru);
3746 }
3747
3748 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3749 {
3750         struct acpi_dmar_atsr *atsr;
3751         struct dmar_atsr_unit *atsru;
3752
3753         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3754         atsru = dmar_find_atsr(atsr);
3755         if (atsru) {
3756                 list_del_rcu(&atsru->list);
3757                 synchronize_rcu();
3758                 intel_iommu_free_atsr(atsru);
3759         }
3760
3761         return 0;
3762 }
3763
3764 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3765 {
3766         int i;
3767         struct device *dev;
3768         struct acpi_dmar_atsr *atsr;
3769         struct dmar_atsr_unit *atsru;
3770
3771         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3772         atsru = dmar_find_atsr(atsr);
3773         if (!atsru)
3774                 return 0;
3775
3776         if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
3777                 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3778                                           i, dev)
3779                         return -EBUSY;
3780
3781         return 0;
3782 }
3783
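/*
 * Bring up a hot-added DMAR unit: check that it is feature-compatible with
 * the running configuration (pass-through, snooping, super pages), set up
 * its domain ids and root entry, enable queued invalidation, the fault
 * interrupt and translation, and attach it to si_domain when identity
 * mapping is in use.
 */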
3784 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3785 {
3786         int sp, ret = 0;
3787         struct intel_iommu *iommu = dmaru->iommu;
3788
3789         if (g_iommus[iommu->seq_id])
3790                 return 0;
3791
3792         if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3793                 pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
3794                         iommu->name);
3795                 return -ENXIO;
3796         }
3797         if (!ecap_sc_support(iommu->ecap) &&
3798             domain_update_iommu_snooping(iommu)) {
3799                 pr_warn("IOMMU: %s doesn't support snooping.\n",
3800                         iommu->name);
3801                 return -ENXIO;
3802         }
3803         sp = domain_update_iommu_superpage(iommu) - 1;
3804         if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3805                 pr_warn("IOMMU: %s doesn't support large page.\n",
3806                         iommu->name);
3807                 return -ENXIO;
3808         }
3809
3810         /*
3811          * Disable translation if already enabled prior to OS handover.
3812          */
3813         if (iommu->gcmd & DMA_GCMD_TE)
3814                 iommu_disable_translation(iommu);
3815
3816         g_iommus[iommu->seq_id] = iommu;
3817         ret = iommu_init_domains(iommu);
3818         if (ret == 0)
3819                 ret = iommu_alloc_root_entry(iommu);
3820         if (ret)
3821                 goto out;
3822
3823         if (dmaru->ignored) {
3824                 /*
3825                  * we always have to disable PMRs or DMA may fail on this device
3826                  */
3827                 if (force_on)
3828                         iommu_disable_protect_mem_regions(iommu);
3829                 return 0;
3830         }
3831
3832         intel_iommu_init_qi(iommu);
3833         iommu_flush_write_buffer(iommu);
3834         ret = dmar_set_interrupt(iommu);
3835         if (ret)
3836                 goto disable_iommu;
3837
3838         iommu_set_root_entry(iommu);
3839         iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3840         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3841         iommu_enable_translation(iommu);
3842
3843         if (si_domain) {
3844                 ret = iommu_attach_domain(si_domain, iommu);
3845                 if (ret < 0 || si_domain->id != ret)
3846                         goto disable_iommu;
3847                 domain_attach_iommu(si_domain, iommu);
3848         }
3849
3850         iommu_disable_protect_mem_regions(iommu);
3851         return 0;
3852
3853 disable_iommu:
3854         disable_dmar_iommu(iommu);
3855 out:
3856         free_dmar_iommu(iommu);
3857         return ret;
3858 }
3859
3860 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3861 {
3862         int ret = 0;
3863         struct intel_iommu *iommu = dmaru->iommu;
3864
3865         if (!intel_iommu_enabled)
3866                 return 0;
3867         if (iommu == NULL)
3868                 return -EINVAL;
3869
3870         if (insert) {
3871                 ret = intel_iommu_add(dmaru);
3872         } else {
3873                 disable_dmar_iommu(iommu);
3874                 free_dmar_iommu(iommu);
3875         }
3876
3877         return ret;
3878 }
3879
3880 static void intel_iommu_free_dmars(void)
3881 {
3882         struct dmar_rmrr_unit *rmrru, *rmrr_n;
3883         struct dmar_atsr_unit *atsru, *atsr_n;
3884
3885         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3886                 list_del(&rmrru->list);
3887                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3888                 kfree(rmrru);
3889         }
3890
3891         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3892                 list_del(&atsru->list);
3893                 intel_iommu_free_atsr(atsru);
3894         }
3895 }
3896
3897 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3898 {
3899         int i, ret = 1;
3900         struct pci_bus *bus;
3901         struct pci_dev *bridge = NULL;
3902         struct device *tmp;
3903         struct acpi_dmar_atsr *atsr;
3904         struct dmar_atsr_unit *atsru;
3905
3906         dev = pci_physfn(dev);
3907         for (bus = dev->bus; bus; bus = bus->parent) {
3908                 bridge = bus->self;
3909                 if (!bridge || !pci_is_pcie(bridge) ||
3910                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3911                         return 0;
3912                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
3913                         break;
3914         }
3915         if (!bridge)
3916                 return 0;
3917
3918         rcu_read_lock();
3919         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3920                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3921                 if (atsr->segment != pci_domain_nr(dev->bus))
3922                         continue;
3923
3924                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3925                         if (tmp == &bridge->dev)
3926                                 goto out;
3927
3928                 if (atsru->include_all)
3929                         goto out;
3930         }
3931         ret = 0;
3932 out:
3933         rcu_read_unlock();
3934
3935         return ret;
3936 }
3937
3938 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3939 {
3940         int ret = 0;
3941         struct dmar_rmrr_unit *rmrru;
3942         struct dmar_atsr_unit *atsru;
3943         struct acpi_dmar_atsr *atsr;
3944         struct acpi_dmar_reserved_memory *rmrr;
3945
3946         if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
3947                 return 0;
3948
3949         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3950                 rmrr = container_of(rmrru->hdr,
3951                                     struct acpi_dmar_reserved_memory, header);
3952                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3953                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
3954                                 ((void *)rmrr) + rmrr->header.length,
3955                                 rmrr->segment, rmrru->devices,
3956                                 rmrru->devices_cnt);
3957                         if (ret < 0)
3958                                 return ret;
3959                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3960                         dmar_remove_dev_scope(info, rmrr->segment,
3961                                 rmrru->devices, rmrru->devices_cnt);
3962                 }
3963         }
3964
3965         list_for_each_entry(atsru, &dmar_atsr_units, list) {
3966                 if (atsru->include_all)
3967                         continue;
3968
3969                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3970                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
3971                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
3972                                         (void *)atsr + atsr->header.length,
3973                                         atsr->segment, atsru->devices,
3974                                         atsru->devices_cnt);
3975                         if (ret > 0)
3976                                 break;
3977                         else if (ret < 0)
3978                                 return ret;
3979                 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3980                         if (dmar_remove_dev_scope(info, atsr->segment,
3981                                         atsru->devices, atsru->devices_cnt))
3982                                 break;
3983                 }
3984         }
3985
3986         return 0;
3987 }
3988
3989 /*
3990  * Here we only respond to the event of a device being unbound from its driver.
3991  *
3992  * A newly added device is not attached to its DMAR domain here yet. That will
3993  * happen when the device is first mapped to an iova.
3994  */
3995 static int device_notifier(struct notifier_block *nb,
3996                                   unsigned long action, void *data)
3997 {
3998         struct device *dev = data;
3999         struct dmar_domain *domain;
4000
4001         if (iommu_dummy(dev))
4002                 return 0;
4003
4004         if (action != BUS_NOTIFY_REMOVED_DEVICE)
4005                 return 0;
4006
4007         domain = find_domain(dev);
4008         if (!domain)
4009                 return 0;
4010
4011         down_read(&dmar_global_lock);
4012         domain_remove_one_dev_info(domain, dev);
4013         if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4014                 domain_exit(domain);
4015         up_read(&dmar_global_lock);
4016
4017         return 0;
4018 }
4019
4020 static struct notifier_block device_nb = {
4021         .notifier_call = device_notifier,
4022 };
4023
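/*
 * Memory hotplug notifier: keeps the si_domain identity map in sync with
 * the system memory map.  Ranges going online are identity-mapped up front;
 * ranges going offline are unmapped, their IOTLB entries flushed on every
 * active IOMMU and the backing page-table pages freed.
 */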
4024 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4025                                        unsigned long val, void *v)
4026 {
4027         struct memory_notify *mhp = v;
4028         unsigned long long start, end;
4029         unsigned long start_vpfn, last_vpfn;
4030
4031         switch (val) {
4032         case MEM_GOING_ONLINE:
4033                 start = mhp->start_pfn << PAGE_SHIFT;
4034                 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4035                 if (iommu_domain_identity_map(si_domain, start, end)) {
4036                         pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
4037                                 start, end);
4038                         return NOTIFY_BAD;
4039                 }
4040                 break;
4041
4042         case MEM_OFFLINE:
4043         case MEM_CANCEL_ONLINE:
4044                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4045                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4046                 while (start_vpfn <= last_vpfn) {
4047                         struct iova *iova;
4048                         struct dmar_drhd_unit *drhd;
4049                         struct intel_iommu *iommu;
4050                         struct page *freelist;
4051
4052                         iova = find_iova(&si_domain->iovad, start_vpfn);
4053                         if (iova == NULL) {
4054                                 pr_debug("dmar: failed to get IOVA for PFN %lx\n",
4055                                          start_vpfn);
4056                                 break;
4057                         }
4058
4059                         iova = split_and_remove_iova(&si_domain->iovad, iova,
4060                                                      start_vpfn, last_vpfn);
4061                         if (iova == NULL) {
4062                                 pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
4063                                         start_vpfn, last_vpfn);
4064                                 return NOTIFY_BAD;
4065                         }
4066
4067                         freelist = domain_unmap(si_domain, iova->pfn_lo,
4068                                                iova->pfn_hi);
4069
4070                         rcu_read_lock();
4071                         for_each_active_iommu(iommu, drhd)
4072                                 iommu_flush_iotlb_psi(iommu, si_domain->id,
4073                                         iova->pfn_lo, iova_size(iova),
4074                                         !freelist, 0);
4075                         rcu_read_unlock();
4076                         dma_free_pagelist(freelist);
4077
4078                         start_vpfn = iova->pfn_hi + 1;
4079                         free_iova_mem(iova);
4080                 }
4081                 break;
4082         }
4083
4084         return NOTIFY_OK;
4085 }
4086
4087 static struct notifier_block intel_iommu_memory_nb = {
4088         .notifier_call = intel_iommu_memory_notifier,
4089         .priority = 0
4090 };
4091
4092
4093 static ssize_t intel_iommu_show_version(struct device *dev,
4094                                         struct device_attribute *attr,
4095                                         char *buf)
4096 {
4097         struct intel_iommu *iommu = dev_get_drvdata(dev);
4098         u32 ver = readl(iommu->reg + DMAR_VER_REG);
4099         return sprintf(buf, "%d:%d\n",
4100                        DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4101 }
4102 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4103
4104 static ssize_t intel_iommu_show_address(struct device *dev,
4105                                         struct device_attribute *attr,
4106                                         char *buf)
4107 {
4108         struct intel_iommu *iommu = dev_get_drvdata(dev);
4109         return sprintf(buf, "%llx\n", iommu->reg_phys);
4110 }
4111 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4112
4113 static ssize_t intel_iommu_show_cap(struct device *dev,
4114                                     struct device_attribute *attr,
4115                                     char *buf)
4116 {
4117         struct intel_iommu *iommu = dev_get_drvdata(dev);
4118         return sprintf(buf, "%llx\n", iommu->cap);
4119 }
4120 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4121
4122 static ssize_t intel_iommu_show_ecap(struct device *dev,
4123                                     struct device_attribute *attr,
4124                                     char *buf)
4125 {
4126         struct intel_iommu *iommu = dev_get_drvdata(dev);
4127         return sprintf(buf, "%llx\n", iommu->ecap);
4128 }
4129 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4130
4131 static struct attribute *intel_iommu_attrs[] = {
4132         &dev_attr_version.attr,
4133         &dev_attr_address.attr,
4134         &dev_attr_cap.attr,
4135         &dev_attr_ecap.attr,
4136         NULL,
4137 };
4138
4139 static struct attribute_group intel_iommu_group = {
4140         .name = "intel-iommu",
4141         .attrs = intel_iommu_attrs,
4142 };
4143
4144 const struct attribute_group *intel_iommu_groups[] = {
4145         &intel_iommu_group,
4146         NULL,
4147 };
4148
4149 int __init intel_iommu_init(void)
4150 {
4151         int ret = -ENODEV;
4152         struct dmar_drhd_unit *drhd;
4153         struct intel_iommu *iommu;
4154
4155         /* VT-d is required for a TXT/tboot launch, so enforce that */
4156         force_on = tboot_force_iommu();
4157
4158         if (iommu_init_mempool()) {
4159                 if (force_on)
4160                         panic("tboot: Failed to initialize iommu memory\n");
4161                 return -ENOMEM;
4162         }
4163
4164         down_write(&dmar_global_lock);
4165         if (dmar_table_init()) {
4166                 if (force_on)
4167                         panic("tboot: Failed to initialize DMAR table\n");
4168                 goto out_free_dmar;
4169         }
4170
4171         /*
4172          * Disable translation if already enabled prior to OS handover.
4173          */
4174         for_each_active_iommu(iommu, drhd)
4175                 if (iommu->gcmd & DMA_GCMD_TE)
4176                         iommu_disable_translation(iommu);
4177
4178         if (dmar_dev_scope_init() < 0) {
4179                 if (force_on)
4180                         panic("tboot: Failed to initialize DMAR device scope\n");
4181                 goto out_free_dmar;
4182         }
4183
4184         if (no_iommu || dmar_disabled)
4185                 goto out_free_dmar;
4186
4187         if (list_empty(&dmar_rmrr_units))
4188                 printk(KERN_INFO "DMAR: No RMRR found\n");
4189
4190         if (list_empty(&dmar_atsr_units))
4191                 printk(KERN_INFO "DMAR: No ATSR found\n");
4192
4193         if (dmar_init_reserved_ranges()) {
4194                 if (force_on)
4195                         panic("tboot: Failed to reserve iommu ranges\n");
4196                 goto out_free_reserved_range;
4197         }
4198
4199         init_no_remapping_devices();
4200
4201         ret = init_dmars();
4202         if (ret) {
4203                 if (force_on)
4204                         panic("tboot: Failed to initialize DMARs\n");
4205                 printk(KERN_ERR "IOMMU: dmar init failed\n");
4206                 goto out_free_reserved_range;
4207         }
4208         up_write(&dmar_global_lock);
4209         printk(KERN_INFO
4210         "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
4211
4212         init_timer(&unmap_timer);
4213 #ifdef CONFIG_SWIOTLB
4214         swiotlb = 0;
4215 #endif
4216         dma_ops = &intel_dma_ops;
4217
4218         init_iommu_pm_ops();
4219
4220         for_each_active_iommu(iommu, drhd)
4221                 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4222                                                        intel_iommu_groups,
4223                                                        iommu->name);
4224
4225         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4226         bus_register_notifier(&pci_bus_type, &device_nb);
4227         if (si_domain && !hw_pass_through)
4228                 register_memory_notifier(&intel_iommu_memory_nb);
4229
4230         intel_iommu_enabled = 1;
4231
4232         return 0;
4233
4234 out_free_reserved_range:
4235         put_iova_domain(&reserved_iova_list);
4236 out_free_dmar:
4237         intel_iommu_free_dmars();
4238         up_write(&dmar_global_lock);
4239         iommu_exit_mempool();
4240         return ret;
4241 }
4242
4243 static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4244 {
4245         struct intel_iommu *iommu = opaque;
4246
4247         iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4248         return 0;
4249 }
4250
4251 /*
4252  * NB - intel-iommu lacks any sort of reference counting for the users of
4253  * dependent devices.  If multiple endpoints have intersecting dependent
4254  * devices, unbinding the driver from any one of them will possibly leave
4255  * the others unable to operate.
4256  */
4257 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
4258                                            struct device *dev)
4259 {
4260         if (!iommu || !dev || !dev_is_pci(dev))
4261                 return;
4262
4263         pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
4264 }
4265
4266 static void domain_remove_one_dev_info(struct dmar_domain *domain,
4267                                        struct device *dev)
4268 {
4269         struct device_domain_info *info, *tmp;
4270         struct intel_iommu *iommu;
4271         unsigned long flags;
4272         int found = 0;
4273         u8 bus, devfn;
4274
4275         iommu = device_to_iommu(dev, &bus, &devfn);
4276         if (!iommu)
4277                 return;
4278
4279         spin_lock_irqsave(&device_domain_lock, flags);
4280         list_for_each_entry_safe(info, tmp, &domain->devices, link) {
4281                 if (info->iommu == iommu && info->bus == bus &&
4282                     info->devfn == devfn) {
4283                         unlink_domain_info(info);
4284                         spin_unlock_irqrestore(&device_domain_lock, flags);
4285
4286                         iommu_disable_dev_iotlb(info);
4287                         iommu_detach_dev(iommu, info->bus, info->devfn);
4288                         iommu_detach_dependent_devices(iommu, dev);
4289                         free_devinfo_mem(info);
4290
4291                         spin_lock_irqsave(&device_domain_lock, flags);
4292
4293                         if (found)
4294                                 break;
4295                         else
4296                                 continue;
4297                 }
4298
4299                 /* if there are no other devices under the same iommu
4300                  * owned by this domain, clear this iommu in iommu_bmp and
4301                  * update the iommu count and coherency
4302                  */
4303                 if (info->iommu == iommu)
4304                         found = 1;
4305         }
4306
4307         spin_unlock_irqrestore(&device_domain_lock, flags);
4308
4309         if (found == 0) {
4310                 domain_detach_iommu(domain, iommu);
4311                 if (!domain_type_is_vm_or_si(domain))
4312                         iommu_detach_domain(domain, iommu);
4313         }
4314 }
4315
4316 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4317 {
4318         int adjust_width;
4319
4320         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4321                         DMA_32BIT_PFN);
4322         domain_reserve_special_ranges(domain);
4323
4324         /* calculate AGAW */
4325         domain->gaw = guest_width;
4326         adjust_width = guestwidth_to_adjustwidth(guest_width);
4327         domain->agaw = width_to_agaw(adjust_width);
4328
4329         domain->iommu_coherency = 0;
4330         domain->iommu_snooping = 0;
4331         domain->iommu_superpage = 0;
4332         domain->max_addr = 0;
4333
4334         /* always allocate the top pgd */
4335         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4336         if (!domain->pgd)
4337                 return -ENOMEM;
4338         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4339         return 0;
4340 }
4341
4342 static int intel_iommu_domain_init(struct iommu_domain *domain)
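/*
 * iommu_ops callbacks.  These are not used on the DMA API path above but by
 * explicit consumers of the generic IOMMU API such as KVM device assignment
 * and VFIO.  An illustrative caller (not part of this file) would do:
 *
 *        struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *        iommu_attach_device(dom, &pdev->dev);  // -> intel_iommu_attach_device()
 *        iommu_map(dom, iova, phys, size,
 *                  IOMMU_READ | IOMMU_WRITE);   // -> intel_iommu_map()
 *        ...
 *        iommu_unmap(dom, iova, size);          // -> intel_iommu_unmap()
 *        iommu_detach_device(dom, &pdev->dev);
 *        iommu_domain_free(dom);                // -> intel_iommu_domain_destroy()
 */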
4343 {
4344         struct dmar_domain *dmar_domain;
4345
4346         dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4347         if (!dmar_domain) {
4348                 printk(KERN_ERR
4349                         "intel_iommu_domain_init: failed to allocate dmar_domain\n");
4350                 return -ENOMEM;
4351         }
4352         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4353                 printk(KERN_ERR
4354                         "intel_iommu_domain_init() failed\n");
4355                 domain_exit(dmar_domain);
4356                 return -ENOMEM;
4357         }
4358         domain_update_iommu_cap(dmar_domain);
4359         domain->priv = dmar_domain;
4360
4361         domain->geometry.aperture_start = 0;
4362         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4363         domain->geometry.force_aperture = true;
4364
4365         return 0;
4366 }
4367
4368 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
4369 {
4370         struct dmar_domain *dmar_domain = domain->priv;
4371
4372         domain->priv = NULL;
4373         domain_exit(dmar_domain);
4374 }
4375
4376 static int intel_iommu_attach_device(struct iommu_domain *domain,
4377                                      struct device *dev)
4378 {
4379         struct dmar_domain *dmar_domain = domain->priv;
4380         struct intel_iommu *iommu;
4381         int addr_width;
4382         u8 bus, devfn;
4383
4384         if (device_is_rmrr_locked(dev)) {
4385                 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
4386                 return -EPERM;
4387         }
4388
4389         /* normally dev is not mapped */
4390         if (unlikely(domain_context_mapped(dev))) {
4391                 struct dmar_domain *old_domain;
4392
4393                 old_domain = find_domain(dev);
4394                 if (old_domain) {
4395                         if (domain_type_is_vm_or_si(dmar_domain))
4396                                 domain_remove_one_dev_info(old_domain, dev);
4397                         else
4398                                 domain_remove_dev_info(old_domain);
4399
4400                         if (!domain_type_is_vm_or_si(old_domain) &&
4401                              list_empty(&old_domain->devices))
4402                                 domain_exit(old_domain);
4403                 }
4404         }
4405
4406         iommu = device_to_iommu(dev, &bus, &devfn);
4407         if (!iommu)
4408                 return -ENODEV;
4409
4410         /* check if this iommu agaw is sufficient for max mapped address */
4411         addr_width = agaw_to_width(iommu->agaw);
4412         if (addr_width > cap_mgaw(iommu->cap))
4413                 addr_width = cap_mgaw(iommu->cap);
4414
4415         if (dmar_domain->max_addr > (1LL << addr_width)) {
4416                 printk(KERN_ERR "%s: iommu width (%d) is not "
4417                        "sufficient for the mapped address (%llx)\n",
4418                        __func__, addr_width, dmar_domain->max_addr);
4419                 return -EFAULT;
4420         }
4421         dmar_domain->gaw = addr_width;
4422
4423         /*
4424          * Knock out extra levels of page tables if necessary
4425          */
4426         while (iommu->agaw < dmar_domain->agaw) {
4427                 struct dma_pte *pte;
4428
4429                 pte = dmar_domain->pgd;
4430                 if (dma_pte_present(pte)) {
4431                         dmar_domain->pgd = (struct dma_pte *)
4432                                 phys_to_virt(dma_pte_addr(pte));
4433                         free_pgtable_page(pte);
4434                 }
4435                 dmar_domain->agaw--;
4436         }
4437
4438         return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
4439 }
4440
4441 static void intel_iommu_detach_device(struct iommu_domain *domain,
4442                                       struct device *dev)
4443 {
4444         struct dmar_domain *dmar_domain = domain->priv;
4445
4446         domain_remove_one_dev_info(dmar_domain, dev);
4447 }
4448
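/*
 * Translate the generic IOMMU_READ/IOMMU_WRITE/IOMMU_CACHE flags into VT-d
 * PTE bits and install the mapping, growing the domain's max_addr
 * bookkeeping so that later attaches can verify the IOMMU address width
 * still covers everything that has been mapped.
 */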
4449 static int intel_iommu_map(struct iommu_domain *domain,
4450                            unsigned long iova, phys_addr_t hpa,
4451                            size_t size, int iommu_prot)
4452 {
4453         struct dmar_domain *dmar_domain = domain->priv;
4454         u64 max_addr;
4455         int prot = 0;
4456         int ret;
4457
4458         if (iommu_prot & IOMMU_READ)
4459                 prot |= DMA_PTE_READ;
4460         if (iommu_prot & IOMMU_WRITE)
4461                 prot |= DMA_PTE_WRITE;
4462         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4463                 prot |= DMA_PTE_SNP;
4464
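             /*
              * If this mapping raises the domain's highest mapped
              * address, check that the address width negotiated at
              * attach time still covers it.
              */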
4465         max_addr = iova + size;
4466         if (dmar_domain->max_addr < max_addr) {
4467                 u64 end;
4468
4469                 /* check if minimum agaw is sufficient for mapped address */
4470                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4471                 if (end < max_addr) {
4472                         printk(KERN_ERR
4473                                "%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
4474                                __func__, dmar_domain->gaw, max_addr);
4475                         return -EFAULT;
4476                 }
4477                 dmar_domain->max_addr = max_addr;
4478         }
4479         /* Convert size into a page count, adding an extra page if the
4480            low bits of hpa push the mapping over a page boundary */
4481         size = aligned_nrpages(hpa, size);
4482         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4483                                  hpa >> VTD_PAGE_SHIFT, size, prot);
4484         return ret;
4485 }
4486
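     /*
      * Unmap the range starting at 'iova'.  If 'iova' falls inside a
      * large-page mapping the whole large page is torn down, which is
      * why the size actually unmapped is returned to the caller.  The
      * detached page-table pages are only freed after every IOMMU the
      * domain is attached to has flushed its IOTLB.
      */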
4487 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4488                                 unsigned long iova, size_t size)
4489 {
4490         struct dmar_domain *dmar_domain = domain->priv;
4491         struct page *freelist = NULL;
4492         struct intel_iommu *iommu;
4493         unsigned long start_pfn, last_pfn;
4494         unsigned int npages;
4495         int iommu_id, num, ndomains, level = 0;
4496
4497         /* Cope with horrid API which requires us to unmap more than the
4498            size argument if it happens to be a large-page mapping. */
4499         if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
4500                 BUG();
4501
4502         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
4503                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
4504
4505         start_pfn = iova >> VTD_PAGE_SHIFT;
4506         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
4507
4508         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
4509
4510         npages = last_pfn - start_pfn + 1;
4511
4512         for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
4513                 iommu = g_iommus[iommu_id];
4514
4515                 /*
4516                  * Find the domain ID dmar_domain is using on this IOMMU
4517                  * and flush its IOTLB for the unmapped range.
4518                  */
4519                 ndomains = cap_ndoms(iommu->cap);
4520                 for_each_set_bit(num, iommu->domain_ids, ndomains) {
4521                         if (iommu->domains[num] == dmar_domain)
4522                                 iommu_flush_iotlb_psi(iommu, num, start_pfn,
4523                                                       npages, !freelist, 0);
4524                 }
4525         }
4526
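             /*
              * Every IOMMU has flushed its IOTLB for the range, so no
              * hardware can still reference the detached page-table
              * pages; it is now safe to free them.
              */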
4527         dma_free_pagelist(freelist);
4528
4529         if (dmar_domain->max_addr == iova + size)
4530                 dmar_domain->max_addr = iova;
4531
4532         return size;
4533 }
4534
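     /*
      * Walk the domain's page tables and return the physical address
      * recorded in the leaf PTE covering 'iova', or 0 if nothing is
      * mapped there.
      */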
4535 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4536                                             dma_addr_t iova)
4537 {
4538         struct dmar_domain *dmar_domain = domain->priv;
4539         struct dma_pte *pte;
4540         int level = 0;
4541         u64 phys = 0;
4542
4543         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
4544         if (pte)
4545                 phys = dma_pte_addr(pte);
4546
4547         return phys;
4548 }
4549
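     /*
      * System-wide capabilities: cache coherency requires snoop control
      * on every IOMMU, and IOMMU_CAP_INTR_REMAP is only reported once
      * interrupt remapping has actually been enabled.
      */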
4550 static bool intel_iommu_capable(enum iommu_cap cap)
4551 {
4552         if (cap == IOMMU_CAP_CACHE_COHERENCY)
4553                 return domain_update_iommu_snooping(NULL) == 1;
4554         if (cap == IOMMU_CAP_INTR_REMAP)
4555                 return irq_remapping_enabled == 1;
4556
4557         return false;
4558 }
4559
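     /*
      * Called by the IOMMU core for each newly added device: create the
      * sysfs link to its IOMMU and place it in an IOMMU group, sharing
      * a group with any devices it cannot be isolated from.
      */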
4560 static int intel_iommu_add_device(struct device *dev)
4561 {
4562         struct intel_iommu *iommu;
4563         struct iommu_group *group;
4564         u8 bus, devfn;
4565
4566         iommu = device_to_iommu(dev, &bus, &devfn);
4567         if (!iommu)
4568                 return -ENODEV;
4569
4570         iommu_device_link(iommu->iommu_dev, dev);
4571
4572         group = iommu_group_get_for_dev(dev);
4573
4574         if (IS_ERR(group))
4575                 return PTR_ERR(group);
4576
4577         iommu_group_put(group);
4578         return 0;
4579 }
4580
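     /*
      * Undo intel_iommu_add_device(): take the device out of its IOMMU
      * group and drop the sysfs link to its IOMMU.
      */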
4581 static void intel_iommu_remove_device(struct device *dev)
4582 {
4583         struct intel_iommu *iommu;
4584         u8 bus, devfn;
4585
4586         iommu = device_to_iommu(dev, &bus, &devfn);
4587         if (!iommu)
4588                 return;
4589
4590         iommu_group_remove_device(dev);
4591
4592         iommu_device_unlink(iommu->iommu_dev, dev);
4593 }
4594
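     /*
      * Callbacks exported to the generic IOMMU core; these are hooked
      * up for the PCI bus via bus_set_iommu() from intel_iommu_init().
      */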
4595 static const struct iommu_ops intel_iommu_ops = {
4596         .capable        = intel_iommu_capable,
4597         .domain_init    = intel_iommu_domain_init,
4598         .domain_destroy = intel_iommu_domain_destroy,
4599         .attach_dev     = intel_iommu_attach_device,
4600         .detach_dev     = intel_iommu_detach_device,
4601         .map            = intel_iommu_map,
4602         .unmap          = intel_iommu_unmap,
4603         .map_sg         = default_iommu_map_sg,
4604         .iova_to_phys   = intel_iommu_iova_to_phys,
4605         .add_device     = intel_iommu_add_device,
4606         .remove_device  = intel_iommu_remove_device,
4607         .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
4608 };
4609
4610 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4611 {
4612         /* G4x/GM45 integrated gfx dmar support is totally busted. */
4613         printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4614         dmar_map_gfx = 0;
4615 }
4616
4617 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4618 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4619 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4620 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4621 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4622 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4623 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4624
4625 static void quirk_iommu_rwbf(struct pci_dev *dev)
4626 {
4627         /*
4628          * Mobile 4 Series Chipset neglects to set RWBF capability,
4629          * but needs it. Same seems to hold for the desktop versions.
4630          */
4631         printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4632         rwbf_quirk = 1;
4633 }
4634
4635 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4636 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4637 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4638 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4639 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4640 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4641 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4642
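     /*
      * GGC is the graphics memory control register in the Ironlake
      * host bridge's config space; bits 11:8 describe how much stolen
      * memory the BIOS set aside for the GTT and whether any of it is
      * usable by VT-d (the "shadow" GTT).
      */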
4643 #define GGC 0x52
4644 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
4645 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
4646 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
4647 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
4648 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
4649 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
4650 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
4651 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
4652
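     /*
      * If the BIOS allocated no VT-d-capable (shadow) GTT, DMA remapping
      * cannot be used for the integrated graphics device, so leave it
      * identity-mapped.  When it did, remapping works, but IOTLB flushes
      * for the GPU cannot safely be batched, so force strict flushing.
      */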
4653 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4654 {
4655         unsigned short ggc;
4656
4657         if (pci_read_config_word(dev, GGC, &ggc))
4658                 return;
4659
4660         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4661                 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4662                 dmar_map_gfx = 0;
4663         } else if (dmar_map_gfx) {
4664                 /* we have to ensure the gfx device is idle before we flush */
4665                 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4666                 intel_iommu_strict = 1;
4667         }
4668 }
4669 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4670 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4671 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4672 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4673
4674 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4675    ISOCH DMAR unit for the Azalia sound device, but not give it any
4676    TLB entries, which causes it to deadlock. Check for that.  We do
4677    this in a function called from init_dmars(), instead of in a PCI
4678    quirk, because we don't want to print the obnoxious "BIOS broken"
4679    message if VT-d is actually disabled.
4680 */
4681 static void __init check_tylersburg_isoch(void)
4682 {
4683         struct pci_dev *pdev;
4684         uint32_t vtisochctrl;
4685
4686         /* If there's no Azalia in the system anyway, forget it. */
4687         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4688         if (!pdev)
4689                 return;
4690         pci_dev_put(pdev);
4691
4692         /* System Management Registers. Might be hidden, in which case
4693            we can't do the sanity check. But that's OK, because the
4694            known-broken BIOSes _don't_ actually hide it, so far. */
4695         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4696         if (!pdev)
4697                 return;
4698
4699         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4700                 pci_dev_put(pdev);
4701                 return;
4702         }
4703
4704         pci_dev_put(pdev);
4705
4706         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4707         if (vtisochctrl & 1)
4708                 return;
4709
4710         /* Drop all bits other than the number of TLB entries */
4711         vtisochctrl &= 0x1c;
4712
4713         /* If we have the recommended number of TLB entries (16), fine. */
4714         if (vtisochctrl == 0x10)
4715                 return;
4716
4717         /* Zero TLB entries? You get to ride the short bus to school. */
4718         if (!vtisochctrl) {
4719                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4720                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4721                      dmi_get_system_info(DMI_BIOS_VENDOR),
4722                      dmi_get_system_info(DMI_BIOS_VERSION),
4723                      dmi_get_system_info(DMI_PRODUCT_VERSION));
4724                 iommu_identity_mapping |= IDENTMAP_AZALIA;
4725                 return;
4726         }
4727
4728         printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4729                vtisochctrl);
4730 }