drivers/iommu/dmar.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA remapping and interrupt remapping.
 */

#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
        dmar_res_handler_t      cb[ACPI_DMAR_TYPE_RESERVED];
        void                    *arg[ACPI_DMAR_TYPE_RESERVED];
        bool                    ignore_unhandled;
        bool                    print_entry;
};
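
/*
 * Illustrative sketch (a comment-only example, not compiled): callers fill
 * in one handler per DMAR structure type and hand the table to the walker,
 * exactly as parse_dmar_table() does further down:
 *
 *      int drhd_count = 0;
 *      struct dmar_res_callback cb = {
 *              .print_entry = true,
 *              .ignore_unhandled = true,
 *              .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
 *              .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
 *      };
 *      ret = dmar_walk_dmar_table(dmar, &cb);
 */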

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
 *    before the IO devices managed by that unit.
 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
 *    after the IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
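
/*
 * Illustrative sketch of the locking rules above (mirrors
 * dmar_pci_bus_notifier() and dmar_find_matched_drhd_unit() below):
 *
 *      down_write(&dmar_global_lock);          // process context: updates
 *      dmar_pci_bus_add_dev(info);
 *      up_write(&dmar_global_lock);
 *
 *      rcu_read_lock();                        // lookup/interrupt path
 *      for_each_drhd_unit(dmaru)
 *              ...
 *      rcu_read_unlock();
 */
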
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

extern const struct iommu_ops intel_iommu_ops;

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add the INCLUDE_ALL unit at the tail, so that a scan of the
         * list will find it at the very end.
         */
        if (drhd->include_all)
                list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
        else
                list_add_rcu(&drhd->list, &dmar_drhd_units);
}
90
91 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
92 {
93         struct acpi_dmar_device_scope *scope;
94
95         *cnt = 0;
96         while (start < end) {
97                 scope = start;
98                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
99                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
100                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
101                         (*cnt)++;
102                 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
103                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
104                         pr_warn("Unsupported device scope\n");
105                 }
106                 start += scope->length;
107         }
108         if (*cnt == 0)
109                 return NULL;
110
111         return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
112 }
113
114 void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
115 {
116         int i;
117         struct device *tmp_dev;
118
119         if (*devices && *cnt) {
120                 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
121                         put_device(tmp_dev);
122                 kfree(*devices);
123         }
124
125         *devices = NULL;
126         *cnt = 0;
127 }
128
129 /* Optimize out kzalloc()/kfree() for normal cases */
130 static char dmar_pci_notify_info_buf[64];
131
132 static struct dmar_pci_notify_info *
133 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
134 {
135         int level = 0;
136         size_t size;
137         struct pci_dev *tmp;
138         struct dmar_pci_notify_info *info;
139
140         BUG_ON(dev->is_virtfn);
141
        /* Only generate path[] for device addition events */
        if (event == BUS_NOTIFY_ADD_DEVICE)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
                        level++;

        size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
        if (size <= sizeof(dmar_pci_notify_info_buf)) {
                info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
        } else {
                info = kzalloc(size, GFP_KERNEL);
                if (!info) {
                        pr_warn("Out of memory when allocating notify_info for %s.\n",
                                pci_name(dev));
                        if (dmar_dev_scope_status == 0)
                                dmar_dev_scope_status = -ENOMEM;
                        return NULL;
                }
        }

        info->event = event;
        info->dev = dev;
        info->seg = pci_domain_nr(dev->bus);
        info->level = level;
        if (event == BUS_NOTIFY_ADD_DEVICE) {
                for (tmp = dev; tmp; tmp = tmp->bus->self) {
                        level--;
                        info->path[level].bus = tmp->bus->number;
                        info->path[level].device = PCI_SLOT(tmp->devfn);
                        info->path[level].function = PCI_FUNC(tmp->devfn);
                        if (pci_is_root_bus(tmp->bus))
                                info->bus = tmp->bus->number;
                }
        }

        return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
        if ((void *)info != dmar_pci_notify_info_buf)
                kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
                                struct acpi_dmar_pci_path *path, int count)
{
        int i;

        if (info->bus != bus)
                goto fallback;
        if (info->level != count)
                goto fallback;

        for (i = 0; i < count; i++) {
                if (path[i].device != info->path[i].device ||
                    path[i].function != info->path[i].function)
                        goto fallback;
        }

        return true;

fallback:

        if (count != 1)
                return false;

        i = info->level - 1;
        if (bus              == info->path[i].bus &&
            path[0].device   == info->path[i].device &&
            path[0].function == info->path[i].function) {
                pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
                        bus, path[0].device, path[0].function);
                return true;
        }

        return false;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
                          void *start, void *end, u16 segment,
                          struct dmar_dev_scope *devices,
                          int devices_cnt)
{
        int i, level;
        struct device *tmp, *dev = &info->dev->dev;
        struct acpi_dmar_device_scope *scope;
        struct acpi_dmar_pci_path *path;

        if (segment != info->seg)
                return 0;

        for (; start < end; start += scope->length) {
                scope = start;
                if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        continue;

                path = (struct acpi_dmar_pci_path *)(scope + 1);
                level = (scope->length - sizeof(*scope)) / sizeof(*path);
                if (!dmar_match_pci_path(info, scope->bus, path, level))
                        continue;

                /*
                 * We expect devices with endpoint scope to have normal PCI
                 * headers, and devices with bridge scope to have bridge PCI
                 * headers.  However PCI NTB devices may be listed in the
                 * DMAR table with bridge scope, even though they have a
                 * normal PCI header.  NTB devices are identified by class
                 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
                 * for this special case.
                 */
                if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
                    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
                     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
                      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
                        pr_warn("Device scope type does not match for %s\n",
                                pci_name(info->dev));
                        return -EINVAL;
                }

                for_each_dev_scope(devices, devices_cnt, i, tmp)
                        if (tmp == NULL) {
                                devices[i].bus = info->dev->bus->number;
                                devices[i].devfn = info->dev->devfn;
                                rcu_assign_pointer(devices[i].dev,
                                                   get_device(dev));
                                return 1;
                        }
                BUG_ON(i >= devices_cnt);
        }

        return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
                          struct dmar_dev_scope *devices, int count)
{
        int index;
        struct device *tmp;

        if (info->seg != segment)
                return 0;

        for_each_active_dev_scope(devices, count, index, tmp)
                if (tmp == &info->dev->dev) {
                        RCU_INIT_POINTER(devices[index].dev, NULL);
                        synchronize_rcu();
                        put_device(tmp);
                        return 1;
                }

        return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
        int ret = 0;
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        for_each_drhd_unit(dmaru) {
                if (dmaru->include_all)
                        continue;

                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit, header);
                ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
                                ((void *)drhd) + drhd->header.length,
                                dmaru->segment,
                                dmaru->devices, dmaru->devices_cnt);
                if (ret)
                        break;
        }
        if (ret >= 0)
                ret = dmar_iommu_notify_scope_dev(info);
        if (ret < 0 && dmar_dev_scope_status == 0)
                dmar_dev_scope_status = ret;

        return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
        struct dmar_drhd_unit *dmaru;

        for_each_drhd_unit(dmaru)
                if (dmar_remove_dev_scope(info, dmaru->segment,
                        dmaru->devices, dmaru->devices_cnt))
                        break;
        dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct pci_dev *pdev = to_pci_dev(data);
        struct dmar_pci_notify_info *info;

        /* Only care about add/remove events for physical functions.
         * For VFs we actually do the lookup based on the corresponding
         * PF in device_to_iommu() anyway. */
        if (pdev->is_virtfn)
                return NOTIFY_DONE;
        if (action != BUS_NOTIFY_ADD_DEVICE &&
            action != BUS_NOTIFY_REMOVED_DEVICE)
                return NOTIFY_DONE;

        info = dmar_alloc_pci_notify_info(pdev, action);
        if (!info)
                return NOTIFY_DONE;

        down_write(&dmar_global_lock);
        if (action == BUS_NOTIFY_ADD_DEVICE)
                dmar_pci_bus_add_dev(info);
        else if (action == BUS_NOTIFY_REMOVED_DEVICE)
                dmar_pci_bus_del_dev(info);
        up_write(&dmar_global_lock);

        dmar_free_pci_notify_info(info);

        return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
        .notifier_call = dmar_pci_bus_notifier,
        .priority = INT_MIN,
};

static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
        struct dmar_drhd_unit *dmaru;

        list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
                if (dmaru->segment == drhd->segment &&
                    dmaru->reg_base_addr == drhd->address)
                        return dmaru;

        return NULL;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = dmar_find_dmaru(drhd);
        if (dmaru)
                goto out;

        dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        /*
         * If the header was allocated from the slab by an ACPI _DSM method,
         * we need to copy the content because the buffer is freed on return.
         */
        dmaru->hdr = (void *)(dmaru + 1);
        memcpy(dmaru->hdr, header, header->length);
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
        dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
                                              ((void *)drhd) + drhd->header.length,
                                              &dmaru->devices_cnt);
        if (dmaru->devices_cnt && dmaru->devices == NULL) {
                kfree(dmaru);
                return -ENOMEM;
        }

        ret = alloc_iommu(dmaru);
        if (ret) {
                dmar_free_dev_scope(&dmaru->devices,
                                    &dmaru->devices_cnt);
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);

out:
        if (arg)
                (*(int *)arg)++;

        return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
        if (dmaru->devices && dmaru->devices_cnt)
                dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
        if (dmaru->iommu)
                free_iommu(dmaru->iommu);
        kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
                                      void *arg)
{
        struct acpi_dmar_andd *andd = (void *)header;

        /* Check for NUL termination within the designated length */
        if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
                WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
                           "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
                return -EINVAL;
        }
        pr_info("ANDD device: %x name: %s\n", andd->device_number,
                andd->device_name);

        return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

                        if (!node_online(node))
                                node = -1;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        /* Report rhsa->base_address: drhd is not a valid entry here, since
         * the loop above completed without finding a match. */
        WARN_TAINT(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));

        return 0;
}
#else
#define dmar_parse_one_rhsa             dmar_res_noop
#endif

static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                pr_info("DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                pr_info("RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ROOT_ATS:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                pr_info("ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
                       (unsigned long long)rhsa->base_address,
                       rhsa->proximity_domain);
                break;
        case ACPI_DMAR_TYPE_NAMESPACE:
                /* We don't print this here because we need to sanity-check
                   it first. So print it in dmar_parse_one_andd() instead. */
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* If we can find the DMAR table, then there are DMAR devices */
        status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}

static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
                                       size_t len, struct dmar_res_callback *cb)
{
        struct acpi_dmar_header *iter, *next;
        struct acpi_dmar_header *end = ((void *)start) + len;

        for (iter = start; iter < end; iter = next) {
                next = (void *)iter + iter->length;
                if (iter->length == 0) {
                        /* Avoid looping forever on bad ACPI tables */
                        pr_debug(FW_BUG "Invalid 0-length structure\n");
                        break;
                } else if (next > end) {
                        /* Avoid running past the table end */
                        pr_warn(FW_BUG "Record passes table end\n");
                        return -EINVAL;
                }

                if (cb->print_entry)
                        dmar_table_print_dmar_entry(iter);

                if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
                        /* Continue for forward compatibility */
                        pr_debug("Unknown DMAR structure type %d\n",
                                 iter->type);
                } else if (cb->cb[iter->type]) {
                        int ret;

                        ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
                        if (ret)
                                return ret;
                } else if (!cb->ignore_unhandled) {
                        pr_warn("No handler for DMAR structure type %d\n",
                                iter->type);
                        return -EINVAL;
                }
        }

        return 0;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
                                       struct dmar_res_callback *cb)
{
        return dmar_walk_remapping_entries((void *)(dmar + 1),
                        dmar->header.length - sizeof(*dmar), cb);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        int drhd_count = 0;
        int ret;
        struct dmar_res_callback cb = {
                .print_entry = true,
                .ignore_unhandled = true,
                .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
                .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
                .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
                .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
                .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
                .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
        };

        /*
         * Detect again: the earlier dmar_tbl mapping may have come from
         * the fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use the copy
         * of the DMAR table that SINIT saved in SinitMleData in the TXT
         * heap (which is DMA protected).
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                pr_warn("Invalid DMAR haw\n");
                return -EINVAL;
        }

        pr_info("Host address width %d\n", dmar->width + 1);
        ret = dmar_walk_dmar_table(dmar, &cb);
        if (ret == 0 && drhd_count == 0)
                pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

        return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
                                 int cnt, struct pci_dev *dev)
{
        int index;
        struct device *tmp;

        while (dev) {
                for_each_active_dev_scope(devices, cnt, index, tmp)
                        if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        dev = pci_physfn(dev);

        rcu_read_lock();
        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        goto out;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        goto out;
        }
        dmaru = NULL;
out:
        rcu_read_unlock();

        return dmaru;
}

static void __init dmar_acpi_insert_dev_scope(u8 device_number,
                                              struct acpi_device *adev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        struct device *tmp;
        int i;
        struct acpi_dmar_pci_path *path;

        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                for (scope = (void *)(drhd + 1);
                     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
                     scope = ((void *)scope) + scope->length) {
                        if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
                                continue;
                        if (scope->enumeration_id != device_number)
                                continue;

                        path = (void *)(scope + 1);
                        pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
                                dev_name(&adev->dev), dmaru->reg_base_addr,
                                scope->bus, path->device, path->function);
                        for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
                                if (tmp == NULL) {
                                        dmaru->devices[i].bus = scope->bus;
                                        dmaru->devices[i].devfn = PCI_DEVFN(path->device,
                                                                            path->function);
                                        rcu_assign_pointer(dmaru->devices[i].dev,
                                                           get_device(&adev->dev));
                                        return;
                                }
                        BUG_ON(i >= dmaru->devices_cnt);
                }
        }
        pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
                device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
        struct acpi_dmar_andd *andd;

        if (dmar_tbl == NULL)
                return -ENODEV;

        for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
             ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
             andd = ((void *)andd) + andd->header.length) {
                if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
                        acpi_handle h;
                        struct acpi_device *adev;

                        if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
                                                          andd->device_name,
                                                          &h))) {
                                pr_err("Failed to find handle for ACPI object %s\n",
                                       andd->device_name);
                                continue;
                        }
                        if (acpi_bus_get_device(h, &adev)) {
                                pr_err("Failed to get device for ACPI object %s\n",
                                       andd->device_name);
                                continue;
                        }
                        dmar_acpi_insert_dev_scope(andd->device_number, adev);
                }
        }
        return 0;
}

int __init dmar_dev_scope_init(void)
{
        struct pci_dev *dev = NULL;
        struct dmar_pci_notify_info *info;

        if (dmar_dev_scope_status != 1)
                return dmar_dev_scope_status;

        if (list_empty(&dmar_drhd_units)) {
                dmar_dev_scope_status = -ENODEV;
        } else {
                dmar_dev_scope_status = 0;

                dmar_acpi_dev_scope_init();

                for_each_pci_dev(dev) {
                        if (dev->is_virtfn)
                                continue;

                        info = dmar_alloc_pci_notify_info(dev,
                                        BUS_NOTIFY_ADD_DEVICE);
                        if (!info) {
                                return dmar_dev_scope_status;
                        } else {
                                dmar_pci_bus_add_dev(info);
                                dmar_free_pci_notify_info(info);
                        }
                }

                bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
        }

        return dmar_dev_scope_status;
}

int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized == 0) {
                ret = parse_dmar_table();
                if (ret < 0) {
                        if (ret != -ENODEV)
                                pr_info("Parse DMAR table failure.\n");
                } else if (list_empty(&dmar_drhd_units)) {
                        pr_info("No DMAR devices found\n");
                        ret = -ENODEV;
                }

                if (ret < 0)
                        dmar_table_initialized = ret;
                else
                        dmar_table_initialized = 1;
        }

        return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
        WARN_TAINT_ONCE(
                1, TAINT_FIRMWARE_WORKAROUND,
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
        struct acpi_dmar_hardware_unit *drhd;
        void __iomem *addr;
        u64 cap, ecap;

        drhd = (void *)entry;
        if (!drhd->address) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        if (arg)
                addr = ioremap(drhd->address, VTD_PAGE_SIZE);
        else
                addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
        if (!addr) {
                pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
                return -EINVAL;
        }

        cap = dmar_readq(addr + DMAR_CAP_REG);
        ecap = dmar_readq(addr + DMAR_ECAP_REG);

        if (arg)
                iounmap(addr);
        else
                early_iounmap(addr, VTD_PAGE_SIZE);

        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                warn_invalid_dmar(drhd->address, " returns all ones");
                return -EINVAL;
        }

        return 0;
}

int __init detect_intel_iommu(void)
{
        int ret;
        struct dmar_res_callback validate_drhd_cb = {
                .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
                .ignore_unhandled = true,
        };

        down_write(&dmar_global_lock);
        ret = dmar_table_detect();
        if (!ret)
                ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
                                           &validate_drhd_cb);
        if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }

#ifdef CONFIG_X86
        if (!ret)
                x86_init.iommu.iommu_init = intel_iommu_init;
#endif

        if (dmar_tbl) {
                acpi_put_table(dmar_tbl);
                dmar_tbl = NULL;
        }
        up_write(&dmar_global_lock);

        return ret ? ret : 1;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
        iounmap(iommu->reg);
        release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
        int map_size, err = 0;

        iommu->reg_phys = phys_addr;
        iommu->reg_size = VTD_PAGE_SIZE;

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
                pr_err("Can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                pr_err("Can't map the region\n");
                err = -ENOMEM;
                goto release;
        }

        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                err = -EINVAL;
                warn_invalid_dmar(phys_addr, " returns all ones");
                goto unmap;
        }

        /* The registers might span more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                iounmap(iommu->reg);
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
                        pr_err("Can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
                        pr_err("Can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
        }
        err = 0;
        goto out;

unmap:
        iounmap(iommu->reg);
release:
        release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
        return err;
}

static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
        iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
                                            DMAR_UNITS_SUPPORTED);
        if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
                iommu->seq_id = -1;
        } else {
                set_bit(iommu->seq_id, dmar_seq_ids);
                sprintf(iommu->name, "dmar%d", iommu->seq_id);
        }

        return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
        if (iommu->seq_id >= 0) {
                clear_bit(iommu->seq_id, dmar_seq_ids);
                iommu->seq_id = -1;
        }
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        u32 ver, sts;
        int agaw = 0;
        int msagaw = 0;
        int err;

        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        if (dmar_alloc_seq_id(iommu) < 0) {
                pr_err("Failed to allocate seq_id\n");
                err = -ENOSPC;
                goto error;
        }

        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
                pr_err("Failed to map %s\n", iommu->name);
                goto error_free_seq_id;
        }

        err = -EINVAL;
        agaw = iommu_calculate_agaw(iommu);
        if (agaw < 0) {
                pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
                        iommu->seq_id);
                goto err_unmap;
        }
        msagaw = iommu_calculate_max_sagaw(iommu);
        if (msagaw < 0) {
                pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                        iommu->seq_id);
                goto err_unmap;
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;
        iommu->segment = drhd->segment;

        iommu->node = -1;

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                iommu->name,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        /* Reflect status in gcmd */
        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (sts & DMA_GSTS_IRES)
                iommu->gcmd |= DMA_GCMD_IRE;
        if (sts & DMA_GSTS_TES)
                iommu->gcmd |= DMA_GCMD_TE;
        if (sts & DMA_GSTS_QIES)
                iommu->gcmd |= DMA_GCMD_QIE;

        raw_spin_lock_init(&iommu->register_lock);

        if (intel_iommu_enabled) {
                err = iommu_device_sysfs_add(&iommu->iommu, NULL,
                                             intel_iommu_groups,
                                             "%s", iommu->name);
                if (err)
                        goto err_unmap;

                iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);

                err = iommu_device_register(&iommu->iommu);
                if (err)
                        goto err_unmap;
        }

        drhd->iommu = iommu;

        return 0;

err_unmap:
        unmap_iommu(iommu);
error_free_seq_id:
        dmar_free_seq_id(iommu);
error:
        kfree(iommu);
        return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
        if (intel_iommu_enabled) {
                iommu_device_unregister(&iommu->iommu);
                iommu_device_sysfs_remove(&iommu->iommu);
        }

        if (iommu->irq) {
                if (iommu->pr_irq) {
                        free_irq(iommu->pr_irq, iommu);
                        dmar_free_hwirq(iommu->pr_irq);
                        iommu->pr_irq = 0;
                }
                free_irq(iommu->irq, iommu);
                dmar_free_hwirq(iommu->irq);
                iommu->irq = 0;
        }

        if (iommu->qi) {
                free_page((unsigned long)iommu->qi->desc);
                kfree(iommu->qi->desc_status);
                kfree(iommu->qi);
        }

        if (iommu->reg)
                unmap_iommu(iommu);

        dmar_free_seq_id(iommu);
        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_DONE ||
               qi->desc_status[qi->free_tail] == QI_ABORT) {
                qi->desc_status[qi->free_tail] = QI_FREE;
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}
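
/*
 * Note on the invalidation queue layout (descriptive only): qi->desc is a
 * ring of QI_LENGTH descriptors tracked by qi->desc_status
 * (QI_FREE/QI_IN_USE/QI_DONE/QI_ABORT).  qi_submit_sync() always posts
 * descriptors in pairs - the request at an even index, a wait descriptor
 * at the following odd index - so the ring advances two slots at a time.
 */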

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
        int wait_index = (index + 1) % QI_LENGTH;

        if (qi->desc_status[wait_index] == QI_ABORT)
                return -EAGAIN;

        fault = readl(iommu->reg + DMAR_FSTS_REG);

        /*
         * If IQE happens, the head points to the descriptor associated
         * with the error. No new descriptors are fetched until the IQE
         * is cleared.
         */
        if (fault & DMA_FSTS_IQE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                if ((head >> DMAR_IQ_SHIFT) == index) {
                        pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
                                (unsigned long long)qi->desc[index].low,
                                (unsigned long long)qi->desc[index].high);
                        memcpy(&qi->desc[index], &qi->desc[wait_index],
                                        sizeof(struct qi_desc));
                        writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
                        return -EINVAL;
                }
        }

        /*
         * If ITE happens, all pending wait_desc commands are aborted.
         * No new descriptors are fetched until the ITE is cleared.
         */
        if (fault & DMA_FSTS_ITE) {
                head = readl(iommu->reg + DMAR_IQH_REG);
                head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
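                /* Wait descriptors sit at odd ring indices (descriptors are
                 * posted in request/wait pairs, see the note above
                 * reclaim_free_desc()), so force head onto a wait slot
                 * before walking back through the pending pairs. */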
                head |= 1;
                tail = readl(iommu->reg + DMAR_IQT_REG);
                tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

                writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

                do {
                        if (qi->desc_status[head] == QI_IN_USE)
                                qi->desc_status[head] = QI_ABORT;
                        head = (head - 2 + QI_LENGTH) % QI_LENGTH;
                } while (head != tail);

                if (qi->desc_status[wait_index] == QI_ABORT)
                        return -EAGAIN;
        }

        if (fault & DMA_FSTS_ICE)
                writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

        return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
        int rc;
        struct q_inval *qi = iommu->qi;
        struct qi_desc *hw, wait_desc;
        int wait_index, index;
        unsigned long flags;

        if (!qi)
                return 0;

        hw = qi->desc;

restart:
        rc = 0;

        raw_spin_lock_irqsave(&qi->q_lock, flags);
        while (qi->free_cnt < 3) {
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                raw_spin_lock_irqsave(&qi->q_lock, flags);
        }

        index = qi->free_head;
        wait_index = (index + 1) % QI_LENGTH;

        qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

        hw[index] = *desc;

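        /* The wait descriptor asks the hardware to write QI_DONE into
         * desc_status[wait_index] once everything queued before it has
         * been processed; the poll loop below spins on that update. */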
        wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
        wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

        hw[wait_index] = wait_desc;

        qi->free_head = (qi->free_head + 2) % QI_LENGTH;
        qi->free_cnt -= 2;

        /*
         * Update the HW tail register indicating the presence of
         * new descriptors.
         */
        writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

        while (qi->desc_status[wait_index] != QI_DONE) {
                /*
                 * We leave interrupts disabled to prevent the interrupt
                 * context from queueing another command while one is already
                 * submitted and waiting for completion on this CPU. This
                 * avoids a deadlock where the interrupt context could wait
                 * indefinitely for free slots in the queue.
                 */
                rc = qi_check_fault(iommu, index);
                if (rc)
                        break;

                raw_spin_unlock(&qi->q_lock);
                cpu_relax();
                raw_spin_lock(&qi->q_lock);
        }

        qi->desc_status[index] = QI_DONE;

        reclaim_free_desc(qi);
        raw_spin_unlock_irqrestore(&qi->q_lock, flags);

        if (rc == -EAGAIN)
                goto restart;

        return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
        struct qi_desc desc;

        desc.low = QI_IEC_TYPE;
        desc.high = 0;

        /* should never fail */
        qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
                      u64 type)
{
        struct qi_desc desc;

        desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
                        | QI_CC_GRAN(type) | QI_CC_TYPE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                    unsigned int size_order, u64 type)
{
        u8 dw = 0, dr = 0;

        struct qi_desc desc;
        int ih = 0;

        if (cap_write_drain(iommu->cap))
                dw = 1;

        if (cap_read_drain(iommu->cap))
                dr = 1;

        desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
                | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
        desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
                | QI_IOTLB_AM(size_order);

        qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
                        u64 addr, unsigned mask)
{
        struct qi_desc desc;

        if (mask) {
                BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
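                /* Per the VT-d Device-TLB invalidate encoding (an
                 * assumption from the spec, not stated in this file):
                 * filling the low address bits with ones leaves bit
                 * (VTD_PAGE_SHIFT + mask - 1) as the lowest zero bit,
                 * which tells the device the invalidation spans
                 * 2^mask pages. */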
                addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
                desc.high = QI_DEV_IOTLB_ADDR(addr);

        if (qdep >= QI_DEV_IOTLB_MAX_INVS)
                qdep = 0;

        desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
                   QI_DIOTLB_TYPE;

        qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;
        cycles_t start_time = get_cycles();

        if (!ecap_qis(iommu->ecap))
                return;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;

        /*
         * Give the hardware a chance to complete the pending invalidation
         * requests.
         */
        while ((readl(iommu->reg + DMAR_IQT_REG) !=
                readl(iommu->reg + DMAR_IQH_REG)) &&
                (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
                cpu_relax();

        iommu->gcmd &= ~DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
                      !(sts & DMA_GSTS_QIES), sts);
end:
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
        u32 sts;
        unsigned long flags;
        struct q_inval *qi = iommu->qi;

        qi->free_head = qi->free_tail = 0;
        qi->free_cnt = QI_LENGTH;

        raw_spin_lock_irqsave(&iommu->register_lock, flags);

        /* Write zero to the tail reg */
        writel(0, iommu->reg + DMAR_IQT_REG);

        dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

        iommu->gcmd |= DMA_GCMD_QIE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        /* Make sure the hardware completes it */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable the Queued Invalidation interface. This is required to support
 * interrupt remapping, and is also used by DMA remapping, where it
 * replaces register-based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
        struct q_inval *qi;
        struct page *desc_page;

        if (!ecap_qis(iommu->ecap))
                return -ENOENT;

        /*
         * Queued invalidation is already set up and enabled.
         */
        if (iommu->qi)
                return 0;

        iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
        if (!iommu->qi)
                return -ENOMEM;

        qi = iommu->qi;

        desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
        if (!desc_page) {
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        qi->desc = page_address(desc_page);

        qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
        if (!qi->desc_status) {
                free_page((unsigned long) qi->desc);
                kfree(qi);
                iommu->qi = NULL;
                return -ENOMEM;
        }

        raw_spin_lock_init(&qi->q_lock);

        __dmar_enable_qi(iommu);

        return 0;
}

/* IOMMU interrupt handling. Most of it is MSI-like. */
1474
1475 enum faulttype {
1476         DMA_REMAP,
1477         INTR_REMAP,
1478         UNKNOWN,
1479 };
1480
1481 static const char *dma_remap_fault_reasons[] =
1482 {
1483         "Software",
1484         "Present bit in root entry is clear",
1485         "Present bit in context entry is clear",
1486         "Invalid context entry",
1487         "Access beyond MGAW",
1488         "PTE Write access is not set",
1489         "PTE Read access is not set",
1490         "Next page table ptr is invalid",
1491         "Root table address invalid",
1492         "Context table ptr is invalid",
1493         "non-zero reserved fields in RTP",
1494         "non-zero reserved fields in CTP",
1495         "non-zero reserved fields in PTE",
1496         "PCE for translation request specifies blocking",
1497 };
1498
1499 static const char *irq_remap_fault_reasons[] =
1500 {
1501         "Detected reserved fields in the decoded interrupt-remapped request",
1502         "Interrupt index exceeded the interrupt-remapping table size",
1503         "Present field in the IRTE entry is clear",
1504         "Error accessing interrupt-remapping table pointed by IRTA_REG",
1505         "Detected reserved fields in the IRTE entry",
1506         "Blocked a compatibility format interrupt request",
1507         "Blocked an interrupt request due to source-id verification failure",
1508 };
1509
1510 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1511 {
1512         if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1513                                         ARRAY_SIZE(irq_remap_fault_reasons))) {
1514                 *fault_type = INTR_REMAP;
1515                 return irq_remap_fault_reasons[fault_reason - 0x20];
1516         } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1517                 *fault_type = DMA_REMAP;
1518                 return dma_remap_fault_reasons[fault_reason];
1519         } else {
1520                 *fault_type = UNKNOWN;
1521                 return "Unknown";
1522         }
1523 }
1524
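/*
 * Illustrative sketch, not part of the driver: decoding a raw fault reason
 * as reported in a fault record. Reasons below the size of
 * dma_remap_fault_reasons[] index that table; reasons at 0x20 and above
 * index irq_remap_fault_reasons[].
 */
static void __maybe_unused dmar_example_decode_reason(u8 raw_reason)
{
        int type;
        const char *reason = dmar_get_fault_reason(raw_reason, &type);

        pr_debug("reason %#04x (%s): %s\n", raw_reason,
                 type == INTR_REMAP ? "intr-remap" :
                 (type == DMA_REMAP ? "dma-remap" : "unknown"), reason);
}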
1525
1526 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1527 {
1528         if (iommu->irq == irq)
1529                 return DMAR_FECTL_REG;
1530         else if (iommu->pr_irq == irq)
1531                 return DMAR_PECTL_REG;
1532         else
1533                 BUG();
1534 }
1535
1536 void dmar_msi_unmask(struct irq_data *data)
1537 {
1538         struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1539         int reg = dmar_msi_reg(iommu, data->irq);
1540         unsigned long flag;
1541
1542         /* unmask it */
1543         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1544         writel(0, iommu->reg + reg);
        /* Read the register back to flush the posted write */
1546         readl(iommu->reg + reg);
1547         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1548 }
1549
1550 void dmar_msi_mask(struct irq_data *data)
1551 {
1552         struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1553         int reg = dmar_msi_reg(iommu, data->irq);
1554         unsigned long flag;
1555
1556         /* mask it */
1557         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1558         writel(DMA_FECTL_IM, iommu->reg + reg);
        /* Read the register back to flush the posted write */
1560         readl(iommu->reg + reg);
1561         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1562 }
1563
1564 void dmar_msi_write(int irq, struct msi_msg *msg)
1565 {
1566         struct intel_iommu *iommu = irq_get_handler_data(irq);
1567         int reg = dmar_msi_reg(iommu, irq);
1568         unsigned long flag;
1569
1570         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1571         writel(msg->data, iommu->reg + reg + 4);
1572         writel(msg->address_lo, iommu->reg + reg + 8);
1573         writel(msg->address_hi, iommu->reg + reg + 12);
1574         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1575 }
1576
1577 void dmar_msi_read(int irq, struct msi_msg *msg)
1578 {
1579         struct intel_iommu *iommu = irq_get_handler_data(irq);
1580         int reg = dmar_msi_reg(iommu, irq);
1581         unsigned long flag;
1582
1583         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1584         msg->data = readl(iommu->reg + reg + 4);
1585         msg->address_lo = readl(iommu->reg + reg + 8);
1586         msg->address_hi = readl(iommu->reg + reg + 12);
1587         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1588 }
1589
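/*
 * Illustrative sketch, not part of the driver: dumping the fault-event MSI
 * programming through the accessors above. The control, data, address and
 * upper-address registers sit at consecutive offsets, which is what the
 * reg + 4/8/12 arithmetic in dmar_msi_read()/dmar_msi_write() relies on.
 */
static void __maybe_unused dmar_example_msi_dump(struct intel_iommu *iommu)
{
        struct msi_msg msg;

        dmar_msi_read(iommu->irq, &msg);
        pr_info("%s: fault-event MSI addr %08x:%08x data %08x\n", iommu->name,
                msg.address_hi, msg.address_lo, msg.data);
}
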
1590 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1591                 u8 fault_reason, u16 source_id, unsigned long long addr)
1592 {
1593         const char *reason;
1594         int fault_type;
1595
1596         reason = dmar_get_fault_reason(fault_reason, &fault_type);
1597
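        /*
         * For interrupt-remapping faults the fault record's address field
         * carries the interrupt index in its upper 16 bits, hence the
         * addr >> 48 below; for DMA faults it holds the faulting address.
         */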
1598         if (fault_type == INTR_REMAP)
1599                 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
1600                         source_id >> 8, PCI_SLOT(source_id & 0xFF),
1601                         PCI_FUNC(source_id & 0xFF), addr >> 48,
1602                         fault_reason, reason);
1603         else
1604                 pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
1605                        type ? "DMA Read" : "DMA Write",
1606                        source_id >> 8, PCI_SLOT(source_id & 0xFF),
1607                        PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1608         return 0;
1609 }
1610
1611 #define PRIMARY_FAULT_REG_LEN (16)
1612 irqreturn_t dmar_fault(int irq, void *dev_id)
1613 {
1614         struct intel_iommu *iommu = dev_id;
1615         int reg, fault_index;
1616         u32 fault_status;
1617         unsigned long flag;
1618         bool ratelimited;
1619         static DEFINE_RATELIMIT_STATE(rs,
1620                                       DEFAULT_RATELIMIT_INTERVAL,
1621                                       DEFAULT_RATELIMIT_BURST);
1622
        /* When ratelimited, suppress the messages and simply clear the faults */
1624         ratelimited = !__ratelimit(&rs);
1625
1626         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1627         fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1628         if (fault_status && !ratelimited)
1629                 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1630
        /* TBD: advanced fault logging is not handled yet */
1632         if (!(fault_status & DMA_FSTS_PPF))
1633                 goto unlock_exit;
1634
1635         fault_index = dma_fsts_fault_record_index(fault_status);
1636         reg = cap_fault_reg_offset(iommu->cap);
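        /*
         * Each fault recording register is a 128-bit record: bytes 0-7
         * hold the faulting page address, bytes 8-9 the source id, and the
         * top dword (offset 12) packs the fault reason, the request type
         * and the F (fault) bit that is tested and cleared below.
         */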
1637         while (1) {
1638                 u8 fault_reason;
1639                 u16 source_id;
1640                 u64 guest_addr;
1641                 int type;
1642                 u32 data;
1643
                /* Highest 32 bits of the 128-bit fault record */
1645                 data = readl(iommu->reg + reg +
1646                                 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1647                 if (!(data & DMA_FRCD_F))
1648                         break;
1649
1650                 if (!ratelimited) {
1651                         fault_reason = dma_frcd_fault_reason(data);
1652                         type = dma_frcd_type(data);
1653
1654                         data = readl(iommu->reg + reg +
1655                                      fault_index * PRIMARY_FAULT_REG_LEN + 8);
1656                         source_id = dma_frcd_source_id(data);
1657
1658                         guest_addr = dmar_readq(iommu->reg + reg +
1659                                         fault_index * PRIMARY_FAULT_REG_LEN);
1660                         guest_addr = dma_frcd_page_addr(guest_addr);
1661                 }
1662
1663                 /* clear the fault */
1664                 writel(DMA_FRCD_F, iommu->reg + reg +
1665                         fault_index * PRIMARY_FAULT_REG_LEN + 12);
1666
1667                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1668
1669                 if (!ratelimited)
1670                         dmar_fault_do_one(iommu, type, fault_reason,
1671                                           source_id, guest_addr);
1672
1673                 fault_index++;
1674                 if (fault_index >= cap_num_fault_regs(iommu->cap))
1675                         fault_index = 0;
1676                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1677         }
1678
1679         writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1680
1681 unlock_exit:
1682         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1683         return IRQ_HANDLED;
1684 }
1685
1686 int dmar_set_interrupt(struct intel_iommu *iommu)
1687 {
1688         int irq, ret;
1689
1690         /*
1691          * Check if the fault interrupt is already initialized.
1692          */
1693         if (iommu->irq)
1694                 return 0;
1695
1696         irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1697         if (irq > 0) {
1698                 iommu->irq = irq;
1699         } else {
1700                 pr_err("No free IRQ vectors\n");
1701                 return -EINVAL;
1702         }
1703
1704         ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1705         if (ret)
1706                 pr_err("Can't request irq\n");
1707         return ret;
1708 }
1709
1710 int __init enable_drhd_fault_handling(void)
1711 {
1712         struct dmar_drhd_unit *drhd;
1713         struct intel_iommu *iommu;
1714
1715         /*
1716          * Enable fault control interrupt.
1717          */
1718         for_each_iommu(iommu, drhd) {
1719                 u32 fault_status;
1720                 int ret = dmar_set_interrupt(iommu);
1721
1722                 if (ret) {
                        pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
                               (unsigned long long)drhd->reg_base_addr, ret);
1725                         return -1;
1726                 }
1727
1728                 /*
1729                  * Clear any previous faults.
1730                  */
1731                 dmar_fault(iommu->irq, iommu);
1732                 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1733                 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1734         }
1735
1736         return 0;
1737 }
1738
1739 /*
1740  * Re-enable Queued Invalidation interface.
1741  */
1742 int dmar_reenable_qi(struct intel_iommu *iommu)
1743 {
1744         if (!ecap_qis(iommu->ecap))
1745                 return -ENOENT;
1746
1747         if (!iommu->qi)
1748                 return -ENOENT;
1749
1750         /*
1751          * First disable queued invalidation.
1752          */
1753         dmar_disable_qi(iommu);
        /*
         * Then enable queued invalidation again. Since there are no pending
         * invalidation requests now, it's safe to re-enable queued
         * invalidation.
         */
1759         __dmar_enable_qi(iommu);
1760
1761         return 0;
1762 }
1763
/*
 * Check interrupt remapping support in the DMAR table: bit 0 of the
 * flags field (INTR_REMAP) indicates platform support.
 */
1767 int __init dmar_ir_support(void)
1768 {
        struct acpi_table_dmar *dmar;

        dmar = (struct acpi_table_dmar *)dmar_tbl;
1771         if (!dmar)
1772                 return 0;
1773         return dmar->flags & 0x1;
1774 }
1775
1776 /* Check whether DMAR units are in use */
1777 static inline bool dmar_in_use(void)
1778 {
1779         return irq_remapping_enabled || intel_iommu_enabled;
1780 }
1781
1782 static int __init dmar_free_unused_resources(void)
1783 {
1784         struct dmar_drhd_unit *dmaru, *dmaru_n;
1785
1786         if (dmar_in_use())
1787                 return 0;
1788
1789         if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1790                 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
1791
1792         down_write(&dmar_global_lock);
1793         list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1794                 list_del(&dmaru->list);
1795                 dmar_free_drhd(dmaru);
1796         }
1797         up_write(&dmar_global_lock);
1798
1799         return 0;
1800 }
1801
1802 late_initcall(dmar_free_unused_resources);
1803 IOMMU_INIT_POST(detect_intel_iommu);
1804
1805 /*
1806  * DMAR Hotplug Support
1807  * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
1809  * "Remapping Hardware Unit Hot Plug".
1810  */
1811 static guid_t dmar_hp_guid =
1812         GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
1813                   0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
1814
/*
 * Currently there's only one revision and the BIOS will not check the
 * revision id, so use 0 for safety.
 */
1819 #define DMAR_DSM_REV_ID                 0
1820 #define DMAR_DSM_FUNC_DRHD              1
1821 #define DMAR_DSM_FUNC_ATSR              2
1822 #define DMAR_DSM_FUNC_RHSA              3
1823
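/*
 * Per the ACPI _DSM convention, function 0 returns a bitmask of the other
 * supported function indices, so acpi_check_dsm() probes for a specific
 * function with a 1 << func mask.
 */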
1824 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
1825 {
1826         return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
1827 }
1828
1829 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
1830                                   dmar_res_handler_t handler, void *arg)
1831 {
1832         int ret = -ENODEV;
1833         union acpi_object *obj;
1834         struct acpi_dmar_header *start;
1835         struct dmar_res_callback callback;
1836         static int res_type[] = {
1837                 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
1838                 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
1839                 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
1840         };
1841
1842         if (!dmar_detect_dsm(handle, func))
1843                 return 0;
1844
1845         obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
1846                                       func, NULL, ACPI_TYPE_BUFFER);
1847         if (!obj)
1848                 return -ENODEV;
1849
1850         memset(&callback, 0, sizeof(callback));
1851         callback.cb[res_type[func]] = handler;
1852         callback.arg[res_type[func]] = arg;
1853         start = (struct acpi_dmar_header *)obj->buffer.pointer;
1854         ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
1855
1856         ACPI_FREE(obj);
1857
1858         return ret;
1859 }
1860
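/*
 * Illustrative sketch, not part of the driver: handlers passed to
 * dmar_walk_dsm_resource() follow the dmar_res_handler_t signature and are
 * invoked once per matching remapping structure. This hypothetical
 * callback merely counts the DRHD entries found in the _DSM buffer.
 */
static int __maybe_unused dmar_example_count_drhd(struct acpi_dmar_header *header,
                                                  void *arg)
{
        (*(int *)arg)++;
        return 0;
}

/*
 * A caller would then do, e.g.:
 *      int count = 0;
 *      dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
 *                             dmar_example_count_drhd, &count);
 */
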
1861 static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
1862 {
1863         int ret;
1864         struct dmar_drhd_unit *dmaru;
1865
1866         dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1867         if (!dmaru)
1868                 return -ENODEV;
1869
1870         ret = dmar_ir_hotplug(dmaru, true);
1871         if (ret == 0)
1872                 ret = dmar_iommu_hotplug(dmaru, true);
1873
1874         return ret;
1875 }
1876
1877 static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
1878 {
1879         int i, ret;
1880         struct device *dev;
1881         struct dmar_drhd_unit *dmaru;
1882
1883         dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1884         if (!dmaru)
1885                 return 0;
1886
1887         /*
1888          * All PCI devices managed by this unit should have been destroyed.
1889          */
1890         if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
1891                 for_each_active_dev_scope(dmaru->devices,
1892                                           dmaru->devices_cnt, i, dev)
1893                         return -EBUSY;
1894         }
1895
1896         ret = dmar_ir_hotplug(dmaru, false);
1897         if (ret == 0)
1898                 ret = dmar_iommu_hotplug(dmaru, false);
1899
1900         return ret;
1901 }
1902
1903 static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
1904 {
1905         struct dmar_drhd_unit *dmaru;
1906
1907         dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1908         if (dmaru) {
1909                 list_del_rcu(&dmaru->list);
1910                 synchronize_rcu();
1911                 dmar_free_drhd(dmaru);
1912         }
1913
1914         return 0;
1915 }
1916
1917 static int dmar_hotplug_insert(acpi_handle handle)
1918 {
1919         int ret;
1920         int drhd_count = 0;
1921
1922         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1923                                      &dmar_validate_one_drhd, (void *)1);
1924         if (ret)
1925                 goto out;
1926
1927         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1928                                      &dmar_parse_one_drhd, (void *)&drhd_count);
1929         if (ret == 0 && drhd_count == 0) {
1930                 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
1931                 goto out;
1932         } else if (ret) {
1933                 goto release_drhd;
1934         }
1935
1936         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
1937                                      &dmar_parse_one_rhsa, NULL);
1938         if (ret)
1939                 goto release_drhd;
1940
1941         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1942                                      &dmar_parse_one_atsr, NULL);
1943         if (ret)
1944                 goto release_atsr;
1945
1946         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1947                                      &dmar_hp_add_drhd, NULL);
1948         if (!ret)
1949                 return 0;
1950
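        /*
         * Hot-add failed: unwind in the reverse order of the setup above,
         * tearing down whatever DRHD and ATSR state was already parsed.
         */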
1951         dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1952                                &dmar_hp_remove_drhd, NULL);
1953 release_atsr:
1954         dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1955                                &dmar_release_one_atsr, NULL);
1956 release_drhd:
1957         dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1958                                &dmar_hp_release_drhd, NULL);
1959 out:
1960         return ret;
1961 }
1962
1963 static int dmar_hotplug_remove(acpi_handle handle)
1964 {
1965         int ret;
1966
1967         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1968                                      &dmar_check_one_atsr, NULL);
1969         if (ret)
1970                 return ret;
1971
1972         ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1973                                      &dmar_hp_remove_drhd, NULL);
1974         if (ret == 0) {
1975                 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1976                                                &dmar_release_one_atsr, NULL));
1977                 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1978                                                &dmar_hp_release_drhd, NULL));
1979         } else {
1980                 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1981                                        &dmar_hp_add_drhd, NULL);
1982         }
1983
1984         return ret;
1985 }
1986
1987 static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
1988                                        void *context, void **retval)
1989 {
1990         acpi_handle *phdl = retval;
1991
1992         if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
1993                 *phdl = handle;
1994                 return AE_CTRL_TERMINATE;
1995         }
1996
1997         return AE_OK;
1998 }
1999
2000 static int dmar_device_hotplug(acpi_handle handle, bool insert)
2001 {
2002         int ret;
2003         acpi_handle tmp = NULL;
2004         acpi_status status;
2005
2006         if (!dmar_in_use())
2007                 return 0;
2008
2009         if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2010                 tmp = handle;
2011         } else {
2012                 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2013                                              ACPI_UINT32_MAX,
2014                                              dmar_get_dsm_handle,
2015                                              NULL, NULL, &tmp);
2016                 if (ACPI_FAILURE(status)) {
2017                         pr_warn("Failed to locate _DSM method.\n");
2018                         return -ENXIO;
2019                 }
2020         }
        if (!tmp)
2022                 return 0;
2023
2024         down_write(&dmar_global_lock);
2025         if (insert)
2026                 ret = dmar_hotplug_insert(tmp);
2027         else
2028                 ret = dmar_hotplug_remove(tmp);
2029         up_write(&dmar_global_lock);
2030
2031         return ret;
2032 }
2033
2034 int dmar_device_add(acpi_handle handle)
2035 {
2036         return dmar_device_hotplug(handle, true);
2037 }
2038
2039 int dmar_device_remove(acpi_handle handle)
2040 {
2041         return dmar_device_hotplug(handle, false);
2042 }