/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/hw_irq.h>
/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */
#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
    (((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
    (((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
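/*
 * For example: with seg 0, bus 2, devfn 0x18 (device 3, function 0) and
 * reg 0x10, the compact encoding above packs to 0x00021810, while the
 * extended encoding packs the same fields to 0x00218010, leaving room for
 * 16-bit segment numbers and the 4096-byte extended config space.
 */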
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
                 int reg, int len, u32 *value)
{
    u64 addr, data = 0;
    int mode, result;

    if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
        return -EINVAL;

    if ((seg | reg) <= 255) {
        addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
        mode = 0;
    } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
        addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
        mode = 1;
    } else
        return -EINVAL;

    result = ia64_sal_pci_config_read(addr, mode, len, &data);
    if (result != 0)
        return -EINVAL;

    *value = (u32) data;
    return 0;
}
int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
                  int reg, int len, u32 value)
{
    u64 addr;
    int mode, result;

    if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
        return -EINVAL;

    if ((seg | reg) <= 255) {
        addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
        mode = 0;
    } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
        addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
        mode = 1;
    } else
        return -EINVAL;

    result = ia64_sal_pci_config_write(addr, mode, len, value);
    if (result != 0)
        return -EINVAL;
    return 0;
}
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                    int size, u32 *value)
{
    return raw_pci_read(pci_domain_nr(bus), bus->number,
                        devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                     int size, u32 value)
{
    return raw_pci_write(pci_domain_nr(bus), bus->number,
                         devfn, where, size, value);
}
struct pci_ops pci_root_ops = {
    .read = pci_read,
    .write = pci_write,
};
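/*
 * The generic PCI core dispatches configuration accesses through these ops,
 * so e.g. pci_bus_read_config_dword() ends up in pci_read() above, which
 * forwards to raw_pci_read() and thus to SAL.
 */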
struct pci_root_info {
    struct pci_controller controller;
    struct acpi_device *bridge;
    struct list_head resources;
    struct list_head io_resources;
    char name[16];
};
static unsigned int
new_space (u64 phys_base, int sparse)
{
    u64 mmio_base;
    int i;

    if (phys_base == 0)
        return 0;	/* legacy I/O port space */

    mmio_base = (u64) ioremap(phys_base, 0);
    for (i = 0; i < num_io_spaces; i++)
        if (io_space[i].mmio_base == mmio_base &&
            io_space[i].sparse == sparse)
            return i;

    if (num_io_spaces == MAX_IO_SPACES) {
        pr_err("PCI: Too many IO port spaces "
               "(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
        return ~0;
    }

    i = num_io_spaces++;
    io_space[i].mmio_base = mmio_base;
    io_space[i].sparse = sparse;
    return i;
}
static int add_io_space(struct device *dev, struct pci_root_info *info,
                        struct resource_entry *entry)
{
    struct resource_entry *iospace;
    struct resource *resource, *res = entry->res;
    char *name;
    unsigned long base, min, max, base_port;
    unsigned int sparse = 0, space_nr, len;

    len = strlen(info->name) + 32;
    iospace = resource_list_create_entry(NULL, len);
    if (!iospace) {
        dev_err(dev, "PCI: No memory for %s I/O port space\n",
                info->name);
        return -ENOMEM;
    }

    if (res->flags & IORESOURCE_IO_SPARSE)
        sparse = 1;
    space_nr = new_space(entry->offset, sparse);
    if (space_nr == ~0)
        goto free_resource;

    name = (char *)(iospace + 1);
    min = res->start - entry->offset;
    max = res->end - entry->offset;
    base = __pa(io_space[space_nr].mmio_base);
    base_port = IO_SPACE_BASE(space_nr);
    snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
             base_port + min, base_port + max);

    /*
     * The SDM guarantees the legacy 0-64K space is sparse, but if the
     * mapping is done by the processor (not the bridge), ACPI may not
     * mark it as sparse.
     */
    if (space_nr == 0)
        sparse = 1;

    resource = iospace->res;
    resource->name  = name;
    resource->flags = IORESOURCE_MEM;
    resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
    resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
    if (insert_resource(&iomem_resource, resource)) {
        dev_err(dev,
                "can't allocate host bridge io space resource %pR\n",
                resource);
        goto free_resource;
    }

    entry->offset = base_port;
    res->start = min + base_port;
    res->end = max + base_port;
    resource_list_add_tail(iospace, &info->io_resources);
    return 0;

free_resource:
    resource_list_free_entry(iospace);
    return -ENOSPC;
}
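/*
 * The net effect of add_io_space() is that the bridge's translated I/O
 * window gets its own slot in io_space[], so port numbers in the range
 * IO_SPACE_BASE(space_nr) + [min..max] are routed by the port accessors
 * (inb/outb and friends) to the MMIO region the bridge decodes.
 */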
/*
 * An IO port or MMIO resource assigned to a PCI host bridge may be
 * consumed by the host bridge itself or available to its child
 * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
 * to tell whether the resource is consumed by the host bridge itself,
 * but firmware hasn't used that bit consistently, so we can't rely on it.
 *
 * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
 * to be available to child bus/devices except one special case:
 *     IO port [0xCF8-0xCFF] is consumed by the host bridge itself
 *     to access PCI configuration space.
 *
 * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF].
 */
static bool resource_is_pcicfg_ioport(struct resource *res)
{
    return (res->flags & IORESOURCE_IO) &&
           res->start == 0xCF8 && res->end == 0xCFF;
}
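/*
 * probe_pci_root_info() below uses this helper to drop the 0xCF8-0xCFF
 * window from the host bridge's _CRS resource list before the remaining
 * windows are validated and inserted.
 */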
static int
probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
                    int busnum, int domain)
{
    int ret;
    struct list_head *list = &info->resources;
    struct resource_entry *entry, *tmp;

    ret = acpi_dev_get_resources(device, list,
                                 acpi_dev_filter_resource_type_cb,
                                 (void *)(IORESOURCE_IO | IORESOURCE_MEM));
    if (ret < 0)
        dev_warn(&device->dev,
                 "failed to parse _CRS method, error code %d\n", ret);
    else if (ret == 0)
        dev_dbg(&device->dev,
                "no IO and memory resources present in _CRS\n");
    else
        resource_list_for_each_entry_safe(entry, tmp, list) {
            if ((entry->res->flags & IORESOURCE_DISABLED) ||
                resource_is_pcicfg_ioport(entry->res))
                resource_list_destroy_entry(entry);
            else
                entry->res->name = info->name;
        }

    return ret;
}
static void validate_resources(struct device *dev, struct list_head *resources,
                               unsigned long type)
{
    LIST_HEAD(list);
    struct resource *res1, *res2, *root = NULL;
    struct resource_entry *tmp, *entry, *entry2;

    BUG_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0);
    root = (type & IORESOURCE_MEM) ? &iomem_resource : &ioport_resource;

    list_splice_init(resources, &list);
    resource_list_for_each_entry_safe(entry, tmp, &list) {
        bool free = false;
        resource_size_t end;

        res1 = entry->res;
        if (!(res1->flags & type))
            goto next;

        /* Exclude non-addressable range or non-addressable portion */
        end = min(res1->end, root->end);
        if (end <= res1->start) {
            dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n",
                     res1);
            free = true;
            goto next;
        } else if (res1->end != end) {
            dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n",
                     res1, (unsigned long long)end + 1,
                     (unsigned long long)res1->end);
            res1->end = end;
        }

        resource_list_for_each_entry(entry2, resources) {
            res2 = entry2->res;
            if (!(res2->flags & type))
                continue;
            /*
             * I don't like throwing away windows because then
             * our resources no longer match the ACPI _CRS, but
             * the kernel resource tree doesn't allow overlaps.
             */
            if (resource_overlaps(res1, res2)) {
                res2->start = min(res1->start, res2->start);
                res2->end = max(res1->end, res2->end);
                dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n",
                         res2, res1);
                free = true;
                goto next;
            }
        }
next:
        resource_list_del(entry);
        if (free)
            resource_list_free_entry(entry);
        else
            resource_list_add_tail(entry, resources);
    }
}
static void add_resources(struct pci_root_info *info, struct device *dev)
{
    struct resource_entry *entry, *tmp;
    struct resource *res, *conflict, *root = NULL;
    struct list_head *list = &info->resources;

    validate_resources(dev, list, IORESOURCE_MEM);
    validate_resources(dev, list, IORESOURCE_IO);

    resource_list_for_each_entry_safe(entry, tmp, list) {
        res = entry->res;
        if (res->flags & IORESOURCE_MEM) {
            root = &iomem_resource;
            /*
             * HP's firmware has a hack to work around a Windows
             * bug. Ignore these tiny memory ranges.
             */
            if (resource_size(res) <= 16) {
                resource_list_destroy_entry(entry);
                continue;
            }
        } else if (res->flags & IORESOURCE_IO) {
            root = &ioport_resource;
            if (add_io_space(&info->bridge->dev, info, entry)) {
                resource_list_destroy_entry(entry);
                continue;
            }
        }

        conflict = insert_resource_conflict(root, res);
        if (conflict) {
            dev_info(dev,
                     "ignoring host bridge window %pR (conflicts with %s %pR)\n",
                     res, conflict->name, conflict);
            resource_list_destroy_entry(entry);
        }
    }
}
static void __release_pci_root_info(struct pci_root_info *info)
{
    struct resource *res;
    struct resource_entry *entry, *tentry;

    resource_list_for_each_entry_safe(entry, tentry, &info->io_resources) {
        release_resource(entry->res);
        resource_list_destroy_entry(entry);
    }

    resource_list_for_each_entry_safe(entry, tentry, &info->resources) {
        res = entry->res;
        if (res->parent &&
            (res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
            release_resource(res);
        resource_list_destroy_entry(entry);
    }
    kfree(info);
}
static void release_pci_root_info(struct pci_host_bridge *bridge)
{
    struct pci_root_info *info = bridge->release_data;

    __release_pci_root_info(info);
}
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
    struct acpi_device *device = root->device;
    int domain = root->segment;
    int bus = root->secondary.start;
    struct pci_root_info *info;
    struct pci_bus *pbus;
    int ret;

    info = kzalloc(sizeof(*info), GFP_KERNEL);
    if (!info) {
        dev_err(&device->dev,
                "pci_bus %04x:%02x: ignored (out of memory)\n",
                domain, bus);
        return NULL;
    }

    info->controller.segment = domain;
    info->controller.companion = device;
    info->controller.node = acpi_get_node(device->handle);
    info->bridge = device;
    INIT_LIST_HEAD(&info->resources);
    INIT_LIST_HEAD(&info->io_resources);
    snprintf(info->name, sizeof(info->name),
             "PCI Bus %04x:%02x", domain, bus);

    ret = probe_pci_root_info(info, device, bus, domain);
    if (ret <= 0) {
        kfree(info);
        return NULL;
    }
    add_resources(info, &info->bridge->dev);
    pci_add_resource(&info->resources, &root->secondary);

    /*
     * See arch/x86/pci/acpi.c.
     * The desired pci bus might already be scanned in a quirk. We
     * should handle the case here, but it appears that IA64 has no
     * such quirk, so we just ignore the case for now.
     */
    pbus = pci_create_root_bus(NULL, bus, &pci_root_ops,
                               &info->controller, &info->resources);
    if (!pbus) {
        __release_pci_root_info(info);
        return NULL;
    }

    pci_set_host_bridge_release(to_pci_host_bridge(pbus->bridge),
                                release_pci_root_info, info);
    pci_scan_child_bus(pbus);
    return pbus;
}
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
    /*
     * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
     * here, pci_create_root_bus() has been called by someone else and
     * sysdata is likely to be different from what we expect. Let it go in
     * that case.
     */
    if (!bridge->dev.parent) {
        struct pci_controller *controller = bridge->bus->sysdata;
        ACPI_COMPANION_SET(&bridge->dev, controller->companion);
    }
    return 0;
}
void pcibios_fixup_device_resources(struct pci_dev *dev)
{
    int idx;

    if (!dev->bus)
        return;

    for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
        struct resource *r = &dev->resource[idx];

        if (!r->flags || r->parent || !r->start)
            continue;

        pci_claim_resource(dev, idx);
    }
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
    int idx;

    if (!dev->bus)
        return;

    for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
        struct resource *r = &dev->resource[idx];

        if (!r->flags || r->parent || !r->start)
            continue;

        pci_claim_bridge_resource(dev, idx);
    }
}
/*
 *  Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
    struct pci_dev *dev;

    if (b->self) {
        pci_read_bridge_bases(b);
        pcibios_fixup_bridge_resources(b->self);
    }
    list_for_each_entry(dev, &b->devices, bus_list)
        pcibios_fixup_device_resources(dev);
    platform_pci_fixup_bus(b);
}
void pcibios_add_bus(struct pci_bus *bus)
{
    acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
    acpi_pci_remove_bus(bus);
}
void pcibios_set_master (struct pci_dev *dev)
{
    /* No special bus mastering setup handling */
}
int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
    int ret;

    ret = pci_enable_resources(dev, mask);
    if (ret < 0)
        return ret;

    if (!dev->msi_enabled)
        return acpi_pci_irq_enable(dev);
    return 0;
}
void
pcibios_disable_device (struct pci_dev *dev)
{
    BUG_ON(atomic_read(&dev->enable_cnt));
    if (!dev->msi_enabled)
        acpi_pci_irq_disable(dev);
}
resource_size_t
pcibios_align_resource (void *data, const struct resource *res,
                        resource_size_t size, resource_size_t align)
{
    return res->start;
}
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
                     enum pci_mmap_state mmap_state, int write_combine)
{
    unsigned long size = vma->vm_end - vma->vm_start;
    pgprot_t prot;

    /*
     * I/O space cannot be accessed via normal processor loads and
     * stores on this platform.
     */
    if (mmap_state == pci_mmap_io)
        /*
         * XXX we could relax this for I/O spaces for which ACPI
         * indicates that the space is 1-to-1 mapped. But at the
         * moment, we don't support multiple PCI address spaces and
         * the legacy I/O space is not 1-to-1 mapped, so this is moot.
         */
        return -EINVAL;

    if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
        return -EINVAL;

    prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
                                vma->vm_page_prot);

    /*
     * If the user requested WC, the kernel uses UC or WC for this region,
     * and the chipset supports WC, we can use WC. Otherwise, we have to
     * use the same attribute the kernel uses.
     */
    if (write_combine &&
        ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
         (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
        efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    else
        vma->vm_page_prot = prot;

    if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                        vma->vm_end - vma->vm_start, vma->vm_page_prot))
        return -EAGAIN;

    return 0;
}
/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus. This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing. Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine. Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
    return (char *)__IA64_UNCACHED_OFFSET;
}
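/*
 * __IA64_UNCACHED_OFFSET is the base of the kernel's uncacheable identity
 * mapping, so legacy memory accesses through this pointer bypass the
 * caches, as legacy devices expect.
 */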
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 * @mmap_state: memory or I/O space
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
                           enum pci_mmap_state mmap_state)
{
    unsigned long size = vma->vm_end - vma->vm_start;
    pgprot_t prot;
    char *addr;

    /* We only support mmap'ing of legacy memory space */
    if (mmap_state != pci_mmap_mem)
        return -ENOSYS;

    /*
     * Avoid attribute aliasing. See Documentation/ia64/aliasing.txt
     * for more details.
     */
    if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
        return -EINVAL;
    prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
                                vma->vm_page_prot);

    addr = pci_get_legacy_mem(bus);
    if (IS_ERR(addr))
        return PTR_ERR(addr);

    vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
    vma->vm_page_prot = prot;

    if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                        size, vma->vm_page_prot))
        return -EAGAIN;

    return 0;
}
/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform. This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
    int ret = size;

    switch (size) {
    case 1: *val = inb(port);  break;
    case 2: *val = inw(port);  break;
    case 4: *val = inl(port);  break;
    default: ret = -EINVAL;    break;
    }
    return ret;
}
/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
    int ret = size;

    switch (size) {
    case 1: outb(val, port);  break;
    case 2: outw(val, port);  break;
    case 4: outl(val, port);  break;
    default: ret = -EINVAL;   break;
    }
    return ret;
}
/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache. We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
    unsigned long levels, unique_caches;
    long status;
    pal_cache_config_info_t cci;

    status = ia64_pal_cache_summary(&levels, &unique_caches);
    if (status != 0) {
        pr_err("%s: ia64_pal_cache_summary() failed "
               "(status=%ld)\n", __func__, status);
        return;
    }

    status = ia64_pal_cache_config_info(levels - 1,
                /* cache_type (data_or_unified)= */ 2, &cci);
    if (status != 0) {
        pr_err("%s: ia64_pal_cache_config_info() failed "
               "(status=%ld)\n", __func__, status);
        return;
    }
    /*
     * pcci_line_size is the log2 of the line size in bytes; the PCI
     * cache line size register is in units of 32-bit words, hence /4.
     */
    pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
u64 ia64_dma_get_required_mask(struct device *dev)
{
    u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
    u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
    u64 mask;

    if (!high_totalram) {
        /* convert to mask just covering totalram */
        low_totalram = (1 << (fls(low_totalram) - 1));
        low_totalram += low_totalram - 1;
        mask = low_totalram;
    } else {
        high_totalram = (1 << (fls(high_totalram) - 1));
        high_totalram += high_totalram - 1;
        mask = (((u64)high_totalram) << 32) + 0xffffffff;
    }
    return mask;
}
EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
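/*
 * For example: with 8 GB of RAM and 16 KB pages, max_pfn is 0x80000, so
 * high_totalram works out to 1 and the computed mask is 0x1ffffffff, i.e.
 * just wide enough to cover the top of physical memory.
 */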
u64 dma_get_required_mask(struct device *dev)
{
    return platform_dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init pcibios_init(void)
{
    set_pci_dfl_cacheline_size();
    return 0;
}
subsys_initcall(pcibios_init);