/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
 */
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>
18 uint32_t tioca_gart_found;
19 EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */
21 LIST_HEAD(tioca_list);
22 EXPORT_SYMBOL(tioca_list); /* used by agp-sgi */
24 static int tioca_gart_init(struct tioca_kernel *);
27 * tioca_gart_init - Initialize SGI TIOCA GART
28 * @tioca_common: ptr to common prom/kernel struct identifying the
30 * If the indicated tioca has devices present, initialize its associated
31 * GART MMR's and kernel memory.
34 tioca_gart_init(struct tioca_kernel *tioca_kern)
39 struct tioca_common *tioca_common;
40 struct tioca *ca_base;
42 tioca_common = tioca_kern->ca_common;
43 ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
45 if (list_empty(tioca_kern->ca_devices))
51 * Validate aperature size
54 switch (CA_APERATURE_SIZE >> 20) {
56 ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); /* 4MB */
59 ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); /* 8MB */
62 ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); /* 16MB */
65 ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); /* 32 MB */
68 ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); /* 64 MB */
71 ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); /* 128 MB */
74 ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); /* 256 MB */
77 ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); /* 512 MB */
80 ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT); /* 1GB */
83 ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); /* 2GB */
86 ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); /* 4 GB */
89 printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
90 "0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE);
95 * Set up other aperature parameters
98 if (PAGE_SIZE >= 16384) {
99 tioca_kern->ca_ap_pagesize = 16384;
100 ap_reg |= CA_GART_PAGE_SIZE;
102 tioca_kern->ca_ap_pagesize = 4096;
105 tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
106 tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
107 tioca_kern->ca_gart_entries =
108 tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;
110 ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
111 ap_reg |= tioca_kern->ca_ap_bus_base;
114 * Allocate and set up the GART
117 tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
119 alloc_pages_node(tioca_kern->ca_closest_node,
120 GFP_KERNEL | __GFP_ZERO,
121 get_order(tioca_kern->ca_gart_size));
124 printk(KERN_ERR "%s: Could not allocate "
125 "%lu bytes (order %d) for GART\n",
127 tioca_kern->ca_gart_size,
128 get_order(tioca_kern->ca_gart_size));
132 tioca_kern->ca_gart = page_address(tmp);
133 tioca_kern->ca_gart_coretalk_addr =
134 PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));
137 * Compute PCI/AGP convenience fields
140 offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
141 tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
142 tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
143 tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
144 tioca_kern->ca_pcigart_base =
145 tioca_kern->ca_gart_coretalk_addr + offset;
146 tioca_kern->ca_pcigart =
147 &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
148 tioca_kern->ca_pcigart_entries =
149 tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
150 tioca_kern->ca_pcigart_pagemap =
151 kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
152 if (!tioca_kern->ca_pcigart_pagemap) {
153 free_pages((unsigned long)tioca_kern->ca_gart,
154 get_order(tioca_kern->ca_gart_size));
158 offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
159 tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
160 tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
161 tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
162 tioca_kern->ca_gfxgart_base =
163 tioca_kern->ca_gart_coretalk_addr + offset;
164 tioca_kern->ca_gfxgart =
165 &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
166 tioca_kern->ca_gfxgart_entries =
167 tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;
170 * various control settings:
171 * use agp op-combining
172 * use GET semantics to fetch memory
173 * participate in coherency domain
174 * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
177 __sn_setq_relaxed(&ca_base->ca_control1,
178 CA_AGPDMA_OP_ENB_COMBDELAY); /* PV895469 ? */
179 __sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
180 __sn_setq_relaxed(&ca_base->ca_control2,
181 (0x2ull << CA_GART_MEM_PARAM_SHFT));
182 tioca_kern->ca_gart_iscoherent = 1;
183 __sn_clrq_relaxed(&ca_base->ca_control2,
184 (CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));
187 * Unmask GART fetch error interrupts. Clear residual errors first.
190 writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
191 writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
192 __sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);
195 * Program the aperature and gart registers in TIOCA
198 writeq(ap_reg, &ca_base->ca_gart_aperature);
199 writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table);
205 * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
206 * @tioca_kernel: structure representing the CA
208 * Given a CA, scan all attached functions making sure they all support
209 * FastWrite. If so, enable FastWrite for all functions and the CA itself.
213 tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
217 struct tioca *tioca_base;
218 struct pci_dev *pdev;
219 struct tioca_common *common;
221 common = tioca_kern->ca_common;
224 * Scan all vga controllers on this bus making sure they all
225 * suport FW. If not, return.
228 list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
229 if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
232 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
234 return; /* no AGP CAP means no FW */
236 pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, ®);
237 if (!(reg & PCI_AGP_STATUS_FW))
238 return; /* function doesn't support FW */
242 * Set fw for all vga fn's
245 list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
246 if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
249 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
250 pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, ®);
251 reg |= PCI_AGP_COMMAND_FW;
252 pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
256 * Set ca's fw to match
259 tioca_base = (struct tioca *)common->ca_common.bs_base;
260 __sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);
263 EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */
266 * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
267 * @paddr: system physical address
269 * Map @paddr into 64-bit CA bus space. No device context is necessary.
270 * Bits 53:0 come from the coretalk address. We just need to mask in the
271 * following optional bits of the 64-bit pci address:
273 * 63:60 - Coretalk Packet Type - 0x1 for Mem Get/Put (coherent)
274 * 0x2 for PIO (non-coherent)
275 * We will always use 0x1
276 * 55:55 - Swap bytes Currently unused
279 tioca_dma_d64(unsigned long paddr)
283 bus_addr = PHYS_TO_TIODMA(paddr);
286 BUG_ON(bus_addr >> 54);
288 /* Set upper nibble to Cache Coherent Memory op */
289 bus_addr |= (1UL << 60);
295 * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode
296 * @pdev: linux pci_dev representing the function
297 * @paddr: system physical address
299 * Map @paddr into 64-bit bus space of the CA associated with @pcidev_info.
301 * The CA agp 48 bit direct address falls out as follows:
303 * When direct mapping AGP addresses, the 48 bit AGP address is
304 * constructed as follows:
306 * [47:40] - Low 8 bits of the page Node ID extracted from coretalk
307 * address [47:40]. The upper 8 node bits are fixed
308 * and come from the xxx register bits [5:0]
309 * [39:38] - Chiplet ID extracted from coretalk address [39:38]
310 * [37:00] - node offset extracted from coretalk address [37:00]
312 * Since the node id in general will be non-zero, and the chiplet id
313 * will always be non-zero, it follows that the device must support
314 * a dma mask of at least 0xffffffffff (40 bits) to target node 0
315 * and in general should be 0xffffffffffff (48 bits) to target nodes
316 * up to 255. Nodes above 255 need the support of the xxx register,
317 * and so a given CA can only directly target nodes in the range
321 tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
323 struct tioca_common *tioca_common;
324 struct tioca *ca_base;
328 uint64_t agp_dma_extn;
329 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
331 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
332 ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
334 ct_addr = PHYS_TO_TIODMA(paddr);
338 bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL);
339 node_upper = ct_addr >> 48;
341 if (node_upper > 64) {
342 printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
343 "of range\n", __FUNCTION__, (void *)ct_addr);
347 agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
348 if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
349 printk(KERN_ERR "%s: coretalk upper node (%u) "
350 "mismatch with ca_agp_dma_addr_extn (%lu)\n",
352 node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
360 * tioca_dma_mapped - create a DMA mapping using a CA GART
361 * @pdev: linux pci_dev representing the function
362 * @paddr: host physical address to map
363 * @req_size: len (bytes) to map
365 * Map @paddr into CA address space using the GART mechanism. The mapped
366 * dma_addr_t is guarenteed to be contiguous in CA bus space.
369 tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
371 int i, ps, ps_shift, entry, entries, mapsize, last_entry;
372 uint64_t xio_addr, end_xio_addr;
373 struct tioca_common *tioca_common;
374 struct tioca_kernel *tioca_kern;
375 dma_addr_t bus_addr = 0;
376 struct tioca_dmamap *ca_dmamap;
379 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);;
381 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
382 tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
384 xio_addr = PHYS_TO_TIODMA(paddr);
388 spin_lock_irqsave(&tioca_kern->ca_lock, flags);
391 * allocate a map struct
394 ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC);
399 * Locate free entries that can hold req_size. Account for
400 * unaligned start/length when allocating.
403 ps = tioca_kern->ca_ap_pagesize; /* will be power of 2 */
404 ps_shift = ffs(ps) - 1;
405 end_xio_addr = xio_addr + req_size - 1;
407 entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;
409 map = tioca_kern->ca_pcigart_pagemap;
410 mapsize = tioca_kern->ca_pcigart_entries;
412 entry = find_first_zero_bit(map, mapsize);
413 while (entry < mapsize) {
414 last_entry = find_next_bit(map, mapsize, entry);
416 if (last_entry - entry >= entries)
419 entry = find_next_zero_bit(map, mapsize, last_entry);
425 for (i = 0; i < entries; i++)
426 set_bit(entry + i, map);
428 bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
430 ca_dmamap->cad_dma_addr = bus_addr;
431 ca_dmamap->cad_gart_size = entries;
432 ca_dmamap->cad_gart_entry = entry;
433 list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);
436 tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
437 bus_addr += xio_addr & (ps - 1);
438 xio_addr &= ~(ps - 1);
443 while (xio_addr < end_xio_addr) {
444 tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
449 tioca_tlbflush(tioca_kern);
452 spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
457 * tioca_dma_unmap - release CA mapping resources
458 * @pdev: linux pci_dev representing the function
459 * @bus_addr: bus address returned by an earlier tioca_dma_map
460 * @dir: mapping direction (unused)
462 * Locate mapping resources associated with @bus_addr and release them.
463 * For mappings created using the direct modes (64 or 48) there are no
464 * resources to release.
467 tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
470 struct tioca_common *tioca_common;
471 struct tioca_kernel *tioca_kern;
472 struct tioca_dmamap *map;
473 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
476 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
477 tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
479 /* return straight away if this isn't be a mapped address */
481 if (bus_addr < tioca_kern->ca_pciap_base ||
482 bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
485 spin_lock_irqsave(&tioca_kern->ca_lock, flags);
487 list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list)
488 if (map->cad_dma_addr == bus_addr)
493 entry = map->cad_gart_entry;
495 for (i = 0; i < map->cad_gart_size; i++, entry++) {
496 clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
497 tioca_kern->ca_pcigart[entry] = 0;
499 tioca_tlbflush(tioca_kern);
501 list_del(&map->cad_list);
502 spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
507 * tioca_dma_map - map pages for PCI DMA
508 * @pdev: linux pci_dev representing the function
509 * @paddr: host physical address to map
510 * @byte_count: bytes to map
512 * This is the main wrapper for mapping host physical pages to CA PCI space.
513 * The mapping mode used is based on the devices dma_mask. As a last resort
514 * use the GART mapped mode.
517 tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
522 * If card is 64 or 48 bit addresable, use a direct mapping. 32
523 * bit direct is so restrictive w.r.t. where the memory resides that
524 * we don't use it even though CA has some support.
527 if (pdev->dma_mask == ~0UL)
528 mapaddr = tioca_dma_d64(paddr);
529 else if (pdev->dma_mask == 0xffffffffffffUL)
530 mapaddr = tioca_dma_d48(pdev, paddr);
534 /* Last resort ... use PCI portion of CA GART */
537 mapaddr = tioca_dma_mapped(pdev, paddr, byte_count);
543 * tioca_error_intr_handler - SGI TIO CA error interrupt handler
545 * @arg: pointer to tioca_common struct for the given CA
548 * Handle a CA error interrupt. Simply a wrapper around a SAL call which
549 * defers processing to the SGI prom.
552 tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
554 struct tioca_common *soft = arg;
555 struct ia64_sal_retval ret_stuff;
558 ret_stuff.status = 0;
561 segment = soft->ca_common.bs_persist_segment;
562 busnum = soft->ca_common.bs_persist_busnum;
564 SAL_CALL_NOLOCK(ret_stuff,
565 (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
566 segment, busnum, 0, 0, 0, 0, 0);
572 * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
573 * @prom_bussoft: Common prom/kernel struct representing the bus
575 * Replicates the tioca_common pointed to by @prom_bussoft in kernel
576 * space. Allocates and initializes a kernel-only area for a given CA,
577 * and sets up an irq for handling CA error interrupts.
579 * On successful setup, returns the kernel version of tioca_common back to
583 tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
585 struct tioca_common *tioca_common;
586 struct tioca_kernel *tioca_kern;
589 /* sanity check prom rev */
591 if (sn_sal_rev() < 0x0406) {
593 (KERN_ERR "%s: SGI prom rev 4.06 or greater required "
594 "for tioca support\n", __FUNCTION__);
599 * Allocate kernel bus soft and copy from prom.
602 tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL);
606 memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
607 tioca_common->ca_common.bs_base |= __IA64_UNCACHED_OFFSET;
609 /* init kernel-private area */
611 tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL);
617 tioca_kern->ca_common = tioca_common;
618 spin_lock_init(&tioca_kern->ca_lock);
619 INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
620 tioca_kern->ca_closest_node =
621 nasid_to_cnodeid(tioca_common->ca_closest_nasid);
622 tioca_common->ca_kernel_private = (uint64_t) tioca_kern;
624 bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
625 tioca_common->ca_common.bs_persist_busnum);
627 tioca_kern->ca_devices = &bus->devices;
631 if (tioca_gart_init(tioca_kern) < 0) {
638 list_add(&tioca_kern->ca_list, &tioca_list);
640 if (request_irq(SGI_TIOCA_ERROR,
641 tioca_error_intr_handler,
642 SA_SHIRQ, "TIOCA error", (void *)tioca_common))
644 "%s: Unable to get irq %d. "
645 "Error interrupts won't be routed for TIOCA bus %d\n",
646 __FUNCTION__, SGI_TIOCA_ERROR,
647 (int)tioca_common->ca_common.bs_persist_busnum);
649 /* Setup locality information */
650 controller->node = tioca_kern->ca_closest_node;
654 static struct sn_pcibus_provider tioca_pci_interfaces = {
655 .dma_map = tioca_dma_map,
656 .dma_map_consistent = tioca_dma_map,
657 .dma_unmap = tioca_dma_unmap,
658 .bus_fixup = tioca_bus_fixup,
659 .force_interrupt = NULL,
660 .target_interrupt = NULL
664 * tioca_init_provider - init SN PCI provider ops for TIO CA
667 tioca_init_provider(void)
669 sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces;