/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H
#define ARCH_HAS_IOREMAP_WC
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>
/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * memory mapped I/O.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

# define __relaxed_ioswabb ioswabb
# define __relaxed_ioswabw ioswabw
# define __relaxed_ioswabl ioswabl
# define __relaxed_ioswabq ioswabq

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
#define IO_SPACE_LIMIT 0xffff
/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern unsigned long mips_io_port_base;

static inline void set_io_port_base(unsigned long base)
{
	mips_io_port_base = base;
}
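
/*
 * Illustrative sketch (not compiled): platform setup code is expected to
 * set the port base early during boot; after that the port accessors
 * defined later in this file (outb(), inb(), ...) work relative to it.
 * The bus address below is hypothetical; CKSEG1ADDR() yields an uncached
 * virtual address whose low 16 bits stay zero, as required above.
 */
#if 0
static void __init example_plat_io_init(void)
{
	set_io_port_base(CKSEG1ADDR(0x18000000));	/* hypothetical PIO window */
}
#endif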
/*
 * Provide the necessary definitions for generic iomap.  We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses
 * for use with I/O ports.
 */
#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL
/*
 * Enforce in-order execution of data I/O.  In the MIPS architecture
 * these are equivalent to corresponding platform-specific memory
 * barriers defined in <asm/barrier.h>.  API pinched from PowerPC,
 * with sync additionally defined.
 */
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()
/*
 * virt_to_phys	-	map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc().
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return __pa(address);
}
/*
 * phys_to_virt	-	map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
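
/*
 * Illustrative sketch (not compiled): for directly-mapped kernel memory,
 * e.g. obtained from kmalloc(), virt_to_phys() and phys_to_virt() are
 * inverses of each other.
 */
#if 0
static void example_linear_map_round_trip(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);

	if (buf) {
		unsigned long pa = virt_to_phys(buf);	/* CPU physical address */

		WARN_ON(phys_to_virt(pa) != buf);	/* round trip is exact */
		kfree(buf);
	}
}
#endif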
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}
/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
152 * Change "struct page" to physical address.
154 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
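
/*
 * Illustrative sketch (not compiled): page_to_phys() turns a struct page
 * into its physical address (pfn << PAGE_SHIFT).
 */
#if 0
static dma_addr_t example_page_phys(struct page *page)
{
	return page ? page_to_phys(page) : 0;
}
#endif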
extern void __iomem *__ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);
static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_addr_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}
/*
 * ioremap_prot	-	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
static inline void __iomem *ioremap_prot(phys_addr_t offset,
		unsigned long size, unsigned long prot_val)
{
	return __ioremap_mode(offset, size, (prot_val & _CACHE_MASK));
}
/*
 * ioremap	-	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_nocache ioremap
#define ioremap_uc ioremap
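
/*
 * Illustrative sketch (not compiled): a driver maps a device register
 * window uncached and accesses it with readl()/writel().  The base
 * address and register offsets are hypothetical.
 */
#if 0
static int example_probe(void)
{
	void __iomem *regs = ioremap(0x1fd00000, 0x1000);	/* hypothetical */
	u32 id;

	if (!regs)
		return -ENOMEM;
	id = readl(regs + 0x00);	/* hypothetical ID register */
	writel(0x1, regs + 0x04);	/* hypothetical enable register */
	iounmap(regs);
	return id != 0xffffffff ? 0 : -ENODEV;
}
#endif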
/*
 * ioremap_cache	-	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_cache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cacheable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cache(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)
/*
 * ioremap_wc	-	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_wc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * but accelerated by means of the write-combining feature.  It is
 * specifically useful for PCIe prefetchable windows, where it may vastly
 * improve communication performance.  If it is determined at boot that
 * the CPU's CCAs don't support UCA, this method falls back to
 * _CACHE_UNCACHED (see cpu_probe()).
 */
#define ioremap_wc(offset, size)					\
	__ioremap_mode((offset), (size), boot_cpu_data.writecombine)
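
/*
 * Illustrative sketch (not compiled): mapping a prefetchable PCIe window,
 * e.g. a framebuffer, write-combined.  If the CPU lacks a UCA CCA this
 * silently degrades to an uncached mapping.  All names are hypothetical.
 */
#if 0
static void __iomem *example_map_fb(phys_addr_t fb_base, size_t fb_size)
{
	void __iomem *fb = ioremap_wc(fb_base, fb_size);

	if (fb)
		memset_io(fb, 0, fb_size);	/* clear the frame buffer */
	return fb;
}
#endif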
static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}
#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON64)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq)	\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	push"		"\t\t# __writeq""\n\t"	\
			".set	arch=r4000"			"\n\t"	\
			"dsll32 %L0, %L0, 0"			"\n\t"	\
			"dsrl32 %L0, %L0, 0"			"\n\t"	\
			"dsll32 %M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	pop"				"\n\t"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	push"		"\t\t# __readq" "\n\t"	\
			".set	arch=r4000"			"\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32 %M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	pop"				"\n\t"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__mem, __val);				\
}
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p)	\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__addr, __val);			\
}
#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax)			\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1)				\
__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(, bwlq, type, 0)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)

#ifdef CONFIG_64BIT
BUILDIO_MEM(q, u64)
#else
__BUILD_MEMORY_PFX(__raw_, q, u64, 0)
__BUILD_MEMORY_PFX(__mem_, q, u64, 0)
#endif
#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)
BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)

#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
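
/*
 * Illustrative sketch (not compiled): the BUILDIO_IOPORT() expansions
 * above provide inb()/outb() and friends, plus the _p "pause" variants.
 * The port number below is hypothetical.
 */
#if 0
static u16 example_port_io(void)
{
	outw(0x1234, 0x170);	/* hypothetical 16-bit data port */
	return inw(0x170);
}
#endif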
#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)

__BUILDIO(q, u64)
#define readb_relaxed			__relaxed_readb
#define readw_relaxed			__relaxed_readw
#define readl_relaxed			__relaxed_readl
#ifdef CONFIG_64BIT
#define readq_relaxed			__relaxed_readq
#endif

#define writeb_relaxed			__relaxed_writeb
#define writew_relaxed			__relaxed_writew
#define writel_relaxed			__relaxed_writel
#ifdef CONFIG_64BIT
#define writeq_relaxed			__relaxed_writeq
#endif
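
/*
 * Illustrative sketch (not compiled): the _relaxed accessors omit the
 * "prevent prefetching of coherent DMA data" read barrier, which is safe
 * for pure status polling.  The register offset and BUSY bit are
 * hypothetical.
 */
#if 0
static void example_poll_busy(void __iomem *regs)
{
	while (readl_relaxed(regs + 0x08) & 0x1)	/* hypothetical BUSY bit */
		cpu_relax();
}
#endif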
#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
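
/*
 * Illustrative sketch (not compiled): accessing a device whose registers
 * are fixed big-endian, independent of the kernel's endianness.  The
 * register offset and bit are hypothetical.
 */
#if 0
static void example_be_regs(void __iomem *regs)
{
	u32 ctrl = readl_be(regs + 0x10);	/* hypothetical CTRL register */

	writel_be(ctrl | 0x1, regs + 0x10);	/* set hypothetical enable bit */
}
#endif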
/*
 * Some code tests for these symbols
 */
#ifdef CONFIG_64BIT
#define readq				readq
#define writeq				writeq
#endif
#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}
#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}
#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
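
/*
 * Illustrative sketch (not compiled): the string accessors transfer a
 * whole buffer with one bus access per element, e.g. draining a 16-bit
 * data FIFO.  The port number and length are hypothetical.
 */
#if 0
static void example_drain_fifo(void *buf)
{
	insw(0x1f0, buf, 256);	/* hypothetical: 256 halfwords from a data port */
}
#endif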
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}

static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}

static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
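
/*
 * Illustrative sketch (not compiled): memcpy_toio() copies a buffer into
 * an ioremap()ed device window, e.g. to download firmware.
 */
#if 0
static void example_load_fw(void __iomem *dev_mem, const void *fw, size_t len)
{
	memcpy_toio(dev_mem, fw, len);
	wmb();	/* order the copy before any subsequent doorbell write */
}
#endif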
/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    This is needed before DMA transfers from memory to a device.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *
 * This API used to be exported; it is now for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
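
/*
 * Illustrative sketch (not compiled): csr_in32()/csr_out32() hide the
 * big-endian address adjustment for 32-bit CSRs that live in
 * 64-bit-wide, 8-byte-aligned slots.  The base address is hypothetical.
 */
#if 0
static u32 example_csr_read(void)
{
	unsigned long csr = CKSEG1ADDR(0x1c000000);	/* hypothetical CSR base */

	csr_out32(0x1, csr);	/* address adjusted on big-endian kernels */
	return csr_in32(csr);
}
#endif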
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p
void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */