/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005  MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

# define __relaxed_ioswabb ioswabb
# define __relaxed_ioswabw ioswabw
# define __relaxed_ioswabl ioswabl
# define __relaxed_ioswabq ioswabq

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions. mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern unsigned long mips_io_port_base;

static inline void set_io_port_base(unsigned long base)
{
	mips_io_port_base = base;
}
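
/*
 * A minimal usage sketch (hypothetical platform setup code, not part of
 * this header): board code typically points the port base at an uncached
 * window before the first inb()/outb().  E.g. for a platform whose
 * ISA/PCI I/O window starts at physical 0x18000000:
 *
 *	set_io_port_base((unsigned long) CKSEG1ADDR(0x18000000));
 *
 * Note the chosen base keeps the low 16 bits zero, as required above.
 */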

/*
 * Provide the necessary definitions for generic iomap. We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses for
 * use with I/O ports.
 */

#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL

/*
 * Enforce in-order execution of data I/O.  In the MIPS architecture
 * these are equivalent to corresponding platform-specific memory
 * barriers defined in <asm/barrier.h>.  API pinched from PowerPC,
 * with sync additionally defined.
 */
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()

/*
 *     virt_to_phys    -       map virtual addresses to physical
 *     @address: address to remap
 *
 *     The returned physical address is the physical (CPU) mapping for
 *     the memory address given. It is only valid to use this function on
 *     addresses directly mapped or allocated via kmalloc.
 *
 *     This function does not give bus mappings for DMA transfers. In
 *     almost all conceivable cases a device driver should not be using
 *     this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return __pa(address);
}

/*
 *     phys_to_virt    -       map physical address to virtual
 *     @address: address to remap
 *
 *     The returned virtual address is a current CPU mapping for
 *     the memory address given. It is only valid to use this function on
 *     addresses that have a kernel mapping.
 *
 *     This function does not handle bus mappings for DMA transfers. In
 *     almost all conceivable cases a device driver should not be using
 *     this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
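
/*
 * A short sketch (hypothetical caller, assuming a kmalloc()ed lowmem
 * buffer) of how the two helpers above invert each other for directly
 * mapped memory:
 *
 *	void *buf = kmalloc(256, GFP_KERNEL);
 *	unsigned long phys = virt_to_phys(buf);
 *
 *	WARN_ON(phys_to_virt(phys) != buf);
 *
 * Neither call is valid for vmalloc() or ioremap() addresses.
 */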

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);

static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2-bit uncached attribute, therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_addr_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

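/*
 * A worked example of the constant fast path above (illustrative only):
 * on a 32-bit kernel, ioremap(0x1f000000, 0x1000) with all arguments
 * compile-time constant lands entirely in the low 512MB, so it folds to
 * CKSEG1ADDR(0x1f000000) == 0xbf000000 without touching any page tables.
 */
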
/*
 * ioremap_prot     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
static inline void __iomem *ioremap_prot(phys_addr_t offset,
		unsigned long size, unsigned long prot_val)
{
	return __ioremap_mode(offset, size, prot_val & _CACHE_MASK);
}

/*
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_nocache		ioremap
#define ioremap_uc		ioremap

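/*
 * A minimal usage sketch (hypothetical driver; the base address and
 * register offsets are invented for illustration):
 *
 *	void __iomem *regs = ioremap(dev_phys_base, 0x1000);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);		... hypothetical control register
 *	status = readl(regs + 0x08);	... hypothetical status register
 *	iounmap(regs);
 *
 * The returned cookie must only be dereferenced through the MMIO
 * accessors defined below, never as a plain pointer.
 */
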
/*
 * ioremap_cache -	map bus memory into CPU space
 * @offset:	    bus address of the memory
 * @size:	    size of the resource to map
 *
 * ioremap_cache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.	 Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cache(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)

/*
 * ioremap_wc     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_wc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * but accelerated by means of the write-combining feature. It is
 * specifically useful for PCIe prefetchable windows, where it may vastly
 * improve communication performance. If it was determined at boot that
 * the CPU CCA doesn't support UCA, this method falls back to the
 * _CACHE_UNCACHED option (see cpu_probe()).
 */
#define ioremap_wc(offset, size)					\
	__ioremap_mode((offset), (size), boot_cpu_data.writecombine)

static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON64)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq)	\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	push"		"\t\t# __writeq""\n\t"	\
			".set	arch=r4000"			"\n\t"	\
			"dsll32 %L0, %L0, 0"			"\n\t"	\
			"dsrl32 %L0, %L0, 0"			"\n\t"	\
			"dsll32 %M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	pop"				"\n"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	push"		"\t\t# __readq" "\n\t"	\
			".set	arch=r4000"			"\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32 %M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	pop"				"\n"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

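/*
 * For reference, a sketch of what one instantiation below expands to
 * (edited for readability, not literal preprocessor output):
 *
 *	static inline void writeb(u8 val, volatile void __iomem *mem)
 *	{
 *		volatile u8 *__mem;
 *		iobarrier_rw();
 *		__mem = (void *)__swizzle_addr_b((unsigned long)(mem));
 *		*__mem = ioswabb(__mem, val);
 *	}
 *
 * i.e. a barrier, an address mangle, a byteswap hook, then a plain store.
 */
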
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p)	\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax)			\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1)				\
__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0)				\
__BUILD_MEMORY_PFX(, bwlq, type, 0)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_MEM(q, u64)
#else
__BUILD_MEMORY_PFX(__raw_, q, u64, 0)
__BUILD_MEMORY_PFX(__mem_, q, u64, 0)
#endif

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif

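/*
 * The instantiations above provide inb()/outb() and friends.  A short
 * usage sketch (hypothetical legacy-style device; the port numbers are
 * invented for illustration):
 *
 *	outb(0x0a, 0x3f8);		... write one byte to port 0x3f8
 *	val = inb(0x3f9);		... read one byte back
 *
 * Both compile down to plain loads/stores at mips_io_port_base + port.
 */
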
#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)

__BUILDIO(q, u64)

#define readb_relaxed			__relaxed_readb
#define readw_relaxed			__relaxed_readw
#define readl_relaxed			__relaxed_readl
#ifdef CONFIG_64BIT
#define readq_relaxed			__relaxed_readq
#endif

#define writeb_relaxed			__relaxed_writeb
#define writew_relaxed			__relaxed_writew
#define writel_relaxed			__relaxed_writel
#ifdef CONFIG_64BIT
#define writeq_relaxed			__relaxed_writeq
#endif

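/*
 * The _relaxed accessors skip only the trailing rmb() in the read path
 * (the "relax" parameter above), so they can be cheaper in hot loops.
 * A sketch of the intended pattern (STATUS, DONE and DATA are invented
 * names, purely for illustration):
 *
 *	while (!(readl_relaxed(regs + STATUS) & DONE))
 *		cpu_relax();
 *	val = readl(regs + DATA);	... ordered read once DONE is seen
 */
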
#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))

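/*
 * The _be helpers access device registers that are defined to be
 * big-endian regardless of CPU endianness.  Illustrative example: if a
 * register holds the big-endian byte sequence 0x12 0x34 0x56 0x78, then
 * readl_be() returns 0x12345678 on both big- and little-endian kernels,
 * because be32_to_cpu() is a no-op on BE and a byteswap on LE.
 */
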
/*
 * Some code tests for these symbols
 */
#ifdef CONFIG_64BIT
#define readq				readq
#define writeq				writeq
#endif

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
__BUILD_MEMORY_STRING(bwlq, type)					\
__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif

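/*
 * The string variants move whole buffers to/from a single register or
 * port, which suits hardware FIFOs.  Sketch (hypothetical IDE-style
 * transfer; the port number and length are invented):
 *
 *	u16 sector[256];
 *
 *	insw(0x1f0, sector, 256);	... pull 512 bytes from a data port
 *	outsw(0x1f0, sector, 256);	... or push them back
 *
 * Note the port/register address does not advance between items.
 */
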
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}

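/*
 * Usage sketch for the bulk helpers above (hypothetical device SRAM
 * window; sram, fw_data, fw_size and STAT_OFF are invented names): copy
 * a firmware image into device memory and read back a status block:
 *
 *	memcpy_toio(sram, fw_data, fw_size);
 *	memcpy_fromio(&stat, sram + STAT_OFF, sizeof(stat));
 *
 * These rely on the device tolerating arbitrary access widths, since
 * memcpy()/memset() may use any mix of byte/halfword/word accesses.
 */
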
/*
 * The caches on some architectures aren't dma-coherent, so coherence
 * has to be handled in software.  There are three types of operations
 * that can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary,
 *    before DMA transfers from memory to a device.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *
 * This API used to be exported; it is now for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */

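/*
 * A sketch of the intended pairing (arch-internal code only; buf and len
 * are placeholders): before a device reads a buffer the CPU just filled,
 * write dirty lines back; before the CPU reads data a device just wrote,
 * invalidate stale lines:
 *
 *	dma_cache_wback((unsigned long)buf, len);	... CPU -> device
 *	... start DMA, wait for completion ...
 *	dma_cache_inv((unsigned long)buf, len);		... device -> CPU
 */
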
/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))

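/*
 * Worked example of the adjustment above: within a 64-bit register the
 * 32 significant bits live in the low word, which sits at byte offset 4
 * on a big-endian bus and offset 0 on a little-endian one.  So for a
 * register at (hypothetical) address 0xb8000010, csr_in32(0xb8000010)
 * actually loads from 0xb8000014 on a big-endian kernel.
 */
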
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */