dma-mapping: treat dev->bus_dma_mask as a DMA limit
[sfrench/cifs-2.6.git] / arch / mips / loongson64 / common / mem.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  */
4 #include <linux/fs.h>
5 #include <linux/fcntl.h>
6 #include <linux/memblock.h>
7 #include <linux/mm.h>
8
9 #include <asm/bootinfo.h>
10
11 #include <loongson.h>
12 #include <boot_param.h>
13 #include <mem.h>
14 #include <pci.h>
15
16 #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
17
/*
 * Low/high memory sizes in MiB (converted to bytes below with << 20).
 * NOTE(review): presumably filled in from firmware environment strings
 * before prom_init_memory() runs — confirm against the env parsing code.
 */
u32 memsize, highmemsize;
19
/*
 * Register the machine's RAM with the kernel and reserve the hole up to
 * the PCI memory window (legacy, non-LEFI firmware path).
 */
void __init prom_init_memory(void)
{
	/* low memory: [0, memsize MiB) is usable RAM */
	add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM);

	/* reserve the gap between the end of low RAM and PCI memory space */
	add_memory_region(memsize << 20, LOONGSON_PCI_MEM_START - (memsize <<
				20), BOOT_MEM_RESERVED);

#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
	{
		int bit;

		/*
		 * Window size must be a power of two covering the total RAM
		 * (memsize + highmemsize, in MiB):
		 *  - exact power of two (fls == ffs): size is 2^(bit-1) MiB,
		 *    i.e. 2^(bit + 20 - 1) bytes;
		 *  - otherwise round up to the next power of two,
		 *    2^bit MiB = 2^(bit + 20) bytes.
		 */
		bit = fls(memsize + highmemsize);
		if (bit != ffs(memsize + highmemsize))
			bit += 20;
		else
			bit = bit + 20 - 1;

		/* set cpu window3 to map CPU to DDR: 2G -> 2G */
		LOONGSON_ADDRWIN_CPUTODDR(ADDRWIN_WIN3, 0x80000000ul,
					  0x80000000ul, (1 << bit));
		mmiowb();
	}
#endif /* CONFIG_CPU_SUPPORTS_ADDRWINCFG */

#ifdef CONFIG_64BIT
	/* high memory above the PCI window, if present */
	if (highmemsize > 0)
		add_memory_region(LOONGSON_HIGHMEM_START,
				  highmemsize << 20, BOOT_MEM_RAM);

	/* reserve the range between PCI memory space and high memory */
	add_memory_region(LOONGSON_PCI_MEM_END + 1, LOONGSON_HIGHMEM_START -
			  LOONGSON_PCI_MEM_END - 1, BOOT_MEM_RESERVED);

#endif /* CONFIG_64BIT */
}
54
55 #else /* CONFIG_LEFI_FIRMWARE_INTERFACE */
56
57 void __init prom_init_memory(void)
58 {
59         int i;
60         u32 node_id;
61         u32 mem_type;
62
63         /* parse memory information */
64         for (i = 0; i < loongson_memmap->nr_map; i++) {
65                 node_id = loongson_memmap->map[i].node_id;
66                 mem_type = loongson_memmap->map[i].mem_type;
67
68                 if (node_id != 0)
69                         continue;
70
71                 switch (mem_type) {
72                 case SYSTEM_RAM_LOW:
73                         memblock_add(loongson_memmap->map[i].mem_start,
74                                 (u64)loongson_memmap->map[i].mem_size << 20);
75                         break;
76                 case SYSTEM_RAM_HIGH:
77                         memblock_add(loongson_memmap->map[i].mem_start,
78                                 (u64)loongson_memmap->map[i].mem_size << 20);
79                         break;
80                 case SYSTEM_RAM_RESERVED:
81                         memblock_reserve(loongson_memmap->map[i].mem_start,
82                                 (u64)loongson_memmap->map[i].mem_size << 20);
83                         break;
84                 }
85         }
86 }
87
88 #endif /* CONFIG_LEFI_FIRMWARE_INTERFACE */
89
90 /* override of arch/mips/mm/cache.c: __uncached_access */
91 int __uncached_access(struct file *file, unsigned long addr)
92 {
93         if (file->f_flags & O_DSYNC)
94                 return 1;
95
96         return addr >= __pa(high_memory) ||
97                 ((addr >= LOONGSON_MMIO_MEM_START) &&
98                  (addr < LOONGSON_MMIO_MEM_END));
99 }
100
101 #ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
102
103 #include <linux/pci.h>
104 #include <linux/sched.h>
105 #include <asm/current.h>
106
107 static unsigned long uca_start, uca_end;
108
109 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
110                               unsigned long size, pgprot_t vma_prot)
111 {
112         unsigned long offset = pfn << PAGE_SHIFT;
113         unsigned long end = offset + size;
114
115         if (__uncached_access(file, offset)) {
116                 if (uca_start && (offset >= uca_start) &&
117                     (end <= uca_end))
118                         return __pgprot((pgprot_val(vma_prot) &
119                                          ~_CACHE_MASK) |
120                                         _CACHE_UNCACHED_ACCELERATED);
121                 else
122                         return pgprot_noncached(vma_prot);
123         }
124         return vma_prot;
125 }
126
127 static int __init find_vga_mem_init(void)
128 {
129         struct pci_dev *dev = 0;
130         struct resource *r;
131         int idx;
132
133         if (uca_start)
134                 return 0;
135
136         for_each_pci_dev(dev) {
137                 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
138                         for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
139                                 r = &dev->resource[idx];
140                                 if (!r->start && r->end)
141                                         continue;
142                                 if (r->flags & IORESOURCE_IO)
143                                         continue;
144                                 if (r->flags & IORESOURCE_MEM) {
145                                         uca_start = r->start;
146                                         uca_end = r->end;
147                                         return 0;
148                                 }
149                         }
150                 }
151         }
152
153         return 0;
154 }
155
/* late_initcall: run after PCI enumeration so display BARs are populated
 * — NOTE(review): ordering assumption, confirm against PCI init level. */
late_initcall(find_vga_mem_init);
157 #endif /* !CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */