sparc64: add the segment boundary checking to IOMMUs while merging SG entries
arch/sparc64/kernel/pci_sun4v.c
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS    (PAGE_SIZE / sizeof(u64))

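/* Mappings are batched per-CPU so that a run of contiguous IOTSB
 * entries can be handed to the hypervisor in a single
 * pci_sun4v_iommu_map() call instead of one hypercall per page.
 */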
struct iommu_batch {
        struct device   *dev;           /* Device mapping is for.       */
        unsigned long   prot;           /* IOMMU page protections       */
        unsigned long   entry;          /* Index into IOTSB.            */
        u64             *pglist;        /* List of physical pages       */
        unsigned long   npages;         /* Number of pages in list.     */
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        p->dev          = dev;
        p->prot         = prot;
        p->entry        = entry;
        p->npages       = 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
        struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
        unsigned long devhandle = pbm->devhandle;
        unsigned long prot = p->prot;
        unsigned long entry = p->entry;
        u64 *pglist = p->pglist;
        unsigned long npages = p->npages;

        while (npages != 0) {
                long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot, __pa(pglist));
                if (unlikely(num < 0)) {
                        if (printk_ratelimit())
                                printk("iommu_batch_flush: IOMMU map of "
                                       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
                                       "status %ld\n",
                                       devhandle, HV_PCI_TSBID(0, entry),
                                       npages, prot, __pa(pglist), num);
                        return -1;
                }

                entry += num;
                npages -= num;
                pglist += num;
        }

        p->entry = entry;
        p->npages = 0;

        return 0;
}

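/* Flush the pending batch if the next mapping would not extend it
 * contiguously.  dma_4v_map_sg() uses this when a scatterlist segment
 * has to start at a fresh, non-adjacent IOTSB entry; ~0UL marks
 * "no batch started yet".
 */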
static inline void iommu_batch_new_entry(unsigned long entry)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        if (p->entry + p->npages == entry)
                return;
        if (p->entry != ~0UL)
                iommu_batch_flush(p);
        p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        p->pglist[p->npages++] = phys_page;
        if (p->npages == PGLIST_NENTS)
                return iommu_batch_flush(p);

        return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
        struct iommu_batch *p = &__get_cpu_var(iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        return iommu_batch_flush(p);
}

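/* Coherent allocations grab real pages first, then reserve a run of
 * IOTSB entries under iommu->lock, and finally program the mappings
 * in a batch with only local interrupts disabled.
 */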
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp)
{
        struct iommu *iommu;
        unsigned long flags, order, first_page, npages, n;
        void *ret;
        long entry;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (unlikely(order >= MAX_ORDER))
                return NULL;

        npages = size >> IO_PAGE_SHIFT;

        first_page = __get_free_pages(gfp, order);
        if (unlikely(first_page == 0UL))
                return NULL;

        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto range_alloc_fail;

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);

        local_irq_save(flags);

        iommu_batch_start(dev,
                          (HV_PCI_MAP_ATTR_READ |
                           HV_PCI_MAP_ATTR_WRITE),
                          entry);

        for (n = 0; n < npages; n++) {
                long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }

        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, *dma_addrp, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

range_alloc_fail:
        free_pages(first_page, order);
        return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
                                 dma_addr_t dvma)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, dvma, npages);

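        /* The hypervisor may demap fewer pages than requested per
         * call, so loop until the whole range has been torn down.
         */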
        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
                                    enum dma_data_direction direction)
{
        struct iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long prot;
        long entry;

        iommu = dev->archdata.iommu;

        if (unlikely(direction == DMA_NONE))
                goto bad;

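        /* Round the span [ptr, ptr + sz) out to whole IO pages; a
         * buffer that straddles an IO page boundary needs an extra
         * entry (e.g. an 8-byte buffer starting 4 bytes before a
         * boundary maps 2 pages).
         */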
        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        local_irq_save(flags);

        iommu_batch_start(dev, prot, entry);

        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
                long err = iommu_batch_add(base_paddr);
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }
        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_ERROR_CODE;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, bus_addr, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return DMA_ERROR_CODE;
}

static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
                                size_t sz, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, bus_addr, npages);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

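/* Map a scatterlist, merging physically adjacent DMA mappings into
 * larger segments where the device's limits allow.  Two mapped
 * entries may be fused only when the new dma_addr continues exactly
 * where the previous one ended, the combined length stays within
 * dma_get_max_seg_size(), and the result does not cross the device's
 * dma_get_seg_boundary() mask.
 */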
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction)
{
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned int max_seg_size;
        unsigned long seg_boundary_size;
        int outcount, incount, i;
        struct iommu *iommu;
        unsigned long base_shift;
        long err;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        if (nelems == 0 || !iommu)
                return 0;

        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_batch_start(dev, prot, ~0UL);

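        /* Express the boundary in IO pages so it can be checked
         * against IOTSB entry indices.  For example, with a 4GB
         * boundary mask (dma_get_seg_boundary() == 0xffffffff) and
         * 8KB IO pages, seg_boundary_size is 0x80000 pages.
         */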
        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
        base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen);
                entry = iommu_range_alloc(dev, iommu, npages, &handle);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
                                       " npages %lx\n", iommu, paddr, npages);
                        goto iommu_map_failed;
                }

                iommu_batch_new_entry(entry);

                /* Convert entry to a dma_addr_t */
                dma_addr = iommu->page_table_map_base +
                        (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);

                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
                        err = iommu_batch_add(paddr);
                        if (unlikely(err < 0L))
                                goto iommu_map_failed;
                        paddr += IO_PAGE_SIZE;
                }

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        /* We cannot merge if:
                         * - the allocated dma_addr isn't contiguous with
                         *   the previous allocation, or
                         * - merging would exceed the device's maximum
                         *   segment size, or
                         * - the merged segment would span the device's
                         *   DMA segment boundary.
                         */
                        if ((dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size) ||
                            (is_span_boundary(out_entry, base_shift,
                                              seg_boundary_size, outs, s))) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                        } else {
                                outs->dma_length += s->length;
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                        out_entry = entry;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;
        }

        err = iommu_batch_end();

        if (unlikely(err < 0L))
                goto iommu_map_failed;

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        return outcount;

iommu_map_failed:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length);
                        iommu_range_free(iommu, vaddr, npages);
                        /* XXX demap? XXX */
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

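/* Undo dma_4v_map_sg(): each merged segment may cover several IO
 * pages, so the entry range behind each dma_address/dma_length pair
 * is freed and demapped in turn; a zero dma_length marks the end of
 * the mapped segments.
 */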
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction)
{
        struct pci_pbm_info *pbm;
        struct scatterlist *sg;
        struct iommu *iommu;
        unsigned long flags;
        u32 devhandle;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        spin_lock_irqsave(&iommu->lock, flags);

        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages, entry;

                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len);
                iommu_range_free(iommu, dma_handle, npages);

                entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                while (npages) {
                        unsigned long num;

                        num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                                    npages);
                        entry += num;
                        npages -= num;
                }

                sg = sg_next(sg);
        }

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t bus_addr, size_t sz,
                                       enum dma_data_direction direction)
{
        /* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction)
{
        /* Nothing to do... */
}

const struct dma_ops sun4v_dma_ops = {
        .alloc_coherent                 = dma_4v_alloc_coherent,
        .free_coherent                  = dma_4v_free_coherent,
        .map_single                     = dma_4v_map_single,
        .unmap_single                   = dma_4v_unmap_single,
        .map_sg                         = dma_4v_map_sg,
        .unmap_sg                       = dma_4v_unmap_sg,
        .sync_single_for_cpu            = dma_4v_sync_single_for_cpu,
        .sync_sg_for_cpu                = dma_4v_sync_sg_for_cpu,
};

static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
        struct property *prop;
        struct device_node *dp;

        dp = pbm->prom_node;
        prop = of_find_property(dp, "66mhz-capable", NULL);
        pbm->is_66mhz_capable = (prop != NULL);
        pbm->pci_bus = pci_scan_one_pbm(pbm);

        /* XXX register error interrupt handlers XXX */
}

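/* The hypervisor may already hold live IOTSB entries set up by OBP
 * (e.g. for the console or boot device).  Walk the table: entries
 * backed by pages the kernel owns are demapped, the rest are marked
 * busy in the arena so they are never handed out.
 */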
static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
                                                   struct iommu *iommu)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
        u32 devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK) {
                        if (page_in_phys_avail(ra)) {
                                pci_sun4v_iommu_demap(devhandle,
                                                      HV_PCI_TSBID(0, i), 1);
                        } else {
                                cnt++;
                                __set_bit(i, arena->map);
                        }
                }
        }

        return cnt;
}

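/* The "virtual-dma" OBP property gives the DVMA base address and size
 * as two 32-bit cells; both must be IO-page aligned.  The size also
 * determines the DMA address mask advertised to drivers.
 */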
static void __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        struct iommu *iommu = pbm->iommu;
        struct property *prop;
        unsigned long num_tsb_entries, sz, tsbsize;
        u32 vdma[2], dma_mask, dma_offset;

        prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
        if (prop) {
                u32 *val = prop->value;

                vdma[0] = val[0];
                vdma[1] = val[1];
        } else {
                /* No property, use default values. */
                vdma[0] = 0x80000000;
                vdma[1] = 0x80000000;
        }

        if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
                prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
                            vdma[0], vdma[1]);
                prom_halt();
        }

        dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
        num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
        tsbsize = num_tsb_entries * sizeof(iopte_t);

        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map.  */
        sz = (num_tsb_entries + 7) / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
                prom_halt();
        }
        iommu->arena.limit = num_tsb_entries;

        sz = probe_existing_entries(pbm, iommu);
        if (sz)
                printk("%s: Imported %lu TSB entries from OBP\n",
                       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
        u64             version_type;
#define MSIQ_VERSION_MASK               0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT              32
#define MSIQ_TYPE_MASK                  0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT                 0
#define MSIQ_TYPE_NONE                  0x00
#define MSIQ_TYPE_MSG                   0x01
#define MSIQ_TYPE_MSI32                 0x02
#define MSIQ_TYPE_MSI64                 0x03
#define MSIQ_TYPE_INTX                  0x08
#define MSIQ_TYPE_NONE2                 0xff

        u64             intx_sysino;
        u64             reserved1;
        u64             stick;
        u64             req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK             0xff00UL
#define MSIQ_REQID_BUS_SHIFT            8
#define MSIQ_REQID_DEVICE_MASK          0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT         3
#define MSIQ_REQID_FUNC_MASK            0x0007UL
#define MSIQ_REQID_FUNC_SHIFT           0

        u64             msi_address;

        /* The format of this value is message type dependent.
         * For MSI bits 15:0 are the data from the MSI packet.
         * For MSI-X bits 31:0 are the data from the MSI packet.
         * For MSG, the message code and message routing code where:
         *      bits 39:32 is the bus/device/fn of the msg target-id
         *      bits 18:16 is the message routing code
         *      bits 7:0 is the message code
         * For INTx the low order 2-bits are:
         *      00 - INTA
         *      01 - INTB
         *      10 - INTC
         *      11 - INTD
         */
        u64             msi_data;

        u64             reserved2;
};

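/* Event queue head and tail pointers are byte offsets into the queue,
 * not entry indices, so they are validated against
 * msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry).
 */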
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long *head)
{
        unsigned long err, limit;

        err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -ENXIO;

        limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        if (unlikely(*head >= limit))
                return -EFBIG;

        return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
                                 unsigned long msiqid, unsigned long *head,
                                 unsigned long *msi)
{
        struct pci_sun4v_msiq_entry *ep;
        unsigned long err, type;

        /* Note: void pointer arithmetic, 'head' is a byte offset  */
        ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
                                 (pbm->msiq_ent_count *
                                  sizeof(struct pci_sun4v_msiq_entry))) +
              *head);

        if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
                return 0;

        type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
        if (unlikely(type != MSIQ_TYPE_MSI32 &&
                     type != MSIQ_TYPE_MSI64))
                return -EINVAL;

        *msi = ep->msi_data;

        err = pci_sun4v_msi_setstate(pbm->devhandle,
                                     ep->msi_data /* msi_num */,
                                     HV_MSISTATE_IDLE);
        if (unlikely(err))
                return -ENXIO;

        /* Clear the entry.  */
        ep->version_type &= ~MSIQ_TYPE_MASK;

        (*head) += sizeof(struct pci_sun4v_msiq_entry);
        if (*head >=
            (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
                *head = 0;

        return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long head)
{
        unsigned long err;

        err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -EINVAL;

        return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
                               unsigned long msi, int is_msi64)
{
        if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
                                  (is_msi64 ?
                                   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
                return -ENXIO;
        if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
                return -ENXIO;
        if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
                return -ENXIO;
        return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
        unsigned long err, msiqid;

        err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
        if (err)
                return -ENXIO;

        pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

        return 0;
}

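/* All event queues are carved out of one physically contiguous
 * allocation; queue i lives at offset i * q_size.  Each queue is
 * registered with the hypervisor and the configuration read back to
 * verify that the base address and entry count actually took.
 */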
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);
        pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
        if (pages == 0UL) {
                printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
                       order);
                return -ENOMEM;
        }
        memset((char *)pages, 0, PAGE_SIZE << order);
        pbm->msi_queues = (void *) pages;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long err, base = __pa(pages + (i * q_size));
                unsigned long ret1, ret2;

                err = pci_sun4v_msiq_conf(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          base, pbm->msiq_ent_count);
                if (err) {
                        printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
                               err);
                        goto h_error;
                }

                err = pci_sun4v_msiq_info(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          &ret1, &ret2);
                if (err) {
                        printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
                               err);
                        goto h_error;
                }
                if (ret1 != base || ret2 != pbm->msiq_ent_count) {
                        printk(KERN_ERR "MSI: Bogus qconf "
                               "expected[%lx:%x] got[%lx:%lx]\n",
                               base, pbm->msiq_ent_count,
                               ret1, ret2);
                        goto h_error;
                }
        }

        return 0;

h_error:
        free_pages(pages, order);
        return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long msiqid = pbm->msiq_first + i;

                (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
        }

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);

        pages = (unsigned long) pbm->msi_queues;

        free_pages(pages, order);

        pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
                                    unsigned long msiqid,
                                    unsigned long devino)
{
        unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

        if (!virt_irq)
                return -ENOMEM;

        if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
                return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;

        return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
        .get_head       =       pci_sun4v_get_head,
        .dequeue_msi    =       pci_sun4v_dequeue_msi,
        .set_head       =       pci_sun4v_set_head,
        .msi_setup      =       pci_sun4v_msi_setup,
        .msi_teardown   =       pci_sun4v_msi_teardown,
        .msiq_alloc     =       pci_sun4v_msiq_alloc,
        .msiq_free      =       pci_sun4v_msiq_free,
        .msiq_build_irq =       pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
        sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static void __init pci_sun4v_pbm_init(struct pci_controller_info *p,
                                      struct device_node *dp, u32 devhandle)
{
        struct pci_pbm_info *pbm;

        if (devhandle & 0x40)
                pbm = &p->pbm_B;
        else
                pbm = &p->pbm_A;

        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;

        pbm->scan_bus = pci_sun4v_scan_bus;
        pbm->pci_ops = &sun4v_pci_ops;
        pbm->config_space_reg_bits = 12;

        pbm->index = pci_num_pbms++;

        pbm->parent = p;
        pbm->prom_node = dp;

        pbm->devhandle = devhandle;

        pbm->name = dp->full_name;

        printk("%s: SUN4V PCI Bus Module\n", pbm->name);

        pci_determine_mem_io_space(pbm);

        pci_get_pbm_props(pbm);
        pci_sun4v_iommu_init(pbm);
        pci_sun4v_msi_init(pbm);
}

void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
        static int hvapi_negotiated = 0;
        struct pci_controller_info *p;
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        struct property *prop;
        struct linux_prom64_registers *regs;
        u32 devhandle;
        int i;

        if (!hvapi_negotiated++) {
                int err = sun4v_hvapi_register(HV_GRP_PCI,
                                               vpci_major,
                                               &vpci_minor);

                if (err) {
                        prom_printf("SUN4V_PCI: Could not register hvapi, "
                                    "err=%d\n", err);
                        prom_halt();
                }
                printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
                       vpci_major, vpci_minor);

                dma_ops = &sun4v_dma_ops;
        }

        prop = of_find_property(dp, "reg", NULL);
        if (!prop) {
                prom_printf("SUN4V_PCI: Could not find config registers\n");
                prom_halt();
        }
        regs = prop->value;

        devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

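        /* Sibling PBMs of one controller differ only in bit 0x40 of
         * the devhandle, so if the partner has already been probed we
         * reuse its pci_controller_info instead of allocating a new one.
         */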
        for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
                if (pbm->devhandle == (devhandle ^ 0x40)) {
                        pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
                        return;
                }
        }

        for_each_possible_cpu(i) {
                unsigned long page = get_zeroed_page(GFP_ATOMIC);

                if (!page)
                        goto fatal_memory_error;

                per_cpu(iommu_batch, i).pglist = (u64 *) page;
        }

        p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
        if (!p)
                goto fatal_memory_error;

        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        p->pbm_A.iommu = iommu;

        iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        p->pbm_B.iommu = iommu;

        pci_sun4v_pbm_init(p, dp, devhandle);
        return;

fatal_memory_error:
        prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
        prom_halt();
}