/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME     "pci_sun4v"
#define PFX             DRIVER_NAME ": "

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS    (PAGE_SIZE / sizeof(u64))

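/* Per-cpu batch of physical page addresses, handed to the hypervisor
 * in a single pci_sun4v_iommu_map() call whenever the list fills up
 * or the mapping operation completes.
 */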
struct iommu_batch {
        struct device   *dev;           /* Device mapping is for.       */
        unsigned long   prot;           /* IOMMU page protections       */
        unsigned long   entry;          /* Index into IOTSB.            */
        u64             *pglist;        /* List of physical pages       */
        unsigned long   npages;         /* Number of pages in list.     */
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
        struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

        p->dev          = dev;
        p->prot         = prot;
        p->entry        = entry;
        p->npages       = 0;
}

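/* Map the accumulated page list starting at the batch's IOTSB entry.
 * The hypervisor may map fewer pages than requested in one call, so
 * loop until the whole list has been consumed.
 */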
/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
        struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
        unsigned long devhandle = pbm->devhandle;
        unsigned long prot = p->prot;
        unsigned long entry = p->entry;
        u64 *pglist = p->pglist;
        unsigned long npages = p->npages;

        while (npages != 0) {
                long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot, __pa(pglist));
                if (unlikely(num < 0)) {
                        if (printk_ratelimit())
                                printk("iommu_batch_flush: IOMMU map of "
                                       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
                                       "status %ld\n",
                                       devhandle, HV_PCI_TSBID(0, entry),
                                       npages, prot, __pa(pglist), num);
                        return -1;
                }

                entry += num;
                npages -= num;
                pglist += num;
        }

        p->entry = entry;
        p->npages = 0;

        return 0;
}

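/* If the caller jumps to an IOTSB entry that is not contiguous with
 * the pages already batched, flush what is pending first so the batch
 * always describes a single run of entries.
 */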
static inline void iommu_batch_new_entry(unsigned long entry)
{
        struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

        if (p->entry + p->npages == entry)
                return;
        if (p->entry != ~0UL)
                iommu_batch_flush(p);
        p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
        struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        p->pglist[p->npages++] = phys_page;
        if (p->npages == PGLIST_NENTS)
                return iommu_batch_flush(p);

        return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
        struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        return iommu_batch_flush(p);
}

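/* Allocate pages on the device's NUMA node, carve a range out of the
 * software IOMMU arena, then batch-map every page through the
 * hypervisor before returning the DMA address.
 */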
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp,
                                   struct dma_attrs *attrs)
{
        unsigned long flags, order, first_page, npages, n;
        struct iommu *iommu;
        struct page *page;
        void *ret;
        long entry;
        int nid;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (unlikely(order >= MAX_ORDER))
                return NULL;

        npages = size >> IO_PAGE_SHIFT;

        nid = dev->archdata.numa_node;
        page = alloc_pages_node(nid, gfp, order);
        if (unlikely(!page))
                return NULL;

        first_page = (unsigned long) page_address(page);
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto range_alloc_fail;

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);

        local_irq_save(flags);

        iommu_batch_start(dev,
                          (HV_PCI_MAP_ATTR_READ |
                           HV_PCI_MAP_ATTR_WRITE),
                          entry);

        for (n = 0; n < npages; n++) {
                long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }

        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, *dma_addrp, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        /* first_page was converted to a physical address for the
         * mapping loop above; recover the kernel virtual address
         * before falling through to free_pages().
         */
        first_page = (unsigned long) ret;

range_alloc_fail:
        free_pages(first_page, order);
        return NULL;
}

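/* Undo dma_4v_alloc_coherent(): release the arena range, demap the
 * IOTSB entries via the hypervisor, and free the underlying pages.
 */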
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
                                 dma_addr_t dvma, struct dma_attrs *attrs)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, dvma, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

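/* Map a single (possibly multi-page) buffer for streaming DMA.
 * Write permission is granted unless the transfer is DMA_TO_DEVICE.
 */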
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t sz,
                                  enum dma_data_direction direction,
                                  struct dma_attrs *attrs)
{
        struct iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long prot;
        long entry;

        iommu = dev->archdata.iommu;

        if (unlikely(direction == DMA_NONE))
                goto bad;

        oaddr = (unsigned long)(page_address(page) + offset);
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = iommu_range_alloc(dev, iommu, npages, NULL);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry == DMA_ERROR_CODE))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        local_irq_save(flags);

        iommu_batch_start(dev, prot, entry);

        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
                long err = iommu_batch_add(base_paddr);
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }
        if (unlikely(iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_ERROR_CODE;

iommu_map_fail:
        /* Interrupts are disabled.  */
        spin_lock(&iommu->lock);
        iommu_range_free(iommu, bus_addr, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return DMA_ERROR_CODE;
}

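/* Tear down a streaming mapping: free the arena range and demap the
 * IOTSB entries, looping because the hypervisor may demap only part
 * of the range per call.
 */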
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
                              size_t sz, enum dma_data_direction direction,
                              struct dma_attrs *attrs)
{
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_range_free(iommu, bus_addr, npages);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

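/* Map a scatterlist, merging adjacent entries into as few DMA
 * segments as the device's max segment size and segment boundary
 * constraints allow.
 */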
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction,
                         struct dma_attrs *attrs)
{
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned int max_seg_size;
        unsigned long seg_boundary_size;
        int outcount, incount, i;
        struct iommu *iommu;
        unsigned long base_shift;
        long err;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        if (nelems == 0 || !iommu)
                return 0;

        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != DMA_TO_DEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        spin_lock_irqsave(&iommu->lock, flags);

        iommu_batch_start(dev, prot, ~0UL);

        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
        base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
                entry = iommu_range_alloc(dev, iommu, npages, &handle);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
                                       " npages %lx\n", iommu, paddr, npages);
                        goto iommu_map_failed;
                }

                iommu_batch_new_entry(entry);

                /* Convert entry to a dma_addr_t */
                dma_addr = iommu->page_table_map_base +
                        (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);

                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
                        err = iommu_batch_add(paddr);
                        if (unlikely(err < 0L))
                                goto iommu_map_failed;
                        paddr += IO_PAGE_SIZE;
                }

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if ((dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size) ||
                            (is_span_boundary(out_entry, base_shift,
                                              seg_boundary_size, outs, s))) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                        } else {
                                outs->dma_length += s->length;
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                        out_entry = entry;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;
        }

        err = iommu_batch_end();

        if (unlikely(err < 0L))
                goto iommu_map_failed;

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        return outcount;

iommu_map_failed:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IO_PAGE_SIZE);
                        iommu_range_free(iommu, vaddr, npages);
                        /* XXX demap? XXX */
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

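/* Unmap a scatterlist produced by dma_4v_map_sg(); a zero dma_length
 * marks the end of the valid segments.
 */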
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            struct dma_attrs *attrs)
{
        struct pci_pbm_info *pbm;
        struct scatterlist *sg;
        struct iommu *iommu;
        unsigned long flags;
        u32 devhandle;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;

        spin_lock_irqsave(&iommu->lock, flags);

        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages, entry;

                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
                iommu_range_free(iommu, dma_handle, npages);

                entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                while (npages) {
                        unsigned long num;

                        num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                                    npages);
                        entry += num;
                        npages -= num;
                }

                sg = sg_next(sg);
        }

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4v_dma_ops = {
        .alloc                          = dma_4v_alloc_coherent,
        .free                           = dma_4v_free_coherent,
        .map_page                       = dma_4v_map_page,
        .unmap_page                     = dma_4v_unmap_page,
        .map_sg                         = dma_4v_map_sg,
        .unmap_sg                       = dma_4v_unmap_sg,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
        struct property *prop;
        struct device_node *dp;

        dp = pbm->op->dev.of_node;
        prop = of_find_property(dp, "66mhz-capable", NULL);
        pbm->is_66mhz_capable = (prop != NULL);
        pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

        /* XXX register error interrupt handlers XXX */
}

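/* Walk the IOTSB and handle mappings left behind by OBP: demap those
 * whose target RA lies in physically available memory, and reserve
 * the rest in the arena bitmap so they are never handed out.
 */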
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
                                            struct iommu *iommu)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
        u32 devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK) {
                        if (page_in_phys_avail(ra)) {
                                pci_sun4v_iommu_demap(devhandle,
                                                      HV_PCI_TSBID(0, i), 1);
                        } else {
                                cnt++;
                                __set_bit(i, arena->map);
                        }
                }
        }

        return cnt;
}

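/* Size the software IOMMU state from the "virtual-dma" property
 * (base and span), defaulting to a 2GB range based at 0x80000000.
 */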
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
        struct iommu *iommu = pbm->iommu;
        unsigned long num_tsb_entries, sz;
        u32 dma_mask, dma_offset;
        const u32 *vdma;

        vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
        if (!vdma)
                vdma = vdma_default;

        if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
                printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
                       vdma[0], vdma[1]);
                return -EINVAL;
        }

        dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
        num_tsb_entries = vdma[1] / IO_PAGE_SIZE;

        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map.  */
        sz = (num_tsb_entries + 7) / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
                return -ENOMEM;
        }
        iommu->arena.limit = num_tsb_entries;

        sz = probe_existing_entries(pbm, iommu);
        if (sz)
                printk("%s: Imported %lu TSB entries from OBP\n",
                       pbm->name, sz);

        return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
        u64             version_type;
#define MSIQ_VERSION_MASK               0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT              32
#define MSIQ_TYPE_MASK                  0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT                 0
#define MSIQ_TYPE_NONE                  0x00
#define MSIQ_TYPE_MSG                   0x01
#define MSIQ_TYPE_MSI32                 0x02
#define MSIQ_TYPE_MSI64                 0x03
#define MSIQ_TYPE_INTX                  0x08
#define MSIQ_TYPE_NONE2                 0xff

        u64             intx_sysino;
        u64             reserved1;
        u64             stick;
        u64             req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK             0xff00UL
#define MSIQ_REQID_BUS_SHIFT            8
#define MSIQ_REQID_DEVICE_MASK          0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT         3
#define MSIQ_REQID_FUNC_MASK            0x0007UL
#define MSIQ_REQID_FUNC_SHIFT           0

        u64             msi_address;

        /* The format of this value is message type dependent.
         * For MSI bits 15:0 are the data from the MSI packet.
         * For MSI-X bits 31:0 are the data from the MSI packet.
         * For MSG, the message code and message routing code where:
         *      bits 39:32 is the bus/device/fn of the msg target-id
         *      bits 18:16 is the message routing code
         *      bits 7:0 is the message code
         * For INTx the low order 2-bits are:
         *      00 - INTA
         *      01 - INTB
         *      10 - INTC
         *      11 - INTD
         */
        u64             msi_data;

        u64             reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long *head)
{
        unsigned long err, limit;

        err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -ENXIO;

        limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        if (unlikely(*head >= limit))
                return -EFBIG;

        return 0;
}

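/* Fetch one MSI from the queue at byte offset *head, mark it idle in
 * the hypervisor, clear the entry, and advance *head with wraparound.
 * Returns 1 if an MSI was dequeued, 0 if the entry was empty.
 */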
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
                                 unsigned long msiqid, unsigned long *head,
                                 unsigned long *msi)
{
        struct pci_sun4v_msiq_entry *ep;
        unsigned long err, type;

        /* Note: void pointer arithmetic, 'head' is a byte offset  */
        ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
                                 (pbm->msiq_ent_count *
                                  sizeof(struct pci_sun4v_msiq_entry))) +
              *head);

        if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
                return 0;

        type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
        if (unlikely(type != MSIQ_TYPE_MSI32 &&
                     type != MSIQ_TYPE_MSI64))
                return -EINVAL;

        *msi = ep->msi_data;

        err = pci_sun4v_msi_setstate(pbm->devhandle,
                                     ep->msi_data /* msi_num */,
                                     HV_MSISTATE_IDLE);
        if (unlikely(err))
                return -ENXIO;

        /* Clear the entry.  */
        ep->version_type &= ~MSIQ_TYPE_MASK;

        (*head) += sizeof(struct pci_sun4v_msiq_entry);
        if (*head >=
            (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
                *head = 0;

        return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                              unsigned long head)
{
        unsigned long err;

        err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
        if (unlikely(err))
                return -EINVAL;

        return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
                               unsigned long msi, int is_msi64)
{
        if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
                                  (is_msi64 ?
                                   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
                return -ENXIO;
        if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
                return -ENXIO;
        if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
                return -ENXIO;
        return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
        unsigned long err, msiqid;

        err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
        if (err)
                return -ENXIO;

        pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

        return 0;
}

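/* Allocate one contiguous block holding all MSI event queues,
 * register each queue's base address with the hypervisor, and read
 * the configuration back to verify it took effect.
 */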
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);
        pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
        if (pages == 0UL) {
                printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
                       order);
                return -ENOMEM;
        }
        memset((char *)pages, 0, PAGE_SIZE << order);
        pbm->msi_queues = (void *) pages;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long err, base = __pa(pages + (i * q_size));
                unsigned long ret1, ret2;

                err = pci_sun4v_msiq_conf(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          base, pbm->msiq_ent_count);
                if (err) {
                        printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
                               err);
                        goto h_error;
                }

                err = pci_sun4v_msiq_info(pbm->devhandle,
                                          pbm->msiq_first + i,
                                          &ret1, &ret2);
                if (err) {
                        printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
                               err);
                        goto h_error;
                }
                if (ret1 != base || ret2 != pbm->msiq_ent_count) {
                        printk(KERN_ERR "MSI: Bogus qconf "
                               "expected[%lx:%x] got[%lx:%lx]\n",
                               base, pbm->msiq_ent_count,
                               ret1, ret2);
                        goto h_error;
                }
        }

        return 0;

h_error:
        free_pages(pages, order);
        return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
        unsigned long q_size, alloc_size, pages, order;
        int i;

        for (i = 0; i < pbm->msiq_num; i++) {
                unsigned long msiqid = pbm->msiq_first + i;

                (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
        }

        q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
        alloc_size = (pbm->msiq_num * q_size);
        order = get_order(alloc_size);

        pages = (unsigned long) pbm->msi_queues;

        free_pages(pages, order);

        pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
                                    unsigned long msiqid,
                                    unsigned long devino)
{
        unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

        if (!irq)
                return -ENOMEM;

        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;
        if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
                return -EINVAL;

        return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
        .get_head       =       pci_sun4v_get_head,
        .dequeue_msi    =       pci_sun4v_dequeue_msi,
        .set_head       =       pci_sun4v_set_head,
        .msi_setup      =       pci_sun4v_msi_setup,
        .msi_teardown   =       pci_sun4v_msi_teardown,
        .msiq_alloc     =       pci_sun4v_msiq_alloc,
        .msiq_free      =       pci_sun4v_msiq_free,
        .msiq_build_irq =       pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
        sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
                              struct platform_device *op, u32 devhandle)
{
        struct device_node *dp = op->dev.of_node;
        int err;

        pbm->numa_node = of_node_to_nid(dp);

        pbm->pci_ops = &sun4v_pci_ops;
        pbm->config_space_reg_bits = 12;

        pbm->index = pci_num_pbms++;

        pbm->op = op;

        pbm->devhandle = devhandle;

        pbm->name = dp->full_name;

        printk("%s: SUN4V PCI Bus Module\n", pbm->name);
        printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

        pci_determine_mem_io_space(pbm);

        pci_get_pbm_props(pbm);

        err = pci_sun4v_iommu_init(pbm);
        if (err)
                return err;

        pci_sun4v_msi_init(pbm);

        pci_sun4v_scan_bus(pbm, &op->dev);

        pbm->next = pci_pbm_root;
        pci_pbm_root = pbm;

        return 0;
}

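/* Negotiate the PCI hypervisor API group once, set up the per-cpu
 * IOMMU batch page lists on first probe, then initialize this PBM.
 */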
static int pci_sun4v_probe(struct platform_device *op)
{
        const struct linux_prom64_registers *regs;
        static int hvapi_negotiated = 0;
        struct pci_pbm_info *pbm;
        struct device_node *dp;
        struct iommu *iommu;
        u32 devhandle;
        int i, err;

        dp = op->dev.of_node;

        if (!hvapi_negotiated++) {
                err = sun4v_hvapi_register(HV_GRP_PCI,
                                           vpci_major,
                                           &vpci_minor);

                if (err) {
                        printk(KERN_ERR PFX "Could not register hvapi, "
                               "err=%d\n", err);
                        return err;
                }
                printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
                       vpci_major, vpci_minor);

                dma_ops = &sun4v_dma_ops;
        }

        regs = of_get_property(dp, "reg", NULL);
        err = -ENODEV;
        if (!regs) {
                printk(KERN_ERR PFX "Could not find config registers\n");
                goto out_err;
        }
        devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

        err = -ENOMEM;
        if (!iommu_batch_initialized) {
                for_each_possible_cpu(i) {
                        unsigned long page = get_zeroed_page(GFP_KERNEL);

                        if (!page)
                                goto out_err;

                        per_cpu(iommu_batch, i).pglist = (u64 *) page;
                }
                iommu_batch_initialized = 1;
        }

        pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
        if (!pbm) {
                printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
                goto out_err;
        }

        iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
        if (!iommu) {
                printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
                goto out_free_controller;
        }

        pbm->iommu = iommu;

        err = pci_sun4v_pbm_init(pbm, op, devhandle);
        if (err)
                goto out_free_iommu;

        dev_set_drvdata(&op->dev, pbm);

        return 0;

out_free_iommu:
        kfree(pbm->iommu);

out_free_controller:
        kfree(pbm);

out_err:
        return err;
}

static const struct of_device_id pci_sun4v_match[] = {
        {
                .name = "pci",
                .compatible = "SUNW,sun4v-pci",
        },
        {},
};

static struct platform_driver pci_sun4v_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = pci_sun4v_match,
        },
        .probe          = pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
        return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);