/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

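/*
 * Tables allocated from dma_region_table_cache serve as the region table
 * and as segment tables; page tables come from dma_page_table_cache.
 * All entries start out invalid and protected.
 */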
static unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

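/*
 * Return the segment table that a region table entry points to, allocating
 * and validating a new one if the entry is still invalid. The page table
 * variant below does the same one level further down.
 */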
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry)) {
		sto = get_rt_sto(*entry);
	} else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry)) {
		pto = get_st_pto(*entry);
	} else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

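/*
 * Walk the three-level translation table (region table -> segment table ->
 * page table) for dma_addr and return a pointer to the corresponding page
 * table entry, creating intermediate tables on demand.
 */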
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

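/*
 * Update a single page table entry: either invalidate it, or point it at
 * page_addr, validate it and apply the requested protection.
 */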
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
				 dma_addr_t dma_addr, int flags)
{
	unsigned long *entry;

	entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
		return;
	}

	set_pt_pfaa(entry, page_addr);
	validate_pt_entry(entry);

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

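/*
 * Update the translation entries for a range of pages and, unless the
 * refresh can be skipped, flush the device's I/O translations for that
 * range via zpci_refresh_trans() (RPCIT).
 */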
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table)
		goto no_refresh;

	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * rpcit is not required to establish new translations when previously
	 * invalid translation-table entries are validated, however it is
	 * required when altering previously valid entries.
	 */
	if (!zdev->tlb_refresh &&
	    ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		/*
		 * TODO: also need to check that the old entry is indeed INVALID
		 * and not only for one page but for the whole range...
		 * -> now we WARN_ON in that case but with lazy unmap that
		 * needs to be redone!
		 */
		goto no_refresh;

	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
				nr_pages * PAGE_SIZE);

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(struct zpci_dev *zdev)
{
	unsigned long *table;
	int rtx;

	if (!zdev || !zdev->dma_table)
		return;

	table = zdev->dma_table;
	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
	zdev->dma_table = NULL;
}

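/*
 * The iommu bitmap tracks which pages of the DMA address space are in use.
 * Allocation is next-fit: the search starts at next_bit and falls back to
 * the start of the bitmap if no fit is found there.
 */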
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
				   int size)
{
	unsigned long boundary_size = 0x1000000;

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1)
		offset = __dma_alloc_iommu(zdev, 0, size);

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (zdev->next_bit >= zdev->iommu_pages)
			zdev->next_bit = 0;
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	if (offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

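/*
 * Map a physically contiguous range for DMA: reserve a window of DMA
 * address space in the iommu bitmap, establish the translation entries
 * for it and return the resulting DMA address. Mappings for DMA_NONE
 * and DMA_TO_DEVICE are additionally marked protected.
 */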
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma)
		goto out_free;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, &zdev->fmb->mapped_pages);
		return dma_addr + (offset & ~PAGE_MASK);
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_hex(&pa, sizeof(pa));
	return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
		zpci_err("unmap error:\n");
		zpci_err_hex(&dma_addr, sizeof(dma_addr));
	}

	atomic64_add(npages, &zdev->fmb->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}

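/*
 * Allocate a coherent DMA buffer: allocate and zero the pages, then create
 * a bidirectional mapping for the whole buffer.
 */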
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}

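/*
 * Map a scatterlist element by element; adjacent elements are not merged
 * into larger DMA segments. If any element fails to map, all mappings
 * created so far are torn down and 0 is returned.
 */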
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);
		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (!dma_mapping_error(dev, s->dma_address)) {
			s->dma_length = s->length;
			mapped_elements++;
		} else
			goto unmap;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

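/*
 * Set up DMA translation for a PCI function: allocate the root translation
 * table and an iommu bitmap covering the usable DMA address range, then
 * register the table with the function via zpci_register_ioat().
 */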
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out_clean;
	}

	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto out_reg;
	}

	rc = zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_bitmap;
	return 0;

out_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
out_reg:
	dma_free_cpu_table(zdev->dma_table);
out_clean:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev);
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);