Merge remote-tracking branches 'spi/fix/dt', 'spi/fix/fsl-dspi' and 'spi/fix/fsl...
[cascardo/linux.git] / drivers / vme / vme.c
1 /*
2  * VME Bridge Framework
3  *
4  * Author: Martyn Welch <martyn.welch@ge.com>
5  * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6  *
7  * Based on work by Tom Armistead and Ajit Prem
8  * Copyright 2004 Motorola Inc.
9  *
10  * This program is free software; you can redistribute  it and/or modify it
11  * under  the terms of  the GNU General  Public License as published by the
12  * Free Software Foundation;  either version 2 of the  License, or (at your
13  * option) any later version.
14  */
15
16 #include <linux/init.h>
17 #include <linux/export.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33 #include <linux/vme.h>
34
35 #include "vme_bridge.h"
36
/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;
static LIST_HEAD(vme_bus_list);
static DEFINE_MUTEX(vme_buses_lock);

/* Forward declaration; defined later in this file */
static int __init vme_init(void);
43
/* Convert a generic struct device back to the vme_dev that embeds it. */
static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
        return container_of(dev, struct vme_dev, dev);
}
48
49 /*
50  * Find the bridge that the resource is associated with.
51  */
52 static struct vme_bridge *find_bridge(struct vme_resource *resource)
53 {
54         /* Get list to search */
55         switch (resource->type) {
56         case VME_MASTER:
57                 return list_entry(resource->entry, struct vme_master_resource,
58                         list)->parent;
59                 break;
60         case VME_SLAVE:
61                 return list_entry(resource->entry, struct vme_slave_resource,
62                         list)->parent;
63                 break;
64         case VME_DMA:
65                 return list_entry(resource->entry, struct vme_dma_resource,
66                         list)->parent;
67                 break;
68         case VME_LM:
69                 return list_entry(resource->entry, struct vme_lm_resource,
70                         list)->parent;
71                 break;
72         default:
73                 printk(KERN_ERR "Unknown resource type\n");
74                 return NULL;
75                 break;
76         }
77 }
78
79 /*
80  * Allocate a contiguous block of memory for use by the driver. This is used to
81  * create the buffers for the slave windows.
82  */
83 void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
84         dma_addr_t *dma)
85 {
86         struct vme_bridge *bridge;
87
88         if (resource == NULL) {
89                 printk(KERN_ERR "No resource\n");
90                 return NULL;
91         }
92
93         bridge = find_bridge(resource);
94         if (bridge == NULL) {
95                 printk(KERN_ERR "Can't find bridge\n");
96                 return NULL;
97         }
98
99         if (bridge->parent == NULL) {
100                 printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
101                 return NULL;
102         }
103
104         if (bridge->alloc_consistent == NULL) {
105                 printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
106                        bridge->name);
107                 return NULL;
108         }
109
110         return bridge->alloc_consistent(bridge->parent, size, dma);
111 }
112 EXPORT_SYMBOL(vme_alloc_consistent);
113
114 /*
115  * Free previously allocated contiguous block of memory.
116  */
117 void vme_free_consistent(struct vme_resource *resource, size_t size,
118         void *vaddr, dma_addr_t dma)
119 {
120         struct vme_bridge *bridge;
121
122         if (resource == NULL) {
123                 printk(KERN_ERR "No resource\n");
124                 return;
125         }
126
127         bridge = find_bridge(resource);
128         if (bridge == NULL) {
129                 printk(KERN_ERR "Can't find bridge\n");
130                 return;
131         }
132
133         if (bridge->parent == NULL) {
134                 printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
135                 return;
136         }
137
138         if (bridge->free_consistent == NULL) {
139                 printk(KERN_ERR "free_consistent not supported by bridge %s\n",
140                        bridge->name);
141                 return;
142         }
143
144         bridge->free_consistent(bridge->parent, size, vaddr, dma);
145 }
146 EXPORT_SYMBOL(vme_free_consistent);
147
148 size_t vme_get_size(struct vme_resource *resource)
149 {
150         int enabled, retval;
151         unsigned long long base, size;
152         dma_addr_t buf_base;
153         u32 aspace, cycle, dwidth;
154
155         switch (resource->type) {
156         case VME_MASTER:
157                 retval = vme_master_get(resource, &enabled, &base, &size,
158                         &aspace, &cycle, &dwidth);
159
160                 return size;
161                 break;
162         case VME_SLAVE:
163                 retval = vme_slave_get(resource, &enabled, &base, &size,
164                         &buf_base, &aspace, &cycle);
165
166                 return size;
167                 break;
168         case VME_DMA:
169                 return 0;
170                 break;
171         default:
172                 printk(KERN_ERR "Unknown resource type\n");
173                 return 0;
174                 break;
175         }
176 }
177 EXPORT_SYMBOL(vme_get_size);
178
179 int vme_check_window(u32 aspace, unsigned long long vme_base,
180                      unsigned long long size)
181 {
182         int retval = 0;
183
184         switch (aspace) {
185         case VME_A16:
186                 if (((vme_base + size) > VME_A16_MAX) ||
187                                 (vme_base > VME_A16_MAX))
188                         retval = -EFAULT;
189                 break;
190         case VME_A24:
191                 if (((vme_base + size) > VME_A24_MAX) ||
192                                 (vme_base > VME_A24_MAX))
193                         retval = -EFAULT;
194                 break;
195         case VME_A32:
196                 if (((vme_base + size) > VME_A32_MAX) ||
197                                 (vme_base > VME_A32_MAX))
198                         retval = -EFAULT;
199                 break;
200         case VME_A64:
201                 if ((size != 0) && (vme_base > U64_MAX + 1 - size))
202                         retval = -EFAULT;
203                 break;
204         case VME_CRCSR:
205                 if (((vme_base + size) > VME_CRCSR_MAX) ||
206                                 (vme_base > VME_CRCSR_MAX))
207                         retval = -EFAULT;
208                 break;
209         case VME_USER1:
210         case VME_USER2:
211         case VME_USER3:
212         case VME_USER4:
213                 /* User Defined */
214                 break;
215         default:
216                 printk(KERN_ERR "Invalid address space\n");
217                 retval = -EINVAL;
218                 break;
219         }
220
221         return retval;
222 }
223 EXPORT_SYMBOL(vme_check_window);
224
225 static u32 vme_get_aspace(int am)
226 {
227         switch (am) {
228         case 0x29:
229         case 0x2D:
230                 return VME_A16;
231         case 0x38:
232         case 0x39:
233         case 0x3A:
234         case 0x3B:
235         case 0x3C:
236         case 0x3D:
237         case 0x3E:
238         case 0x3F:
239                 return VME_A24;
240         case 0x8:
241         case 0x9:
242         case 0xA:
243         case 0xB:
244         case 0xC:
245         case 0xD:
246         case 0xE:
247         case 0xF:
248                 return VME_A32;
249         case 0x0:
250         case 0x1:
251         case 0x3:
252                 return VME_A64;
253         }
254
255         return 0;
256 }
257
/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 *
 * @vdev:    device requesting the window
 * @address: bitmask of required address spaces (VME_A16 etc.)
 * @cycle:   bitmask of required cycle types
 *
 * Scans the bridge's slave resources for an unlocked image supporting
 * all requested attributes, marks it locked and wraps it in a
 * vme_resource handle for use with vme_slave_set()/vme_slave_free().
 * Returns NULL on failure.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
        u32 cycle)
{
        struct vme_bridge *bridge;
        struct list_head *slave_pos = NULL;
        struct vme_slave_resource *allocated_image = NULL;
        struct vme_slave_resource *slave_image = NULL;
        struct vme_resource *resource = NULL;

        bridge = vdev->bridge;
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find VME bus\n");
                goto err_bus;
        }

        /* Loop through slave resources */
        list_for_each(slave_pos, &bridge->slave_resources) {
                slave_image = list_entry(slave_pos,
                        struct vme_slave_resource, list);

                if (slave_image == NULL) {
                        printk(KERN_ERR "Registered NULL Slave resource\n");
                        continue;
                }

                /* Find an unlocked and compatible image */
                mutex_lock(&slave_image->mtx);
                if (((slave_image->address_attr & address) == address) &&
                        ((slave_image->cycle_attr & cycle) == cycle) &&
                        (slave_image->locked == 0)) {

                        /* Claim the image before dropping its lock */
                        slave_image->locked = 1;
                        mutex_unlock(&slave_image->mtx);
                        allocated_image = slave_image;
                        break;
                }
                mutex_unlock(&slave_image->mtx);
        }

        /* No free image */
        if (allocated_image == NULL)
                goto err_image;

        resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
        if (resource == NULL) {
                printk(KERN_WARNING "Unable to allocate resource structure\n");
                goto err_alloc;
        }
        resource->type = VME_SLAVE;
        resource->entry = &allocated_image->list;

        return resource;

err_alloc:
        /* Unlock image */
        mutex_lock(&slave_image->mtx);
        slave_image->locked = 0;
        mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
        return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
325
326 int vme_slave_set(struct vme_resource *resource, int enabled,
327         unsigned long long vme_base, unsigned long long size,
328         dma_addr_t buf_base, u32 aspace, u32 cycle)
329 {
330         struct vme_bridge *bridge = find_bridge(resource);
331         struct vme_slave_resource *image;
332         int retval;
333
334         if (resource->type != VME_SLAVE) {
335                 printk(KERN_ERR "Not a slave resource\n");
336                 return -EINVAL;
337         }
338
339         image = list_entry(resource->entry, struct vme_slave_resource, list);
340
341         if (bridge->slave_set == NULL) {
342                 printk(KERN_ERR "Function not supported\n");
343                 return -ENOSYS;
344         }
345
346         if (!(((image->address_attr & aspace) == aspace) &&
347                 ((image->cycle_attr & cycle) == cycle))) {
348                 printk(KERN_ERR "Invalid attributes\n");
349                 return -EINVAL;
350         }
351
352         retval = vme_check_window(aspace, vme_base, size);
353         if (retval)
354                 return retval;
355
356         return bridge->slave_set(image, enabled, vme_base, size, buf_base,
357                 aspace, cycle);
358 }
359 EXPORT_SYMBOL(vme_slave_set);
360
361 int vme_slave_get(struct vme_resource *resource, int *enabled,
362         unsigned long long *vme_base, unsigned long long *size,
363         dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
364 {
365         struct vme_bridge *bridge = find_bridge(resource);
366         struct vme_slave_resource *image;
367
368         if (resource->type != VME_SLAVE) {
369                 printk(KERN_ERR "Not a slave resource\n");
370                 return -EINVAL;
371         }
372
373         image = list_entry(resource->entry, struct vme_slave_resource, list);
374
375         if (bridge->slave_get == NULL) {
376                 printk(KERN_ERR "vme_slave_get not supported\n");
377                 return -EINVAL;
378         }
379
380         return bridge->slave_get(image, enabled, vme_base, size, buf_base,
381                 aspace, cycle);
382 }
383 EXPORT_SYMBOL(vme_slave_get);
384
385 void vme_slave_free(struct vme_resource *resource)
386 {
387         struct vme_slave_resource *slave_image;
388
389         if (resource->type != VME_SLAVE) {
390                 printk(KERN_ERR "Not a slave resource\n");
391                 return;
392         }
393
394         slave_image = list_entry(resource->entry, struct vme_slave_resource,
395                 list);
396         if (slave_image == NULL) {
397                 printk(KERN_ERR "Can't find slave resource\n");
398                 return;
399         }
400
401         /* Unlock image */
402         mutex_lock(&slave_image->mtx);
403         if (slave_image->locked == 0)
404                 printk(KERN_ERR "Image is already free\n");
405
406         slave_image->locked = 0;
407         mutex_unlock(&slave_image->mtx);
408
409         /* Free up resource memory */
410         kfree(resource);
411 }
412 EXPORT_SYMBOL(vme_slave_free);
413
/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 *
 * @vdev:    device requesting the window
 * @address: bitmask of required address spaces (VME_A16 etc.)
 * @cycle:   bitmask of required cycle types
 * @dwidth:  bitmask of required data widths
 *
 * Scans the bridge's master resources for an unlocked image supporting
 * all requested attributes, marks it locked and wraps it in a
 * vme_resource handle.  Returns NULL on failure.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
        u32 cycle, u32 dwidth)
{
        struct vme_bridge *bridge;
        struct list_head *master_pos = NULL;
        struct vme_master_resource *allocated_image = NULL;
        struct vme_master_resource *master_image = NULL;
        struct vme_resource *resource = NULL;

        bridge = vdev->bridge;
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find VME bus\n");
                goto err_bus;
        }

        /* Loop through master resources */
        list_for_each(master_pos, &bridge->master_resources) {
                master_image = list_entry(master_pos,
                        struct vme_master_resource, list);

                if (master_image == NULL) {
                        printk(KERN_WARNING "Registered NULL master resource\n");
                        continue;
                }

                /* Find an unlocked and compatible image */
                spin_lock(&master_image->lock);
                if (((master_image->address_attr & address) == address) &&
                        ((master_image->cycle_attr & cycle) == cycle) &&
                        ((master_image->width_attr & dwidth) == dwidth) &&
                        (master_image->locked == 0)) {

                        /* Claim the image before dropping its lock */
                        master_image->locked = 1;
                        spin_unlock(&master_image->lock);
                        allocated_image = master_image;
                        break;
                }
                spin_unlock(&master_image->lock);
        }

        /* Check to see if we found a resource */
        if (allocated_image == NULL) {
                printk(KERN_ERR "Can't find a suitable resource\n");
                goto err_image;
        }

        resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
        if (resource == NULL) {
                printk(KERN_ERR "Unable to allocate resource structure\n");
                goto err_alloc;
        }
        resource->type = VME_MASTER;
        resource->entry = &allocated_image->list;

        return resource;

err_alloc:
        /* Unlock image */
        spin_lock(&master_image->lock);
        master_image->locked = 0;
        spin_unlock(&master_image->lock);
err_image:
err_bus:
        return NULL;
}
EXPORT_SYMBOL(vme_master_request);
484
485 int vme_master_set(struct vme_resource *resource, int enabled,
486         unsigned long long vme_base, unsigned long long size, u32 aspace,
487         u32 cycle, u32 dwidth)
488 {
489         struct vme_bridge *bridge = find_bridge(resource);
490         struct vme_master_resource *image;
491         int retval;
492
493         if (resource->type != VME_MASTER) {
494                 printk(KERN_ERR "Not a master resource\n");
495                 return -EINVAL;
496         }
497
498         image = list_entry(resource->entry, struct vme_master_resource, list);
499
500         if (bridge->master_set == NULL) {
501                 printk(KERN_WARNING "vme_master_set not supported\n");
502                 return -EINVAL;
503         }
504
505         if (!(((image->address_attr & aspace) == aspace) &&
506                 ((image->cycle_attr & cycle) == cycle) &&
507                 ((image->width_attr & dwidth) == dwidth))) {
508                 printk(KERN_WARNING "Invalid attributes\n");
509                 return -EINVAL;
510         }
511
512         retval = vme_check_window(aspace, vme_base, size);
513         if (retval)
514                 return retval;
515
516         return bridge->master_set(image, enabled, vme_base, size, aspace,
517                 cycle, dwidth);
518 }
519 EXPORT_SYMBOL(vme_master_set);
520
521 int vme_master_get(struct vme_resource *resource, int *enabled,
522         unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
523         u32 *cycle, u32 *dwidth)
524 {
525         struct vme_bridge *bridge = find_bridge(resource);
526         struct vme_master_resource *image;
527
528         if (resource->type != VME_MASTER) {
529                 printk(KERN_ERR "Not a master resource\n");
530                 return -EINVAL;
531         }
532
533         image = list_entry(resource->entry, struct vme_master_resource, list);
534
535         if (bridge->master_get == NULL) {
536                 printk(KERN_WARNING "%s not supported\n", __func__);
537                 return -EINVAL;
538         }
539
540         return bridge->master_get(image, enabled, vme_base, size, aspace,
541                 cycle, dwidth);
542 }
543 EXPORT_SYMBOL(vme_master_get);
544
545 /*
546  * Read data out of VME space into a buffer.
547  */
548 ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
549         loff_t offset)
550 {
551         struct vme_bridge *bridge = find_bridge(resource);
552         struct vme_master_resource *image;
553         size_t length;
554
555         if (bridge->master_read == NULL) {
556                 printk(KERN_WARNING "Reading from resource not supported\n");
557                 return -EINVAL;
558         }
559
560         if (resource->type != VME_MASTER) {
561                 printk(KERN_ERR "Not a master resource\n");
562                 return -EINVAL;
563         }
564
565         image = list_entry(resource->entry, struct vme_master_resource, list);
566
567         length = vme_get_size(resource);
568
569         if (offset > length) {
570                 printk(KERN_WARNING "Invalid Offset\n");
571                 return -EFAULT;
572         }
573
574         if ((offset + count) > length)
575                 count = length - offset;
576
577         return bridge->master_read(image, buf, count, offset);
578
579 }
580 EXPORT_SYMBOL(vme_master_read);
581
582 /*
583  * Write data out to VME space from a buffer.
584  */
585 ssize_t vme_master_write(struct vme_resource *resource, void *buf,
586         size_t count, loff_t offset)
587 {
588         struct vme_bridge *bridge = find_bridge(resource);
589         struct vme_master_resource *image;
590         size_t length;
591
592         if (bridge->master_write == NULL) {
593                 printk(KERN_WARNING "Writing to resource not supported\n");
594                 return -EINVAL;
595         }
596
597         if (resource->type != VME_MASTER) {
598                 printk(KERN_ERR "Not a master resource\n");
599                 return -EINVAL;
600         }
601
602         image = list_entry(resource->entry, struct vme_master_resource, list);
603
604         length = vme_get_size(resource);
605
606         if (offset > length) {
607                 printk(KERN_WARNING "Invalid Offset\n");
608                 return -EFAULT;
609         }
610
611         if ((offset + count) > length)
612                 count = length - offset;
613
614         return bridge->master_write(image, buf, count, offset);
615 }
616 EXPORT_SYMBOL(vme_master_write);
617
618 /*
619  * Perform RMW cycle to provided location.
620  */
621 unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
622         unsigned int compare, unsigned int swap, loff_t offset)
623 {
624         struct vme_bridge *bridge = find_bridge(resource);
625         struct vme_master_resource *image;
626
627         if (bridge->master_rmw == NULL) {
628                 printk(KERN_WARNING "Writing to resource not supported\n");
629                 return -EINVAL;
630         }
631
632         if (resource->type != VME_MASTER) {
633                 printk(KERN_ERR "Not a master resource\n");
634                 return -EINVAL;
635         }
636
637         image = list_entry(resource->entry, struct vme_master_resource, list);
638
639         return bridge->master_rmw(image, mask, compare, swap, offset);
640 }
641 EXPORT_SYMBOL(vme_master_rmw);
642
/*
 * Map a master window into a userspace vma.
 *
 * The vma's pgoff is interpreted as an offset into the window's bus
 * resource.  Fails with -EFAULT if the requested mapping would extend
 * past the end of the window.
 */
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
        struct vme_master_resource *image;
        phys_addr_t phys_addr;
        unsigned long vma_size;

        if (resource->type != VME_MASTER) {
                pr_err("Not a master resource\n");
                return -EINVAL;
        }

        image = list_entry(resource->entry, struct vme_master_resource, list);
        /* Translate the mmap offset into the window's bus address range */
        phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
        vma_size = vma->vm_end - vma->vm_start;

        /* bus_resource.end is inclusive, hence the +1 */
        if (phys_addr + vma_size > image->bus_resource.end + 1) {
                pr_err("Map size cannot exceed the window size\n");
                return -EFAULT;
        }

        /* Device memory: mapping must not be cached */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(vme_master_mmap);
668
669 void vme_master_free(struct vme_resource *resource)
670 {
671         struct vme_master_resource *master_image;
672
673         if (resource->type != VME_MASTER) {
674                 printk(KERN_ERR "Not a master resource\n");
675                 return;
676         }
677
678         master_image = list_entry(resource->entry, struct vme_master_resource,
679                 list);
680         if (master_image == NULL) {
681                 printk(KERN_ERR "Can't find master resource\n");
682                 return;
683         }
684
685         /* Unlock image */
686         spin_lock(&master_image->lock);
687         if (master_image->locked == 0)
688                 printk(KERN_ERR "Image is already free\n");
689
690         master_image->locked = 0;
691         spin_unlock(&master_image->lock);
692
693         /* Free up resource memory */
694         kfree(resource);
695 }
696 EXPORT_SYMBOL(vme_master_free);
697
/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 *
 * @vdev:  device requesting the controller
 * @route: bitmask of required routing capabilities
 *
 * Scans the bridge's DMA resources for an unlocked controller that
 * supports all requested routes, marks it locked and wraps it in a
 * vme_resource handle.  Returns NULL on failure.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
        struct vme_bridge *bridge;
        struct list_head *dma_pos = NULL;
        struct vme_dma_resource *allocated_ctrlr = NULL;
        struct vme_dma_resource *dma_ctrlr = NULL;
        struct vme_resource *resource = NULL;

        /* XXX Not checking resource attributes */
        printk(KERN_ERR "No VME resource Attribute tests done\n");

        bridge = vdev->bridge;
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find VME bus\n");
                goto err_bus;
        }

        /* Loop through DMA resources */
        list_for_each(dma_pos, &bridge->dma_resources) {
                dma_ctrlr = list_entry(dma_pos,
                        struct vme_dma_resource, list);

                if (dma_ctrlr == NULL) {
                        printk(KERN_ERR "Registered NULL DMA resource\n");
                        continue;
                }

                /* Find an unlocked and compatible controller */
                mutex_lock(&dma_ctrlr->mtx);
                if (((dma_ctrlr->route_attr & route) == route) &&
                        (dma_ctrlr->locked == 0)) {

                        /* Claim the controller before dropping its lock */
                        dma_ctrlr->locked = 1;
                        mutex_unlock(&dma_ctrlr->mtx);
                        allocated_ctrlr = dma_ctrlr;
                        break;
                }
                mutex_unlock(&dma_ctrlr->mtx);
        }

        /* Check to see if we found a resource */
        if (allocated_ctrlr == NULL)
                goto err_ctrlr;

        resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
        if (resource == NULL) {
                printk(KERN_WARNING "Unable to allocate resource structure\n");
                goto err_alloc;
        }
        resource->type = VME_DMA;
        resource->entry = &allocated_ctrlr->list;

        return resource;

err_alloc:
        /* Unlock image */
        mutex_lock(&dma_ctrlr->mtx);
        dma_ctrlr->locked = 0;
        mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
        return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
766
767 /*
768  * Start new list
769  */
770 struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
771 {
772         struct vme_dma_resource *ctrlr;
773         struct vme_dma_list *dma_list;
774
775         if (resource->type != VME_DMA) {
776                 printk(KERN_ERR "Not a DMA resource\n");
777                 return NULL;
778         }
779
780         ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
781
782         dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
783         if (dma_list == NULL) {
784                 printk(KERN_ERR "Unable to allocate memory for new DMA list\n");
785                 return NULL;
786         }
787         INIT_LIST_HEAD(&dma_list->entries);
788         dma_list->parent = ctrlr;
789         mutex_init(&dma_list->mtx);
790
791         return dma_list;
792 }
793 EXPORT_SYMBOL(vme_new_dma_list);
794
795 /*
796  * Create "Pattern" type attributes
797  */
798 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
799 {
800         struct vme_dma_attr *attributes;
801         struct vme_dma_pattern *pattern_attr;
802
803         attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
804         if (attributes == NULL) {
805                 printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
806                 goto err_attr;
807         }
808
809         pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
810         if (pattern_attr == NULL) {
811                 printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
812                 goto err_pat;
813         }
814
815         attributes->type = VME_DMA_PATTERN;
816         attributes->private = (void *)pattern_attr;
817
818         pattern_attr->pattern = pattern;
819         pattern_attr->type = type;
820
821         return attributes;
822
823 err_pat:
824         kfree(attributes);
825 err_attr:
826         return NULL;
827 }
828 EXPORT_SYMBOL(vme_dma_pattern_attribute);
829
830 /*
831  * Create "PCI" type attributes
832  */
833 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
834 {
835         struct vme_dma_attr *attributes;
836         struct vme_dma_pci *pci_attr;
837
838         /* XXX Run some sanity checks here */
839
840         attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
841         if (attributes == NULL) {
842                 printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
843                 goto err_attr;
844         }
845
846         pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
847         if (pci_attr == NULL) {
848                 printk(KERN_ERR "Unable to allocate memory for PCI attributes\n");
849                 goto err_pci;
850         }
851
852
853
854         attributes->type = VME_DMA_PCI;
855         attributes->private = (void *)pci_attr;
856
857         pci_attr->address = address;
858
859         return attributes;
860
861 err_pci:
862         kfree(attributes);
863 err_attr:
864         return NULL;
865 }
866 EXPORT_SYMBOL(vme_dma_pci_attribute);
867
868 /*
869  * Create "VME" type attributes
870  */
871 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
872         u32 aspace, u32 cycle, u32 dwidth)
873 {
874         struct vme_dma_attr *attributes;
875         struct vme_dma_vme *vme_attr;
876
877         attributes = kmalloc(
878                 sizeof(struct vme_dma_attr), GFP_KERNEL);
879         if (attributes == NULL) {
880                 printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
881                 goto err_attr;
882         }
883
884         vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
885         if (vme_attr == NULL) {
886                 printk(KERN_ERR "Unable to allocate memory for VME attributes\n");
887                 goto err_vme;
888         }
889
890         attributes->type = VME_DMA_VME;
891         attributes->private = (void *)vme_attr;
892
893         vme_attr->address = address;
894         vme_attr->aspace = aspace;
895         vme_attr->cycle = cycle;
896         vme_attr->dwidth = dwidth;
897
898         return attributes;
899
900 err_vme:
901         kfree(attributes);
902 err_attr:
903         return NULL;
904 }
905 EXPORT_SYMBOL(vme_dma_vme_attribute);
906
/*
 * Free attribute
 *
 * Releases both the attribute wrapper and its type-specific private
 * data allocated by the vme_dma_*_attribute() helpers.
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
        kfree(attributes->private);
        kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
916
/*
 * Append a src->dest transfer of @count bytes to a DMA link list.
 *
 * Returns 0 or a negative errno from the bridge driver.
 *
 * NOTE(review): mutex_trylock() failure is reported as "already
 * submitted", but trylock also fails on transient contention from a
 * concurrent add — confirm whether mutex_lock() is the intent.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
        struct vme_dma_attr *dest, size_t count)
{
        struct vme_bridge *bridge = list->parent->parent;
        int retval;

        if (bridge->dma_list_add == NULL) {
                printk(KERN_WARNING "Link List DMA generation not supported\n");
                return -EINVAL;
        }

        if (!mutex_trylock(&list->mtx)) {
                printk(KERN_ERR "Link List already submitted\n");
                return -EINVAL;
        }

        /* Entry construction is driver specific */
        retval = bridge->dma_list_add(list, src, dest, count);

        mutex_unlock(&list->mtx);

        return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
940
941 int vme_dma_list_exec(struct vme_dma_list *list)
942 {
943         struct vme_bridge *bridge = list->parent->parent;
944         int retval;
945
946         if (bridge->dma_list_exec == NULL) {
947                 printk(KERN_ERR "Link List DMA execution not supported\n");
948                 return -EINVAL;
949         }
950
951         mutex_lock(&list->mtx);
952
953         retval = bridge->dma_list_exec(list);
954
955         mutex_unlock(&list->mtx);
956
957         return retval;
958 }
959 EXPORT_SYMBOL(vme_dma_list_exec);
960
/*
 * vme_dma_list_free - Empty and free a DMA link list.
 * @list: DMA list to destroy.
 *
 * Returns -EINVAL if the bridge cannot empty lists or the list is still
 * in use (mutex held), the bridge's error code if emptying fails, or 0
 * on success.  On success the list structure itself is freed.
 */
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	/* trylock: failure means the list is currently executing */
	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the DMA list. We need to go to the
	 * low level driver as DMA entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&list->mtx);
		return retval;
	}
	/* Must drop the mutex before freeing the structure that embeds it */
	mutex_unlock(&list->mtx);
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);
992
993 int vme_dma_free(struct vme_resource *resource)
994 {
995         struct vme_dma_resource *ctrlr;
996
997         if (resource->type != VME_DMA) {
998                 printk(KERN_ERR "Not a DMA resource\n");
999                 return -EINVAL;
1000         }
1001
1002         ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
1003
1004         if (!mutex_trylock(&ctrlr->mtx)) {
1005                 printk(KERN_ERR "Resource busy, can't free\n");
1006                 return -EBUSY;
1007         }
1008
1009         if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
1010                 printk(KERN_WARNING "Resource still processing transfers\n");
1011                 mutex_unlock(&ctrlr->mtx);
1012                 return -EBUSY;
1013         }
1014
1015         ctrlr->locked = 0;
1016
1017         mutex_unlock(&ctrlr->mtx);
1018
1019         kfree(resource);
1020
1021         return 0;
1022 }
1023 EXPORT_SYMBOL(vme_dma_free);
1024
1025 void vme_bus_error_handler(struct vme_bridge *bridge,
1026                            unsigned long long address, int am)
1027 {
1028         struct list_head *handler_pos = NULL;
1029         struct vme_error_handler *handler;
1030         int handler_triggered = 0;
1031         u32 aspace = vme_get_aspace(am);
1032
1033         list_for_each(handler_pos, &bridge->vme_error_handlers) {
1034                 handler = list_entry(handler_pos, struct vme_error_handler,
1035                                      list);
1036                 if ((aspace == handler->aspace) &&
1037                     (address >= handler->start) &&
1038                     (address < handler->end)) {
1039                         if (!handler->num_errors)
1040                                 handler->first_error = address;
1041                         if (handler->num_errors != UINT_MAX)
1042                                 handler->num_errors++;
1043                         handler_triggered = 1;
1044                 }
1045         }
1046
1047         if (!handler_triggered)
1048                 dev_err(bridge->parent,
1049                         "Unhandled VME access error at address 0x%llx\n",
1050                         address);
1051 }
1052 EXPORT_SYMBOL(vme_bus_error_handler);
1053
1054 struct vme_error_handler *vme_register_error_handler(
1055         struct vme_bridge *bridge, u32 aspace,
1056         unsigned long long address, size_t len)
1057 {
1058         struct vme_error_handler *handler;
1059
1060         handler = kmalloc(sizeof(*handler), GFP_KERNEL);
1061         if (!handler)
1062                 return NULL;
1063
1064         handler->aspace = aspace;
1065         handler->start = address;
1066         handler->end = address + len;
1067         handler->num_errors = 0;
1068         handler->first_error = 0;
1069         list_add_tail(&handler->list, &bridge->vme_error_handlers);
1070
1071         return handler;
1072 }
1073 EXPORT_SYMBOL(vme_register_error_handler);
1074
/*
 * vme_unregister_error_handler - Remove and free a bus-error handler
 * previously returned by vme_register_error_handler().
 */
void vme_unregister_error_handler(struct vme_error_handler *handler)
{
	list_del(&handler->list);
	kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);
1081
1082 void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
1083 {
1084         void (*call)(int, int, void *);
1085         void *priv_data;
1086
1087         call = bridge->irq[level - 1].callback[statid].func;
1088         priv_data = bridge->irq[level - 1].callback[statid].priv_data;
1089
1090         if (call != NULL)
1091                 call(level, statid, priv_data);
1092         else
1093                 printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
1094                        level, statid);
1095 }
1096 EXPORT_SYMBOL(vme_irq_handler);
1097
/*
 * vme_irq_request - Register a callback for a VME interrupt vector.
 * @vdev: Device requesting the interrupt.
 * @level: VME interrupt level, valid range 1-7.
 * @statid: Status/ID vector to claim.
 * @callback: Function invoked from vme_irq_handler() on a match.
 * @priv_data: Opaque pointer passed back to @callback.
 *
 * Enables the interrupt level on the bridge once the first callback for
 * that level is installed.  Returns -EINVAL on bad arguments or missing
 * bridge support, -EBUSY if the vector is already claimed, 0 on success.
 *
 * NOTE(review): @statid indexes callback[] without a range check here -
 * assumes callers pass a valid status/ID within the bridge's table;
 * verify against the callback array size in vme_bridge.h.
 */
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	/* irq_mtx serialises callback table updates against vme_irq_free() */
	mutex_lock(&bridge->irq_mtx);

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&bridge->irq_mtx);

	return 0;
}
EXPORT_SYMBOL(vme_irq_request);
1140
/*
 * vme_irq_free - Release an interrupt vector claimed by vme_irq_request().
 * @vdev: Device that owns the registration.
 * @level: VME interrupt level, valid range 1-7.
 * @statid: Status/ID vector to release.
 *
 * Disables the interrupt level on the bridge when the last callback for
 * that level is removed.  Silently returns on bad arguments or missing
 * bridge support.
 */
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	/* irq_mtx serialises callback table updates against vme_irq_request() */
	mutex_lock(&bridge->irq_mtx);

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level*/
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
1175
1176 int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
1177 {
1178         struct vme_bridge *bridge;
1179
1180         bridge = vdev->bridge;
1181         if (bridge == NULL) {
1182                 printk(KERN_ERR "Can't find VME bus\n");
1183                 return -EINVAL;
1184         }
1185
1186         if ((level < 1) || (level > 7)) {
1187                 printk(KERN_WARNING "Invalid interrupt level\n");
1188                 return -EINVAL;
1189         }
1190
1191         if (bridge->irq_generate == NULL) {
1192                 printk(KERN_WARNING "Interrupt generation not supported\n");
1193                 return -EINVAL;
1194         }
1195
1196         return bridge->irq_generate(bridge, level, statid);
1197 }
1198 EXPORT_SYMBOL(vme_irq_generate);
1199
1200 /*
1201  * Request the location monitor, return resource or NULL
1202  */
1203 struct vme_resource *vme_lm_request(struct vme_dev *vdev)
1204 {
1205         struct vme_bridge *bridge;
1206         struct list_head *lm_pos = NULL;
1207         struct vme_lm_resource *allocated_lm = NULL;
1208         struct vme_lm_resource *lm = NULL;
1209         struct vme_resource *resource = NULL;
1210
1211         bridge = vdev->bridge;
1212         if (bridge == NULL) {
1213                 printk(KERN_ERR "Can't find VME bus\n");
1214                 goto err_bus;
1215         }
1216
1217         /* Loop through DMA resources */
1218         list_for_each(lm_pos, &bridge->lm_resources) {
1219                 lm = list_entry(lm_pos,
1220                         struct vme_lm_resource, list);
1221
1222                 if (lm == NULL) {
1223                         printk(KERN_ERR "Registered NULL Location Monitor resource\n");
1224                         continue;
1225                 }
1226
1227                 /* Find an unlocked controller */
1228                 mutex_lock(&lm->mtx);
1229                 if (lm->locked == 0) {
1230                         lm->locked = 1;
1231                         mutex_unlock(&lm->mtx);
1232                         allocated_lm = lm;
1233                         break;
1234                 }
1235                 mutex_unlock(&lm->mtx);
1236         }
1237
1238         /* Check to see if we found a resource */
1239         if (allocated_lm == NULL)
1240                 goto err_lm;
1241
1242         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1243         if (resource == NULL) {
1244                 printk(KERN_ERR "Unable to allocate resource structure\n");
1245                 goto err_alloc;
1246         }
1247         resource->type = VME_LM;
1248         resource->entry = &allocated_lm->list;
1249
1250         return resource;
1251
1252 err_alloc:
1253         /* Unlock image */
1254         mutex_lock(&lm->mtx);
1255         lm->locked = 0;
1256         mutex_unlock(&lm->mtx);
1257 err_lm:
1258 err_bus:
1259         return NULL;
1260 }
1261 EXPORT_SYMBOL(vme_lm_request);
1262
1263 int vme_lm_count(struct vme_resource *resource)
1264 {
1265         struct vme_lm_resource *lm;
1266
1267         if (resource->type != VME_LM) {
1268                 printk(KERN_ERR "Not a Location Monitor resource\n");
1269                 return -EINVAL;
1270         }
1271
1272         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1273
1274         return lm->monitors;
1275 }
1276 EXPORT_SYMBOL(vme_lm_count);
1277
1278 int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1279         u32 aspace, u32 cycle)
1280 {
1281         struct vme_bridge *bridge = find_bridge(resource);
1282         struct vme_lm_resource *lm;
1283
1284         if (resource->type != VME_LM) {
1285                 printk(KERN_ERR "Not a Location Monitor resource\n");
1286                 return -EINVAL;
1287         }
1288
1289         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1290
1291         if (bridge->lm_set == NULL) {
1292                 printk(KERN_ERR "vme_lm_set not supported\n");
1293                 return -EINVAL;
1294         }
1295
1296         return bridge->lm_set(lm, lm_base, aspace, cycle);
1297 }
1298 EXPORT_SYMBOL(vme_lm_set);
1299
1300 int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1301         u32 *aspace, u32 *cycle)
1302 {
1303         struct vme_bridge *bridge = find_bridge(resource);
1304         struct vme_lm_resource *lm;
1305
1306         if (resource->type != VME_LM) {
1307                 printk(KERN_ERR "Not a Location Monitor resource\n");
1308                 return -EINVAL;
1309         }
1310
1311         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1312
1313         if (bridge->lm_get == NULL) {
1314                 printk(KERN_ERR "vme_lm_get not supported\n");
1315                 return -EINVAL;
1316         }
1317
1318         return bridge->lm_get(lm, lm_base, aspace, cycle);
1319 }
1320 EXPORT_SYMBOL(vme_lm_get);
1321
1322 int vme_lm_attach(struct vme_resource *resource, int monitor,
1323         void (*callback)(void *), void *data)
1324 {
1325         struct vme_bridge *bridge = find_bridge(resource);
1326         struct vme_lm_resource *lm;
1327
1328         if (resource->type != VME_LM) {
1329                 printk(KERN_ERR "Not a Location Monitor resource\n");
1330                 return -EINVAL;
1331         }
1332
1333         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1334
1335         if (bridge->lm_attach == NULL) {
1336                 printk(KERN_ERR "vme_lm_attach not supported\n");
1337                 return -EINVAL;
1338         }
1339
1340         return bridge->lm_attach(lm, monitor, callback, data);
1341 }
1342 EXPORT_SYMBOL(vme_lm_attach);
1343
1344 int vme_lm_detach(struct vme_resource *resource, int monitor)
1345 {
1346         struct vme_bridge *bridge = find_bridge(resource);
1347         struct vme_lm_resource *lm;
1348
1349         if (resource->type != VME_LM) {
1350                 printk(KERN_ERR "Not a Location Monitor resource\n");
1351                 return -EINVAL;
1352         }
1353
1354         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1355
1356         if (bridge->lm_detach == NULL) {
1357                 printk(KERN_ERR "vme_lm_detach not supported\n");
1358                 return -EINVAL;
1359         }
1360
1361         return bridge->lm_detach(lm, monitor);
1362 }
1363 EXPORT_SYMBOL(vme_lm_detach);
1364
/*
 * vme_lm_free - Release a location monitor obtained via vme_lm_request().
 * @resource: Resource handle to release.
 *
 * Unlocks the underlying location monitor and frees the handle.  Does
 * nothing (beyond logging) for a non-LM resource.
 */
void vme_lm_free(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	mutex_lock(&lm->mtx);

	/* XXX
	 * Check to see that there aren't any callbacks still attached, if
	 * there are we should probably be detaching them!
	 */

	lm->locked = 0;

	mutex_unlock(&lm->mtx);

	kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
1390
1391 int vme_slot_num(struct vme_dev *vdev)
1392 {
1393         struct vme_bridge *bridge;
1394
1395         bridge = vdev->bridge;
1396         if (bridge == NULL) {
1397                 printk(KERN_ERR "Can't find VME bus\n");
1398                 return -EINVAL;
1399         }
1400
1401         if (bridge->slot_get == NULL) {
1402                 printk(KERN_WARNING "vme_slot_num not supported\n");
1403                 return -EINVAL;
1404         }
1405
1406         return bridge->slot_get(bridge);
1407 }
1408 EXPORT_SYMBOL(vme_slot_num);
1409
1410 int vme_bus_num(struct vme_dev *vdev)
1411 {
1412         struct vme_bridge *bridge;
1413
1414         bridge = vdev->bridge;
1415         if (bridge == NULL) {
1416                 pr_err("Can't find VME bus\n");
1417                 return -EINVAL;
1418         }
1419
1420         return bridge->num;
1421 }
1422 EXPORT_SYMBOL(vme_bus_num);
1423
1424 /* - Bridge Registration --------------------------------------------------- */
1425
/*
 * Device-core release callback: frees the vme_dev when its last
 * reference is dropped.  This is the only place a vme_dev is freed.
 */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}
1430
/*
 * vme_init_bridge - Common bridge initialization.
 * @bridge: Bridge structure allocated by the bridge driver.
 *
 * Initializes the resource lists and IRQ mutex shared by all bridge
 * drivers.  Returns @bridge for call chaining.
 */
struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->vme_error_handlers);
	INIT_LIST_HEAD(&bridge->master_resources);
	INIT_LIST_HEAD(&bridge->slave_resources);
	INIT_LIST_HEAD(&bridge->dma_resources);
	INIT_LIST_HEAD(&bridge->lm_resources);
	mutex_init(&bridge->irq_mtx);

	return bridge;
}
EXPORT_SYMBOL(vme_init_bridge);
1444
1445 int vme_register_bridge(struct vme_bridge *bridge)
1446 {
1447         int i;
1448         int ret = -1;
1449
1450         mutex_lock(&vme_buses_lock);
1451         for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1452                 if ((vme_bus_numbers & (1 << i)) == 0) {
1453                         vme_bus_numbers |= (1 << i);
1454                         bridge->num = i;
1455                         INIT_LIST_HEAD(&bridge->devices);
1456                         list_add_tail(&bridge->bus_list, &vme_bus_list);
1457                         ret = 0;
1458                         break;
1459                 }
1460         }
1461         mutex_unlock(&vme_buses_lock);
1462
1463         return ret;
1464 }
1465 EXPORT_SYMBOL(vme_register_bridge);
1466
/*
 * vme_unregister_bridge - Remove a bridge and all of its devices.
 * @bridge: Bridge to unregister.
 *
 * Returns the bridge's bus number to the free pool, unregisters every
 * device that was created on the bridge and removes the bridge from the
 * global bus list, all under vme_buses_lock.
 */
void vme_unregister_bridge(struct vme_bridge *bridge)
{
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	mutex_lock(&vme_buses_lock);
	vme_bus_numbers &= ~(1 << bridge->num);
	/* _safe variant: device_unregister() removes entries as we walk */
	list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	list_del(&bridge->bus_list);
	mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);
1483
1484 /* - Driver Registration --------------------------------------------------- */
1485
1486 static int __vme_register_driver_bus(struct vme_driver *drv,
1487         struct vme_bridge *bridge, unsigned int ndevs)
1488 {
1489         int err;
1490         unsigned int i;
1491         struct vme_dev *vdev;
1492         struct vme_dev *tmp;
1493
1494         for (i = 0; i < ndevs; i++) {
1495                 vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
1496                 if (!vdev) {
1497                         err = -ENOMEM;
1498                         goto err_devalloc;
1499                 }
1500                 vdev->num = i;
1501                 vdev->bridge = bridge;
1502                 vdev->dev.platform_data = drv;
1503                 vdev->dev.release = vme_dev_release;
1504                 vdev->dev.parent = bridge->parent;
1505                 vdev->dev.bus = &vme_bus_type;
1506                 dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
1507                         vdev->num);
1508
1509                 err = device_register(&vdev->dev);
1510                 if (err)
1511                         goto err_reg;
1512
1513                 if (vdev->dev.platform_data) {
1514                         list_add_tail(&vdev->drv_list, &drv->devices);
1515                         list_add_tail(&vdev->bridge_list, &bridge->devices);
1516                 } else
1517                         device_unregister(&vdev->dev);
1518         }
1519         return 0;
1520
1521 err_reg:
1522         put_device(&vdev->dev);
1523         kfree(vdev);
1524 err_devalloc:
1525         list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
1526                 list_del(&vdev->drv_list);
1527                 list_del(&vdev->bridge_list);
1528                 device_unregister(&vdev->dev);
1529         }
1530         return err;
1531 }
1532
/*
 * __vme_register_driver - Create @ndevs devices for @drv on every
 * registered bridge.
 *
 * Stops at the first bridge that fails; returns 0 or that bridge's
 * errno.  Holds vme_buses_lock across the walk so bridges cannot be
 * removed mid-registration.
 */
static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	struct vme_bridge *bridge;
	int err = 0;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
		/*
		 * This cannot cause trouble as we already have vme_buses_lock
		 * and if the bridge is removed, it will have to go through
		 * vme_unregister_bridge() to do it (which calls remove() on
		 * the bridge which in turn tries to acquire vme_buses_lock and
		 * will have to wait).
		 */
		err = __vme_register_driver_bus(drv, bridge, ndevs);
		if (err)
			break;
	}
	mutex_unlock(&vme_buses_lock);
	return err;
}
1554
1555 int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1556 {
1557         int err;
1558
1559         drv->driver.name = drv->name;
1560         drv->driver.bus = &vme_bus_type;
1561         INIT_LIST_HEAD(&drv->devices);
1562
1563         err = driver_register(&drv->driver);
1564         if (err)
1565                 return err;
1566
1567         err = __vme_register_driver(drv, ndevs);
1568         if (err)
1569                 driver_unregister(&drv->driver);
1570
1571         return err;
1572 }
1573 EXPORT_SYMBOL(vme_register_driver);
1574
1575 void vme_unregister_driver(struct vme_driver *drv)
1576 {
1577         struct vme_dev *dev, *dev_tmp;
1578
1579         mutex_lock(&vme_buses_lock);
1580         list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
1581                 list_del(&dev->drv_list);
1582                 list_del(&dev->bridge_list);
1583                 device_unregister(&dev->dev);
1584         }
1585         mutex_unlock(&vme_buses_lock);
1586
1587         driver_unregister(&drv->driver);
1588 }
1589 EXPORT_SYMBOL(vme_unregister_driver);
1590
1591 /* - Bus Registration ------------------------------------------------------ */
1592
/*
 * vme_bus_match - Bus-level match callback.
 *
 * A vme_dev carries the driver it was created for in dev->platform_data
 * (set by __vme_register_driver_bus()); only that driver is considered.
 * If the driver's match() rejects the device, platform_data is cleared
 * so __vme_register_driver_bus() knows to unregister it.
 */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_driver *vme_drv;

	vme_drv = container_of(drv, struct vme_driver, driver);

	if (dev->platform_data == vme_drv) {
		struct vme_dev *vdev = dev_to_vme_dev(dev);

		if (vme_drv->match && vme_drv->match(vdev))
			return 1;

		/* signal rejection back to __vme_register_driver_bus() */
		dev->platform_data = NULL;
	}
	return 0;
}
1609
1610 static int vme_bus_probe(struct device *dev)
1611 {
1612         int retval = -ENODEV;
1613         struct vme_driver *driver;
1614         struct vme_dev *vdev = dev_to_vme_dev(dev);
1615
1616         driver = dev->platform_data;
1617
1618         if (driver->probe != NULL)
1619                 retval = driver->probe(vdev);
1620
1621         return retval;
1622 }
1623
/* The VME bus type: ties vme_dev devices to vme_driver drivers */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
};
EXPORT_SYMBOL(vme_bus_type);
1630
/* Register the VME bus type early (subsys level) so bridge and device
 * drivers can attach to it regardless of link order. */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
subsys_initcall(vme_init);