drivers/staging/vme/vme.c
1 /*
2  * VME Bridge Framework
3  *
4  * Author: Martyn Welch <martyn.welch@ge.com>
5  * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6  *
7  * Based on work by Tom Armistead and Ajit Prem
8  * Copyright 2004 Motorola Inc.
9  *
10  * This program is free software; you can redistribute  it and/or modify it
11  * under  the terms of  the GNU General  Public License as published by the
12  * Free Software Foundation;  either version 2 of the  License, or (at your
13  * option) any later version.
14  */
15
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33
34 #include "vme.h"
35 #include "vme_bridge.h"
36
37 /* Bitmask and mutex to keep track of bridge numbers */
38 static unsigned int vme_bus_numbers;
39 DEFINE_MUTEX(vme_bus_num_mtx);
40
41 static void __exit vme_exit(void);
42 static int __init vme_init(void);
43
44
45 /*
46  * Find the bridge associated with a specific device
47  */
48 static struct vme_bridge *dev_to_bridge(struct device *dev)
49 {
50         return dev->platform_data;
51 }
52
53 /*
54  * Find the bridge that the resource is associated with.
55  */
56 static struct vme_bridge *find_bridge(struct vme_resource *resource)
57 {
58         /* Get list to search */
59         switch (resource->type) {
60         case VME_MASTER:
61                 return list_entry(resource->entry, struct vme_master_resource,
62                         list)->parent;
63                 break;
64         case VME_SLAVE:
65                 return list_entry(resource->entry, struct vme_slave_resource,
66                         list)->parent;
67                 break;
68         case VME_DMA:
69                 return list_entry(resource->entry, struct vme_dma_resource,
70                         list)->parent;
71                 break;
72         case VME_LM:
73                 return list_entry(resource->entry, struct vme_lm_resource,
74                         list)->parent;
75                 break;
76         default:
77                 printk(KERN_ERR "Unknown resource type\n");
78                 return NULL;
79                 break;
80         }
81 }
82
83 /*
84  * Allocate a contiguous block of memory for use by the driver. This is used to
85  * create the buffers for the slave windows.
86  *
87  * XXX VME bridges could be available on buses other than PCI. At the moment
88  *     this framework only supports PCI devices.
89  */
90 void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
91         dma_addr_t *dma)
92 {
93         struct vme_bridge *bridge;
94         struct pci_dev *pdev;
95
96         if (resource == NULL) {
97                 printk(KERN_ERR "No resource\n");
98                 return NULL;
99         }
100
101         bridge = find_bridge(resource);
102         if (bridge == NULL) {
103                 printk(KERN_ERR "Can't find bridge\n");
104                 return NULL;
105         }
106
107         /* Find pci_dev container of dev */
108         if (bridge->parent == NULL) {
109                 printk(KERN_ERR "Dev entry NULL\n");
110                 return NULL;
111         }
112         pdev = container_of(bridge->parent, struct pci_dev, dev);
113
114         return pci_alloc_consistent(pdev, size, dma);
115 }
116 EXPORT_SYMBOL(vme_alloc_consistent);
117
118 /*
119  * Free previously allocated contiguous block of memory.
120  *
121  * XXX VME bridges could be available on buses other than PCI. At the moment
122  *     this framework only supports PCI devices.
123  */
124 void vme_free_consistent(struct vme_resource *resource, size_t size,
125         void *vaddr, dma_addr_t dma)
126 {
127         struct vme_bridge *bridge;
128         struct pci_dev *pdev;
129
130         if (resource == NULL) {
131                 printk(KERN_ERR "No resource\n");
132                 return;
133         }
134
135         bridge = find_bridge(resource);
136         if (bridge == NULL) {
137                 printk(KERN_ERR "Can't find bridge\n");
138                 return;
139         }
140
141         /* Find pci_dev container of dev */
142         pdev = container_of(bridge->parent, struct pci_dev, dev);
143
144         pci_free_consistent(pdev, size, vaddr, dma);
145 }
146 EXPORT_SYMBOL(vme_free_consistent);
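
/*
 * Illustrative sketch, not part of the framework: how a slot driver might
 * pair vme_alloc_consistent() and vme_free_consistent() around a slave
 * window resource. The 64k buffer size is an arbitrary choice for the
 * example.
 */
static int __maybe_unused vme_example_buffer(struct vme_resource *resource)
{
        void *vaddr;
        dma_addr_t dma;

        vaddr = vme_alloc_consistent(resource, 0x10000, &dma);
        if (vaddr == NULL)
                return -ENOMEM;

        /* ... program the slave window and use the buffer ... */

        vme_free_consistent(resource, 0x10000, vaddr, dma);

        return 0;
}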
147
148 size_t vme_get_size(struct vme_resource *resource)
149 {
150         int enabled, retval;
151         unsigned long long base, size;
152         dma_addr_t buf_base;
153         vme_address_t aspace;
154         vme_cycle_t cycle;
155         vme_width_t dwidth;
156
157         switch (resource->type) {
158         case VME_MASTER:
159                 retval = vme_master_get(resource, &enabled, &base, &size,
160                         &aspace, &cycle, &dwidth);
161
162                 return size;
163                 break;
164         case VME_SLAVE:
165                 retval = vme_slave_get(resource, &enabled, &base, &size,
166                         &buf_base, &aspace, &cycle);
167
168                 return size;
169                 break;
170         case VME_DMA:
171                 return 0;
172                 break;
173         default:
174                 printk(KERN_ERR "Unknown resource type\n");
175                 return 0;
176                 break;
177         }
178 }
179 EXPORT_SYMBOL(vme_get_size);
180
181 static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
182         unsigned long long size)
183 {
184         int retval = 0;
185
186         switch (aspace) {
187         case VME_A16:
188                 if (((vme_base + size) > VME_A16_MAX) ||
189                                 (vme_base > VME_A16_MAX))
190                         retval = -EFAULT;
191                 break;
192         case VME_A24:
193                 if (((vme_base + size) > VME_A24_MAX) ||
194                                 (vme_base > VME_A24_MAX))
195                         retval = -EFAULT;
196                 break;
197         case VME_A32:
198                 if (((vme_base + size) > VME_A32_MAX) ||
199                                 (vme_base > VME_A32_MAX))
200                         retval = -EFAULT;
201                 break;
202         case VME_A64:
203                 /*
204                  * Any value held in an unsigned long long can be used as the
205                  * base
206                  */
207                 break;
208         case VME_CRCSR:
209                 if (((vme_base + size) > VME_CRCSR_MAX) ||
210                                 (vme_base > VME_CRCSR_MAX))
211                         retval = -EFAULT;
212                 break;
213         case VME_USER1:
214         case VME_USER2:
215         case VME_USER3:
216         case VME_USER4:
217                 /* User Defined */
218                 break;
219         default:
220                 printk(KERN_ERR "Invalid address space\n");
221                 retval = -EINVAL;
222                 break;
223         }
224
225         return retval;
226 }
227
228 /*
229  * Request a slave image with specific attributes, return some unique
230  * identifier.
231  */
232 struct vme_resource *vme_slave_request(struct device *dev,
233         vme_address_t address, vme_cycle_t cycle)
234 {
235         struct vme_bridge *bridge;
236         struct list_head *slave_pos = NULL;
237         struct vme_slave_resource *allocated_image = NULL;
238         struct vme_slave_resource *slave_image = NULL;
239         struct vme_resource *resource = NULL;
240
241         bridge = dev_to_bridge(dev);
242         if (bridge == NULL) {
243                 printk(KERN_ERR "Can't find VME bus\n");
244                 goto err_bus;
245         }
246
247         /* Loop through slave resources */
248         list_for_each(slave_pos, &(bridge->slave_resources)) {
249                 slave_image = list_entry(slave_pos,
250                         struct vme_slave_resource, list);
251
252                 if (slave_image == NULL) {
253                         printk(KERN_ERR "Registered NULL Slave resource\n");
254                         continue;
255                 }
256
257                 /* Find an unlocked and compatible image */
258                 mutex_lock(&(slave_image->mtx));
259                 if (((slave_image->address_attr & address) == address) &&
260                         ((slave_image->cycle_attr & cycle) == cycle) &&
261                         (slave_image->locked == 0)) {
262
263                         slave_image->locked = 1;
264                         mutex_unlock(&(slave_image->mtx));
265                         allocated_image = slave_image;
266                         break;
267                 }
268                 mutex_unlock(&(slave_image->mtx));
269         }
270
271         /* No free image */
272         if (allocated_image == NULL)
273                 goto err_image;
274
275         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
276         if (resource == NULL) {
277                 printk(KERN_WARNING "Unable to allocate resource structure\n");
278                 goto err_alloc;
279         }
280         resource->type = VME_SLAVE;
281         resource->entry = &(allocated_image->list);
282
283         return resource;
284
285 err_alloc:
286         /* Unlock image */
287         mutex_lock(&(slave_image->mtx));
288         slave_image->locked = 0;
289         mutex_unlock(&(slave_image->mtx));
290 err_image:
291 err_bus:
292         return NULL;
293 }
294 EXPORT_SYMBOL(vme_slave_request);
295
296 int vme_slave_set(struct vme_resource *resource, int enabled,
297         unsigned long long vme_base, unsigned long long size,
298         dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
299 {
300         struct vme_bridge *bridge = find_bridge(resource);
301         struct vme_slave_resource *image;
302         int retval;
303
304         if (resource->type != VME_SLAVE) {
305                 printk(KERN_ERR "Not a slave resource\n");
306                 return -EINVAL;
307         }
308
309         image = list_entry(resource->entry, struct vme_slave_resource, list);
310
311         if (bridge->slave_set == NULL) {
312                 printk(KERN_ERR "Function not supported\n");
313                 return -ENOSYS;
314         }
315
316         if (!(((image->address_attr & aspace) == aspace) &&
317                 ((image->cycle_attr & cycle) == cycle))) {
318                 printk(KERN_ERR "Invalid attributes\n");
319                 return -EINVAL;
320         }
321
322         retval = vme_check_window(aspace, vme_base, size);
323         if (retval)
324                 return retval;
325
326         return bridge->slave_set(image, enabled, vme_base, size, buf_base,
327                 aspace, cycle);
328 }
329 EXPORT_SYMBOL(vme_slave_set);
330
331 int vme_slave_get(struct vme_resource *resource, int *enabled,
332         unsigned long long *vme_base, unsigned long long *size,
333         dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
334 {
335         struct vme_bridge *bridge = find_bridge(resource);
336         struct vme_slave_resource *image;
337
338         if (resource->type != VME_SLAVE) {
339                 printk(KERN_ERR "Not a slave resource\n");
340                 return -EINVAL;
341         }
342
343         image = list_entry(resource->entry, struct vme_slave_resource, list);
344
345         if (bridge->slave_get == NULL) {
346                 printk(KERN_ERR "vme_slave_get not supported\n");
347                 return -EINVAL;
348         }
349
350         return bridge->slave_get(image, enabled, vme_base, size, buf_base,
351                 aspace, cycle);
352 }
353 EXPORT_SYMBOL(vme_slave_get);
354
355 void vme_slave_free(struct vme_resource *resource)
356 {
357         struct vme_slave_resource *slave_image;
358
359         if (resource->type != VME_SLAVE) {
360                 printk(KERN_ERR "Not a slave resource\n");
361                 return;
362         }
363
364         slave_image = list_entry(resource->entry, struct vme_slave_resource,
365                 list);
366         if (slave_image == NULL) {
367                 printk(KERN_ERR "Can't find slave resource\n");
368                 return;
369         }
370
371         /* Unlock image */
372         mutex_lock(&(slave_image->mtx));
373         if (slave_image->locked == 0)
374                 printk(KERN_ERR "Image is already free\n");
375
376         slave_image->locked = 0;
377         mutex_unlock(&(slave_image->mtx));
378
379         /* Free up resource memory */
380         kfree(resource);
381 }
382 EXPORT_SYMBOL(vme_slave_free);
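
/*
 * Illustrative sketch, not part of the framework: the expected call sequence
 * for a slave window - request it, back it with a coherent buffer, enable
 * it, and later tear everything down again. The A24/SCT attributes and the
 * 64k window size are assumptions made for the example.
 */
static int __maybe_unused vme_example_slave(struct device *dev)
{
        struct vme_resource *res;
        dma_addr_t buf_base;
        void *buf;
        int retval;

        res = vme_slave_request(dev, VME_A24, VME_SCT);
        if (res == NULL)
                return -ENODEV;

        buf = vme_alloc_consistent(res, 0x10000, &buf_base);
        if (buf == NULL) {
                vme_slave_free(res);
                return -ENOMEM;
        }

        retval = vme_slave_set(res, 1, 0x0, 0x10000, buf_base, VME_A24,
                VME_SCT);
        if (retval) {
                vme_free_consistent(res, 0x10000, buf, buf_base);
                vme_slave_free(res);
                return retval;
        }

        /* ... incoming VME accesses to the window now land in "buf" ... */

        vme_slave_set(res, 0, 0x0, 0x10000, buf_base, VME_A24, VME_SCT);
        vme_free_consistent(res, 0x10000, buf, buf_base);
        vme_slave_free(res);

        return 0;
}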
383
384 /*
385  * Request a master image with specific attributes, return some unique
386  * identifier.
387  */
388 struct vme_resource *vme_master_request(struct device *dev,
389         vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
390 {
391         struct vme_bridge *bridge;
392         struct list_head *master_pos = NULL;
393         struct vme_master_resource *allocated_image = NULL;
394         struct vme_master_resource *master_image = NULL;
395         struct vme_resource *resource = NULL;
396
397         bridge = dev_to_bridge(dev);
398         if (bridge == NULL) {
399                 printk(KERN_ERR "Can't find VME bus\n");
400                 goto err_bus;
401         }
402
403         /* Loop through master resources */
404         list_for_each(master_pos, &(bridge->master_resources)) {
405                 master_image = list_entry(master_pos,
406                         struct vme_master_resource, list);
407
408                 if (master_image == NULL) {
409                         printk(KERN_WARNING "Registered NULL master resource\n");
410                         continue;
411                 }
412
413                 /* Find an unlocked and compatible image */
414                 spin_lock(&(master_image->lock));
415                 if (((master_image->address_attr & address) == address) &&
416                         ((master_image->cycle_attr & cycle) == cycle) &&
417                         ((master_image->width_attr & dwidth) == dwidth) &&
418                         (master_image->locked == 0)) {
419
420                         master_image->locked = 1;
421                         spin_unlock(&(master_image->lock));
422                         allocated_image = master_image;
423                         break;
424                 }
425                 spin_unlock(&(master_image->lock));
426         }
427
428         /* Check to see if we found a resource */
429         if (allocated_image == NULL) {
430                 printk(KERN_ERR "Can't find a suitable resource\n");
431                 goto err_image;
432         }
433
434         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
435         if (resource == NULL) {
436                 printk(KERN_ERR "Unable to allocate resource structure\n");
437                 goto err_alloc;
438         }
439         resource->type = VME_MASTER;
440         resource->entry = &(allocated_image->list);
441
442         return resource;
443
444         kfree(resource);
445 err_alloc:
446         /* Unlock image */
447         spin_lock(&(master_image->lock));
448         master_image->locked = 0;
449         spin_unlock(&(master_image->lock));
450 err_image:
451 err_bus:
452         return NULL;
453 }
454 EXPORT_SYMBOL(vme_master_request);
455
456 int vme_master_set(struct vme_resource *resource, int enabled,
457         unsigned long long vme_base, unsigned long long size,
458         vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
459 {
460         struct vme_bridge *bridge = find_bridge(resource);
461         struct vme_master_resource *image;
462         int retval;
463
464         if (resource->type != VME_MASTER) {
465                 printk(KERN_ERR "Not a master resource\n");
466                 return -EINVAL;
467         }
468
469         image = list_entry(resource->entry, struct vme_master_resource, list);
470
471         if (bridge->master_set == NULL) {
472                 printk(KERN_WARNING "vme_master_set not supported\n");
473                 return -EINVAL;
474         }
475
476         if (!(((image->address_attr & aspace) == aspace) &&
477                 ((image->cycle_attr & cycle) == cycle) &&
478                 ((image->width_attr & dwidth) == dwidth))) {
479                 printk(KERN_WARNING "Invalid attributes\n");
480                 return -EINVAL;
481         }
482
483         retval = vme_check_window(aspace, vme_base, size);
484         if (retval)
485                 return retval;
486
487         return bridge->master_set(image, enabled, vme_base, size, aspace,
488                 cycle, dwidth);
489 }
490 EXPORT_SYMBOL(vme_master_set);
491
492 int vme_master_get(struct vme_resource *resource, int *enabled,
493         unsigned long long *vme_base, unsigned long long *size,
494         vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
495 {
496         struct vme_bridge *bridge = find_bridge(resource);
497         struct vme_master_resource *image;
498
499         if (resource->type != VME_MASTER) {
500                 printk(KERN_ERR "Not a master resource\n");
501                 return -EINVAL;
502         }
503
504         image = list_entry(resource->entry, struct vme_master_resource, list);
505
506         if (bridge->master_get == NULL) {
507                 printk(KERN_WARNING "vme_master_get not supported\n");
508                 return -EINVAL;
509         }
510
511         return bridge->master_get(image, enabled, vme_base, size, aspace,
512                 cycle, dwidth);
513 }
514 EXPORT_SYMBOL(vme_master_get);
515
516 /*
517  * Read data out of VME space into a buffer.
518  */
519 ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
520         loff_t offset)
521 {
522         struct vme_bridge *bridge = find_bridge(resource);
523         struct vme_master_resource *image;
524         size_t length;
525
526         if (bridge->master_read == NULL) {
527                 printk(KERN_WARNING "Reading from resource not supported\n");
528                 return -EINVAL;
529         }
530
531         if (resource->type != VME_MASTER) {
532                 printk(KERN_ERR "Not a master resource\n");
533                 return -EINVAL;
534         }
535
536         image = list_entry(resource->entry, struct vme_master_resource, list);
537
538         length = vme_get_size(resource);
539
540         if (offset > length) {
541                 printk(KERN_WARNING "Invalid Offset\n");
542                 return -EFAULT;
543         }
544
545         if ((offset + count) > length)
546                 count = length - offset;
547
548         return bridge->master_read(image, buf, count, offset);
549
550 }
551 EXPORT_SYMBOL(vme_master_read);
552
553 /*
554  * Write data out to VME space from a buffer.
555  */
556 ssize_t vme_master_write(struct vme_resource *resource, void *buf,
557         size_t count, loff_t offset)
558 {
559         struct vme_bridge *bridge = find_bridge(resource);
560         struct vme_master_resource *image;
561         size_t length;
562
563         if (bridge->master_write == NULL) {
564                 printk(KERN_WARNING "Writing to resource not supported\n");
565                 return -EINVAL;
566         }
567
568         if (resource->type != VME_MASTER) {
569                 printk(KERN_ERR "Not a master resource\n");
570                 return -EINVAL;
571         }
572
573         image = list_entry(resource->entry, struct vme_master_resource, list);
574
575         length = vme_get_size(resource);
576
577         if (offset > length) {
578                 printk(KERN_WARNING "Invalid Offset\n");
579                 return -EFAULT;
580         }
581
582         if ((offset + count) > length)
583                 count = length - offset;
584
585         return bridge->master_write(image, buf, count, offset);
586 }
587 EXPORT_SYMBOL(vme_master_write);
588
589 /*
590  * Perform RMW cycle to provided location.
591  */
592 unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
593         unsigned int compare, unsigned int swap, loff_t offset)
594 {
595         struct vme_bridge *bridge = find_bridge(resource);
596         struct vme_master_resource *image;
597
598         if (bridge->master_rmw == NULL) {
599                 printk(KERN_WARNING "RMW to resource not supported\n");
600                 return -EINVAL;
601         }
602
603         if (resource->type != VME_MASTER) {
604                 printk(KERN_ERR "Not a master resource\n");
605                 return -EINVAL;
606         }
607
608         image = list_entry(resource->entry, struct vme_master_resource, list);
609
610         return bridge->master_rmw(image, mask, compare, swap, offset);
611 }
612 EXPORT_SYMBOL(vme_master_rmw);
613
614 void vme_master_free(struct vme_resource *resource)
615 {
616         struct vme_master_resource *master_image;
617
618         if (resource->type != VME_MASTER) {
619                 printk(KERN_ERR "Not a master resource\n");
620                 return;
621         }
622
623         master_image = list_entry(resource->entry, struct vme_master_resource,
624                 list);
625         if (master_image == NULL) {
626                 printk(KERN_ERR "Can't find master resource\n");
627                 return;
628         }
629
630         /* Unlock image */
631         spin_lock(&(master_image->lock));
632         if (master_image->locked == 0)
633                 printk(KERN_ERR "Image is already free\n");
634
635         master_image->locked = 0;
636         spin_unlock(&(master_image->lock));
637
638         /* Free up resource memory */
639         kfree(resource);
640 }
641 EXPORT_SYMBOL(vme_master_free);
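
/*
 * Illustrative sketch, not part of the framework: requesting a master window,
 * pointing it at a VME address range and reading a single word through it.
 * The A24/SCT/D32 attributes and the 64k window are assumptions made for the
 * example.
 */
static ssize_t __maybe_unused vme_example_master(struct device *dev, u32 *data)
{
        struct vme_resource *res;
        ssize_t count;
        int retval;

        res = vme_master_request(dev, VME_A24, VME_SCT, VME_D32);
        if (res == NULL)
                return -ENODEV;

        retval = vme_master_set(res, 1, 0x0, 0x10000, VME_A24, VME_SCT,
                VME_D32);
        if (retval) {
                vme_master_free(res);
                return retval;
        }

        /* Read one 32-bit word from offset 0 of the enabled window */
        count = vme_master_read(res, data, sizeof(*data), 0);

        vme_master_free(res);

        return count;
}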
642
643 /*
644  * Request a DMA controller with specific attributes, return some unique
645  * identifier.
646  */
647 struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
648 {
649         struct vme_bridge *bridge;
650         struct list_head *dma_pos = NULL;
651         struct vme_dma_resource *allocated_ctrlr = NULL;
652         struct vme_dma_resource *dma_ctrlr = NULL;
653         struct vme_resource *resource = NULL;
654
655         /* XXX Not checking resource attributes */
656         printk(KERN_ERR "No VME resource attribute tests done\n");
657
658         bridge = dev_to_bridge(dev);
659         if (bridge == NULL) {
660                 printk(KERN_ERR "Can't find VME bus\n");
661                 goto err_bus;
662         }
663
664         /* Loop through DMA resources */
665         list_for_each(dma_pos, &(bridge->dma_resources)) {
666                 dma_ctrlr = list_entry(dma_pos,
667                         struct vme_dma_resource, list);
668
669                 if (dma_ctrlr == NULL) {
670                         printk(KERN_ERR "Registered NULL DMA resource\n");
671                         continue;
672                 }
673
674                 /* Find an unlocked and compatible controller */
675                 mutex_lock(&(dma_ctrlr->mtx));
676                 if (((dma_ctrlr->route_attr & route) == route) &&
677                         (dma_ctrlr->locked == 0)) {
678
679                         dma_ctrlr->locked = 1;
680                         mutex_unlock(&(dma_ctrlr->mtx));
681                         allocated_ctrlr = dma_ctrlr;
682                         break;
683                 }
684                 mutex_unlock(&(dma_ctrlr->mtx));
685         }
686
687         /* Check to see if we found a resource */
688         if (allocated_ctrlr == NULL)
689                 goto err_ctrlr;
690
691         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
692         if (resource == NULL) {
693                 printk(KERN_WARNING "Unable to allocate resource structure\n");
694                 goto err_alloc;
695         }
696         resource->type = VME_DMA;
697         resource->entry = &(allocated_ctrlr->list);
698
699         return resource;
700
701 err_alloc:
702         /* Unlock image */
703         mutex_lock(&(dma_ctrlr->mtx));
704         dma_ctrlr->locked = 0;
705         mutex_unlock(&(dma_ctrlr->mtx));
706 err_ctrlr:
707 err_bus:
708         return NULL;
709 }
710 EXPORT_SYMBOL(vme_dma_request);
711
712 /*
713  * Start new list
714  */
715 struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
716 {
717         struct vme_dma_resource *ctrlr;
718         struct vme_dma_list *dma_list;
719
720         if (resource->type != VME_DMA) {
721                 printk(KERN_ERR "Not a DMA resource\n");
722                 return NULL;
723         }
724
725         ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
726
727         dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
728         if (dma_list == NULL) {
729                 printk(KERN_ERR "Unable to allocate memory for new dma list\n");
730                 return NULL;
731         }
732         INIT_LIST_HEAD(&(dma_list->entries));
733         dma_list->parent = ctrlr;
734         mutex_init(&(dma_list->mtx));
735
736         return dma_list;
737 }
738 EXPORT_SYMBOL(vme_new_dma_list);
739
740 /*
741  * Create "Pattern" type attributes
742  */
743 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
744         vme_pattern_t type)
745 {
746         struct vme_dma_attr *attributes;
747         struct vme_dma_pattern *pattern_attr;
748
749         attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
750         if (attributes == NULL) {
751                 printk(KERN_ERR "Unable to allocate memory for attributes "
752                         "structure\n");
753                 goto err_attr;
754         }
755
756         pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
757         if (pattern_attr == NULL) {
758                 printk(KERN_ERR "Unable to allocate memory for pattern "
759                         "attributes\n");
760                 goto err_pat;
761         }
762
763         attributes->type = VME_DMA_PATTERN;
764         attributes->private = (void *)pattern_attr;
765
766         pattern_attr->pattern = pattern;
767         pattern_attr->type = type;
768
769         return attributes;
770
771         kfree(pattern_attr);
772 err_pat:
773         kfree(attributes);
774 err_attr:
775         return NULL;
776 }
777 EXPORT_SYMBOL(vme_dma_pattern_attribute);
778
779 /*
780  * Create "PCI" type attributes
781  */
782 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
783 {
784         struct vme_dma_attr *attributes;
785         struct vme_dma_pci *pci_attr;
786
787         /* XXX Run some sanity checks here */
788
789         attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
790         if (attributes == NULL) {
791                 printk(KERN_ERR "Unable to allocate memory for attributes "
792                         "structure\n");
793                 goto err_attr;
794         }
795
796         pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
797         if (pci_attr == NULL) {
798                 printk(KERN_ERR "Unable to allocate memory for pci "
799                         "attributes\n");
800                 goto err_pci;
801         }
802
803
804
805         attributes->type = VME_DMA_PCI;
806         attributes->private = (void *)pci_attr;
807
808         pci_attr->address = address;
809
810         return attributes;
811
812         kfree(pci_attr);
813 err_pci:
814         kfree(attributes);
815 err_attr:
816         return NULL;
817 }
818 EXPORT_SYMBOL(vme_dma_pci_attribute);
819
820 /*
821  * Create "VME" type attributes
822  */
823 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
824         vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
825 {
826         struct vme_dma_attr *attributes;
827         struct vme_dma_vme *vme_attr;
828
829         attributes = kmalloc(
830                 sizeof(struct vme_dma_attr), GFP_KERNEL);
831         if (attributes == NULL) {
832                 printk(KERN_ERR "Unable to allocate memory for attributes "
833                         "structure\n");
834                 goto err_attr;
835         }
836
837         vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
838         if (vme_attr == NULL) {
839                 printk(KERN_ERR "Unable to allocate memory for vme "
840                         "attributes\n");
841                 goto err_vme;
842         }
843
844         attributes->type = VME_DMA_VME;
845         attributes->private = (void *)vme_attr;
846
847         vme_attr->address = address;
848         vme_attr->aspace = aspace;
849         vme_attr->cycle = cycle;
850         vme_attr->dwidth = dwidth;
851
852         return attributes;
853
854         kfree(vme_attr);
855 err_vme:
856         kfree(attributes);
857 err_attr:
858         return NULL;
859 }
860 EXPORT_SYMBOL(vme_dma_vme_attribute);
861
862 /*
863  * Free attribute
864  */
865 void vme_dma_free_attribute(struct vme_dma_attr *attributes)
866 {
867         kfree(attributes->private);
868         kfree(attributes);
869 }
870 EXPORT_SYMBOL(vme_dma_free_attribute);
871
872 int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
873         struct vme_dma_attr *dest, size_t count)
874 {
875         struct vme_bridge *bridge = list->parent->parent;
876         int retval;
877
878         if (bridge->dma_list_add == NULL) {
879                 printk(KERN_WARNING "Link List DMA generation not supported\n");
880                 return -EINVAL;
881         }
882
883         if (!mutex_trylock(&(list->mtx))) {
884                 printk(KERN_ERR "Link List already submitted\n");
885                 return -EINVAL;
886         }
887
888         retval = bridge->dma_list_add(list, src, dest, count);
889
890         mutex_unlock(&(list->mtx));
891
892         return retval;
893 }
894 EXPORT_SYMBOL(vme_dma_list_add);
895
896 int vme_dma_list_exec(struct vme_dma_list *list)
897 {
898         struct vme_bridge *bridge = list->parent->parent;
899         int retval;
900
901         if (bridge->dma_list_exec == NULL) {
902                 printk(KERN_ERR "Link List DMA execution not supported\n");
903                 return -EINVAL;
904         }
905
906         mutex_lock(&(list->mtx));
907
908         retval = bridge->dma_list_exec(list);
909
910         mutex_unlock(&(list->mtx));
911
912         return retval;
913 }
914 EXPORT_SYMBOL(vme_dma_list_exec);
915
916 int vme_dma_list_free(struct vme_dma_list *list)
917 {
918         struct vme_bridge *bridge = list->parent->parent;
919         int retval;
920
921         if (bridge->dma_list_empty == NULL) {
922                 printk(KERN_WARNING "Emptying of Link Lists not supported\n");
923                 return -EINVAL;
924         }
925
926         if (!mutex_trylock(&(list->mtx))) {
927                 printk(KERN_ERR "Link List in use\n");
928                 return -EINVAL;
929         }
930
931         /*
932          * Empty out all of the entries from the dma list. We need to go to the
933          * low level driver as dma entries are driver specific.
934          */
935         retval = bridge->dma_list_empty(list);
936         if (retval) {
937                 printk(KERN_ERR "Unable to empty link-list entries\n");
938                 mutex_unlock(&(list->mtx));
939                 return retval;
940         }
941         mutex_unlock(&(list->mtx));
942         kfree(list);
943
944         return retval;
945 }
946 EXPORT_SYMBOL(vme_dma_list_free);
947
948 int vme_dma_free(struct vme_resource *resource)
949 {
950         struct vme_dma_resource *ctrlr;
951
952         if (resource->type != VME_DMA) {
953                 printk(KERN_ERR "Not a DMA resource\n");
954                 return -EINVAL;
955         }
956
957         ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
958
959         if (!mutex_trylock(&(ctrlr->mtx))) {
960                 printk(KERN_ERR "Resource busy, can't free\n");
961                 return -EBUSY;
962         }
963
964         if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
965                 printk(KERN_WARNING "Resource still processing transfers\n");
966                 mutex_unlock(&(ctrlr->mtx));
967                 return -EBUSY;
968         }
969
970         ctrlr->locked = 0;
971
972         mutex_unlock(&(ctrlr->mtx));
973
974         return 0;
975 }
976 EXPORT_SYMBOL(vme_dma_free);
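
/*
 * Illustrative sketch, not part of the framework: building and executing a
 * single-entry DMA list that copies a buffer from PCI memory to the VME bus.
 * The VME_DMA_MEM_TO_VME route and the A32/SCT/D32 destination attributes are
 * assumed to be provided by vme.h; the addresses and length come from the
 * caller.
 */
static int __maybe_unused vme_example_dma(struct device *dev,
        dma_addr_t src_addr, unsigned long long dest_addr, size_t len)
{
        struct vme_resource *res;
        struct vme_dma_list *list;
        struct vme_dma_attr *src, *dest;
        int retval;

        res = vme_dma_request(dev, VME_DMA_MEM_TO_VME);
        if (res == NULL)
                return -ENODEV;

        list = vme_new_dma_list(res);
        if (list == NULL) {
                vme_dma_free(res);
                return -ENOMEM;
        }

        src = vme_dma_pci_attribute(src_addr);
        dest = vme_dma_vme_attribute(dest_addr, VME_A32, VME_SCT, VME_D32);
        if ((src == NULL) || (dest == NULL)) {
                retval = -ENOMEM;
                goto out;
        }

        retval = vme_dma_list_add(list, src, dest, len);
        if (retval == 0)
                retval = vme_dma_list_exec(list);

out:
        vme_dma_list_free(list);
        if (src != NULL)
                vme_dma_free_attribute(src);
        if (dest != NULL)
                vme_dma_free_attribute(dest);
        vme_dma_free(res);

        return retval;
}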
977
978 void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
979 {
980         void (*call)(int, int, void *);
981         void *priv_data;
982
983         call = bridge->irq[level - 1].callback[statid].func;
984         priv_data = bridge->irq[level - 1].callback[statid].priv_data;
985
986         if (call != NULL)
987                 call(level, statid, priv_data);
988         else
989                 printk(KERN_WARNING "Spurious VME interrupt, level:%x, "
990                         "vector:%x\n", level, statid);
991 }
992 EXPORT_SYMBOL(vme_irq_handler);
993
994 int vme_irq_request(struct device *dev, int level, int statid,
995         void (*callback)(int, int, void *),
996         void *priv_data)
997 {
998         struct vme_bridge *bridge;
999
1000         bridge = dev_to_bridge(dev);
1001         if (bridge == NULL) {
1002                 printk(KERN_ERR "Can't find VME bus\n");
1003                 return -EINVAL;
1004         }
1005
1006         if ((level < 1) || (level > 7)) {
1007                 printk(KERN_ERR "Invalid interrupt level\n");
1008                 return -EINVAL;
1009         }
1010
1011         if (bridge->irq_set == NULL) {
1012                 printk(KERN_ERR "Configuring interrupts not supported\n");
1013                 return -EINVAL;
1014         }
1015
1016         mutex_lock(&(bridge->irq_mtx));
1017
1018         if (bridge->irq[level - 1].callback[statid].func) {
1019                 mutex_unlock(&(bridge->irq_mtx));
1020                 printk(KERN_WARNING "VME Interrupt already taken\n");
1021                 return -EBUSY;
1022         }
1023
1024         bridge->irq[level - 1].count++;
1025         bridge->irq[level - 1].callback[statid].priv_data = priv_data;
1026         bridge->irq[level - 1].callback[statid].func = callback;
1027
1028         /* Enable IRQ level */
1029         bridge->irq_set(bridge, level, 1, 1);
1030
1031         mutex_unlock(&(bridge->irq_mtx));
1032
1033         return 0;
1034 }
1035 EXPORT_SYMBOL(vme_irq_request);
1036
1037 void vme_irq_free(struct device *dev, int level, int statid)
1038 {
1039         struct vme_bridge *bridge;
1040
1041         bridge = dev_to_bridge(dev);
1042         if (bridge == NULL) {
1043                 printk(KERN_ERR "Can't find VME bus\n");
1044                 return;
1045         }
1046
1047         if ((level < 1) || (level > 7)) {
1048                 printk(KERN_ERR "Invalid interrupt level\n");
1049                 return;
1050         }
1051
1052         if (bridge->irq_set == NULL) {
1053                 printk(KERN_ERR "Configuring interrupts not supported\n");
1054                 return;
1055         }
1056
1057         mutex_lock(&(bridge->irq_mtx));
1058
1059         bridge->irq[level - 1].count--;
1060
1061         /* Disable IRQ level if no more interrupts attached at this level*/
1062         if (bridge->irq[level - 1].count == 0)
1063                 bridge->irq_set(bridge, level, 0, 1);
1064
1065         bridge->irq[level - 1].callback[statid].func = NULL;
1066         bridge->irq[level - 1].callback[statid].priv_data = NULL;
1067
1068         mutex_unlock(&(bridge->irq_mtx));
1069 }
1070 EXPORT_SYMBOL(vme_irq_free);
1071
1072 int vme_irq_generate(struct device *dev, int level, int statid)
1073 {
1074         struct vme_bridge *bridge;
1075
1076         bridge = dev_to_bridge(dev);
1077         if (bridge == NULL) {
1078                 printk(KERN_ERR "Can't find VME bus\n");
1079                 return -EINVAL;
1080         }
1081
1082         if ((level < 1) || (level > 7)) {
1083                 printk(KERN_WARNING "Invalid interrupt level\n");
1084                 return -EINVAL;
1085         }
1086
1087         if (bridge->irq_generate == NULL) {
1088                 printk(KERN_WARNING "Interrupt generation not supported\n");
1089                 return -EINVAL;
1090         }
1091
1092         return bridge->irq_generate(bridge, level, statid);
1093 }
1094 EXPORT_SYMBOL(vme_irq_generate);
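
/*
 * Illustrative sketch, not part of the framework: attaching a callback to a
 * VME interrupt level/status-ID pair and releasing it again. Level 3 and
 * status/ID 0x20 are arbitrary values chosen for the example.
 */
static void vme_example_isr(int level, int statid, void *priv_data)
{
        /* Called from the bridge driver's interrupt handling */
        printk(KERN_DEBUG "VME interrupt: level %d, vector %#x\n", level,
                statid);
}

static int __maybe_unused vme_example_irq(struct device *dev)
{
        int retval;

        retval = vme_irq_request(dev, 3, 0x20, vme_example_isr, NULL);
        if (retval)
                return retval;

        /* ... interrupts at level 3, vector 0x20 now call the handler ... */

        vme_irq_free(dev, 3, 0x20);

        return 0;
}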
1095
1096 /*
1097  * Request the location monitor, return resource or NULL
1098  */
1099 struct vme_resource *vme_lm_request(struct device *dev)
1100 {
1101         struct vme_bridge *bridge;
1102         struct list_head *lm_pos = NULL;
1103         struct vme_lm_resource *allocated_lm = NULL;
1104         struct vme_lm_resource *lm = NULL;
1105         struct vme_resource *resource = NULL;
1106
1107         bridge = dev_to_bridge(dev);
1108         if (bridge == NULL) {
1109                 printk(KERN_ERR "Can't find VME bus\n");
1110                 goto err_bus;
1111         }
1112
1113         /* Loop through DMA resources */
1114         list_for_each(lm_pos, &(bridge->lm_resources)) {
1115                 lm = list_entry(lm_pos,
1116                         struct vme_lm_resource, list);
1117
1118                 if (lm == NULL) {
1119                         printk(KERN_ERR "Registered NULL Location Monitor "
1120                                 "resource\n");
1121                         continue;
1122                 }
1123
1124                 /* Find an unlocked controller */
1125                 mutex_lock(&(lm->mtx));
1126                 if (lm->locked == 0) {
1127                         lm->locked = 1;
1128                         mutex_unlock(&(lm->mtx));
1129                         allocated_lm = lm;
1130                         break;
1131                 }
1132                 mutex_unlock(&(lm->mtx));
1133         }
1134
1135         /* Check to see if we found a resource */
1136         if (allocated_lm == NULL)
1137                 goto err_lm;
1138
1139         resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1140         if (resource == NULL) {
1141                 printk(KERN_ERR "Unable to allocate resource structure\n");
1142                 goto err_alloc;
1143         }
1144         resource->type = VME_LM;
1145         resource->entry = &(allocated_lm->list);
1146
1147         return resource;
1148
1149 err_alloc:
1150         /* Unlock image */
1151         mutex_lock(&(lm->mtx));
1152         lm->locked = 0;
1153         mutex_unlock(&(lm->mtx));
1154 err_lm:
1155 err_bus:
1156         return NULL;
1157 }
1158 EXPORT_SYMBOL(vme_lm_request);
1159
1160 int vme_lm_count(struct vme_resource *resource)
1161 {
1162         struct vme_lm_resource *lm;
1163
1164         if (resource->type != VME_LM) {
1165                 printk(KERN_ERR "Not a Location Monitor resource\n");
1166                 return -EINVAL;
1167         }
1168
1169         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1170
1171         return lm->monitors;
1172 }
1173 EXPORT_SYMBOL(vme_lm_count);
1174
1175 int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1176         vme_address_t aspace, vme_cycle_t cycle)
1177 {
1178         struct vme_bridge *bridge = find_bridge(resource);
1179         struct vme_lm_resource *lm;
1180
1181         if (resource->type != VME_LM) {
1182                 printk(KERN_ERR "Not a Location Monitor resource\n");
1183                 return -EINVAL;
1184         }
1185
1186         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1187
1188         if (bridge->lm_set == NULL) {
1189                 printk(KERN_ERR "vme_lm_set not supported\n");
1190                 return -EINVAL;
1191         }
1192
1193         return bridge->lm_set(lm, lm_base, aspace, cycle);
1194 }
1195 EXPORT_SYMBOL(vme_lm_set);
1196
1197 int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1198         vme_address_t *aspace, vme_cycle_t *cycle)
1199 {
1200         struct vme_bridge *bridge = find_bridge(resource);
1201         struct vme_lm_resource *lm;
1202
1203         if (resource->type != VME_LM) {
1204                 printk(KERN_ERR "Not a Location Monitor resource\n");
1205                 return -EINVAL;
1206         }
1207
1208         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1209
1210         if (bridge->lm_get == NULL) {
1211                 printk(KERN_ERR "vme_lm_get not supported\n");
1212                 return -EINVAL;
1213         }
1214
1215         return bridge->lm_get(lm, lm_base, aspace, cycle);
1216 }
1217 EXPORT_SYMBOL(vme_lm_get);
1218
1219 int vme_lm_attach(struct vme_resource *resource, int monitor,
1220         void (*callback)(int))
1221 {
1222         struct vme_bridge *bridge = find_bridge(resource);
1223         struct vme_lm_resource *lm;
1224
1225         if (resource->type != VME_LM) {
1226                 printk(KERN_ERR "Not a Location Monitor resource\n");
1227                 return -EINVAL;
1228         }
1229
1230         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1231
1232         if (bridge->lm_attach == NULL) {
1233                 printk(KERN_ERR "vme_lm_attach not supported\n");
1234                 return -EINVAL;
1235         }
1236
1237         return bridge->lm_attach(lm, monitor, callback);
1238 }
1239 EXPORT_SYMBOL(vme_lm_attach);
1240
1241 int vme_lm_detach(struct vme_resource *resource, int monitor)
1242 {
1243         struct vme_bridge *bridge = find_bridge(resource);
1244         struct vme_lm_resource *lm;
1245
1246         if (resource->type != VME_LM) {
1247                 printk(KERN_ERR "Not a Location Monitor resource\n");
1248                 return -EINVAL;
1249         }
1250
1251         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1252
1253         if (bridge->lm_detach == NULL) {
1254                 printk(KERN_ERR "vme_lm_detach not supported\n");
1255                 return -EINVAL;
1256         }
1257
1258         return bridge->lm_detach(lm, monitor);
1259 }
1260 EXPORT_SYMBOL(vme_lm_detach);
1261
1262 void vme_lm_free(struct vme_resource *resource)
1263 {
1264         struct vme_lm_resource *lm;
1265
1266         if (resource->type != VME_LM) {
1267                 printk(KERN_ERR "Not a Location Monitor resource\n");
1268                 return;
1269         }
1270
1271         lm = list_entry(resource->entry, struct vme_lm_resource, list);
1272
1273         mutex_lock(&(lm->mtx));
1274
1275         /* XXX
1276          * Check to see that there aren't any callbacks still attached, if
1277          * there are we should probably be detaching them!
1278          */
1279
1280         lm->locked = 0;
1281
1282         mutex_unlock(&(lm->mtx));
1283
1284         kfree(resource);
1285 }
1286 EXPORT_SYMBOL(vme_lm_free);
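
/*
 * Illustrative sketch, not part of the framework: claiming a location
 * monitor, pointing it at a VME A24 address and attaching a callback to the
 * first monitored location. The base address and cycle attributes are
 * assumptions made for the example.
 */
static void vme_example_lm_callback(int monitor)
{
        printk(KERN_DEBUG "Location monitor %d triggered\n", monitor);
}

static int __maybe_unused vme_example_lm(struct device *dev)
{
        struct vme_resource *res;
        int retval;

        res = vme_lm_request(dev);
        if (res == NULL)
                return -ENODEV;

        retval = vme_lm_set(res, 0x60000, VME_A24, VME_SCT | VME_DATA);
        if (retval)
                goto out;

        retval = vme_lm_attach(res, 0, vme_example_lm_callback);
        if (retval)
                goto out;

        /* ... accesses to the monitored location now fire the callback ... */

        vme_lm_detach(res, 0);
out:
        vme_lm_free(res);
        return retval;
}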
1287
1288 int vme_slot_get(struct device *bus)
1289 {
1290         struct vme_bridge *bridge;
1291
1292         bridge = dev_to_bridge(bus);
1293         if (bridge == NULL) {
1294                 printk(KERN_ERR "Can't find VME bus\n");
1295                 return -EINVAL;
1296         }
1297
1298         if (bridge->slot_get == NULL) {
1299                 printk(KERN_WARNING "vme_slot_get not supported\n");
1300                 return -EINVAL;
1301         }
1302
1303         return bridge->slot_get(bridge);
1304 }
1305 EXPORT_SYMBOL(vme_slot_get);
1306
1307
1308 /* - Bridge Registration --------------------------------------------------- */
1309
1310 static int vme_alloc_bus_num(void)
1311 {
1312         int i;
1313
1314         mutex_lock(&vme_bus_num_mtx);
1315         for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1316                 if (((vme_bus_numbers >> i) & 0x1) == 0) {
1317                         vme_bus_numbers |= (0x1 << i);
1318                         break;
1319                 }
1320         }
1321         mutex_unlock(&vme_bus_num_mtx);
1322
1323         return i;
1324 }
1325
1326 static void vme_free_bus_num(int bus)
1327 {
1328         mutex_lock(&vme_bus_num_mtx);
1329         vme_bus_numbers &= ~(0x1 << bus);
1330         mutex_unlock(&vme_bus_num_mtx);
1331 }
1332
1333 int vme_register_bridge(struct vme_bridge *bridge)
1334 {
1335         struct device *dev;
1336         int retval;
1337         int i;
1338
1339         bridge->num = vme_alloc_bus_num();
1340
1341         /* This creates 32 vme "slot" devices. This equates to a slot for each
1342          * ID available in a system conforming to the ANSI/VITA 1-1994
1343          * specification.
1344          */
1345         for (i = 0; i < VME_SLOTS_MAX; i++) {
1346                 dev = &(bridge->dev[i]);
1347                 memset(dev, 0, sizeof(struct device));
1348
1349                 dev->parent = bridge->parent;
1350                 dev->bus = &(vme_bus_type);
1351                 /*
1352                  * We save a pointer to the bridge in platform_data so that we
1353                  * can get to it later. We keep driver_data for use by the
1354                  * driver that binds against the slot
1355                  */
1356                 dev->platform_data = bridge;
1357                 dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);
1358
1359                 retval = device_register(dev);
1360                 if (retval)
1361                         goto err_reg;
1362         }
1363
1364         return retval;
1365
1366 err_reg:
1367         /* Unregister only the devices registered before the failure */
1368         while (--i >= 0) {
1369                 dev = &(bridge->dev[i]);
1370                 device_unregister(dev);
1371         }
1372         vme_free_bus_num(bridge->num);
1373         return retval;
1374 }
1375 EXPORT_SYMBOL(vme_register_bridge);
1376
1377 void vme_unregister_bridge(struct vme_bridge *bridge)
1378 {
1379         int i;
1380         struct device *dev;
1381
1382
1383         for (i = 0; i < VME_SLOTS_MAX; i++) {
1384                 dev = &(bridge->dev[i]);
1385                 device_unregister(dev);
1386         }
1387         vme_free_bus_num(bridge->num);
1388 }
1389 EXPORT_SYMBOL(vme_unregister_bridge);
1390
1391
1392 /* - Driver Registration --------------------------------------------------- */
1393
1394 int vme_register_driver(struct vme_driver *drv)
1395 {
1396         drv->driver.name = drv->name;
1397         drv->driver.bus = &vme_bus_type;
1398
1399         return driver_register(&drv->driver);
1400 }
1401 EXPORT_SYMBOL(vme_register_driver);
1402
1403 void vme_unregister_driver(struct vme_driver *drv)
1404 {
1405         driver_unregister(&drv->driver);
1406 }
1407 EXPORT_SYMBOL(vme_unregister_driver);
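
/*
 * Illustrative sketch, not part of the framework: the shape of a minimal slot
 * driver registration. The bind-table entry type is assumed to be the
 * struct vme_device_id declared in vme.h; the probe/remove prototypes match
 * the calls made from vme_bus_probe() and vme_bus_remove() below.
 */
static int vme_example_probe(struct device *dev, int bus, int slot)
{
        dev_info(dev, "example driver bound to bus %d, slot %d\n", bus, slot);
        return 0;
}

static int vme_example_remove(struct device *dev, int bus, int slot)
{
        return 0;
}

static struct vme_device_id vme_example_ids[] = {
        { .bus = 0, .slot = VME_SLOT_ALL },
        { },    /* A bus and slot of zero terminates the table */
};

static struct vme_driver vme_example_driver __maybe_unused = {
        .name = "vme_example",
        .bind_table = vme_example_ids,
        .probe = vme_example_probe,
        .remove = vme_example_remove,
};

/*
 * A module's init function would then call
 * vme_register_driver(&vme_example_driver).
 */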
1408
1409 /* - Bus Registration ------------------------------------------------------ */
1410
1411 int vme_calc_slot(struct device *dev)
1412 {
1413         struct vme_bridge *bridge;
1414         int num;
1415
1416         bridge = dev_to_bridge(dev);
1417
1418         /* Determine slot number */
1419         num = 0;
1420         while (num < VME_SLOTS_MAX) {
1421                 if (&(bridge->dev[num]) == dev)
1422                         break;
1423
1424                 num++;
1425         }
1426         if (num == VME_SLOTS_MAX) {
1427                 dev_err(dev, "Failed to identify slot\n");
1428                 num = 0;
1429                 goto err_dev;
1430         }
1431         num++;
1432
1433 err_dev:
1434         return num;
1435 }
1436
1437 static struct vme_driver *dev_to_vme_driver(struct device *dev)
1438 {
1439         if (dev->driver == NULL)
1440                 printk(KERN_ERR "dev->driver is NULL\n");
1441
1442         return container_of(dev->driver, struct vme_driver, driver);
1443 }
1444
1445 static int vme_bus_match(struct device *dev, struct device_driver *drv)
1446 {
1447         struct vme_bridge *bridge;
1448         struct vme_driver *driver;
1449         int i, num;
1450
1451         bridge = dev_to_bridge(dev);
1452         driver = container_of(drv, struct vme_driver, driver);
1453
1454         num = vme_calc_slot(dev);
1455         if (!num)
1456                 goto err_dev;
1457
1458         if (driver->bind_table == NULL) {
1459                 dev_err(dev, "Bind table NULL\n");
1460                 goto err_table;
1461         }
1462
1463         i = 0;
1464         while ((driver->bind_table[i].bus != 0) ||
1465                 (driver->bind_table[i].slot != 0)) {
1466
1467                 if (bridge->num == driver->bind_table[i].bus) {
1468                         if (num == driver->bind_table[i].slot)
1469                                 return 1;
1470
1471                         if (driver->bind_table[i].slot == VME_SLOT_ALL)
1472                                 return 1;
1473
1474                         if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
1475                                 (num == vme_slot_get(dev)))
1476                                 return 1;
1477                 }
1478                 i++;
1479         }
1480
1481 err_dev:
1482 err_table:
1483         return 0;
1484 }
1485
1486 static int vme_bus_probe(struct device *dev)
1487 {
1488         struct vme_bridge *bridge;
1489         struct vme_driver *driver;
1490         int retval = -ENODEV;
1491
1492         driver = dev_to_vme_driver(dev);
1493         bridge = dev_to_bridge(dev);
1494
1495         if (driver->probe != NULL)
1496                 retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
1497
1498         return retval;
1499 }
1500
1501 static int vme_bus_remove(struct device *dev)
1502 {
1503         struct vme_bridge *bridge;
1504         struct vme_driver *driver;
1505         int retval = -ENODEV;
1506
1507         driver = dev_to_vme_driver(dev);
1508         bridge = dev_to_bridge(dev);
1509
1510         if (driver->remove != NULL)
1511                 retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
1512
1513         return retval;
1514 }
1515
1516 struct bus_type vme_bus_type = {
1517         .name = "vme",
1518         .match = vme_bus_match,
1519         .probe = vme_bus_probe,
1520         .remove = vme_bus_remove,
1521 };
1522 EXPORT_SYMBOL(vme_bus_type);
1523
1524 static int __init vme_init(void)
1525 {
1526         return bus_register(&vme_bus_type);
1527 }
1528
1529 static void __exit vme_exit(void)
1530 {
1531         bus_unregister(&vme_bus_type);
1532 }
1533
1534 MODULE_DESCRIPTION("VME bridge driver framework");
1535 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
1536 MODULE_LICENSE("GPL");
1537
1538 module_init(vme_init);
1539 module_exit(vme_exit);