/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mutex.h>

#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>
#include <asm/xmon.h>

#include "interrupt.h"
const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);
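/*
 * All access to the privileged state 1 area is routed through this ops
 * vector, so the same code can run both on bare metal and under a
 * hypervisor that hides priv1.  The concrete implementation (e.g. the
 * mmio-based spu_priv1_mmio_ops) is expected to be installed by
 * platform setup code before any SPU is touched.
 */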
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
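/*
 * Restarting the MFC while a context switch is pending would race with
 * the switch code, which suspends and resumes DMA itself, so the
 * restart is skipped in that case and left to the switch to complete.
 */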
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE
		if (in_hugepage_area(mm->context, ea))
			llp = mmu_psize_defs[mmu_huge_psize].sllp;
		else
#endif
			llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER | llp;
		break;
	case VMALLOC_REGION_ID:
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	case KERNEL_REGION_ID:
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}
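/*
 * The SLB entry written above uses the usual powerpc layout: the ESID
 * word holds the effective segment id plus a valid bit, the VSID word
 * holds the virtual segment id, the kernel/user class and the page
 * size encoding (sllp).  Since the MFC gives us no usage information
 * for its eight SLB slots, entries are simply replaced round-robin
 * via slb_replace.
 */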
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
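/*
 * Faults that cannot be resolved here (user space hash faults and real
 * page faults) are only recorded in spu->dar/spu->dsisr; the
 * stop_callback wakes the controlling thread, which is expected to
 * call spu_irq_class_1_bottom() below from process context to resolve
 * the fault and restart the DMA.
 */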
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}
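/*
 * Class 0 (error) interrupts use the same split: the hard irq handler
 * above only latches class_0_pending and wakes the owner, while the
 * bottom half below runs from process context and dispatches on the
 * individual status bits.
 */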
int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
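/*
 * Note that the EXPORT_SYMBOL_GPL above refers to
 * spu_irq_class_1_bottom(), which is defined further down but already
 * declared in <asm/spu.h>, so the export resolves correctly from here.
 * The top half itself must read DAR/DSISR and clear the interrupt
 * status under register_lock so that a concurrent context switch sees
 * a consistent fault state.
 */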
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1) /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
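/*
 * The mailbox bits (0x1 and 0x10) stay masked when this handler
 * returns; the ibox/wbox callback consumer (spufs) is expected to
 * unmask them again once the mailbox has been drained, since a
 * still-full mailbox would otherwise immediately re-raise the
 * level-triggered interrupt.
 */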
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}
static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
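/*
 * The channel numbers and counts above reflect the architected reset
 * state of the SPE channel interface: write-only channels get their
 * data cleared, and the counts give each channel its initial capacity,
 * e.g. 16 free slots for the MFC command channel (0x15) and empty
 * blocking mailbox channels.  This matters because an SPU handed out
 * by spu_alloc_node() may carry stale state from its previous user.
 */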
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);
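/*
 * Minimal usage sketch (roughly what the spufs scheduler does; context
 * binding and error handling omitted):
 *
 *	struct spu *spu = spu_alloc();
 *	if (spu) {
 *		... bind a context and run it ...
 *		spu_free(spu);
 *	}
 *
 * spu_alloc() below just tries spu_alloc_node() on each node in turn.
 */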
struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}
void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}
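/*
 * spu_handle_mm_fault() is in essence a stripped-down copy of the
 * powerpc do_page_fault() path: find the VMA, validate the access
 * against the MFC's DSISR bits, and let handle_mm_fault() resolve it.
 * The #if 0 blocks mark checks the generic handler performs that are
 * not (yet) applied to SPE-initiated accesses.
 */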
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}

	return ret;
}
static int __init find_spu_node_id(struct device_node *spe)
{
	const unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}
static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}
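/*
 * Registering the SPE-local memory ranges with the memory hotplug core
 * via __add_pages() gives them struct page backing, which is
 * presumably what lets the local store be treated as ordinary RAM (and
 * mapped into user space) rather than as io memory.  A second
 * registration of the same range returns -EEXIST, which the caller
 * below deliberately ignores.
 */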
static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	const void *p;
	int proplen;
	void __iomem *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

out:
	return ret;
}
static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
}
/* This function shall be abstracted for HV platforms */
static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
{
	unsigned int isrc;
	const u32 *tmp;

	/* Get the interrupt source unit from the device-tree */
	tmp = get_property(np, "isrc", NULL);
	if (!tmp)
		return -ENODEV;
	isrc = tmp[0];

	/* Add the node number */
	isrc |= spu->node << IIC_IRQ_NODE_SHIFT;

	/* Now map interrupts of all 3 classes */
	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

	/* Right now, we only fail if class 2 failed */
	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}
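/*
 * The "isrc" based mapping above matches the older device tree layout.
 * Newer firmware describes the SPE interrupts with standard interrupt
 * properties, which spu_map_interrupts() below resolves through
 * of_irq_map_one() and irq_create_of_mapping().
 */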
static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
{
	const char *prop;
	int ret;

	ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	/* priv1 is not available on a hypervisor */
	spu->priv1 = map_spe_prop(spu, node, "priv1");

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
	struct of_irq oirq;
	int ret;
	int i;

	for (i = 0; i < 3; i++) {
		ret = of_irq_map_one(np, i, &oirq);
		if (ret) {
			pr_debug("spu_new: failed to get irq %d\n", i);
			goto err;
		}
		ret = -EINVAL;
		pr_debug("  irq %d no 0x%x on %s\n", i, oirq.specifier[0],
			 oirq.controller->full_name);
		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
					oirq.specifier, oirq.size);
		if (spu->irqs[i] == NO_IRQ) {
			pr_debug("spu_new: failed to map it !\n");
			goto err;
		}
	}
	return 0;

err:
	pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
		 spu->name);
	for (; i >= 0; i--) {
		if (spu->irqs[i] != NO_IRQ)
			irq_dispose_mapping(spu->irqs[i]);
	}
	return ret;
}
static int spu_map_resource(struct device_node *node, int nr,
		void __iomem** virt, unsigned long *phys)
{
	struct resource resource = { };
	int ret;

	ret = of_address_to_resource(node, nr, &resource);
	if (ret)
		goto out;

	if (phys)
		*phys = resource.start;
	/* struct resource bounds are inclusive, hence the + 1 */
	*virt = ioremap(resource.start, resource.end - resource.start + 1);
	if (!*virt)
		ret = -EINVAL;

out:
	return ret;
}
static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	int ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
			       &spu->local_store_phys);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 0\n",
			 node->full_name);
		goto out;
	}
	ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
			       &spu->problem_phys);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 1\n",
			 node->full_name);
		goto out_unmap;
	}
	ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
			       NULL);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 2\n",
			 node->full_name);
		goto out_unmap;
	}

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
				       NULL);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 3\n",
			 node->full_name);
		goto out_unmap;
	}
	pr_debug("spu_new: %s maps:\n", node->full_name);
	pr_debug("  local store   : 0x%016lx -> 0x%p\n",
		 spu->local_store_phys, spu->local_store);
	pr_debug("  problem state : 0x%016lx -> 0x%p\n",
		 spu->problem_phys, spu->problem);
	pr_debug("  priv2         : 0x%p\n", spu->priv2);
	pr_debug("  priv1         : 0x%p\n", spu->priv1);

	return 0;

out_unmap:
	spu_unmap(spu);
out:
	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
	return ret;
}
struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};
int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
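/*
 * The four helpers above let other code (e.g. spufs) attach extra
 * sysfs attributes to every SPU without knowing about the internal
 * lists; they simply walk spu_full_list under spu_mutex.  A sketch,
 * with a hypothetical attribute name:
 *
 *	static SYSDEV_ATTR(foo, 0444, foo_show, NULL);
 *	...
 *	spu_add_sysdev_attr(&attr_foo);
 */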
static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}
static void spu_destroy_sysdev(struct spu *spu)
{
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->node = find_spu_node_id(spe);
	if (spu->node >= MAX_NUMNODES) {
		printk(KERN_WARNING "SPE %s on node %d ignored,"
		       " node number too big\n", spe->full_name, spu->node);
		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
		ret = -ENODEV;
		goto out_free;
	}
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;

	ret = spu_map_device(spu, spe);
	/* try old method */
	if (ret)
		ret = spu_map_device_old(spu, spe);
	if (ret)
		goto out_free;

	ret = spu_map_interrupts(spu, spe);
	if (ret)
		ret = spu_map_interrupts_old(spu, spe);
	if (ret)
		goto out_unmap;
	spin_lock_init(&spu->register_lock);
	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	mutex_lock(&spu_mutex);

	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unlock;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list[spu->node]);
	list_add(&spu->full_list, &spu_full_list);
	spu->devnode = of_node_get(spe);

	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %p %p %p %p %d\n",
		spu->name, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_unlock:
	mutex_unlock(&spu_mutex);
out_unmap:
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}
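/*
 * The error labels above unwind in reverse order of construction:
 * irqs are released before the mutex is dropped, the mappings and the
 * spu struct itself go last.  Any new setup step added to create_spu()
 * needs a matching label at the right position in this chain.
 */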
static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);
	list_del_init(&spu->full_list);

	of_node_put(spu->devnode);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}
static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);
static int __init init_spu_base(void)
{
	struct device_node *node;
	int i, ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__func__, node->name);
			cleanup_spu_base();
			break;
		}
	}

	xmon_register_spus(&spu_full_list);

	return ret;
}
module_init(init_spu_base);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");