/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	struct rb_node		list;
	unsigned long		offset;
	int			pnode;
	int			irq;
};

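/* Per-irq (MMR offset, pnode) mappings live in uv_irq_root, under uv_irq_lock. */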
static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root		uv_irq_root;

static int uv_set_irq_affinity(unsigned int, const struct cpumask *);

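/*
 * Most irq_chip operations are no-ops here: delivery of a UV interrupt is
 * controlled through the hub MMR (see arch_enable_uv_irq() and
 * arch_disable_uv_irq() below), not through the chip callbacks.
 */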
static void uv_noop(unsigned int irq)
{
}

static unsigned int uv_noop_ret(unsigned int irq)
{
	return 0;
}

static void uv_ack_apic(unsigned int irq)
{
	ack_APIC_irq();
}

struct irq_chip uv_irq_chip = {
	.name		= "UV-CORE",
	.startup	= uv_noop_ret,
	.shutdown	= uv_noop,
	.enable		= uv_noop,
	.disable	= uv_noop,
	.ack		= uv_noop,
	.mask		= uv_noop,
	.unmask		= uv_noop,
	.eoi		= uv_ack_apic,
	.end		= uv_noop,
	.set_affinity	= uv_set_irq_affinity,
};

/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
				uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}

109
110 /* Retrieve offset and pnode information from the rb tree for a specific irq */
111 int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
112 {
113         struct uv_irq_2_mmr_pnode *e;
114         struct rb_node *n;
115         unsigned long irqflags;
116
117         spin_lock_irqsave(&uv_irq_lock, irqflags);
118         n = uv_irq_root.rb_node;
119         while (n) {
120                 e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
121
122                 if (e->irq == irq) {
123                         *offset = e->offset;
124                         *pnode = e->pnode;
125                         spin_unlock_irqrestore(&uv_irq_lock, irqflags);
126                         return 0;
127                 }
128
129                 if (irq < e->irq)
130                         n = n->rb_left;
131                 else
132                         n = n->rb_right;
133         }
134         spin_unlock_irqrestore(&uv_irq_lock, irqflags);
135         return -1;
136 }
137
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		       unsigned long mmr_offset, int restrict)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	int mmr_pnode;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int err;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	cfg = irq_cfg(irq);

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	if (restrict == UV_AFFINITY_CPU)
		desc->status |= IRQ_NO_BALANCING;
	else
		desc->status |= IRQ_MOVE_PCNTXT;

	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

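	/* Build an IO-APIC style routing entry and program it into the hub MMR. */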
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= apic->cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}


static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg = desc->chip_data;
	unsigned int dest;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	unsigned long mmr_offset;
	int mmr_pnode;

	if (set_desc_affinity(desc, mask, &dest))
		return -1;

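	/* Rebuild the routing entry with the new destination APIC ID. */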
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int restrict)
{
	int irq, ret;

	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

	if (irq <= 0)
		return -EBUSY;

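	/*
	 * arch_enable_uv_irq() returns the irq number on success, or a
	 * negative errno on failure.
	 */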
	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
		restrict);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
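
/*
 * Illustrative sketch (not part of the original code): a driver would
 * typically pair the two exported calls roughly as follows.  The driver
 * name, cpu, blade and MMR offset below are placeholders, not values
 * taken from this file:
 *
 *	int irq;
 *
 *	irq = uv_setup_irq("my_driver", cpu, mmr_blade, mmr_offset,
 *			   UV_AFFINITY_CPU);
 *	if (irq <= 0)
 *		return irq;		(setup failed)
 *	...
 *	uv_teardown_irq(irq);		(releases the irq, masks the MMR)
 */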

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The MMR offset and pnode recorded by uv_setup_irq() are looked up in the
 * rb tree, so only the irq returned by uv_setup_irq() needs to be passed in.
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);