arch/x86/kernel/uv_irq.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
        struct rb_node          list;
        unsigned long           offset;
        int                     pnode;
        int                     irq;
};

static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root           uv_irq_root;

static int uv_set_irq_affinity(unsigned int, const struct cpumask *);

static void uv_noop(unsigned int irq)
{
}

static unsigned int uv_noop_ret(unsigned int irq)
{
        return 0;
}

static void uv_ack_apic(unsigned int irq)
{
        ack_APIC_irq();
}

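/*
 * Most of the irq_chip callbacks are intentionally no-ops: routing for these
 * interrupts is programmed directly into a hub MMR (see arch_enable_uv_irq()),
 * so only the APIC EOI and affinity changes require real work here.
 */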
struct irq_chip uv_irq_chip = {
        .name           = "UV-CORE",
        .startup        = uv_noop_ret,
        .shutdown       = uv_noop,
        .enable         = uv_noop,
        .disable        = uv_noop,
        .ack            = uv_noop,
        .mask           = uv_noop,
        .unmask         = uv_noop,
        .eoi            = uv_ack_apic,
        .end            = uv_noop,
        .set_affinity   = uv_set_irq_affinity,
};

/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
        struct rb_node **link = &uv_irq_root.rb_node;
        struct rb_node *parent = NULL;
        struct uv_irq_2_mmr_pnode *n;
        struct uv_irq_2_mmr_pnode *e;
        unsigned long irqflags;

        n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
                                uv_blade_to_memory_nid(blade));
        if (!n)
                return -ENOMEM;

        n->irq = irq;
        n->offset = offset;
        n->pnode = uv_blade_to_pnode(blade);
        spin_lock_irqsave(&uv_irq_lock, irqflags);
        /* Find the right place in the rbtree: */
        while (*link) {
                parent = *link;
                e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

                if (unlikely(irq == e->irq)) {
                        /* irq entry exists */
                        e->pnode = uv_blade_to_pnode(blade);
                        e->offset = offset;
                        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
                        kfree(n);
                        return 0;
                }

                if (irq < e->irq)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        /* Insert the node into the rbtree. */
        rb_link_node(&n->list, parent, link);
        rb_insert_color(&n->list, &uv_irq_root);

        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        return 0;
}

/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
        struct uv_irq_2_mmr_pnode *e;
        struct rb_node *n;
        unsigned long irqflags;

        spin_lock_irqsave(&uv_irq_lock, irqflags);
        n = uv_irq_root.rb_node;
        while (n) {
                e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

                if (e->irq == irq) {
                        *offset = e->offset;
                        *pnode = e->pnode;
                        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
                        return 0;
                }

                if (irq < e->irq)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        return -1;
}

/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
                       unsigned long mmr_offset, int restrict)
{
        const struct cpumask *eligible_cpu = cpumask_of(cpu);
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_cfg *cfg;
        int mmr_pnode;
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;
        int err;

        BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
                        sizeof(unsigned long));

        cfg = irq_cfg(irq);

        err = assign_irq_vector(irq, cfg, eligible_cpu);
        if (err != 0)
                return err;

        if (restrict == UV_AFFINITY_CPU)
                desc->status |= IRQ_NO_BALANCING;
        else
                desc->status |= IRQ_MOVE_PCNTXT;

        set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
                                      irq_name);

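        /*
         * Build the IO-APIC-style routing entry that will be written into the
         * hub MMR: unmasked, edge-triggered, targeting the chosen CPU's APIC
         * with the vector just assigned.
         */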
        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        entry->vector           = cfg->vector;
        entry->delivery_mode    = apic->irq_delivery_mode;
        entry->dest_mode        = apic->irq_dest_mode;
        entry->polarity         = 0;
        entry->trigger          = 0;
        entry->mask             = 0;
        entry->dest             = apic->cpu_mask_to_apicid(eligible_cpu);

        mmr_pnode = uv_blade_to_pnode(mmr_blade);
        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);

        return irq;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;

        BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
                        sizeof(unsigned long));

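        /* Write back an entry with only the mask bit set, silencing the MSI. */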
        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        entry->mask = 1;

        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}

static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_cfg *cfg = desc->chip_data;
        unsigned int dest;
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;
        unsigned long mmr_offset;
        unsigned mmr_pnode;

        if (set_desc_affinity(desc, mask, &dest))
                return -1;

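        /* Re-encode the routing entry with the newly selected destination. */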
        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

        entry->vector           = cfg->vector;
        entry->delivery_mode    = apic->irq_delivery_mode;
        entry->dest_mode        = apic->irq_dest_mode;
        entry->polarity         = 0;
        entry->trigger          = 0;
        entry->mask             = 0;
        entry->dest             = dest;

        /* Get previously stored MMR and pnode of hub sourcing interrupts */
        if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
                return -1;

        uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

        if (cfg->move_in_progress)
                send_cleanup_vector(cfg);

        return 0;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
                 unsigned long mmr_offset, int restrict)
{
        int irq, ret;

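        /* Allocate an irq above the legacy range, on the blade's memory node. */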
        irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

        if (irq <= 0)
                return -EBUSY;

        ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
                restrict);
        if (ret == irq)
                uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
        else
                destroy_irq(irq);

        return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The MMR offset and pnode are looked up from the rb tree entry recorded by
 * uv_setup_irq(), so only the irq number is needed here.
 */
void uv_teardown_irq(unsigned int irq)
{
        struct uv_irq_2_mmr_pnode *e;
        struct rb_node *n;
        unsigned long irqflags;

        spin_lock_irqsave(&uv_irq_lock, irqflags);
        n = uv_irq_root.rb_node;
        while (n) {
                e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
                if (e->irq == irq) {
                        arch_disable_uv_irq(e->pnode, e->offset);
                        rb_erase(n, &uv_irq_root);
                        kfree(e);
                        break;
                }
                if (irq < e->irq)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        spin_unlock_irqrestore(&uv_irq_lock, irqflags);
        destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
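
/*
 * Illustrative usage sketch (not part of this file's logic): a UV driver
 * would typically pair uv_setup_irq()/uv_teardown_irq() with request_irq()/
 * free_irq().  The handler, device cookie and MMR offset below are
 * hypothetical placeholders.
 *
 *      int irq = uv_setup_irq("my-uv-dev", cpu, mmr_blade, mmr_offset,
 *                             UV_AFFINITY_CPU);
 *      if (irq < 0)
 *              return irq;
 *      if (request_irq(irq, my_uv_handler, 0, "my-uv-dev", my_dev)) {
 *              uv_teardown_irq(irq);
 *              return -EIO;
 *      }
 *      ...
 *      free_irq(irq, my_dev);
 *      uv_teardown_irq(irq);
 */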