2 * Intel IXP4xx Queue Manager driver for Linux
4 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
11 #include <linux/ioport.h>
12 #include <linux/interrupt.h>
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <mach/qmgr.h>
/* MMIO register block of the queue manager, at its fixed virtual mapping. */
19 static struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
/* Claimed physical MMIO region; held from qmgr_init() until qmgr_remove(). */
20 static struct resource *mem_res;
/* Serializes register read-modify-write, SRAM allocation and handler tables. */
21 static spinlock_t qmgr_lock;
22 static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
/* Per-queue interrupt callback and the opaque context passed back to it. */
23 static void (*irq_handlers[QUEUES])(void *pdev);
24 static void *irq_pdevs[QUEUES];
/* Human-readable per-queue description, filled when a queue is requested
   and cleared on release; exported (non-static) for use by other drivers. */
27 char qmgr_queue_descs[QUEUES][32];
/*
 * qmgr_set_irq() - choose the interrupt condition for one queue and
 * install its callback.
 * @queue:   queue number; only the "low" queues (< HALF_QUEUES) have a
 *           programmable IRQ source, the source for the rest is fixed
 * @src:     QUEUE_IRQ_SRC_* condition that should raise the interrupt
 * @handler: function the qmgr IRQ handlers will invoke for this queue
 * @pdev:    opaque pointer handed back to @handler
 *
 * NOTE(review): this visible chunk is missing several original lines
 * (local declarations, the else-branch body writing the handler tables'
 * guard, and the closing braces) — the gaps below are extraction damage,
 * not intentional.
 */
30 void qmgr_set_irq(unsigned int queue, int src,
31 void (*handler)(void *pdev), void *pdev)
35 spin_lock_irqsave(&qmgr_lock, flags);
36 if (queue < HALF_QUEUES) {
39 BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
40 reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
41 bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
/* Read-modify-write: replace only this queue's 3-bit source field. */
42 __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
45 /* IRQ source for queues 32-63 is fixed */
46 BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);
48 irq_handlers[queue] = handler;
49 irq_pdevs[queue] = pdev;
50 spin_unlock_irqrestore(&qmgr_lock, flags);
/*
 * qmgr_irq1_a0() - IRQ1 handler used only on IXP42x rev. A0 silicon.
 * On that stepping the irqstat register cannot be relied on (see the ACK
 * comment below), so the handler instead walks every enabled "low" queue
 * and re-derives the IRQ condition from irqsrc[] and stat1[] directly.
 * NOTE(review): the loop header, the inversion of @stat when src bit 2 is
 * set, and the return statement fall in lines missing from this chunk.
 */
54 static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
57 u32 en_bitmap, src, stat;
59 /* ACK - it may clear any bits so don't rely on it */
60 __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
62 en_bitmap = qmgr_regs->irqen[0];
64 i = __fls(en_bitmap); /* number of the last "low" queue */
/* irqsrc[] and stat1[] pack 8 queues per u32 (4 bits per queue). */
66 src = qmgr_regs->irqsrc[i >> 3];
67 stat = qmgr_regs->stat1[i >> 3];
68 if (src & 4) /* the IRQ condition is inverted */
/* Low 2 bits of src select which status bit signals the condition. */
70 if (stat & BIT(src & 3)) {
71 irq_handlers[i](irq_pdevs[i]);
/*
 * qmgr_irq2_a0() - IRQ2 handler used only on IXP42x rev. A0 silicon.
 * For the "high" queues the only IRQ source is "not nearly empty", so the
 * pending set is rebuilt from irqen[1] AND statne_h instead of trusting
 * irqstat.  NOTE(review): the loop header and return are in lines missing
 * from this chunk.
 */
79 static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
84 /* ACK - it may clear any bits so don't rely on it */
85 __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
87 req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
89 i = __fls(req_bitmap); /* number of the last "high" queue */
90 req_bitmap &= ~BIT(i);
/* High-queue handlers live in the upper half of the handler tables. */
91 irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
/*
 * qmgr_irq() - interrupt handler for non-A0 silicon, shared by both qmgr
 * IRQ lines.  Here irqstat can be trusted: read it, ACK it by writing the
 * same bits back, then dispatch each set bit to its registered handler.
 * NOTE(review): the dispatch loop header and return statement are in
 * lines missing from this chunk.
 */
98 static irqreturn_t qmgr_irq(int irq, void *pdev)
/* Which half of the queue space this IRQ line covers: 0 = low, 1 = high. */
100 int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);
101 u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);
105 __raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */
108 i = __fls(req_bitmap); /* number of the last queue */
109 req_bitmap &= ~BIT(i);
110 i += half * HALF_QUEUES;
111 irq_handlers[i](irq_pdevs[i]);
/*
 * qmgr_enable_irq() - unmask the interrupt for one queue.
 * @queue: queue number (0..QUEUES-1)
 * The irqen register is split in two u32 halves of HALF_QUEUES bits each;
 * the read-modify-write is done under qmgr_lock to avoid losing bits.
 * NOTE(review): local declarations and braces are in lines missing from
 * this chunk.
 */
117 void qmgr_enable_irq(unsigned int queue)
120 int half = queue / 32;
121 u32 mask = 1 << (queue & (HALF_QUEUES - 1));
123 spin_lock_irqsave(&qmgr_lock, flags);
124 __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
125 &qmgr_regs->irqen[half]);
126 spin_unlock_irqrestore(&qmgr_lock, flags);
/*
 * qmgr_disable_irq() - mask the interrupt for one queue and clear any
 * already-latched status for it, so a stale event cannot fire after the
 * queue is re-enabled.
 * @queue: queue number (0..QUEUES-1)
 * NOTE(review): local declarations and braces are in lines missing from
 * this chunk.
 */
129 void qmgr_disable_irq(unsigned int queue)
132 int half = queue / 32;
133 u32 mask = 1 << (queue & (HALF_QUEUES - 1));
135 spin_lock_irqsave(&qmgr_lock, flags);
136 __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
137 &qmgr_regs->irqen[half]);
138 __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
139 spin_unlock_irqrestore(&qmgr_lock, flags);
/*
 * shift_mask() - shift a 128-bit SRAM-page mask, stored as four u32 words
 * with mask[0] least significant, left by one bit, carrying between words.
 * NOTE(review): the final `mask[0] <<= 1;` step and the braces are in
 * lines missing from this chunk.
 */
142 static inline void shift_mask(u32 *mask)
144 mask[3] = mask[3] << 1 | mask[2] >> 31;
145 mask[2] = mask[2] << 1 | mask[1] >> 31;
146 mask[1] = mask[1] << 1 | mask[0] >> 31;
/*
 * qmgr_request_queue() / __qmgr_request_queue() - claim a hardware queue
 * and allocate SRAM space for it.
 * @queue: queue number (BUG if >= QUEUES)
 * @len:   queue length in dwords (divided by 16 below; 1-8 pages)
 * @nearly_empty_watermark / @nearly_full_watermark: 3-bit watermark codes
 *         (values with bits above 0x7 are rejected)
 * @desc_format/@name: printf-style description stored in qmgr_queue_descs[]
 *
 * The allocator scans used_sram_bitmap[] (a 128-bit map of 16-dword SRAM
 * pages, mask[] shifted along by shift_mask()) for a free aligned span,
 * marks it used, and programs cfg + address into the queue's sram[] entry.
 * A module reference is taken for every live queue.
 *
 * NOTE(review): this chunk is missing many original lines — the body of
 * qmgr_request_queue() (which presumably formats the description and
 * calls __qmgr_request_queue(); confirm against full source), the cfg
 * computation from @len, the search loop header, mask[0] initialization,
 * the error-exit labels and the return statements.
 */
151 int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
152 unsigned int nearly_empty_watermark,
153 unsigned int nearly_full_watermark,
154 const char *desc_format, const char* name)
156 int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
157 unsigned int nearly_empty_watermark,
158 unsigned int nearly_full_watermark)
161 u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
164 BUG_ON(queue >= QUEUES);
/* Watermarks are 3-bit fields; any higher bit means an invalid value. */
166 if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
190 cfg |= nearly_empty_watermark << 26;
191 cfg |= nearly_full_watermark << 29;
192 len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
193 mask[1] = mask[2] = mask[3] = 0;
/* Pin the module while a queue is held; dropped in qmgr_release_queue(). */
195 if (!try_module_get(THIS_MODULE))
198 spin_lock_irq(&qmgr_lock);
/* Non-zero sram[] entry means the queue is already configured. */
199 if (__raw_readl(&qmgr_regs->sram[queue])) {
205 if (!(used_sram_bitmap[0] & mask[0]) &&
206 !(used_sram_bitmap[1] & mask[1]) &&
207 !(used_sram_bitmap[2] & mask[2]) &&
208 !(used_sram_bitmap[3] & mask[3]))
209 break; /* found free space */
213 if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
214 printk(KERN_ERR "qmgr: no free SRAM space for"
215 " queue %i\n", queue);
/* Mark the chosen pages used and commit the queue configuration. */
221 used_sram_bitmap[0] |= mask[0];
222 used_sram_bitmap[1] |= mask[1];
223 used_sram_bitmap[2] |= mask[2];
224 used_sram_bitmap[3] |= mask[3];
225 __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
227 snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
229 printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
230 qmgr_queue_descs[queue], queue, addr);
232 spin_unlock_irq(&qmgr_lock);
/* Error path: undo the module reference taken above. */
236 spin_unlock_irq(&qmgr_lock);
237 module_put(THIS_MODULE);
/*
 * qmgr_release_queue() - undo qmgr_request_queue(): free the queue's SRAM
 * pages, clear its configuration, drain any stale entries and drop the
 * module reference.
 * @queue: queue number (BUG if >= QUEUES or the queue was never requested)
 *
 * The SRAM span is recovered from the cfg word: bits 14-21 give the page
 * address, bits 24-25 encode the length (1/2/4/8 pages -> initial mask[0]).
 * NOTE(review): the loop that shifts mask[] up to @addr, and some braces,
 * are in lines missing from this chunk.
 */
241 void qmgr_release_queue(unsigned int queue)
243 u32 cfg, addr, mask[4];
245 BUG_ON(queue >= QUEUES); /* not in valid range */
247 spin_lock_irq(&qmgr_lock);
248 cfg = __raw_readl(&qmgr_regs->sram[queue]);
249 addr = (cfg >> 14) & 0xFF;
251 BUG_ON(!addr); /* not requested */
/* Length field -> contiguous page mask before shifting to position. */
253 switch ((cfg >> 24) & 3) {
254 case 0: mask[0] = 0x1; break;
255 case 1: mask[0] = 0x3; break;
256 case 2: mask[0] = 0xF; break;
257 case 3: mask[0] = 0xFF; break;
260 mask[1] = mask[2] = mask[3] = 0;
266 printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
267 qmgr_queue_descs[queue], queue);
268 qmgr_queue_descs[queue][0] = '\x0';
/* Drain leftover entries; a non-empty queue at release is a caller bug. */
271 while ((addr = qmgr_get_entry(queue)))
272 printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
275 __raw_writel(0, &qmgr_regs->sram[queue]);
277 used_sram_bitmap[0] &= ~mask[0];
278 used_sram_bitmap[1] &= ~mask[1];
279 used_sram_bitmap[2] &= ~mask[2];
280 used_sram_bitmap[3] &= ~mask[3];
281 irq_handlers[queue] = NULL; /* catch IRQ bugs */
282 spin_unlock_irq(&qmgr_lock);
/* Balance the try_module_get() done when the queue was requested. */
284 module_put(THIS_MODULE);
/*
 * qmgr_init() - module init: claim the MMIO region, reset every queue
 * manager register to a known state, install the IRQ handlers (the A0
 * erratum variants on IXP42x rev. A0 silicon), and reserve the first four
 * SRAM pages for the hardware's configuration area.
 * NOTE(review): local declarations, several error checks/returns and the
 * error-path labels are in lines missing from this chunk; the cleanup
 * lines near the end (free_irq / release_mem_region) belong to that
 * unwinding path.
 */
287 static int qmgr_init(void)
290 irq_handler_t handler1, handler2;
292 mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
293 IXP4XX_QMGR_REGION_SIZE,
294 "IXP4xx Queue Manager");
298 /* reset qmgr registers */
299 for (i = 0; i < 4; i++) {
300 __raw_writel(0x33333333, &qmgr_regs->stat1[i]);
301 __raw_writel(0, &qmgr_regs->irqsrc[i]);
303 for (i = 0; i < 2; i++) {
304 __raw_writel(0, &qmgr_regs->stat2[i]);
305 __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
306 __raw_writel(0, &qmgr_regs->irqen[i]);
309 __raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
310 __raw_writel(0, &qmgr_regs->statf_h);
312 for (i = 0; i < QUEUES; i++)
313 __raw_writel(0, &qmgr_regs->sram[i]);
/* Rev. A0 needs the erratum handlers that don't trust irqstat. */
315 if (cpu_is_ixp42x_rev_a0()) {
316 handler1 = qmgr_irq1_a0;
317 handler2 = qmgr_irq2_a0;
319 handler1 = handler2 = qmgr_irq;
321 err = request_irq(IRQ_IXP4XX_QM1, handler1, 0, "IXP4xx Queue Manager",
324 printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
325 IRQ_IXP4XX_QM1, err);
329 err = request_irq(IRQ_IXP4XX_QM2, handler2, 0, "IXP4xx Queue Manager",
332 printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
333 IRQ_IXP4XX_QM2, err);
337 used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
338 spin_lock_init(&qmgr_lock);
340 printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
344 free_irq(IRQ_IXP4XX_QM1, NULL);
346 release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
/*
 * qmgr_remove() - module exit: release both IRQ lines, wait for any
 * in-flight handlers to finish, then give back the MMIO region claimed
 * in qmgr_init().
 */
350 static void qmgr_remove(void)
352 free_irq(IRQ_IXP4XX_QM1, NULL);
353 free_irq(IRQ_IXP4XX_QM2, NULL);
354 synchronize_irq(IRQ_IXP4XX_QM1);
355 synchronize_irq(IRQ_IXP4XX_QM2);
356 release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
/* Module hookup and the symbols exported as this driver's public API. */
359 module_init(qmgr_init);
360 module_exit(qmgr_remove);
362 MODULE_LICENSE("GPL v2");
363 MODULE_AUTHOR("Krzysztof Halasa");
365 EXPORT_SYMBOL(qmgr_set_irq);
366 EXPORT_SYMBOL(qmgr_enable_irq);
367 EXPORT_SYMBOL(qmgr_disable_irq);
369 EXPORT_SYMBOL(qmgr_queue_descs);
370 EXPORT_SYMBOL(qmgr_request_queue);
372 EXPORT_SYMBOL(__qmgr_request_queue);
374 EXPORT_SYMBOL(qmgr_release_queue);