]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
IXP42x: add NPE support for IXP425 rev. A0 processors.
[mirror_ubuntu-eoan-kernel.git] / arch / arm / mach-ixp4xx / ixp4xx_qmgr.c
CommitLineData
82a96f57
KH
1/*
2 * Intel IXP4xx Queue Manager driver for Linux
3 *
4 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 */
10
11#include <linux/ioport.h>
12#include <linux/interrupt.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
a09e64fb 15#include <mach/qmgr.h>
82a96f57 16
82a96f57
KH
/* Mapped queue-manager register block; set up in qmgr_init() */
struct qmgr_regs __iomem *qmgr_regs;
static struct resource *mem_res;	/* claimed QMGR MMIO region */
static spinlock_t qmgr_lock;		/* protects used_sram_bitmap[] and the IRQ tables */
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
/* per-queue interrupt callback and its opaque context pointer */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];

#if DEBUG_QMGR
/* human-readable queue descriptions, filled in by qmgr_request_queue() */
char qmgr_queue_descs[QUEUES][32];
#endif
27
82a96f57
KH
28void qmgr_set_irq(unsigned int queue, int src,
29 void (*handler)(void *pdev), void *pdev)
30{
82a96f57
KH
31 unsigned long flags;
32
82a96f57 33 spin_lock_irqsave(&qmgr_lock, flags);
a6a9fb85
KH
34 if (queue < HALF_QUEUES) {
35 u32 __iomem *reg;
36 int bit;
37 BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
38 reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
39 bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
40 __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
41 reg);
42 } else
43 /* IRQ source for queues 32-63 is fixed */
44 BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);
45
82a96f57
KH
46 irq_handlers[queue] = handler;
47 irq_pdevs[queue] = pdev;
48 spin_unlock_irqrestore(&qmgr_lock, flags);
49}
50
51
a6a9fb85 52static irqreturn_t qmgr_irq(int irq, void *pdev)
82a96f57 53{
a6a9fb85
KH
54 int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);
55 u32 val = __raw_readl(&qmgr_regs->irqstat[half]);
56 __raw_writel(val, &qmgr_regs->irqstat[half]); /* ACK */
82a96f57
KH
57
58 for (i = 0; i < HALF_QUEUES; i++)
a6a9fb85
KH
59 if (val & (1 << i)) {
60 int irq = half * HALF_QUEUES + i;
61 irq_handlers[irq](irq_pdevs[irq]);
62 }
82a96f57
KH
63 return val ? IRQ_HANDLED : 0;
64}
65
66
67void qmgr_enable_irq(unsigned int queue)
68{
69 unsigned long flags;
a6a9fb85
KH
70 int half = queue / 32;
71 u32 mask = 1 << (queue & (HALF_QUEUES - 1));
82a96f57
KH
72
73 spin_lock_irqsave(&qmgr_lock, flags);
a6a9fb85
KH
74 __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
75 &qmgr_regs->irqen[half]);
82a96f57
KH
76 spin_unlock_irqrestore(&qmgr_lock, flags);
77}
78
79void qmgr_disable_irq(unsigned int queue)
80{
81 unsigned long flags;
a6a9fb85
KH
82 int half = queue / 32;
83 u32 mask = 1 << (queue & (HALF_QUEUES - 1));
82a96f57
KH
84
85 spin_lock_irqsave(&qmgr_lock, flags);
a6a9fb85
KH
86 __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
87 &qmgr_regs->irqen[half]);
88 __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
82a96f57
KH
89 spin_unlock_irqrestore(&qmgr_lock, flags);
90}
91
92static inline void shift_mask(u32 *mask)
93{
94 mask[3] = mask[3] << 1 | mask[2] >> 31;
95 mask[2] = mask[2] << 1 | mask[1] >> 31;
96 mask[1] = mask[1] << 1 | mask[0] >> 31;
97 mask[0] <<= 1;
98}
99
#if DEBUG_QMGR
/*
 * qmgr_request_queue() - configure a hardware queue and reserve its SRAM.
 * @queue: queue number, 0..QUEUES-1
 * @len: queue length in dwords; must be 16, 32, 64 or 128
 * @nearly_empty_watermark: 3-bit encoded nearly-empty level
 * @nearly_full_watermark: 3-bit encoded nearly-full level
 * @desc_format, @name: printf-style description kept for debug messages
 *
 * Finds a free region of queue SRAM by first-fit, marks it in
 * used_sram_bitmap[] and writes the queue's configuration word.  Holds a
 * module reference while the queue is in use (dropped by
 * qmgr_release_queue()).
 *
 * Returns 0 on success, -EINVAL on a bad length or watermark, -EBUSY if
 * the queue is already configured, -ENOMEM if no free SRAM region of the
 * required size exists, -ENODEV if the module reference cannot be taken.
 */
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark,
		       const char *desc_format, const char* name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
			 unsigned int nearly_empty_watermark,
			 unsigned int nearly_full_watermark)
#endif
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	BUG_ON(queue >= QUEUES);

	/* both watermarks are 3-bit hardware fields */
	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

	/* encode the length in cfg bits 24-25; mask[] becomes a 128-bit
	   bitmap of the 16-dword SRAM pages the queue would occupy if it
	   started at page 0 */
	switch (len) {
	case 16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case 32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case 64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16;		/* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	/* a non-zero config word means the queue is already configured */
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		err = -EBUSY;
		goto err;
	}

	/* first-fit scan: slide the page mask upward until it no longer
	   collides with used_sram_bitmap[] or runs past the end of SRAM */
	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for"
			       " queue %i\n", queue);
			err = -ENOMEM;
			goto err;
		}
	}

	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	/* the SRAM page address occupies cfg bits 14-21 */
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
		 desc_format, name);
	printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
	       qmgr_queue_descs[queue], queue, addr);
#endif
	spin_unlock_irq(&qmgr_lock);
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}
190
/*
 * qmgr_release_queue() - unconfigure a queue and free its SRAM pages.
 *
 * Rebuilds the page-allocation mask from the queue's configuration word
 * (length from bits 24-25, SRAM page address from bits 14-21), clears it
 * from used_sram_bitmap[], zeroes the config register, and drops the
 * module reference taken by qmgr_request_queue().  Finally drains the
 * queue, warning about any entries that were still present.
 */
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF;	/* first SRAM page of the queue */

	BUG_ON(!addr);		/* not requested */

	/* reconstruct the base page mask from the encoded length;
	   all four encodings are covered, so no default is needed */
	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	/* slide the mask up to the queue's actual SRAM page address */
	while (addr--)
		shift_mask(mask);

#if DEBUG_QMGR
	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
	       qmgr_queue_descs[queue], queue);
	qmgr_queue_descs[queue][0] = '\x0';
#endif
	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);

	/* drain the released queue; leftover entries indicate a driver bug */
	while ((addr = qmgr_get_entry(queue)))
		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
		       queue, addr);
}
235
/*
 * qmgr_init() - claim and map the queue-manager MMIO region, reset all
 * of its registers and install the interrupt handlers for both lines.
 * On any failure the resources acquired so far are released in reverse
 * order via the goto-cleanup chain.
 */
static int qmgr_init(void)
{
	int i, err;
	mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
				     IXP4XX_QMGR_REGION_SIZE,
				     "IXP4xx Queue Manager");
	if (mem_res == NULL)
		return -EBUSY;

	qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	if (qmgr_regs == NULL) {
		err = -ENOMEM;
		goto error_map;
	}

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}

	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
	__raw_writel(0, &qmgr_regs->statf_h);

	/* clear every queue's configuration word */
	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	/* QM1 serves queues 0-31, QM2 serves queues 32-63 (see qmgr_irq) */
	err = request_irq(IRQ_IXP4XX_QM1, qmgr_irq, 0,
			  "IXP4xx Queue Manager", NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i\n",
		       IRQ_IXP4XX_QM1);
		goto error_irq;
	}

	err = request_irq(IRQ_IXP4XX_QM2, qmgr_irq, 0,
			  "IXP4xx Queue Manager", NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i\n",
		       IRQ_IXP4XX_QM2);
		goto error_irq2;
	}

	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
	spin_lock_init(&qmgr_lock);

	printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
	return 0;

error_irq2:
	free_irq(IRQ_IXP4XX_QM1, NULL);
error_irq:
	iounmap(qmgr_regs);
error_map:
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	return err;
}
298
/*
 * qmgr_remove() - module teardown: release both IRQ lines, wait for any
 * in-flight handler to finish, then unmap and release the MMIO region.
 */
static void qmgr_remove(void)
{
	free_irq(IRQ_IXP4XX_QM1, NULL);
	free_irq(IRQ_IXP4XX_QM2, NULL);
	synchronize_irq(IRQ_IXP4XX_QM1);
	synchronize_irq(IRQ_IXP4XX_QM2);
	iounmap(qmgr_regs);
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}
308
module_init(qmgr_init);
module_exit(qmgr_remove);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

/* exported queue-manager API; the request function's name depends on
   whether debug descriptions are compiled in (DEBUG_QMGR) */
EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);