// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 */

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/qmgr.h>

static struct qmgr_regs __iomem *qmgr_regs;
static int qmgr_irq_1;
static int qmgr_irq_2;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];

#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
#endif

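/**
 * qmgr_put_entry() - write one entry to a hardware queue
 * @queue: queue number
 * @val: 32-bit value to append to the queue
 */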
void qmgr_put_entry(unsigned int queue, u32 val)
{
#if DEBUG_QMGR
	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */

	printk(KERN_DEBUG "Queue %s(%i) put %X\n",
	       qmgr_queue_descs[queue], queue, val);
#endif
	__raw_writel(val, &qmgr_regs->acc[queue][0]);
}

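/**
 * qmgr_get_entry() - read one entry from a hardware queue
 * @queue: queue number
 *
 * Returns the value read; a return of 0 indicates an empty queue
 * (qmgr_release_queue() relies on this when draining).
 */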
u32 qmgr_get_entry(unsigned int queue)
{
	u32 val;
	val = __raw_readl(&qmgr_regs->acc[queue][0]);
#if DEBUG_QMGR
	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */

	printk(KERN_DEBUG "Queue %s(%i) get %X\n",
	       qmgr_queue_descs[queue], queue, val);
#endif
	return val;
}

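/* Read the 4-bit status field of a "low" queue (0-31); the stat1
 * registers pack eight such fields per 32-bit word. */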
static int __qmgr_get_stat1(unsigned int queue)
{
	return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
		>> ((queue & 7) << 2)) & 0xF;
}

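/* Read the 2-bit status field of a "low" queue from stat2 (sixteen
 * fields per 32-bit word); only the overflow flag is consumed here. */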
static int __qmgr_get_stat2(unsigned int queue)
{
	BUG_ON(queue >= HALF_QUEUES);
	return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
		>> ((queue & 0xF) << 1)) & 0x3;
}

/**
 * qmgr_stat_empty() - checks if a hardware queue is empty
 * @queue: queue number
 *
 * Returns non-zero value if the queue is empty.
 */
int qmgr_stat_empty(unsigned int queue)
{
	BUG_ON(queue >= HALF_QUEUES);
	return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
}

/**
 * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
 * @queue: queue number
 *
 * Returns non-zero value if the queue is below low watermark.
 */
int qmgr_stat_below_low_watermark(unsigned int queue)
{
	if (queue >= HALF_QUEUES)
		return (__raw_readl(&qmgr_regs->statne_h) >>
			(queue - HALF_QUEUES)) & 0x01;
	return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
}

/**
 * qmgr_stat_full() - checks if a hardware queue is full
 * @queue: queue number
 *
 * Returns non-zero value if the queue is full.
 */
int qmgr_stat_full(unsigned int queue)
{
	if (queue >= HALF_QUEUES)
		return (__raw_readl(&qmgr_regs->statf_h) >>
			(queue - HALF_QUEUES)) & 0x01;
	return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
}

/**
 * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
 * @queue: queue number
 *
 * Returns non-zero value if the queue experienced overflow.
 */
int qmgr_stat_overflow(unsigned int queue)
{
	return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
}

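/**
 * qmgr_set_irq() - select the IRQ condition and handler for a queue
 * @queue: queue number
 * @src: interrupt condition (one of the QUEUE_IRQ_SRC_* values)
 * @handler: callback invoked from the queue manager interrupt
 * @pdev: opaque pointer passed back to @handler
 *
 * The condition is programmable only for queues 0-31; for queues 32-63
 * it is fixed to QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY.
 */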
void qmgr_set_irq(unsigned int queue, int src,
		  void (*handler)(void *pdev), void *pdev)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	if (queue < HALF_QUEUES) {
		u32 __iomem *reg;
		int bit;
		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
		__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
			     reg);
	} else
		/* IRQ source for queues 32-63 is fixed */
		BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);

	irq_handlers[queue] = handler;
	irq_pdevs[queue] = pdev;
	spin_unlock_irqrestore(&qmgr_lock, flags);
}


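/*
 * IXP42x rev. A0 interrupt handlers: the IRQSTAT registers cannot be
 * relied upon (acknowledging them may clear arbitrary bits), so the
 * pending queues are rederived from the IRQ enable, IRQ source and
 * status registers instead.
 */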
static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 en_bitmap, src, stat;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);

	en_bitmap = qmgr_regs->irqen[0];
	while (en_bitmap) {
		i = __fls(en_bitmap); /* number of the last "low" queue */
		en_bitmap &= ~BIT(i);
		src = qmgr_regs->irqsrc[i >> 3];
		stat = qmgr_regs->stat1[i >> 3];
		if (src & 4) /* the IRQ condition is inverted */
			stat = ~stat;
		if (stat & BIT(src & 3)) {
			irq_handlers[i](irq_pdevs[i]);
			ret = IRQ_HANDLED;
		}
	}
	return ret;
}


static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 req_bitmap;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);

	req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last "high" queue */
		req_bitmap &= ~BIT(i);
		irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
		ret = IRQ_HANDLED;
	}
	return ret;
}


static irqreturn_t qmgr_irq(int irq, void *pdev)
{
	int i, half = (irq == qmgr_irq_1 ? 0 : 1);
	u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);

	if (!req_bitmap)
		return 0;
	__raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */

	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last queue */
		req_bitmap &= ~BIT(i);
		i += half * HALF_QUEUES;
		irq_handlers[i](irq_pdevs[i]);
	}
	return IRQ_HANDLED;
}


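/**
 * qmgr_enable_irq() - enable interrupt generation for a hardware queue
 * @queue: queue number
 */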
void qmgr_enable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
		     &qmgr_regs->irqen[half]);
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

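/**
 * qmgr_disable_irq() - disable interrupt generation for a hardware queue
 * @queue: queue number
 *
 * Any interrupt already pending for @queue is cleared as well.
 */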
void qmgr_disable_irq(unsigned int queue)
{
	unsigned long flags;
	int half = queue / 32;
	u32 mask = 1 << (queue & (HALF_QUEUES - 1));

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
		     &qmgr_regs->irqen[half]);
	__raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

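/* Shift the 128-bit SRAM allocation mask (one bit per 16-dword page)
 * left by one page. */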
static inline void shift_mask(u32 *mask)
{
	mask[3] = mask[3] << 1 | mask[2] >> 31;
	mask[2] = mask[2] << 1 | mask[1] >> 31;
	mask[1] = mask[1] << 1 | mask[0] >> 31;
	mask[0] <<= 1;
}

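/*
 * qmgr_request_queue() - configure a hardware queue and reserve SRAM for it.
 * @len is the queue length in dwords (16, 32, 64 or 128); the watermarks
 * are 3-bit threshold codes.  Returns 0 on success, -EINVAL on invalid
 * parameters, -EBUSY if the queue is already configured, -ENOMEM if no
 * free SRAM region is found, or -ENODEV if the module reference cannot
 * be taken.
 */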
e6da96ac 236#if DEBUG_QMGR
82a96f57
KH
237int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
238 unsigned int nearly_empty_watermark,
e6da96ac
KH
239 unsigned int nearly_full_watermark,
240 const char *desc_format, const char* name)
241#else
242int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
243 unsigned int nearly_empty_watermark,
244 unsigned int nearly_full_watermark)
245#endif
82a96f57
KH
246{
247 u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
248 int err;
249
a6a9fb85 250 BUG_ON(queue >= QUEUES);
82a96f57
KH
251
252 if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
253 return -EINVAL;
254
255 switch (len) {
256 case 16:
257 cfg = 0 << 24;
258 mask[0] = 0x1;
259 break;
260 case 32:
261 cfg = 1 << 24;
262 mask[0] = 0x3;
263 break;
264 case 64:
265 cfg = 2 << 24;
266 mask[0] = 0xF;
267 break;
268 case 128:
269 cfg = 3 << 24;
270 mask[0] = 0xFF;
271 break;
272 default:
273 return -EINVAL;
274 }
275
276 cfg |= nearly_empty_watermark << 26;
277 cfg |= nearly_full_watermark << 29;
278 len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
279 mask[1] = mask[2] = mask[3] = 0;
280
281 if (!try_module_get(THIS_MODULE))
282 return -ENODEV;
283
284 spin_lock_irq(&qmgr_lock);
285 if (__raw_readl(&qmgr_regs->sram[queue])) {
286 err = -EBUSY;
287 goto err;
288 }
289
290 while (1) {
291 if (!(used_sram_bitmap[0] & mask[0]) &&
292 !(used_sram_bitmap[1] & mask[1]) &&
293 !(used_sram_bitmap[2] & mask[2]) &&
294 !(used_sram_bitmap[3] & mask[3]))
295 break; /* found free space */
296
297 addr++;
298 shift_mask(mask);
299 if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
300 printk(KERN_ERR "qmgr: no free SRAM space for"
301 " queue %i\n", queue);
302 err = -ENOMEM;
303 goto err;
304 }
305 }
306
307 used_sram_bitmap[0] |= mask[0];
308 used_sram_bitmap[1] |= mask[1];
309 used_sram_bitmap[2] |= mask[2];
310 used_sram_bitmap[3] |= mask[3];
311 __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
e6da96ac
KH
312#if DEBUG_QMGR
313 snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
314 desc_format, name);
315 printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
316 qmgr_queue_descs[queue], queue, addr);
82a96f57 317#endif
e6da96ac 318 spin_unlock_irq(&qmgr_lock);
82a96f57
KH
319 return 0;
320
321err:
322 spin_unlock_irq(&qmgr_lock);
323 module_put(THIS_MODULE);
324 return err;
325}
326
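/**
 * qmgr_release_queue() - drain a hardware queue and free its SRAM pages
 * @queue: queue number
 *
 * Any entries still present are read out and reported, the queue's
 * configuration word is cleared and its SRAM pages are returned to the
 * allocation bitmap.
 */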
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF;

	BUG_ON(!addr); /* not requested */

	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	while (addr--)
		shift_mask(mask);

#if DEBUG_QMGR
	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
	       qmgr_queue_descs[queue], queue);
	qmgr_queue_descs[queue][0] = '\x0';
#endif

	while ((addr = qmgr_get_entry(queue)))
		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
		       queue, addr);

	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);
}

81bca32f 373static int ixp4xx_qmgr_probe(struct platform_device *pdev)
82a96f57
KH
374{
375 int i, err;
d4c9e9fc 376 irq_handler_t handler1, handler2;
ecc133c6
LW
377 struct device *dev = &pdev->dev;
378 struct resource *res;
379 int irq1, irq2;
d4c9e9fc 380
ecc133c6
LW
381 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
382 if (!res)
383 return -ENODEV;
384 qmgr_regs = devm_ioremap_resource(dev, res);
c180d710
DC
385 if (IS_ERR(qmgr_regs))
386 return PTR_ERR(qmgr_regs);
ecc133c6
LW
387
388 irq1 = platform_get_irq(pdev, 0);
389 if (irq1 <= 0)
390 return irq1 ? irq1 : -EINVAL;
391 qmgr_irq_1 = irq1;
392 irq2 = platform_get_irq(pdev, 1);
393 if (irq2 <= 0)
394 return irq2 ? irq2 : -EINVAL;
395 qmgr_irq_2 = irq2;
82a96f57 396
82a96f57
KH
397 /* reset qmgr registers */
398 for (i = 0; i < 4; i++) {
399 __raw_writel(0x33333333, &qmgr_regs->stat1[i]);
400 __raw_writel(0, &qmgr_regs->irqsrc[i]);
401 }
402 for (i = 0; i < 2; i++) {
403 __raw_writel(0, &qmgr_regs->stat2[i]);
404 __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
405 __raw_writel(0, &qmgr_regs->irqen[i]);
406 }
407
a6a9fb85
KH
408 __raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
409 __raw_writel(0, &qmgr_regs->statf_h);
410
82a96f57
KH
411 for (i = 0; i < QUEUES; i++)
412 __raw_writel(0, &qmgr_regs->sram[i]);
413
d4c9e9fc
KH
414 if (cpu_is_ixp42x_rev_a0()) {
415 handler1 = qmgr_irq1_a0;
416 handler2 = qmgr_irq2_a0;
417 } else
418 handler1 = handler2 = qmgr_irq;
419
ecc133c6
LW
420 err = devm_request_irq(dev, irq1, handler1, 0, "IXP4xx Queue Manager",
421 NULL);
82a96f57 422 if (err) {
ecc133c6
LW
423 dev_err(dev, "failed to request IRQ%i (%i)\n",
424 irq1, err);
425 return err;
82a96f57
KH
426 }
427
ecc133c6
LW
428 err = devm_request_irq(dev, irq2, handler2, 0, "IXP4xx Queue Manager",
429 NULL);
a6a9fb85 430 if (err) {
ecc133c6
LW
431 dev_err(dev, "failed to request IRQ%i (%i)\n",
432 irq2, err);
433 return err;
a6a9fb85
KH
434 }
435
82a96f57
KH
436 used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
437 spin_lock_init(&qmgr_lock);
438
ecc133c6 439 dev_info(dev, "IXP4xx Queue Manager initialized.\n");
82a96f57 440 return 0;
82a96f57
KH
441}
442
81bca32f 443static int ixp4xx_qmgr_remove(struct platform_device *pdev)
82a96f57 444{
ecc133c6
LW
445 synchronize_irq(qmgr_irq_1);
446 synchronize_irq(qmgr_irq_2);
81bca32f 447 return 0;
82a96f57
KH
448}
449
9e01a009
LW
450static const struct of_device_id ixp4xx_qmgr_of_match[] = {
451 {
452 .compatible = "intel,ixp4xx-ahb-queue-manager",
453 },
454 {},
455};
456
81bca32f
LW
457static struct platform_driver ixp4xx_qmgr_driver = {
458 .driver = {
459 .name = "ixp4xx-qmgr",
9e01a009 460 .of_match_table = of_match_ptr(ixp4xx_qmgr_of_match),
81bca32f
LW
461 },
462 .probe = ixp4xx_qmgr_probe,
463 .remove = ixp4xx_qmgr_remove,
464};
465module_platform_driver(ixp4xx_qmgr_driver);
82a96f57
KH
466
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

EXPORT_SYMBOL(qmgr_put_entry);
EXPORT_SYMBOL(qmgr_get_entry);
EXPORT_SYMBOL(qmgr_stat_empty);
EXPORT_SYMBOL(qmgr_stat_below_low_watermark);
EXPORT_SYMBOL(qmgr_stat_full);
EXPORT_SYMBOL(qmgr_stat_overflow);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);