// SPDX-License-Identifier: GPL-2.0
/*
 * Memory arbiter functions. Allocates bandwidth through the
 * arbiter and sets up arbiter breakpoints.
 *
 * The algorithm first assigns slots to the clients that have specified
 * bandwidth (e.g. ethernet) and then divides the remaining slots
 * among all the active clients.
 *
 * Copyright (c) 2004-2007 Axis Communications AB.
 *
 * The artpec-3 has two arbiters. The memory hierarchy looks like this:
 *
 *
 *             CPU          DMAs
 *              |            |
 *              |            |
 *        --------------   ------------------
 *        | foo arbiter|---| Internal memory|
 *        --------------   ------------------
 *               |
 *        --------------
 *        |  L2 cache  |
 *        --------------
 *               |
 *   h264 etc    |
 *      |        |
 *      |        |
 *        --------------
 *        | bar arbiter|
 *        --------------
 *               |
 *           ---------
 *           | SDRAM |
 *           ---------
 *
 */

#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/marb_foo_defs.h>
#include <hwregs/marb_bar_defs.h>
#include <arbiter.h>
#include <hwregs/intr_vect.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <asm/irq_regs.h>

/* Debug printout macro, compiled out by default. */
#define D(x)

struct crisv32_watch_entry {
	unsigned long instance;	/* Breakpoint register block base */
	watch_callback *cb;	/* Called from the IRQ handler when the watch hits */
	unsigned long start;	/* Watched address range */
	unsigned long end;
	int used;		/* Non-zero while this breakpoint is allocated */
};

#define NUMBER_OF_BP 4
#define SDRAM_BANDWIDTH 400000000
#define INTMEM_BANDWIDTH 400000000
#define NBR_OF_SLOTS 64
#define NBR_OF_REGIONS 2
#define NBR_OF_CLIENTS 15
#define ARBITERS 2
#define UNASSIGNED 100

struct arbiter {
	unsigned long instance;
	int nbr_regions;
	int nbr_clients;
	int requested_slots[NBR_OF_REGIONS][NBR_OF_CLIENTS];
	int active_clients[NBR_OF_REGIONS][NBR_OF_CLIENTS];
};

static struct crisv32_watch_entry watches[ARBITERS][NUMBER_OF_BP] =
{
	{
		{regi_marb_foo_bp0},
		{regi_marb_foo_bp1},
		{regi_marb_foo_bp2},
		{regi_marb_foo_bp3}
	},
	{
		{regi_marb_bar_bp0},
		{regi_marb_bar_bp1},
		{regi_marb_bar_bp2},
		{regi_marb_bar_bp3}
	}
};

struct arbiter arbiters[ARBITERS] =
{
	{ /* L2 cache arbiter */
		.instance = regi_marb_foo,
		.nbr_regions = 2,
		.nbr_clients = 15
	},
	{ /* DDR2 arbiter */
		.instance = regi_marb_bar,
		.nbr_regions = 1,
		.nbr_clients = 9
	}
};

static int max_bandwidth[NBR_OF_REGIONS] = {SDRAM_BANDWIDTH, INTMEM_BANDWIDTH};

DEFINE_SPINLOCK(arbiter_lock);

static irqreturn_t
crisv32_foo_arbiter_irq(int irq, void *dev_id);
static irqreturn_t
crisv32_bar_arbiter_irq(int irq, void *dev_id);

/*
 * "I'm the arbiter, I know the score.
 * From square one I'll be watching all 64."
 * (memory arbiter slots, that is)
 *
 * Or in other words:
 * Program the memory arbiter slots for "region" according to what's
 * in requested_slots[] and active_clients[], while minimizing
 * latency. A caller may pass a positive value for "unused_slots";
 * it must then be the remaining number of unallocated slots, free
 * to hand out to any client.
 */

static void crisv32_arbiter_config(int arbiter, int region, int unused_slots)
{
	int slot;
	int client;
	int interval = 0;

	/*
	 * This vector corresponds to the hardware arbiter slots (see
	 * the hardware documentation for semantics). We initialize
	 * each slot with a suitable sentinel value outside the valid
	 * range {0 .. NBR_OF_CLIENTS - 1} and replace them with
	 * client indexes. Then it's fed to the hardware.
	 */
	s8 val[NBR_OF_SLOTS];

	for (slot = 0; slot < NBR_OF_SLOTS; slot++)
		val[slot] = -1;

	for (client = 0; client < arbiters[arbiter].nbr_clients; client++) {
		int pos;
		/* Allocate the requested non-zero number of slots, but
		 * also give clients with zero-requests one slot each
		 * while stocks last. We do the latter here, in client
		 * order. This makes sure zero-request clients are the
		 * first to get to any spare slots, else those slots
		 * could, when bandwidth is allocated close to the limit,
		 * all be allocated to low-index non-zero-request clients
		 * in the default-fill loop below. Another positive but
		 * secondary effect is a somewhat better spread of the
		 * zero-bandwidth clients in the vector, avoiding some of
		 * the latency that could otherwise be caused by the
		 * partitioning of non-zero-bandwidth clients at low
		 * indexes and zero-bandwidth clients at high
		 * indexes. (Note that this spreading can only affect the
		 * unallocated bandwidth.) All the above only matters for
		 * memory-intensive situations, of course.
		 */
		if (!arbiters[arbiter].requested_slots[region][client]) {
			/*
			 * Skip inactive clients. Also skip zero-slot
			 * allocations in this pass when there are no known
			 * free slots.
			 */
			if (!arbiters[arbiter].active_clients[region][client] ||
			    unused_slots <= 0)
				continue;

			unused_slots--;

			/* Only allocate one slot for this client. */
			interval = NBR_OF_SLOTS;
		} else
			interval = NBR_OF_SLOTS /
				arbiters[arbiter].requested_slots[region][client];

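		/*
		 * Illustrative example: a client that requested 8 of the
		 * 64 slots gets interval = 64 / 8 = 8 and is placed in
		 * roughly every eighth free slot (0, 8, 16, ...), which
		 * spreads its accesses evenly over the slot vector and
		 * keeps its worst-case latency low.
		 */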
		pos = 0;
		while (pos < NBR_OF_SLOTS) {
			if (val[pos] >= 0)
				pos++;
			else {
				val[pos] = client;
				pos += interval;
			}
		}
	}

	client = 0;
	for (slot = 0; slot < NBR_OF_SLOTS; slot++) {
		/*
		 * Allocate remaining slots in round-robin
		 * client-number order for active clients. For this
		 * pass, we ignore requested bandwidth and previous
		 * allocations.
		 */
		if (val[slot] < 0) {
			int first = client;
			while (!arbiters[arbiter].active_clients[region][client]) {
				client = (client + 1) %
					arbiters[arbiter].nbr_clients;
				if (client == first)
					break;
			}
			val[slot] = client;
			client = (client + 1) % arbiters[arbiter].nbr_clients;
		}
		if (arbiter == 0) {
			if (region == EXT_REGION)
				REG_WR_INT_VECT(marb_foo, regi_marb_foo,
					rw_l2_slots, slot, val[slot]);
			else if (region == INT_REGION)
				REG_WR_INT_VECT(marb_foo, regi_marb_foo,
					rw_intm_slots, slot, val[slot]);
		} else {
			REG_WR_INT_VECT(marb_bar, regi_marb_bar,
				rw_ddr2_slots, slot, val[slot]);
		}
	}
}

extern char _stext[], _etext[];

static void crisv32_arbiter_init(void)
{
	static int initialized;

	if (initialized)
		return;

	initialized = 1;

	/*
	 * CPU caches are always set to active, but with zero
	 * bandwidth allocated. It should be OK to allocate zero
	 * bandwidth for the caches, because DMA for other channels
	 * will supposedly finish once their programmed amount is
	 * done, and then the caches will get access according to the
	 * "fixed scheme" for unclaimed slots. However, if some
	 * use-case requires a bounded CPU latency (e.g. for an
	 * interrupt), specific bandwidth will have to be allocated
	 * for the CPU caches too.
	 */
	arbiters[0].active_clients[EXT_REGION][11] = 1;
	arbiters[0].active_clients[EXT_REGION][12] = 1;
	crisv32_arbiter_config(0, EXT_REGION, 0);
	crisv32_arbiter_config(0, INT_REGION, 0);
	crisv32_arbiter_config(1, EXT_REGION, 0);

	if (request_irq(MEMARB_FOO_INTR_VECT, crisv32_foo_arbiter_irq,
			0, "arbiter", NULL))
		printk(KERN_ERR "Couldn't allocate foo arbiter IRQ\n");

	/*
	 * The IRQ handlers index watches[] with the value of dev_id,
	 * so the bar arbiter handler must be registered with 1 here
	 * (the foo handler's NULL dev_id reads as index 0).
	 */
	if (request_irq(MEMARB_BAR_INTR_VECT, crisv32_bar_arbiter_irq,
			0, "arbiter", (void *)1))
		printk(KERN_ERR "Couldn't allocate bar arbiter IRQ\n");

#ifndef CONFIG_ETRAX_KGDB
	/* Global watch for writes to kernel text segment. */
	crisv32_arbiter_watch(virt_to_phys(_stext), _etext - _stext,
			      MARB_CLIENTS(arbiter_all_clients, arbiter_bar_all_clients),
			      arbiter_all_write, NULL);
#endif

	/* Set up max burst sizes by default */
	REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_rd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_wr_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_ccd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_wr_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_rd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_rd_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_vout_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_fifo_burst, 3);
	REG_WR_INT(marb_bar, regi_marb_bar, rw_l2cache_burst, 3);
}

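/*
 * Reserve "bandwidth" (in the same unit as SDRAM_BANDWIDTH and
 * INTMEM_BANDWIDTH) for "client" in "region". A client id with any of
 * the upper 16 bits set selects the bar (DDR2) arbiter and is shifted
 * down 16 bits before use; otherwise the foo arbiter is used. Returns
 * 0 on success or -ENOMEM if the requested number of slots is not
 * available.
 */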
int crisv32_arbiter_allocate_bandwidth(int client, int region,
				       unsigned long bandwidth)
{
	int i;
	int total_assigned = 0;
	int total_clients = 0;
	int req;
	int arbiter = 0;

	crisv32_arbiter_init();

	if (client & 0xffff0000) {
		arbiter = 1;
		client >>= 16;
	}

	for (i = 0; i < arbiters[arbiter].nbr_clients; i++) {
		total_assigned += arbiters[arbiter].requested_slots[region][i];
		total_clients += arbiters[arbiter].active_clients[region][i];
	}

	/* Avoid division by 0 for 0-bandwidth requests. */
	req = bandwidth == 0
		? 0 : NBR_OF_SLOTS / (max_bandwidth[region] / bandwidth);

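	/*
	 * Example: with max_bandwidth[region] = 400000000, a request
	 * for bandwidth = 100000000 gives 400000000 / 100000000 = 4,
	 * i.e. req = 64 / 4 = 16 of the 64 slots.
	 */
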
	/*
	 * We make sure that there are enough slots only for non-zero
	 * requests. Requesting 0 bandwidth *may* allocate slots,
	 * though if all bandwidth is allocated, such a client won't
	 * get any and will have to rely on getting memory access
	 * according to the fixed scheme that's the default when one
	 * of the slot-allocated clients doesn't claim their slot.
	 */
	if (total_assigned + req > NBR_OF_SLOTS)
		return -ENOMEM;

	arbiters[arbiter].active_clients[region][client] = 1;
	arbiters[arbiter].requested_slots[region][client] = req;
	crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned);

	/* Propagate allocation from foo to bar */
	if (arbiter == 0)
		crisv32_arbiter_allocate_bandwidth(8 << 16,
						   EXT_REGION, bandwidth);
	return 0;
}

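/*
 * Illustrative use (hypothetical client id and bandwidth figure):
 *
 *	if (crisv32_arbiter_allocate_bandwidth(my_dma_client, EXT_REGION,
 *					       25000000))
 *		return -EBUSY;
 *	...
 *	crisv32_arbiter_deallocate_bandwidth(my_dma_client, EXT_REGION);
 */
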
/*
 * Main entry for bandwidth deallocation.
 *
 * Strictly speaking, for a somewhat constant set of clients where
 * each client gets a constant bandwidth and is just enabled or
 * disabled (somewhat dynamically), no action is necessary here to
 * avoid starvation for non-zero-allocation clients, as the allocated
 * slots will just be unused. However, handing out those unused slots
 * to active clients avoids needless latency if the "fixed scheme"
 * would give unclaimed slots to an eager low-index client.
 */

void crisv32_arbiter_deallocate_bandwidth(int client, int region)
{
	int i;
	int total_assigned = 0;
	int arbiter = 0;

	/* Decode the client id the same way as the allocation path. */
	if (client & 0xffff0000) {
		arbiter = 1;
		client >>= 16;
	}

	arbiters[arbiter].requested_slots[region][client] = 0;
	arbiters[arbiter].active_clients[region][client] = 0;

	for (i = 0; i < arbiters[arbiter].nbr_clients; i++)
		total_assigned += arbiters[arbiter].requested_slots[region][i];

	crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned);
}

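/*
 * Set up a breakpoint that triggers when any of "clients" performs one
 * of the accesses in "accesses" to the physical range [start, start +
 * size). Returns a positive watch id encoding the arbiter and
 * breakpoint number (decoded again by crisv32_arbiter_unwatch()),
 * -EFAULT if "start" does not look like a physical address, or
 * -ENOMEM if no breakpoint is free.
 */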
int crisv32_arbiter_watch(unsigned long start, unsigned long size,
			  unsigned long clients, unsigned long accesses,
			  watch_callback *cb)
{
	int i;
	int arbiter;
	int used[2] = {0, 0};	/* Must be initialized; only set conditionally below. */
	int ret = 0;

	crisv32_arbiter_init();

	if (start > 0x80000000) {
		printk(KERN_ERR "Arbiter: %lX doesn't look like a "
			"physical address\n", start);
		return -EFAULT;
	}

	spin_lock(&arbiter_lock);

	if (clients & 0xffff)
		used[0] = 1;
	if (clients & 0xffff0000)
		used[1] = 1;

	for (arbiter = 0; arbiter < ARBITERS; arbiter++) {
		if (!used[arbiter])
			continue;

		for (i = 0; i < NUMBER_OF_BP; i++) {
			if (!watches[arbiter][i].used) {
				unsigned intr_mask;
				if (arbiter)
					intr_mask = REG_RD_INT(marb_bar,
						regi_marb_bar, rw_intr_mask);
				else
					intr_mask = REG_RD_INT(marb_foo,
						regi_marb_foo, rw_intr_mask);

				watches[arbiter][i].used = 1;
				watches[arbiter][i].start = start;
				watches[arbiter][i].end = start + size;
				watches[arbiter][i].cb = cb;

				/*
				 * Encode the arbiter and breakpoint number
				 * in the id returned to the caller;
				 * crisv32_arbiter_unwatch() decodes it the
				 * same way.
				 */
				ret |= (i + 1) << (arbiter + 8);
				if (arbiter) {
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_first_addr,
						watches[arbiter][i].start);
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_last_addr,
						watches[arbiter][i].end);
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_op, accesses);
					REG_WR_INT(marb_bar_bp,
						watches[arbiter][i].instance,
						rw_clients,
						clients & 0xffff);
				} else {
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_first_addr,
						watches[arbiter][i].start);
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_last_addr,
						watches[arbiter][i].end);
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_op, accesses);
					REG_WR_INT(marb_foo_bp,
						watches[arbiter][i].instance,
						rw_clients, clients >> 16);
				}

				if (i == 0)
					intr_mask |= 1;
				else if (i == 1)
					intr_mask |= 2;
				else if (i == 2)
					intr_mask |= 4;
				else if (i == 3)
					intr_mask |= 8;

				if (arbiter)
					REG_WR_INT(marb_bar, regi_marb_bar,
						rw_intr_mask, intr_mask);
				else
					REG_WR_INT(marb_foo, regi_marb_foo,
						rw_intr_mask, intr_mask);

				/*
				 * Keep holding arbiter_lock; the single
				 * unlock below the loop releases it, also
				 * when a breakpoint on the second arbiter
				 * still has to be set up.
				 */
				break;
			}
		}
	}
	spin_unlock(&arbiter_lock);
	if (ret)
		return ret;
	else
		return -ENOMEM;
}

int crisv32_arbiter_unwatch(int id)
{
	int arbiter;
	int intr_mask;

	crisv32_arbiter_init();

	spin_lock(&arbiter_lock);

	for (arbiter = 0; arbiter < ARBITERS; arbiter++) {
		int id2;

		if (arbiter)
			intr_mask = REG_RD_INT(marb_bar, regi_marb_bar,
				rw_intr_mask);
		else
			intr_mask = REG_RD_INT(marb_foo, regi_marb_foo,
				rw_intr_mask);

		id2 = (id & (0xff << (arbiter + 8))) >> (arbiter + 8);
		if (id2 == 0)
			continue;
		id2--;
		if ((id2 >= NUMBER_OF_BP) || (!watches[arbiter][id2].used)) {
			spin_unlock(&arbiter_lock);
			return -EINVAL;
		}

		memset(&watches[arbiter][id2], 0,
		       sizeof(struct crisv32_watch_entry));

		if (id2 == 0)
			intr_mask &= ~1;
		else if (id2 == 1)
			intr_mask &= ~2;
		else if (id2 == 2)
			intr_mask &= ~4;
		else if (id2 == 3)
			intr_mask &= ~8;

		if (arbiter)
			REG_WR_INT(marb_bar, regi_marb_bar, rw_intr_mask,
				   intr_mask);
		else
			REG_WR_INT(marb_foo, regi_marb_foo, rw_intr_mask,
				   intr_mask);
	}

	spin_unlock(&arbiter_lock);
	return 0;
}

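/*
 * Illustrative use, mirroring the write watch set up in
 * crisv32_arbiter_init():
 *
 *	int id = crisv32_arbiter_watch(virt_to_phys(_stext), _etext - _stext,
 *				       MARB_CLIENTS(arbiter_all_clients,
 *						    arbiter_bar_all_clients),
 *				       arbiter_all_write, NULL);
 *	if (id > 0)
 *		crisv32_arbiter_unwatch(id);
 */
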
extern void show_registers(struct pt_regs *regs);

static irqreturn_t
crisv32_foo_arbiter_irq(int irq, void *dev_id)
{
	reg_marb_foo_r_masked_intr masked_intr;
	reg_marb_foo_bp_r_brk_clients r_clients;
	reg_marb_foo_bp_r_brk_addr r_addr;
	reg_marb_foo_bp_r_brk_op r_op;
	reg_marb_foo_bp_r_brk_first_client r_first;
	reg_marb_foo_bp_r_brk_size r_size;
	reg_marb_foo_bp_rw_ack ack = {0};
	reg_marb_foo_rw_ack_intr ack_intr = {
		.bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1
	};
	struct crisv32_watch_entry *watch;
	unsigned arbiter = (unsigned)dev_id;

	masked_intr = REG_RD(marb_foo, regi_marb_foo, r_masked_intr);

	/* Find the breakpoint that triggered. */
	if (masked_intr.bp0)
		watch = &watches[arbiter][0];
	else if (masked_intr.bp1)
		watch = &watches[arbiter][1];
	else if (masked_intr.bp2)
		watch = &watches[arbiter][2];
	else if (masked_intr.bp3)
		watch = &watches[arbiter][3];
	else
		return IRQ_NONE;

	/* Retrieve all useful information and print it. */
	r_clients = REG_RD(marb_foo_bp, watch->instance, r_brk_clients);
	r_addr = REG_RD(marb_foo_bp, watch->instance, r_brk_addr);
	r_op = REG_RD(marb_foo_bp, watch->instance, r_brk_op);
	r_first = REG_RD(marb_foo_bp, watch->instance, r_brk_first_client);
	r_size = REG_RD(marb_foo_bp, watch->instance, r_brk_size);

	printk(KERN_DEBUG "Arbiter IRQ\n");
	printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n",
	       REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_clients, r_clients),
	       REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_addr, r_addr),
	       REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_op, r_op),
	       REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_first_client, r_first),
	       REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_size, r_size));

	/* Acknowledge the breakpoint and the arbiter interrupt. */
	REG_WR(marb_foo_bp, watch->instance, rw_ack, ack);
	REG_WR(marb_foo, regi_marb_foo, rw_ack_intr, ack_intr);

	/* Print the interrupted instruction's address, as the bar handler does. */
	printk(KERN_DEBUG "IRQ occurred at %X\n", (unsigned)get_irq_regs()->erp);

	if (watch->cb)
		watch->cb();

	return IRQ_HANDLED;
}

static irqreturn_t
crisv32_bar_arbiter_irq(int irq, void *dev_id)
{
	reg_marb_bar_r_masked_intr masked_intr;
	reg_marb_bar_bp_r_brk_clients r_clients;
	reg_marb_bar_bp_r_brk_addr r_addr;
	reg_marb_bar_bp_r_brk_op r_op;
	reg_marb_bar_bp_r_brk_first_client r_first;
	reg_marb_bar_bp_r_brk_size r_size;
	reg_marb_bar_bp_rw_ack ack = {0};
	reg_marb_bar_rw_ack_intr ack_intr = {
		.bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1
	};
	struct crisv32_watch_entry *watch;
	unsigned arbiter = (unsigned)dev_id;

	masked_intr = REG_RD(marb_bar, regi_marb_bar, r_masked_intr);

	/* Find the breakpoint that triggered. */
	if (masked_intr.bp0)
		watch = &watches[arbiter][0];
	else if (masked_intr.bp1)
		watch = &watches[arbiter][1];
	else if (masked_intr.bp2)
		watch = &watches[arbiter][2];
	else if (masked_intr.bp3)
		watch = &watches[arbiter][3];
	else
		return IRQ_NONE;

	/* Retrieve all useful information and print it. */
	r_clients = REG_RD(marb_bar_bp, watch->instance, r_brk_clients);
	r_addr = REG_RD(marb_bar_bp, watch->instance, r_brk_addr);
	r_op = REG_RD(marb_bar_bp, watch->instance, r_brk_op);
	r_first = REG_RD(marb_bar_bp, watch->instance, r_brk_first_client);
	r_size = REG_RD(marb_bar_bp, watch->instance, r_brk_size);

	printk(KERN_DEBUG "Arbiter IRQ\n");
	printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n",
	       REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_clients, r_clients),
	       REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_addr, r_addr),
	       REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_op, r_op),
	       REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_first_client, r_first),
	       REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_size, r_size));

	/* Acknowledge the breakpoint and the arbiter interrupt. */
	REG_WR(marb_bar_bp, watch->instance, rw_ack, ack);
	REG_WR(marb_bar, regi_marb_bar, rw_ack_intr, ack_intr);

	printk(KERN_DEBUG "IRQ occurred at %X\n", (unsigned)get_irq_regs()->erp);

	if (watch->cb)
		watch->cb();

	return IRQ_HANDLED;
}