]> git.proxmox.com Git - qemu.git/blame - hw/dma/pl080.c
milkymist-uart: Use Device::realize instead of SysBusDevice::init
[qemu.git] / hw / dma / pl080.c
CommitLineData
5fafdf24 1/*
e69954b9 2 * Arm PrimeCell PL080/PL081 DMA controller
cdbdb648
PB
3 *
4 * Copyright (c) 2006 CodeSourcery.
5 * Written by Paul Brook
6 *
8e31bf38 7 * This code is licensed under the GPL.
cdbdb648
PB
8 */
9
83c9f4ca 10#include "hw/sysbus.h"
cdbdb648 11
/* The PL080 has 8 DMA channels; the PL081 variant exposes only 2. */
#define PL080_MAX_CHANNELS 8

/* DMACConfiguration register bits. */
#define PL080_CONF_E 0x1     /* Controller enable */
#define PL080_CONF_M1 0x2    /* AHB master 1 endianness (big-endian) */
#define PL080_CONF_M2 0x4    /* AHB master 2 endianness (big-endian) */

/* Per-channel DMACCxConfiguration register bits. */
#define PL080_CCONF_H 0x40000    /* Halt */
#define PL080_CCONF_A 0x20000    /* Active (read-only status) */
#define PL080_CCONF_L 0x10000    /* Lock */
#define PL080_CCONF_ITC 0x08000  /* Terminal count interrupt mask */
#define PL080_CCONF_IE 0x04000   /* Interrupt error mask */
#define PL080_CCONF_E 0x00001    /* Channel enable */

/* Per-channel DMACCxControl register bits. */
#define PL080_CCTRL_I 0x80000000   /* Terminal count interrupt enable */
#define PL080_CCTRL_DI 0x08000000  /* Destination increment */
#define PL080_CCTRL_SI 0x04000000  /* Source increment */
#define PL080_CCTRL_D 0x02000000   /* Destination AHB master select */
#define PL080_CCTRL_S 0x01000000   /* Source AHB master select */

/* State of a single DMA channel (mirrors the per-channel register frame). */
typedef struct {
    uint32_t src;   /* DMACCxSrcAddr */
    uint32_t dest;  /* DMACCxDestAddr */
    uint32_t lli;   /* DMACCxLLI: linked-list item pointer, 0 = end */
    uint32_t ctrl;  /* DMACCxControl: widths, increments, transfer size */
    uint32_t conf;  /* DMACCxConfiguration: enable, flow control, IRQ masks */
} pl080_channel;
37
4f800554
AF
#define TYPE_PL080 "pl080"
#define PL080(obj) OBJECT_CHECK(PL080State, (obj), TYPE_PL080)

/* Device state for the PL080/PL081 DMA controller.
 * The PL081 reuses this type with nchannels set to 2.
 */
typedef struct PL080State {
    SysBusDevice parent_obj;

    MemoryRegion iomem;   /* 4K register bank */
    uint8_t tc_int;       /* raw terminal-count interrupt status, bit per channel */
    uint8_t tc_mask;      /* terminal-count interrupt enables (from CCONF_ITC) */
    uint8_t err_int;      /* raw error interrupt status, bit per channel */
    uint8_t err_mask;     /* error interrupt enables (from CCONF_IE) */
    uint32_t conf;        /* DMACConfiguration */
    uint32_t sync;        /* DMACSync */
    uint32_t req_single;  /* pending single DMA requests, bit per request line */
    uint32_t req_burst;   /* pending burst DMA requests, bit per request line */
    pl080_channel chan[PL080_MAX_CHANNELS];
    int nchannels;        /* 8 for PL080, 2 for PL081 */
    /* Flag to avoid recursive DMA invocations. */
    int running;
    qemu_irq irq;         /* combined interrupt output */
} PL080State;
cdbdb648 59
ff175853
PM
/* Migration state for one DMA channel: all five per-channel registers. */
static const VMStateDescription vmstate_pl080_channel = {
    .name = "pl080_channel",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(src, pl080_channel),
        VMSTATE_UINT32(dest, pl080_channel),
        VMSTATE_UINT32(lli, pl080_channel),
        VMSTATE_UINT32(ctrl, pl080_channel),
        VMSTATE_UINT32(conf, pl080_channel),
        VMSTATE_END_OF_LIST()
    }
};
73
74static const VMStateDescription vmstate_pl080 = {
75 .name = "pl080",
76 .version_id = 1,
77 .minimum_version_id = 1,
78 .fields = (VMStateField[]) {
d7ba0a62
AF
79 VMSTATE_UINT8(tc_int, PL080State),
80 VMSTATE_UINT8(tc_mask, PL080State),
81 VMSTATE_UINT8(err_int, PL080State),
82 VMSTATE_UINT8(err_mask, PL080State),
83 VMSTATE_UINT32(conf, PL080State),
84 VMSTATE_UINT32(sync, PL080State),
85 VMSTATE_UINT32(req_single, PL080State),
86 VMSTATE_UINT32(req_burst, PL080State),
87 VMSTATE_UINT8(tc_int, PL080State),
88 VMSTATE_UINT8(tc_int, PL080State),
89 VMSTATE_UINT8(tc_int, PL080State),
90 VMSTATE_STRUCT_ARRAY(chan, PL080State, PL080_MAX_CHANNELS,
ff175853 91 1, vmstate_pl080_channel, pl080_channel),
d7ba0a62 92 VMSTATE_INT32(running, PL080State),
ff175853
PM
93 VMSTATE_END_OF_LIST()
94 }
95};
96
cdbdb648
PB
/* PrimeCell peripheral/cell ID bytes returned at offsets 0xfe0-0xffc.
 * Only the first byte differs between the two variants (0x80 vs 0x81). */
static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

static const unsigned char pl081_id[] =
{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
102
d7ba0a62 103static void pl080_update(PL080State *s)
cdbdb648
PB
104{
105 if ((s->tc_int & s->tc_mask)
106 || (s->err_int & s->err_mask))
d537cf6c 107 qemu_irq_raise(s->irq);
cdbdb648 108 else
d537cf6c 109 qemu_irq_lower(s->irq);
cdbdb648
PB
110}
111
d7ba0a62 112static void pl080_run(PL080State *s)
cdbdb648
PB
113{
114 int c;
115 int flow;
116 pl080_channel *ch;
117 int swidth;
118 int dwidth;
119 int xsize;
120 int n;
121 int src_id;
122 int dest_id;
123 int size;
b55266b5 124 uint8_t buff[4];
cdbdb648
PB
125 uint32_t req;
126
127 s->tc_mask = 0;
e69954b9 128 for (c = 0; c < s->nchannels; c++) {
cdbdb648
PB
129 if (s->chan[c].conf & PL080_CCONF_ITC)
130 s->tc_mask |= 1 << c;
131 if (s->chan[c].conf & PL080_CCONF_IE)
132 s->err_mask |= 1 << c;
133 }
134
135 if ((s->conf & PL080_CONF_E) == 0)
136 return;
137
2ac71179 138hw_error("DMA active\n");
cdbdb648
PB
139 /* If we are already in the middle of a DMA operation then indicate that
140 there may be new DMA requests and return immediately. */
141 if (s->running) {
142 s->running++;
143 return;
144 }
145 s->running = 1;
146 while (s->running) {
e69954b9 147 for (c = 0; c < s->nchannels; c++) {
cdbdb648
PB
148 ch = &s->chan[c];
149again:
150 /* Test if thiws channel has any pending DMA requests. */
151 if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
152 != PL080_CCONF_E)
153 continue;
154 flow = (ch->conf >> 11) & 7;
155 if (flow >= 4) {
2ac71179 156 hw_error(
cdbdb648
PB
157 "pl080_run: Peripheral flow control not implemented\n");
158 }
159 src_id = (ch->conf >> 1) & 0x1f;
160 dest_id = (ch->conf >> 6) & 0x1f;
161 size = ch->ctrl & 0xfff;
162 req = s->req_single | s->req_burst;
163 switch (flow) {
164 case 0:
165 break;
166 case 1:
167 if ((req & (1u << dest_id)) == 0)
168 size = 0;
169 break;
170 case 2:
171 if ((req & (1u << src_id)) == 0)
172 size = 0;
173 break;
174 case 3:
175 if ((req & (1u << src_id)) == 0
176 || (req & (1u << dest_id)) == 0)
177 size = 0;
178 break;
179 }
180 if (!size)
181 continue;
182
183 /* Transfer one element. */
184 /* ??? Should transfer multiple elements for a burst request. */
185 /* ??? Unclear what the proper behavior is when source and
186 destination widths are different. */
187 swidth = 1 << ((ch->ctrl >> 18) & 7);
188 dwidth = 1 << ((ch->ctrl >> 21) & 7);
189 for (n = 0; n < dwidth; n+= swidth) {
190 cpu_physical_memory_read(ch->src, buff + n, swidth);
191 if (ch->ctrl & PL080_CCTRL_SI)
192 ch->src += swidth;
193 }
194 xsize = (dwidth < swidth) ? swidth : dwidth;
195 /* ??? This may pad the value incorrectly for dwidth < 32. */
196 for (n = 0; n < xsize; n += dwidth) {
197 cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
198 if (ch->ctrl & PL080_CCTRL_DI)
199 ch->dest += swidth;
200 }
201
202 size--;
203 ch->ctrl = (ch->ctrl & 0xfffff000) | size;
204 if (size == 0) {
205 /* Transfer complete. */
206 if (ch->lli) {
75b0646f
AG
207 ch->src = ldl_le_phys(ch->lli);
208 ch->dest = ldl_le_phys(ch->lli + 4);
209 ch->ctrl = ldl_le_phys(ch->lli + 12);
210 ch->lli = ldl_le_phys(ch->lli + 8);
cdbdb648
PB
211 } else {
212 ch->conf &= ~PL080_CCONF_E;
213 }
214 if (ch->ctrl & PL080_CCTRL_I) {
215 s->tc_int |= 1 << c;
216 }
217 }
218 goto again;
219 }
220 if (--s->running)
221 s->running = 1;
222 }
223}
224
a8170e5e 225static uint64_t pl080_read(void *opaque, hwaddr offset,
63b02e04 226 unsigned size)
cdbdb648 227{
d7ba0a62 228 PL080State *s = (PL080State *)opaque;
cdbdb648
PB
229 uint32_t i;
230 uint32_t mask;
231
cdbdb648 232 if (offset >= 0xfe0 && offset < 0x1000) {
e69954b9
PB
233 if (s->nchannels == 8) {
234 return pl080_id[(offset - 0xfe0) >> 2];
235 } else {
236 return pl081_id[(offset - 0xfe0) >> 2];
237 }
cdbdb648
PB
238 }
239 if (offset >= 0x100 && offset < 0x200) {
240 i = (offset & 0xe0) >> 5;
e69954b9
PB
241 if (i >= s->nchannels)
242 goto bad_offset;
cdbdb648
PB
243 switch (offset >> 2) {
244 case 0: /* SrcAddr */
245 return s->chan[i].src;
246 case 1: /* DestAddr */
247 return s->chan[i].dest;
248 case 2: /* LLI */
249 return s->chan[i].lli;
250 case 3: /* Control */
251 return s->chan[i].ctrl;
252 case 4: /* Configuration */
253 return s->chan[i].conf;
254 default:
255 goto bad_offset;
256 }
257 }
258 switch (offset >> 2) {
259 case 0: /* IntStatus */
260 return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
261 case 1: /* IntTCStatus */
262 return (s->tc_int & s->tc_mask);
263 case 3: /* IntErrorStatus */
264 return (s->err_int & s->err_mask);
265 case 5: /* RawIntTCStatus */
266 return s->tc_int;
267 case 6: /* RawIntErrorStatus */
268 return s->err_int;
269 case 7: /* EnbldChns */
270 mask = 0;
e69954b9 271 for (i = 0; i < s->nchannels; i++) {
cdbdb648
PB
272 if (s->chan[i].conf & PL080_CCONF_E)
273 mask |= 1 << i;
274 }
275 return mask;
276 case 8: /* SoftBReq */
277 case 9: /* SoftSReq */
278 case 10: /* SoftLBReq */
279 case 11: /* SoftLSReq */
280 /* ??? Implement these. */
281 return 0;
282 case 12: /* Configuration */
283 return s->conf;
284 case 13: /* Sync */
285 return s->sync;
286 default:
287 bad_offset:
df374162
PM
288 qemu_log_mask(LOG_GUEST_ERROR,
289 "pl080_read: Bad offset %x\n", (int)offset);
cdbdb648
PB
290 return 0;
291 }
292}
293
a8170e5e 294static void pl080_write(void *opaque, hwaddr offset,
63b02e04 295 uint64_t value, unsigned size)
cdbdb648 296{
d7ba0a62 297 PL080State *s = (PL080State *)opaque;
cdbdb648
PB
298 int i;
299
cdbdb648
PB
300 if (offset >= 0x100 && offset < 0x200) {
301 i = (offset & 0xe0) >> 5;
e69954b9
PB
302 if (i >= s->nchannels)
303 goto bad_offset;
cdbdb648
PB
304 switch (offset >> 2) {
305 case 0: /* SrcAddr */
306 s->chan[i].src = value;
307 break;
308 case 1: /* DestAddr */
309 s->chan[i].dest = value;
310 break;
311 case 2: /* LLI */
312 s->chan[i].lli = value;
313 break;
314 case 3: /* Control */
315 s->chan[i].ctrl = value;
316 break;
317 case 4: /* Configuration */
318 s->chan[i].conf = value;
319 pl080_run(s);
320 break;
321 }
322 }
323 switch (offset >> 2) {
324 case 2: /* IntTCClear */
325 s->tc_int &= ~value;
326 break;
327 case 4: /* IntErrorClear */
328 s->err_int &= ~value;
329 break;
330 case 8: /* SoftBReq */
331 case 9: /* SoftSReq */
332 case 10: /* SoftLBReq */
333 case 11: /* SoftLSReq */
334 /* ??? Implement these. */
df374162 335 qemu_log_mask(LOG_UNIMP, "pl080_write: Soft DMA not implemented\n");
cdbdb648
PB
336 break;
337 case 12: /* Configuration */
338 s->conf = value;
339 if (s->conf & (PL080_CONF_M1 | PL080_CONF_M1)) {
df374162
PM
340 qemu_log_mask(LOG_UNIMP,
341 "pl080_write: Big-endian DMA not implemented\n");
cdbdb648
PB
342 }
343 pl080_run(s);
344 break;
345 case 13: /* Sync */
346 s->sync = value;
347 break;
348 default:
e69954b9 349 bad_offset:
df374162
PM
350 qemu_log_mask(LOG_GUEST_ERROR,
351 "pl080_write: Bad offset %x\n", (int)offset);
cdbdb648
PB
352 }
353 pl080_update(s);
354}
355
63b02e04
AK
/* MMIO dispatch table for the register bank. */
static const MemoryRegionOps pl080_ops = {
    .read = pl080_read,
    .write = pl080_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
361
4f800554 362static void pl080_init(Object *obj)
cdbdb648 363{
4f800554
AF
364 SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
365 PL080State *s = PL080(obj);
cdbdb648 366
3eadad55 367 memory_region_init_io(&s->iomem, OBJECT(s), &pl080_ops, s, "pl080", 0x1000);
4f800554
AF
368 sysbus_init_mmio(sbd, &s->iomem);
369 sysbus_init_irq(sbd, &s->irq);
370 s->nchannels = 8;
cdbdb648 371}
b4496b13 372
4f800554 373static void pl081_init(Object *obj)
b4496b13 374{
4f800554 375 PL080State *s = PL080(obj);
b4496b13 376
4f800554 377 s->nchannels = 2;
b4496b13
PB
378}
379
4f800554 380static void pl080_class_init(ObjectClass *oc, void *data)
999e12bb 381{
4f800554 382 DeviceClass *dc = DEVICE_CLASS(oc);
999e12bb 383
39bffca2
AL
384 dc->no_user = 1;
385 dc->vmsd = &vmstate_pl080;
999e12bb
AL
386}
387
/* QOM registration for the 8-channel PL080. */
static const TypeInfo pl080_info = {
    .name = TYPE_PL080,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PL080State),
    .instance_init = pl080_init,
    .class_init = pl080_class_init,
};
395
/* QOM registration for the 2-channel PL081: subclasses TYPE_PL080 and
 * only overrides instance_init to reduce the channel count. */
static const TypeInfo pl081_info = {
    .name = "pl081",
    .parent = TYPE_PL080,
    .instance_init = pl081_init,
};
401
/* The PL080 and PL081 are the same except for the number of channels
   they implement (8 and 2 respectively). */
static void pl080_register_types(void)
{
    type_register_static(&pl080_info);
    type_register_static(&pl081_info);
}

/* Hook both types into QEMU's type-registration machinery at startup. */
type_init(pl080_register_types)