]> git.proxmox.com Git - qemu.git/blame - hw/dma/pl080.c
Merge git://github.com/hw-claudio/qemu-aarch64-queue into tcg-next
[qemu.git] / hw / dma / pl080.c
CommitLineData
5fafdf24 1/*
e69954b9 2 * Arm PrimeCell PL080/PL081 DMA controller
cdbdb648
PB
3 *
4 * Copyright (c) 2006 CodeSourcery.
5 * Written by Paul Brook
6 *
8e31bf38 7 * This code is licensed under the GPL.
cdbdb648
PB
8 */
9
83c9f4ca 10#include "hw/sysbus.h"
cdbdb648 11
/* Maximum number of channels: the PL080 has 8, the PL081 has 2.  */
#define PL080_MAX_CHANNELS 8

/* DMACConfiguration register bits.  */
#define PL080_CONF_E 0x1     /* controller enable */
#define PL080_CONF_M1 0x2    /* AHB master 1 endianness (big-endian if set) */
#define PL080_CONF_M2 0x4    /* AHB master 2 endianness (big-endian if set) */

/* Per-channel DMACCxConfiguration register bits.  */
#define PL080_CCONF_H 0x40000    /* halt */
#define PL080_CCONF_A 0x20000    /* active */
#define PL080_CCONF_L 0x10000    /* lock */
#define PL080_CCONF_ITC 0x08000  /* terminal count interrupt unmasked */
#define PL080_CCONF_IE 0x04000   /* error interrupt unmasked */
#define PL080_CCONF_E 0x00001    /* channel enable */

/* Per-channel DMACCxControl register bits.  */
#define PL080_CCTRL_I 0x80000000   /* raise terminal count interrupt */
#define PL080_CCTRL_DI 0x08000000  /* destination address increment */
#define PL080_CCTRL_SI 0x04000000  /* source address increment */
#define PL080_CCTRL_D 0x02000000   /* destination AHB master select */
#define PL080_CCTRL_S 0x01000000   /* source AHB master select */
/* State of a single DMA channel: the raw values of its five registers.  */
typedef struct {
    uint32_t src;   /* DMACCxSrcAddr */
    uint32_t dest;  /* DMACCxDestAddr */
    uint32_t lli;   /* DMACCxLLI: pointer to next linked-list descriptor */
    uint32_t ctrl;  /* DMACCxControl: widths, increments, transfer size */
    uint32_t conf;  /* DMACCxConfiguration: enable, flow control, request IDs */
} pl080_channel;
37
/* Device state shared by the PL080 and PL081 models.  */
typedef struct {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint8_t tc_int;      /* raw terminal-count interrupt status, one bit per channel */
    uint8_t tc_mask;     /* terminal-count interrupt mask (from PL080_CCONF_ITC) */
    uint8_t err_int;     /* raw error interrupt status, one bit per channel */
    uint8_t err_mask;    /* error interrupt mask (from PL080_CCONF_IE) */
    uint32_t conf;       /* DMACConfiguration register */
    uint32_t sync;       /* DMACSync register */
    uint32_t req_single; /* pending single-transfer requests, one bit per peripheral */
    uint32_t req_burst;  /* pending burst-transfer requests, one bit per peripheral */
    pl080_channel chan[PL080_MAX_CHANNELS];
    int nchannels;       /* 8 for PL080, 2 for PL081 */
    /* Flag to avoid recursive DMA invocations.  */
    int running;
    qemu_irq irq;        /* combined TC/error interrupt line */
} pl080_state;
55
ff175853
PM
/* Migration state for one DMA channel: all five channel registers.  */
static const VMStateDescription vmstate_pl080_channel = {
    .name = "pl080_channel",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(src, pl080_channel),
        VMSTATE_UINT32(dest, pl080_channel),
        VMSTATE_UINT32(lli, pl080_channel),
        VMSTATE_UINT32(ctrl, pl080_channel),
        VMSTATE_UINT32(conf, pl080_channel),
        VMSTATE_END_OF_LIST()
    }
};
69
/* Migration state for the whole controller.  */
static const VMStateDescription vmstate_pl080 = {
    .name = "pl080",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(tc_int, pl080_state),
        VMSTATE_UINT8(tc_mask, pl080_state),
        VMSTATE_UINT8(err_int, pl080_state),
        VMSTATE_UINT8(err_mask, pl080_state),
        VMSTATE_UINT32(conf, pl080_state),
        VMSTATE_UINT32(sync, pl080_state),
        VMSTATE_UINT32(req_single, pl080_state),
        VMSTATE_UINT32(req_burst, pl080_state),
        /* NOTE(review): the three duplicated tc_int entries below look like
           a copy-paste mistake (tc_int is already serialized above).  They
           are functionally harmless as long as both migration ends run the
           same code, but they cannot be removed without changing the
           on-the-wire stream layout, i.e. without bumping version_id and
           breaking migration from existing streams.  Flagged, not fixed.  */
        VMSTATE_UINT8(tc_int, pl080_state),
        VMSTATE_UINT8(tc_int, pl080_state),
        VMSTATE_UINT8(tc_int, pl080_state),
        VMSTATE_STRUCT_ARRAY(chan, pl080_state, PL080_MAX_CHANNELS,
                             1, vmstate_pl080_channel, pl080_channel),
        VMSTATE_INT32(running, pl080_state),
        VMSTATE_END_OF_LIST()
    }
};
92
cdbdb648
PB
/* PrimeCell peripheral/cell ID bytes, read back one byte per word from
   offsets 0xfe0..0xfff.  The two variants differ only in the part
   number's low byte (0x80 vs 0x81).  */
static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

static const unsigned char pl081_id[] =
{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
98
cdbdb648
PB
99static void pl080_update(pl080_state *s)
100{
101 if ((s->tc_int & s->tc_mask)
102 || (s->err_int & s->err_mask))
d537cf6c 103 qemu_irq_raise(s->irq);
cdbdb648 104 else
d537cf6c 105 qemu_irq_lower(s->irq);
cdbdb648
PB
106}
107
/* Recompute the interrupt masks from each channel's configuration, then
   advance any runnable channels one element at a time until no work is
   pending.
 *
 * NOTE(review): the bare hw_error("DMA active\n") below aborts as soon as
 * the controller enable bit is set, so everything after it is effectively
 * dead code that has never been exercised in this model.  */
static void pl080_run(pl080_state *s)
{
    int c;
    int flow;
    pl080_channel *ch;
    int swidth;
    int dwidth;
    int xsize;
    int n;
    int src_id;
    int dest_id;
    int size;
    uint8_t buff[4];
    uint32_t req;

    /* Rebuild the interrupt masks from the per-channel ITC/IE bits.
       NOTE(review): unlike tc_mask, err_mask is never cleared first, so
       its bits only ever accumulate — confirm whether that is intended.  */
    s->tc_mask = 0;
    for (c = 0; c < s->nchannels; c++) {
        if (s->chan[c].conf & PL080_CCONF_ITC)
            s->tc_mask |= 1 << c;
        if (s->chan[c].conf & PL080_CCONF_IE)
            s->err_mask |= 1 << c;
    }

    /* Nothing to do while the controller itself is disabled.  */
    if ((s->conf & PL080_CONF_E) == 0)
        return;

hw_error("DMA active\n");
    /* If we are already in the middle of a DMA operation then indicate that
       there may be new DMA requests and return immediately.  */
    if (s->running) {
        s->running++;
        return;
    }
    s->running = 1;
    while (s->running) {
        for (c = 0; c < s->nchannels; c++) {
            ch = &s->chan[c];
again:
            /* Test if this channel has any pending DMA requests.
               A channel is runnable when enabled and not halted.  */
            if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
                    != PL080_CCONF_E)
                continue;
            flow = (ch->conf >> 11) & 7;
            if (flow >= 4) {
                hw_error(
                "pl080_run: Peripheral flow control not implemented\n");
            }
            src_id = (ch->conf >> 1) & 0x1f;
            dest_id = (ch->conf >> 6) & 0x1f;
            size = ch->ctrl & 0xfff;
            req = s->req_single | s->req_burst;
            /* Flow control: 0 is memory-to-memory (always ready); the
               other modes stall until the relevant peripheral request
               line(s) are active.  */
            switch (flow) {
            case 0:
                break;
            case 1:
                if ((req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            case 2:
                if ((req & (1u << src_id)) == 0)
                    size = 0;
                break;
            case 3:
                if ((req & (1u << src_id)) == 0
                        || (req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            }
            if (!size)
                continue;

            /* Transfer one element.  */
            /* ??? Should transfer multiple elements for a burst request.  */
            /* ??? Unclear what the proper behavior is when source and
               destination widths are different.  */
            swidth = 1 << ((ch->ctrl >> 18) & 7);
            dwidth = 1 << ((ch->ctrl >> 21) & 7);
            for (n = 0; n < dwidth; n+= swidth) {
                cpu_physical_memory_read(ch->src, buff + n, swidth);
                if (ch->ctrl & PL080_CCTRL_SI)
                    ch->src += swidth;
            }
            xsize = (dwidth < swidth) ? swidth : dwidth;
            /* ??? This may pad the value incorrectly for dwidth < 32.  */
            for (n = 0; n < xsize; n += dwidth) {
                cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
                if (ch->ctrl & PL080_CCTRL_DI)
                    /* NOTE(review): incrementing by swidth while also
                       indexing with "+ n" looks inconsistent with the
                       source loop above (which increments by swidth and
                       does not add n); verify against the PL080 TRM
                       before this dead path is ever enabled.  */
                    ch->dest += swidth;
            }

            size--;
            ch->ctrl = (ch->ctrl & 0xfffff000) | size;
            if (size == 0) {
                /* Transfer complete: follow the linked list to the next
                   descriptor, or disable the channel if the list ends.  */
                if (ch->lli) {
                    ch->src = ldl_le_phys(ch->lli);
                    ch->dest = ldl_le_phys(ch->lli + 4);
                    ch->ctrl = ldl_le_phys(ch->lli + 12);
                    ch->lli = ldl_le_phys(ch->lli + 8);
                } else {
                    ch->conf &= ~PL080_CCONF_E;
                }
                if (ch->ctrl & PL080_CCTRL_I) {
                    s->tc_int |= 1 << c;
                }
            }
            goto again;
        }
        if (--s->running)
            s->running = 1;
    }
}
220
a8170e5e 221static uint64_t pl080_read(void *opaque, hwaddr offset,
63b02e04 222 unsigned size)
cdbdb648
PB
223{
224 pl080_state *s = (pl080_state *)opaque;
225 uint32_t i;
226 uint32_t mask;
227
cdbdb648 228 if (offset >= 0xfe0 && offset < 0x1000) {
e69954b9
PB
229 if (s->nchannels == 8) {
230 return pl080_id[(offset - 0xfe0) >> 2];
231 } else {
232 return pl081_id[(offset - 0xfe0) >> 2];
233 }
cdbdb648
PB
234 }
235 if (offset >= 0x100 && offset < 0x200) {
236 i = (offset & 0xe0) >> 5;
e69954b9
PB
237 if (i >= s->nchannels)
238 goto bad_offset;
cdbdb648
PB
239 switch (offset >> 2) {
240 case 0: /* SrcAddr */
241 return s->chan[i].src;
242 case 1: /* DestAddr */
243 return s->chan[i].dest;
244 case 2: /* LLI */
245 return s->chan[i].lli;
246 case 3: /* Control */
247 return s->chan[i].ctrl;
248 case 4: /* Configuration */
249 return s->chan[i].conf;
250 default:
251 goto bad_offset;
252 }
253 }
254 switch (offset >> 2) {
255 case 0: /* IntStatus */
256 return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
257 case 1: /* IntTCStatus */
258 return (s->tc_int & s->tc_mask);
259 case 3: /* IntErrorStatus */
260 return (s->err_int & s->err_mask);
261 case 5: /* RawIntTCStatus */
262 return s->tc_int;
263 case 6: /* RawIntErrorStatus */
264 return s->err_int;
265 case 7: /* EnbldChns */
266 mask = 0;
e69954b9 267 for (i = 0; i < s->nchannels; i++) {
cdbdb648
PB
268 if (s->chan[i].conf & PL080_CCONF_E)
269 mask |= 1 << i;
270 }
271 return mask;
272 case 8: /* SoftBReq */
273 case 9: /* SoftSReq */
274 case 10: /* SoftLBReq */
275 case 11: /* SoftLSReq */
276 /* ??? Implement these. */
277 return 0;
278 case 12: /* Configuration */
279 return s->conf;
280 case 13: /* Sync */
281 return s->sync;
282 default:
283 bad_offset:
df374162
PM
284 qemu_log_mask(LOG_GUEST_ERROR,
285 "pl080_read: Bad offset %x\n", (int)offset);
cdbdb648
PB
286 return 0;
287 }
288}
289
a8170e5e 290static void pl080_write(void *opaque, hwaddr offset,
63b02e04 291 uint64_t value, unsigned size)
cdbdb648
PB
292{
293 pl080_state *s = (pl080_state *)opaque;
294 int i;
295
cdbdb648
PB
296 if (offset >= 0x100 && offset < 0x200) {
297 i = (offset & 0xe0) >> 5;
e69954b9
PB
298 if (i >= s->nchannels)
299 goto bad_offset;
cdbdb648
PB
300 switch (offset >> 2) {
301 case 0: /* SrcAddr */
302 s->chan[i].src = value;
303 break;
304 case 1: /* DestAddr */
305 s->chan[i].dest = value;
306 break;
307 case 2: /* LLI */
308 s->chan[i].lli = value;
309 break;
310 case 3: /* Control */
311 s->chan[i].ctrl = value;
312 break;
313 case 4: /* Configuration */
314 s->chan[i].conf = value;
315 pl080_run(s);
316 break;
317 }
318 }
319 switch (offset >> 2) {
320 case 2: /* IntTCClear */
321 s->tc_int &= ~value;
322 break;
323 case 4: /* IntErrorClear */
324 s->err_int &= ~value;
325 break;
326 case 8: /* SoftBReq */
327 case 9: /* SoftSReq */
328 case 10: /* SoftLBReq */
329 case 11: /* SoftLSReq */
330 /* ??? Implement these. */
df374162 331 qemu_log_mask(LOG_UNIMP, "pl080_write: Soft DMA not implemented\n");
cdbdb648
PB
332 break;
333 case 12: /* Configuration */
334 s->conf = value;
335 if (s->conf & (PL080_CONF_M1 | PL080_CONF_M1)) {
df374162
PM
336 qemu_log_mask(LOG_UNIMP,
337 "pl080_write: Big-endian DMA not implemented\n");
cdbdb648
PB
338 }
339 pl080_run(s);
340 break;
341 case 13: /* Sync */
342 s->sync = value;
343 break;
344 default:
e69954b9 345 bad_offset:
df374162
PM
346 qemu_log_mask(LOG_GUEST_ERROR,
347 "pl080_write: Bad offset %x\n", (int)offset);
cdbdb648
PB
348 }
349 pl080_update(s);
350}
351
63b02e04
AK
/* MMIO callbacks for the controller's 4K register frame.  */
static const MemoryRegionOps pl080_ops = {
    .read = pl080_read,
    .write = pl080_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
357
81a322d4 358static int pl08x_init(SysBusDevice *dev, int nchannels)
cdbdb648 359{
b4496b13 360 pl080_state *s = FROM_SYSBUS(pl080_state, dev);
cdbdb648 361
3eadad55 362 memory_region_init_io(&s->iomem, OBJECT(s), &pl080_ops, s, "pl080", 0x1000);
750ecd44 363 sysbus_init_mmio(dev, &s->iomem);
b4496b13 364 sysbus_init_irq(dev, &s->irq);
e69954b9 365 s->nchannels = nchannels;
81a322d4 366 return 0;
cdbdb648 367}
b4496b13 368
/* PL080: the 8-channel variant.  */
static int pl080_init(SysBusDevice *dev)
{
    return pl08x_init(dev, 8);
}
373
/* PL081: the 2-channel variant.  */
static int pl081_init(SysBusDevice *dev)
{
    return pl08x_init(dev, 2);
}
378
999e12bb
AL
/* QOM class initializer for the PL080 device type.  */
static void pl080_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);

    k->init = pl080_init;
    /* Created by board code only, not user-instantiable via -device.  */
    dc->no_user = 1;
    dc->vmsd = &vmstate_pl080;
}
388
/* QOM type registration record for the 8-channel PL080.  */
static const TypeInfo pl080_info = {
    .name = "pl080",
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(pl080_state),
    .class_init = pl080_class_init,
};
395
999e12bb
AL
/* QOM class initializer for the PL081 device type.  Shares the PL080
   vmstate: both variants serialize all PL080_MAX_CHANNELS entries.  */
static void pl081_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);

    k->init = pl081_init;
    /* Created by board code only, not user-instantiable via -device.  */
    dc->no_user = 1;
    dc->vmsd = &vmstate_pl080;
}
405
/* QOM type registration record for the 2-channel PL081.  */
static const TypeInfo pl081_info = {
    .name = "pl081",
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(pl080_state),
    .class_init = pl081_class_init,
};
412
b4496b13
PB
/* The PL080 and PL081 are the same except for the number of channels
   they implement (8 and 2 respectively).  */
static void pl080_register_types(void)
{
    type_register_static(&pl080_info);
    type_register_static(&pl081_info);
}

type_init(pl080_register_types)