/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include <stdio.h>
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "hw/hw.h"
#include "block/block.h"
#include "block/accounting.h"
#include "sysemu/kvm.h"

typedef struct ScatterGatherEntry ScatterGatherEntry;

/*
 * Direction of a DMA transfer, from the device's point of view:
 * TO_DEVICE transfers guest memory into the device (a device read,
 * see dma_memory_read()), FROM_DEVICE transfers device data out to
 * guest memory (a device write, see dma_memory_write()).
 */
typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

/*
 * A scatter/gather list of guest-memory regions, built with
 * qemu_sglist_init()/qemu_sglist_add() and released with
 * qemu_sglist_destroy().
 */
struct QEMUSGList {
    ScatterGatherEntry *sg; /* array of (base, len) entries */
    int nsg;                /* entries currently in use */
    int nalloc;             /* NOTE(review): presumably allocated capacity of
                             * 'sg' — confirm against qemu_sglist_add() */
    size_t size;            /* total size in bytes of all entries */
    DeviceState *dev;       /* owning device (see qemu_sglist_init) */
    AddressSpace *as;       /* address space the entries refer to */
};

#ifndef CONFIG_USER_ONLY

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64

/* Memory barrier issued before every non-relaxed DMA access. */
static inline void dma_barrier(AddressSpace *as, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    if (kvm_enabled()) {
        smp_mb();
    }
}

d86a77f8 DG |
76 | /* Checks that the given range of addresses is valid for DMA. This is |
77 | * useful for certain cases, but usually you should just use | |
78 | * dma_memory_{read,write}() and check for errors */ | |
df32fd1c | 79 | static inline bool dma_memory_valid(AddressSpace *as, |
e5332e63 DG |
80 | dma_addr_t addr, dma_addr_t len, |
81 | DMADirection dir) | |
d86a77f8 | 82 | { |
df32fd1c | 83 | return address_space_access_valid(as, addr, len, |
24addbc7 | 84 | dir == DMA_DIRECTION_FROM_DEVICE); |
d86a77f8 DG |
85 | } |
86 | ||
df32fd1c | 87 | static inline int dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr, |
7a0bac4d BH |
88 | void *buf, dma_addr_t len, |
89 | DMADirection dir) | |
d86a77f8 | 90 | { |
5c9eb028 PM |
91 | return (bool)address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, |
92 | buf, len, dir == DMA_DIRECTION_FROM_DEVICE); | |
d86a77f8 DG |
93 | } |
94 | ||
/* Barrier-less DMA read: guest memory at @addr -> @buf (@len bytes). */
static inline int dma_memory_read_relaxed(AddressSpace *as, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

df32fd1c | 101 | static inline int dma_memory_write_relaxed(AddressSpace *as, dma_addr_t addr, |
7a0bac4d BH |
102 | const void *buf, dma_addr_t len) |
103 | { | |
df32fd1c | 104 | return dma_memory_rw_relaxed(as, addr, (void *)buf, len, |
7a0bac4d BH |
105 | DMA_DIRECTION_FROM_DEVICE); |
106 | } | |
107 | ||
/*
 * Perform a DMA transfer in direction @dir, preceded by the ordering
 * barrier (see dma_barrier()).  Returns the result of the underlying
 * dma_memory_rw_relaxed() call (0 on success).
 */
static inline int dma_memory_rw(AddressSpace *as, dma_addr_t addr,
                                void *buf, dma_addr_t len,
                                DMADirection dir)
{
    /* Order this access against earlier ones before touching memory. */
    dma_barrier(as, dir);

    return dma_memory_rw_relaxed(as, addr, buf, len, dir);
}

/* DMA read with barrier: copy @len bytes from guest @addr into @buf. */
static inline int dma_memory_read(AddressSpace *as, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

df32fd1c | 123 | static inline int dma_memory_write(AddressSpace *as, dma_addr_t addr, |
d86a77f8 DG |
124 | const void *buf, dma_addr_t len) |
125 | { | |
df32fd1c | 126 | return dma_memory_rw(as, addr, (void *)buf, len, |
d86a77f8 DG |
127 | DMA_DIRECTION_FROM_DEVICE); |
128 | } | |
129 | ||
/* Fill @len bytes of guest memory at @addr with byte @c. */
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len);

df32fd1c | 132 | static inline void *dma_memory_map(AddressSpace *as, |
d86a77f8 DG |
133 | dma_addr_t addr, dma_addr_t *len, |
134 | DMADirection dir) | |
135 | { | |
24addbc7 PB |
136 | hwaddr xlen = *len; |
137 | void *p; | |
138 | ||
df32fd1c | 139 | p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE); |
24addbc7 PB |
140 | *len = xlen; |
141 | return p; | |
d86a77f8 DG |
142 | } |
143 | ||
/*
 * Release a mapping obtained from dma_memory_map().
 * NOTE(review): @access_len is presumably the number of bytes actually
 * accessed through the mapping — confirm against address_space_unmap().
 */
static inline void dma_memory_unmap(AddressSpace *as,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    address_space_unmap(as, buffer, (hwaddr)len,
                        dir == DMA_DIRECTION_FROM_DEVICE, access_len);
}

/*
 * DEFINE_LDST_DMA(_lname, _sname, _bits, _end) expands to a pair of
 * endian-converting DMA accessors, e.g. lduw_le_dma()/stw_le_dma():
 * the load reads (_bits / 8) bytes via dma_memory_read() and converts
 * from _end endianness to host order; the store converts from host
 * order and writes via dma_memory_write().
 */
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end)                    \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(as, addr, &val, (_bits) / 8);                   \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(AddressSpace *as,      \
                                                 dma_addr_t addr,       \
                                                 uint##_bits##_t val)   \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(as, addr, &val, (_bits) / 8);                  \
    }

df32fd1c | 168 | static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr) |
d86a77f8 DG |
169 | { |
170 | uint8_t val; | |
171 | ||
df32fd1c | 172 | dma_memory_read(as, addr, &val, 1); |
d86a77f8 DG |
173 | return val; |
174 | } | |
175 | ||
df32fd1c | 176 | static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val) |
d86a77f8 | 177 | { |
df32fd1c | 178 | dma_memory_write(as, addr, &val, 1); |
d86a77f8 DG |
179 | } |
180 | ||
/* Instantiate lduw/stw, ldl/stl and ldq/stq in both endiannesses. */
DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA

/* One contiguous (bus address, length) region of a QEMUSGList. */
struct ScatterGatherEntry {
    dma_addr_t base;    /* starting bus address */
    dma_addr_t len;     /* length in bytes */
};

/*
 * Initialize @qsg for device @dev over address space @as.
 * NOTE(review): @alloc_hint is presumably an initial capacity hint for
 * the entry array — confirm in the implementation.
 */
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as);
/* Append the region [@base, @base + @len) to @qsg. */
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
/* Free the resources owned by @qsg. */
void qemu_sglist_destroy(QEMUSGList *qsg);
#endif /* !CONFIG_USER_ONLY */

/* Callback type that issues the underlying block-layer AIO for dma_blk_io(). */
typedef BlockAIOCB *DMAIOFunc(BlockBackend *blk, int64_t sector_num,
                              QEMUIOVector *iov, int nb_sectors,
                              BlockCompletionFunc *cb, void *opaque);

/*
 * Start asynchronous scatter/gather block I/O between @blk and @sg in
 * direction @dir, using @io_func to issue each transfer; @cb is invoked
 * with @opaque on completion.
 */
BlockAIOCB *dma_blk_io(BlockBackend *blk,
                       QEMUSGList *sg, uint64_t sector_num,
                       DMAIOFunc *io_func, BlockCompletionFunc *cb,
                       void *opaque, DMADirection dir);
/* Convenience wrappers around dma_blk_io() for reads and writes. */
BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t sector,
                         BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t sector,
                          BlockCompletionFunc *cb, void *opaque);

/*
 * Copy between a linear host buffer @ptr and the guest regions of @sg.
 * NOTE(review): the return value is presumably the number of bytes
 * transferred — confirm in the implementation.
 */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

/* Begin block-accounting (see block/accounting.h) for a transfer of @sg. */
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

#endif /* DMA_H */