/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE int64_t
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE int32_t
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE int16_t
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE int8_t
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
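/* As an illustrative instantiation: if the including file defines SHIFT as 2,
   then DATA_SIZE is 4, SUFFIX and LSUFFIX are both "l", and the helpers below
   operate on uint32_t values. */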
/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host. This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE DATA_TYPE
# define USUFFIX SUFFIX
#else
# define WORD_TYPE tcg_target_ulong
# define USUFFIX glue(u, SUFFIX)
# define SSUFFIX glue(s, SUFFIX)
#endif
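/* For example (assuming a 64-bit host): a 1-byte load helper returns the
   uint8_t result zero-extended into a 64-bit tcg_target_ulong, so TCG never
   has to reason about ABI-specific return-value promotion. */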
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X) bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X) bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X) bswap16(X)
#else
# define BSWAP(X) (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X) (X)
# define TGT_LE(X) BSWAP(X)
#else
# define TGT_BE(X) BSWAP(X)
# define TGT_LE(X) (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name helper_le_st_name
#else
# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name helper_be_ld_name
# define helper_te_st_name helper_be_st_name
#else
# define helper_te_ld_name helper_le_ld_name
# define helper_te_st_name helper_le_st_name
#endif
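/* For example (assuming the conventional "#define MMUSUFFIX _mmu" used when
   cputlb.c includes this template): with DATA_SIZE == 4 on a 64-bit host,
   helper_le_ld_name expands to helper_le_ldul_mmu and helper_le_st_name
   expands to helper_le_stl_mmu. */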
#ifndef SOFTMMU_CODE_ACCESS
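/* Load from an I/O (i.e. non-RAM) page: map the iotlb entry back to its
   MemoryRegion and dispatch the read to it, recording the access context so
   that I/O hit from the middle of a TB can trigger a recompile. */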
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
                                iotlbentry->attrs);
    return val;
}
#endif

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
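    /* VICTIM_TLB_HIT probes the victim TLB, which holds recently evicted
       entries; on a miss there, tlb_fill() takes the full slow path and
       may not return at all if it raises a guest fault. */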
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion. */
        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine. */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
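        /* e.g. a 4-byte load at offset 2 within the aligned word has
           shift == 16: the two low bytes of res come from the top of res1
           and the two high bytes from the bottom of res2. */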
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion. */
        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine. */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
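        /* e.g. a 4-byte load at offset 2 within the aligned word has
           shift == 16: the two high bytes of res come from the bottom of
           res1 and the two low bytes from the top of res2. */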
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well. We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
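/* The cast to SDATA_TYPE sign-extends the loaded value, and the implicit
   conversion back to WORD_TYPE keeps that bit pattern at host register
   width. */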
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
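/* Store to an I/O (i.e. non-RAM) page, the mirror image of io_read above. */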
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
                                 iotlbentry->attrs);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache. */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract. */
            uint8_t val8 = val >> (i * 8);
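            /* Byte i, stored at addr + i, takes bits [8*i, 8*i + 7] of val:
               least significant byte at the lowest address. */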
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion. */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache. */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract. */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
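            /* Byte i, stored at addr + i, takes byte i counted from the
               most significant end of val: most significant byte at the
               lowest address. */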
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion. */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

#if DATA_SIZE == 1
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
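/* Usage sketch (a hypothetical caller, not taken from this file): a target
 * helper can validate a destination before committing partial state, e.g.
 *     probe_write(env, dest_addr, mmu_idx, GETPC());
 * so that a write fault is raised before any bytes are modified. */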
#endif
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name
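/* A minimal sketch of how this template is instantiated (this mirrors the
 * conventional usage in cputlb.c; the exact defines there may differ):
 *
 *     #define MMUSUFFIX _mmu
 *
 *     #define SHIFT 0
 *     #include "softmmu_template.h"
 *
 *     #define SHIFT 1
 *     #include "softmmu_template.h"
 *
 *     #define SHIFT 2
 *     #include "softmmu_template.h"
 *
 *     #define SHIFT 3
 *     #include "softmmu_template.h"
 *
 * Each inclusion emits one access size's worth of load/store helpers; the
 * #undefs above make the file safe to include repeatedly. */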