/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / 64)

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

/* XXX: pack the flags in the low bits of the pointer ? */
typedef struct PageDesc {
    TranslationBlock *first_tb;
    unsigned long flags;
} PageDesc;

#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

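/* Worked example (illustrative, not from the original source): assuming
   TARGET_PAGE_BITS is 12 and L2_BITS is 10, the address 0x40123456 has
   page index 0x40123456 >> 12 = 0x40123; the two-level map splits this
   into L1 slot 0x40123 >> 10 = 0x100 and L2 slot 0x40123 & (L2_SIZE - 1)
   = 0x123, exactly the split performed by page_find_alloc()/page_find()
   below. */
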
static void tb_invalidate_page(unsigned long address);
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

/* io memory support */
static unsigned long *l1_physmap[L1_SIZE];
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
}

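/* Worked example (illustrative, not from the original source): if
   getpagesize() returns 4096, the loop above leaves host_page_bits at 12
   and host_page_mask at 0xfffff000 (on a 32-bit host), so
   (addr & host_page_mask) rounds an address down to the start of its host
   page, as tb_alloc_page() and page_unprotect() rely on below. */
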
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");

    for(i = 0; i <= L1_SIZE; i++) {
        for(j = 0; j < L2_SIZE; j++) {
            end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
            fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                    start, end, end - start,
                    prot & PAGE_READ ? 'r' : '-',
                    prot & PAGE_WRITE ? 'w' : '-',
                    prot & PAGE_EXEC ? 'x' : '-');
        }
    }
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

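/* Illustrative sketch (not part of the original file): how an mmap-style
   guest mapping might be recorded with page_set_flags() and queried later
   with page_get_flags(). The address range and flag combination below are
   invented for the example. */
#if 0
static void example_track_guest_mapping(void)
{
    unsigned long base = 0x40000000, len = 0x4000;  /* hypothetical guest range */

    /* record a read/write/executable mapping; PAGE_WRITE_ORG is added
       internally because PAGE_WRITE is set */
    page_set_flags(base, base + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);

    /* any address inside the range can then be queried */
    if (page_get_flags(base + 0x1000) & PAGE_WRITE) {
        /* the page is currently writable for the guest */
    }
}
#endif
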
void cpu_exec_init(void)
{
    code_gen_ptr = code_gen_buffer;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++)
                p[j].first_tb = NULL;
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(void)
{
    int i;

    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           (code_gen_ptr - code_gen_buffer) / nb_tbs);
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    page_flush_tb();
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb, int parity)
{
    PageDesc *p;
    unsigned int h, n1;
    unsigned int page_index1, page_index2;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    tb_remove(&tb_hash[h], tb,
              offsetof(TranslationBlock, hash_next));
    /* remove the TB from the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    if ((page_index1 & 1) == parity) {
        p = page_find(page_index1);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index1 & 1]));
    }
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if ((page_index2 & 1) == parity) {
        p = page_find(page_index2);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index2 & 1]));
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

/* invalidate all TBs which intersect with the target page starting at addr */
static void tb_invalidate_page(unsigned long address)
{
    TranslationBlock *tb_next, *tb;
    unsigned int page_index;
    int parity1, parity2;
    PageDesc *p;
#ifdef DEBUG_TB_INVALIDATE
    printf("tb_invalidate_page: %lx\n", address);
#endif

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return;
    tb = p->first_tb;
    parity1 = page_index & 1;
    parity2 = parity1 ^ 1;
    while (tb != NULL) {
        tb_next = tb->page_next[parity1];
        tb_invalidate(tb, parity2);
        tb = tb_next;
    }
    p->first_tb = NULL;
}

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
{
    PageDesc *p;
    unsigned long host_start, host_end, addr, page_addr;
    int prot;

    p = page_find_alloc(page_index);
    tb->page_next[page_index & 1] = p->first_tb;
    p->first_tb = tb;
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr = (page_index << TARGET_PAGE_BITS);
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
#ifdef DEBUG_TB_CHECK
        tb_page_check();
#endif
    }
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
    unsigned int page_index1, page_index2;

    /* add in the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    tb_alloc_page(tb, page_index1);
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if (page_index2 != page_index1) {
        tb_alloc_page(tb, page_index2);
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p1[(addr - host_start) >> TARGET_PAGE_BITS].flags;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) | PAGE_WRITE);
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        p1[pindex].flags |= PAGE_WRITE;
        /* and since the content will be modified, we must invalidate
           the corresponding translated code. */
        tb_invalidate_page(address);
#ifdef DEBUG_TB_CHECK
        tb_invalidate_check(address);
#endif
        return 1;
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

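/* Illustrative sketch (not from the original file): a syscall emulation
   path would call page_unprotect_range() on a guest buffer before letting
   the host kernel store into it, so that write-protected code pages in
   the range are unprotected and their translations invalidated first.
   The wrapper below and its arguments are hypothetical. */
#if 0
static long example_do_guest_read(int fd, uint8_t *guest_buf, unsigned long count)
{
    /* the host write bypasses the SIGSEGV handler, so drop the write
       protection for every target page touched by the buffer */
    page_unprotect_range(guest_buf, count);
    return read(fd, guest_buf, count);
}
#endif
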
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jump */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        tb_flush();
    }
#endif
}

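/* Illustrative sketch (not part of the original file): how a debugger
   stub might drive the breakpoint and single-step helpers above. 'env'
   is the CPU state being debugged; the program counter value is invented
   for the example. */
#if 0
static void example_debugger_attach(CPUState *env)
{
    uint32_t pc = 0x08048000;   /* hypothetical guest PC */

    /* plant a breakpoint: the CPU loop returns EXCP_DEBUG when it is hit */
    if (cpu_breakpoint_insert(env, pc) < 0) {
        /* breakpoint table full (MAX_BREAKPOINTS reached) */
    }

    /* step one instruction at a time while the user single-steps */
    cpu_single_step(env, 1);

    /* ... later: resume free running and drop the breakpoint */
    cpu_single_step(env, 0);
    cpu_breakpoint_remove(env, pc);
}
#endif
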
/* mask must never be zero */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb) {
        tb_reset_jump_recursive(tb);
    }
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

/* unmap all mapped pages and flush all associated code */
void page_unmap(void)
{
    PageDesc *p;
    unsigned long addr;
    int i, j, j1, ret;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE;) {
                if (p->flags & PAGE_VALID) {
                    addr = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    /* we try to find a range to make less syscalls */
                    j1 = j;
                    while (j < L2_SIZE && (p->flags & PAGE_VALID)) {
                        p++;
                        j++;
                    }
                    ret = munmap((void *)addr, (j - j1) << TARGET_PAGE_BITS);
                    if (ret != 0) {
                        fprintf(stderr, "Could not unmap page 0x%08lx\n", addr);
                    }
                } else {
                    p++;
                    j++;
                }
            }
        }
    }
}

void tlb_flush(CPUState *env)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }
#endif
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
#if defined(TARGET_I386)
    int i;

    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->tlb_read[0][i].address = -1;
    env->tlb_write[0][i].address = -1;
    env->tlb_read[1][i].address = -1;
    env->tlb_write[1][i].address = -1;
#endif
}

static inline unsigned long *physpage_find_alloc(unsigned int page)
{
    unsigned long **lp, *p;
    unsigned int index, i;

    index = page >> TARGET_PAGE_BITS;
    lp = &l1_physmap[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(unsigned long) * L2_SIZE);
        for(i = 0; i < L2_SIZE; i++)
            p[i] = IO_MEM_UNASSIGNED;
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

/* return IO_MEM_UNASSIGNED if no page defined (unused memory) */
unsigned long physpage_find(unsigned long page)
{
    unsigned long *p;
    unsigned int index;

    index = page >> TARGET_PAGE_BITS;
    p = l1_physmap[index >> L2_BITS];
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p[index & (L2_SIZE - 1)];
}

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    unsigned long *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = physpage_find_alloc(addr);
        *p = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) == 0)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(0, unassigned_mem_read, unassigned_mem_write);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
