-
Notifications
You must be signed in to change notification settings - Fork 10
Expand file tree
/
Copy pathVirtualIO.cpp
More file actions
108 lines (98 loc) · 2.77 KB
/
VirtualIO.cpp
File metadata and controls
108 lines (98 loc) · 2.77 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
#include "hax-all.h"
/*
 * Handle a fast-MMIO exit reported by the HAXM driver.
 *
 * With fast MMIO, QEMU does not need to sync the full vCPU state with
 * the HAXM driver, because only the MMIO handler is invoked.  Some MMIO
 * operations use guest virtual addresses (e.g. qemu_pipe), which is why
 * CR0/CR3/CR4 would need syncing so QEMU can translate guest virtual
 * to guest physical addresses.
 *
 * NOTE(review): the actual memory access is disabled below; as written,
 * a read (direction == 0) merely writes hft->value back to itself.
 *
 * Always returns 0.
 */
int hax_handle_fastmmio(hax_vcpu_state *env, struct hax_fastmmio *hft)
{
    uint64_t value = hft->value;

    //cpu_physical_memory_rw(hft->gpa, &value, hft->size, hft->direction);

    if (hft->direction == 0) {
        /* Read access: propagate the (would-be) fetched value back. */
        hft->value = value;
    }
    return 0;
}
/*
 * Handle a port-I/O exit from the guest.
 *
 * Currently a no-op stub that always returns 0.  The real
 * implementation (kept below, disabled, for reference) would repeat
 * `count' times: read from or write to `port' via the cpu_inX /
 * cpu_outX helpers with the given element `size', walking `buffer'
 * forward — or backward when the direction flag `df' is set.
 */
int hax_handle_io(hax_vcpu_state *env, uint32_t df, uint16_t port, int direction,
                  int size, int count, void *buffer)
{
    /* Disabled reference implementation:

    uint8_t *ptr = df ? (uint8_t *)buffer + size * (count - 1)
                      : (uint8_t *)buffer;
    for (int i = 0; i < count; i++) {
        if (direction == HAX_EXIT_IO_IN) {
            switch (size) {
            case 1: stb_p(ptr, cpu_inb(port)); break;
            case 2: stw_p(ptr, cpu_inw(port)); break;
            case 4: stl_p(ptr, cpu_inl(port)); break;
            }
        } else {
            switch (size) {
            case 1: cpu_outb(port, ldub_p(ptr)); break;
            case 2: cpu_outw(port, lduw_p(ptr)); break;
            case 4: cpu_outl(port, ldl_p(ptr)); break;
            }
        }
        ptr = df ? ptr - size : ptr + size;
    }
    */
    return 0;
}
/*
 * Inject a pending hard interrupt into the guest, or request an
 * interrupt-window exit when the guest cannot accept one yet.
 *
 * The whole body is compiled out (#if 0); as built, this function is a
 * no-op that always returns 0.
 */
int hax_vcpu_interrupt(hax_vcpu_state *CPU)
{
#if 0
struct hax_tunnel *ht = CPU->tunnel;
/*
 * Try to inject an interrupt if the guest can accept it
 * Unlike KVM, the HAX kernel module checks the eflags, instead.
 */
/* NOTE(review): the disabled code below references `env', but the only
 * parameter is `CPU' -- it would not compile if re-enabled.  Presumably
 * `env' was meant to be derived from CPU; confirm before enabling. */
if (ht->ready_for_interrupt_injection &&
(env->interrupt_request & CPU_INTERRUPT_HARD))
{
int irq;
env->interrupt_request &= ~CPU_INTERRUPT_HARD;
/* PIC query is itself disabled, so irq stays -1 and nothing is injected. */
irq = -1;//cpu_get_pic_interrupt(env);
if (irq >= 0) {
hax_inject_interrupt(env, irq);
}
}
/*
 * If we have an interrupt pending but the guest is not ready to
 * receive it, request an interrupt window exit. This will cause
 * a return to userspace as soon as the guest is ready to receive
 * an interrupt.
 */
if ((env->interrupt_request & CPU_INTERRUPT_HARD))
ht->request_interrupt_window = 1;
else
ht->request_interrupt_window = 0;
#endif
return 0;
}
/*
 * Flag a user-level event as pending on this vCPU's tunnel, so it is
 * reported to QEMU.
 */
void hax_raise_event(hax_vcpu_state *CPU)
{
    struct hax_tunnel *ht = CPU->tunnel;
    ht->user_event_pending = 1;
}