diff --git a/0readme_ethernet.txt b/0readme_ethernet.txt index 92c996c0..9a6e34c0 100644 --- a/0readme_ethernet.txt +++ b/0readme_ethernet.txt @@ -177,6 +177,31 @@ OSX: The macports package manager (http://www.macports.org) can be used to install the net/vde2 package. +------------------------------------------------------------------------------- +Another alternative to direct pcap and tun/tap networking on all environments is +NAT (SLiRP) networking. NAT networking is limited to only IP network protocols +so DECnet, LAT and Clustering can't work on a NAT connected interface, but this may +be the easiest solution for many folks. + + sim> attach xq nat: + +The simulator can use static IP addresses of 10.0.2.4 thru 10.0.2.14 with a +netmask of 255.255.255.0 and a gateway of 10.0.2.2 and a nameserver of 10.0.2.3. +If the simulated machine uses DHCP it will get the address 10.0.2.15. Various +NAT based parameters can be configured on the attach command. HELP XQ ATTACH +will provide useful information. Host to simulator connectivity can be +achieved for a simulator which gets its IP address via DHCP with the following +command: + + sim> attach xq nat:tcp=2323:10.0.2.15:23,tcp=2121:10.0.2.15:21 + +The host computer can telnet to localhost:2323 to reach the simulator via +telnet, etc. + +Additionally, NAT based networking is useful to allow host systems with WiFi +networking to a) reach the simulated system and b) allow the simulated system +to reach out to the Internet. + ------------------------------------------------------------------------------- Windows notes: @@ -389,7 +414,7 @@ Building on OpenVMS Alpha and OpenVMS Integrety (IA64): VAX simulator support: -An OpenVMS VAX v7.2 system with DECNET Phase IV, MultiNet 4.4a, and LAT 5.3 has +An OpenVMS VAX v7.3 system with DECNET Phase IV, MultiNet 5.4, and LAT 5.3 has been successfully run. Other testers have reported success booting NetBSD and OpenVMS VAX 5.5-2 also. diff --git a/README.md b/README.md index 2eb1c8dd..c9496c38 100644 --- a/README.md +++ b/README.md @@ -112,6 +112,20 @@ A remote console session will close when an EOF character is entered (i.e. ^D or Asynchronous support exists for console I/O and most multiplexer devices. (Still experimental - not currently by default) +#### Ethernet Transport Enhancements + * UDP packet transport. Direct simulator connections to HECnet can be + made without running a local packet bridge program. + * NAT packet transport. Simulators which only speak TCP/IP (No DECnet) + and want to communicate with their host systems and/or directly to + the Internet can use NAT packet transport. This also works for WiFi + connected host systems. + * Packet Transmission Throttling. When connected to a LAN which has + legacy network adapters (DEQNA, DEUNA) on legacy systems, it is very + easy for a simulated system to overrun the receiving capacity of the + older systems. Throttling of simulated traffic delivered to the LAN + can be used to mitigate this problem. + * Reliable MAC address conflict detection.
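To make the host-forwarding syntax documented above concrete, the following minimal C sketch shows how one clause of the nat: parameter string (for example tcp=2323:10.0.2.15:23) breaks down into protocol, host port, simulator address and simulator port. The helper name parse_nat_forward() is purely illustrative and is not part of this patch; the real parsing and forwarding is done by the SLiRP code under slirp/.

    /* Illustrative sketch only -- not code from this patch.  It shows how a
     * host-forwarding clause such as "tcp=2323:10.0.2.15:23" from the
     * "attach xq nat:..." examples above splits into its four fields.
     * parse_nat_forward() is a hypothetical helper used only here. */
    #include <stdio.h>
    #include <string.h>

    typedef struct {
        char proto[8];       /* "tcp" or "udp" */
        int  host_port;      /* port opened on the host (e.g. 2323) */
        char guest_ip[32];   /* simulator address, 10.0.2.15 when using DHCP */
        int  guest_port;     /* port inside the simulated system (e.g. 23) */
    } NAT_FORWARD;

    static int parse_nat_forward (const char *spec, NAT_FORWARD *fwd)
    {
    /* four fields: proto=hostport:guestip:guestport */
    return (4 == sscanf (spec, "%7[^=]=%d:%31[^:]:%d",
                         fwd->proto, &fwd->host_port,
                         fwd->guest_ip, &fwd->guest_port)) ? 0 : -1;
    }

    int main (void)
    {
    NAT_FORWARD fwd;

    if (0 == parse_nat_forward ("tcp=2323:10.0.2.15:23", &fwd))
        printf ("host %s port %d -> %s:%d\n",
                fwd.proto, fwd.host_port, fwd.guest_ip, fwd.guest_port);
    return 0;
    }

With the forwarding rule above in place, a telnet to localhost:2323 on the host is relayed by SLiRP to port 23 of the simulated system at 10.0.2.15.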
+ #### Disk Extensions RAW Disk Access (including CDROM) Virtual Disk Container files, including differencing disks diff --git a/doc/simh_faq.doc b/doc/simh_faq.doc index 28a60d9b..e3b905ea 100644 Binary files a/doc/simh_faq.doc and b/doc/simh_faq.doc differ diff --git a/makefile b/makefile index 5df34159..70d7d746 100644 --- a/makefile +++ b/makefile @@ -653,6 +653,10 @@ ifeq ($(WIN32),) #*nix Environments (&& cygwin) NETWORK_CCDEFS += -DUSE_NETWORK endif endif + ifeq (slirp,$(shell if $(TEST) -e slirp/simh/sim_slirp.c; then echo slirp; fi)) + NETWORK_CCDEFS += -Islirp -Islirp/simh -Islirp/simh/qemu -DHAVE_SLIRP_NETWORK slirp/*.c slirp/simh/*.c + NETWORK_LAN_FEATURES += NAT(SLiRP) + endif ifeq (,$(findstring USE_NETWORK,$(NETWORK_CCDEFS))$(findstring USE_SHARED,$(NETWORK_CCDEFS))$(findstring HAVE_VDE_NETWORK,$(NETWORK_CCDEFS))) NETWORK_CCDEFS += -DUSE_NETWORK NETWORK_FEATURES = - WITHOUT Local LAN networking support diff --git a/scp.h b/scp.h index 1272031d..188486c8 100644 --- a/scp.h +++ b/scp.h @@ -102,11 +102,12 @@ t_stat spawn_cmd (int32 flag, char *ptr); t_stat echo_cmd (int32 flag, char *ptr); /* Allow compiler to help validate printf style format arguments */ -#if defined __GNUC__ -#define GCC_FMT_ATTR(n, m) __attribute__ ((format (__printf__, n, m))) -#else +#if !defined __GNUC__ #define GCC_FMT_ATTR(n, m) #endif +#if !defined(GCC_FMT_ATTR) +#define GCC_FMT_ATTR(n, m) __attribute__ ((format (__printf__, n, m))) +#endif /* Utility routines */ diff --git a/sim_ether.c b/sim_ether.c index a3c1259b..5a19e349 100644 --- a/sim_ether.c +++ b/sim_ether.c @@ -150,6 +150,9 @@ specified at open time. This functionality is only available on *nix platforms since the vde api isn't available on Windows. + HAVE_SLIRP_NETWORK- Specifies that support for SLiRP networking should be + included. This can be leveraged to provide User Mode + IP NAT connectivity for simulators. 
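As background for the scp.h change above: GCC_FMT_ATTR attaches GCC's printf format attribute to variadic functions so the compiler can validate their format strings against their argument lists, and it expands to nothing on other compilers. The short sketch below illustrates the intended usage pattern under that assumption; my_log() is a hypothetical function used only for illustration, not something defined by this patch.

    /* Minimal sketch (not from the patch) of how scp.h's GCC_FMT_ATTR is
     * meant to be used: under GCC it expands to the printf format
     * attribute, elsewhere to nothing. */
    #include <stdarg.h>
    #include <stdio.h>

    #if defined(__GNUC__)
    #define GCC_FMT_ATTR(n, m) __attribute__ ((format (__printf__, n, m)))
    #else
    #define GCC_FMT_ATTR(n, m)
    #endif

    void my_log (const char *fmt, ...) GCC_FMT_ATTR(1, 2);   /* hypothetical */

    void my_log (const char *fmt, ...)
    {
    va_list args;

    va_start (args, fmt);
    vfprintf (stderr, fmt, args);
    va_end (args);
    }

    int main (void)
    {
    my_log ("%s reports %d devices\n", "XQ", 1);   /* format checked by GCC */
    return 0;
    }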
NEED_PCAP_SENDPACKET - Specifies that you are using an older version of libpcap @@ -884,7 +887,7 @@ void eth_show_dev (FILE* st, ETH_DEV* dev) const char *eth_capabilities(void) { - return "Ethernet Packet transport" + return "Ethernet Packet transports" #if defined (HAVE_PCAP_NETWORK) ":PCAP" #endif @@ -893,6 +896,9 @@ const char *eth_capabilities(void) #endif #if defined (HAVE_VDE_NETWORK) ":VDE" +#endif +#if defined (HAVE_SLIRP_NETWORK) + ":NAT" #endif ":UDP"; } @@ -938,6 +944,10 @@ typedef void * pcap_t; /* Pseudo Type to avoid compiler errors */ #include #endif /* HAVE_VDE_NETWORK */ +#ifdef HAVE_SLIRP_NETWORK +#include "sim_slirp.h" +#endif /* HAVE_SLIRP_NETWORK */ + /* Allows windows to look up user-defined adapter names */ #if defined(_WIN32) #include @@ -1495,6 +1505,17 @@ _eth_write(ETH_DEV* dev, ETH_PACK* packet, ETH_PCALLBACK routine); static void _eth_error(ETH_DEV* dev, const char* where); +#if defined(HAVE_SLIRP_NETWORK) +static void _slirp_callback (void *opaque, const unsigned char *buf, int len) +{ +struct pcap_pkthdr header; + +memset(&header, 0, sizeof(header)); +header.caplen = header.len = len; +_eth_callback((u_char *)opaque, &header, buf); +} +#endif + #if defined (USE_READER_THREAD) #include @@ -1524,6 +1545,7 @@ switch (dev->eth_api) { case ETH_API_TAP: case ETH_API_VDE: case ETH_API_UDP: + case ETH_API_NAT: do_select = 1; select_fd = dev->fd_handle; break; @@ -1532,8 +1554,8 @@ switch (dev->eth_api) { sim_debug(dev->dbit, dev->dptr, "Reader Thread Starting\n"); /* Boost Priority for this I/O thread vs the CPU instruction execution - thread which in general won't be readily yielding the processor when - this thread needs to run */ + thread which, in general, won't be readily yielding the processor + when this thread needs to run */ pthread_getschedparam (pthread_self(), &sched_policy, &sched_priority); ++sched_priority.sched_priority; pthread_setschedparam (pthread_self(), sched_policy, &sched_priority); @@ -1544,22 +1566,31 @@ while (dev->handle) { if (WAIT_OBJECT_0 == WaitForSingleObject (hWait, 250)) sel_ret = 1; } - if (dev->eth_api == ETH_API_UDP) + if ((dev->eth_api == ETH_API_UDP) || (dev->eth_api == ETH_API_NAT)) #endif /* _WIN32 */ if (1) { - fd_set setl; - struct timeval timeout; - if (do_select) { - FD_ZERO(&setl); - FD_SET(select_fd, &setl); - timeout.tv_sec = 0; - timeout.tv_usec = 250*1000; - sel_ret = select(1+select_fd, &setl, NULL, NULL, &timeout); +#ifdef HAVE_SLIRP_NETWORK + if (dev->eth_api == ETH_API_NAT) { + sel_ret = sim_slirp_select ((SLIRP*)dev->handle, 250); + } + else +#endif + { + fd_set setl; + struct timeval timeout; + + FD_ZERO(&setl); + FD_SET(select_fd, &setl); + timeout.tv_sec = 0; + timeout.tv_usec = 250*1000; + sel_ret = select(1+select_fd, &setl, NULL, NULL, &timeout); + } } else sel_ret = 1; - if (sel_ret < 0 && errno != EINTR) break; + if (sel_ret < 0 && errno != EINTR) + break; } if (sel_ret > 0) { if (!dev->handle) @@ -1617,6 +1648,11 @@ while (dev->handle) { } break; #endif /* HAVE_VDE_NETWORK */ +#ifdef HAVE_SLIRP_NETWORK + case ETH_API_NAT: + sim_slirp_dispatch ((SLIRP*)dev->handle); + break; +#endif /* HAVE_SLIRP_NETWORK */ case ETH_API_UDP: if (1) { struct pcap_pkthdr header; @@ -1769,7 +1805,7 @@ dev->throttle_mask = (1 << dev->throttle_burst) - 1; return SCPE_OK; } -static t_stat _eth_open_port(char *savname, int *eth_api, void **handle, SOCKET *fd_handle, char errbuf[PCAP_ERRBUF_SIZE], char *bpf_filter) +static t_stat _eth_open_port(char *savname, int *eth_api, void **handle, SOCKET *fd_handle, char 
errbuf[PCAP_ERRBUF_SIZE], char *bpf_filter, void *opaque) { int bufsz = (BUFSIZ < ETH_MAX_PACKET) ? ETH_MAX_PACKET : BUFSIZ; @@ -1789,7 +1825,7 @@ if (0 == strncmp("tap:", savname, 4)) { #if defined(HAVE_TAP_NETWORK) if (!strcmp(savname, "tap:tapN")) { sim_printf ("Eth: Must specify actual tap device name (i.e. tap:tap0)\r\n"); - return SCPE_OPENERR; + return SCPE_OPENERR | SCPE_NOMESSAGE; } #endif #if (defined(__linux) || defined(__linux__)) && defined(HAVE_TAP_NETWORK) @@ -1865,7 +1901,7 @@ if (0 == strncmp("tap:", savname, 4)) { *handle = (void *)1; /* Flag used to indicated open */ } } -else +else { /* !tap: */ if (0 == strncmp("vde:", savname, 4)) { #if defined(HAVE_VDE_NETWORK) struct vde_open_args voa; @@ -1873,7 +1909,7 @@ else memset(&voa, 0, sizeof(voa)); if (!strcmp(savname, "vde:vdedevice")) { sim_printf ("Eth: Must specify actual vde device name (i.e. vde:/tmp/switch)\r\n"); - return SCPE_OPENERR; + return SCPE_OPENERR | SCPE_NOMESSAGE; } if (!(*handle = (void*) vde_open(savname+4, "simh", &voa))) strncpy(errbuf, strerror(errno), PCAP_ERRBUF_SIZE-1); @@ -1885,76 +1921,91 @@ else strncpy(errbuf, "No support for vde: network devices", PCAP_ERRBUF_SIZE-1); #endif /* defined(HAVE_VDE_NETWORK) */ } - else { - if (0 == strncmp("udp:", savname, 4)) { - char localport[CBUFSIZE], host[CBUFSIZE], port[CBUFSIZE]; - char hostport[2*CBUFSIZE]; - - if (!strcmp(savname, "udp:sourceport:remotehost:remoteport")) { - sim_printf ("Eth: Must specify actual udp host and ports(i.e. udp:1224:somehost.com:2234)\r\n"); - return SCPE_OPENERR; + else { /* !vde: */ + if (0 == strncmp("nat:", savname, 4)) { +#if defined(HAVE_SLIRP_NETWORK) + if (!(*handle = (void*) sim_slirp_open(savname+4, opaque, &_slirp_callback))) + strncpy(errbuf, strerror(errno), PCAP_ERRBUF_SIZE-1); + else { + *eth_api = ETH_API_NAT; + *fd_handle = 0; } - - if (SCPE_OK != sim_parse_addr_ex (savname+4, host, sizeof(host), "localhost", port, sizeof(port), localport, sizeof(localport), NULL)) - return SCPE_OPENERR; - - if (localport[0] == '\0') - strcpy (localport, port); - sprintf (hostport, "%s:%s", host, port); - if ((SCPE_OK == sim_parse_addr (hostport, NULL, 0, NULL, NULL, 0, NULL, "localhost")) && - (0 == strcmp (localport, port))) { - sim_printf ("Eth: Must specify different udp localhost ports\r\n"); - return SCPE_OPENERR; - } - *fd_handle = sim_connect_sock_ex (localport, hostport, NULL, NULL, SIM_SOCK_OPT_DATAGRAM); - if (INVALID_SOCKET == *fd_handle) - return SCPE_OPENERR; - *eth_api = ETH_API_UDP; - *handle = (void *)1; /* Flag used to indicated open */ +#else + strncpy(errbuf, "No support for nat: network devices", PCAP_ERRBUF_SIZE-1); +#endif /* defined(HAVE_SLIRP_NETWORK) */ } - else { + else { /* not nat: */ + if (0 == strncmp("udp:", savname, 4)) { + char localport[CBUFSIZE], host[CBUFSIZE], port[CBUFSIZE]; + char hostport[2*CBUFSIZE]; + + if (!strcmp(savname, "udp:sourceport:remotehost:remoteport")) { + sim_printf ("Eth: Must specify actual udp host and ports(i.e. 
udp:1224:somehost.com:2234)\r\n"); + return SCPE_OPENERR | SCPE_NOMESSAGE; + } + + if (SCPE_OK != sim_parse_addr_ex (savname+4, host, sizeof(host), "localhost", port, sizeof(port), localport, sizeof(localport), NULL)) + return SCPE_OPENERR; + + if (localport[0] == '\0') + strcpy (localport, port); + sprintf (hostport, "%s:%s", host, port); + if ((SCPE_OK == sim_parse_addr (hostport, NULL, 0, NULL, NULL, 0, NULL, "localhost")) && + (0 == strcmp (localport, port))) { + sim_printf ("Eth: Must specify different udp localhost ports\r\n"); + return SCPE_OPENERR | SCPE_NOMESSAGE; + } + *fd_handle = sim_connect_sock_ex (localport, hostport, NULL, NULL, SIM_SOCK_OPT_DATAGRAM); + if (INVALID_SOCKET == *fd_handle) + return SCPE_OPENERR; + *eth_api = ETH_API_UDP; + *handle = (void *)1; /* Flag used to indicated open */ + } + else { /* not udp:, so attempt to open the parameter as if it were an explicit device name */ #if defined(HAVE_PCAP_NETWORK) - *handle = (void*) pcap_open_live(savname, bufsz, ETH_PROMISC, PCAP_READ_TIMEOUT, errbuf); - if (!*handle) { /* can't open device */ - sim_printf ("Eth: pcap_open_live error - %s\r\n", errbuf); - return SCPE_OPENERR; - } - *eth_api = ETH_API_PCAP; + *handle = (void*) pcap_open_live(savname, bufsz, ETH_PROMISC, PCAP_READ_TIMEOUT, errbuf); + if (!*handle) { /* can't open device */ + sim_printf ("Eth: pcap_open_live error - %s\r\n", errbuf); + return SCPE_OPENERR | SCPE_NOMESSAGE; + } + *eth_api = ETH_API_PCAP; #if !defined(HAS_PCAP_SENDPACKET) && defined (xBSD) && !defined (__APPLE__) - /* Tell the kernel that the header is fully-formed when it gets it. - This is required in order to fake the src address. */ - if (1) { - int one = 1; - ioctl(pcap_fileno(*handle), BIOCSHDRCMPLT, &one); - } + /* Tell the kernel that the header is fully-formed when it gets it. + This is required in order to fake the src address. */ + if (1) { + int one = 1; + ioctl(pcap_fileno(*handle), BIOCSHDRCMPLT, &one); + } #endif /* xBSD */ #if defined(_WIN32) - pcap_setmintocopy ((pcap_t*)(*handle), 0); + pcap_setmintocopy ((pcap_t*)(*handle), 0); #endif #if !defined (USE_READER_THREAD) #ifdef USE_SETNONBLOCK -/* set ethernet device non-blocking so pcap_dispatch() doesn't hang */ - if (pcap_setnonblock (*handle, 1, errbuf) == -1) { - sim_printf ("Eth: Failed to set non-blocking: %s\r\n", errbuf); - } + /* set ethernet device non-blocking so pcap_dispatch() doesn't hang */ + if (pcap_setnonblock (*handle, 1, errbuf) == -1) { + sim_printf ("Eth: Failed to set non-blocking: %s\r\n", errbuf); + } #endif #if defined (__APPLE__) - if (1) { - /* Deliver packets immediately, needed for OS X 10.6.2 and later - * (Snow-Leopard). - * See this thread on libpcap and Mac Os X 10.6 Snow Leopard on - * the tcpdump mailinglist: http://seclists.org/tcpdump/2010/q1/110 - */ - int v = 1; - ioctl(pcap_fileno(*handle), BIOCIMMEDIATE, &v); - } + if (1) { + /* Deliver packets immediately, needed for OS X 10.6.2 and later + * (Snow-Leopard). 
+ * See this thread on libpcap and Mac Os X 10.6 Snow Leopard on + * the tcpdump mailinglist: http://seclists.org/tcpdump/2010/q1/110 + */ + int v = 1; + ioctl(pcap_fileno(*handle), BIOCIMMEDIATE, &v); + } #endif /* defined (__APPLE__) */ #endif /* !defined (USE_READER_THREAD) */ #else - strncpy (errbuf, "Unknown or unsupported network device", PCAP_ERRBUF_SIZE-1); + strncpy (errbuf, "Unknown or unsupported network device", PCAP_ERRBUF_SIZE-1); #endif /* defined(HAVE_PCAP_NETWORK) */ - } - } + } /* not udp:, so attempt to open the parameter as if it were an explicit device name */ + } /* !nat: */ + } /* !vde: */ + } /* !tap: */ if (errbuf[0]) return SCPE_OPENERR; @@ -2030,11 +2081,11 @@ else { } } -r = _eth_open_port(savname, &dev->eth_api, &dev->handle, &dev->fd_handle, errbuf, NULL); +r = _eth_open_port(savname, &dev->eth_api, &dev->handle, &dev->fd_handle, errbuf, NULL, (void *)dev); if (errbuf[0]) { sim_printf ("Eth: open error - %s\r\n", errbuf); - return SCPE_OPENERR; + return SCPE_OPENERR | SCPE_NOMESSAGE; } if (r != SCPE_OK) return r; @@ -2099,15 +2150,15 @@ switch (eth_api) { case ETH_API_VDE: vde_close((VDECONN*)pcap); break; +#endif +#ifdef HAVE_SLIRP_NETWORK + case ETH_API_NAT: + sim_slirp_close((SLIRP*)pcap); + break; #endif case ETH_API_UDP: sim_close_sock(pcap_fd); break; -#ifdef USE_SLIRP_NETWORK - case ETH_API_NAT: - vde_close((VDECONN*)pcap); - break; -#endif } return SCPE_OK; } @@ -2166,11 +2217,23 @@ fprintf (st, "%s attach help\n\n", dptr->name); fprintf (st, " sim> SHOW ETHERNET\n"); fprintf (st, " libpcap version 1.0.0\n"); fprintf (st, " ETH devices:\n"); -fprintf (st, " eth0 en0 (No description available)\n"); -fprintf (st, " eth1 tap:tapN (Integrated Tun/Tap support)\n"); +fprintf (st, " eth0 en0 (No description available)\n"); +#if defined(HAVE_TAP_NETWORK) +fprintf (st, " eth1 tap:tapN (Integrated Tun/Tap support)\n"); +#endif +#if defined(HAVE_SLIRP_NETWORK) +fprintf (st, " eth2 vde:device (Integrated VDE support)\n"); +#endif +#if defined(HAVE_SLIRP_NETWORK) +fprintf (st, " eth3 nat:{optional-nat-parameters} (Integrated NAT (SLiRP) support)\n"); +#endif +fprintf (st, " eth4 udp:sourceport:remotehost:remoteport (Integrated UDP bridge support)\n"); fprintf (st, " sim> ATTACH %s eth0\n\n", dptr->name); fprintf (st, "or equivalently:\n\n"); fprintf (st, " sim> ATTACH %s en0\n\n", dptr->name); +#if defined(HAVE_SLIRP_NETWORK) +sim_slirp_attach_help (st, dptr, uptr, flag, cptr); +#endif return SCPE_OK; } @@ -2399,7 +2462,7 @@ if (dev->error_needs_reset) { _eth_close_port(dev->eth_api, (pcap_t *)dev->handle, dev->fd_handle); sim_os_sleep (ETH_ERROR_REOPEN_PAUSE); - r = _eth_open_port(dev->name, &dev->eth_api, &dev->handle, &dev->fd_handle, errbuf, dev->bpf_filter); + r = _eth_open_port(dev->name, &dev->eth_api, &dev->handle, &dev->fd_handle, errbuf, dev->bpf_filter, (void *)dev); dev->error_needs_reset = FALSE; if (r == SCPE_OK) sim_printf ("%s ReOpened: %s \n", msg, dev->name); @@ -2473,6 +2536,15 @@ if ((packet->len >= ETH_MIN_PACKET) && (packet->len <= ETH_MAX_PACKET)) { else status = 1; break; +#endif +#ifdef HAVE_SLIRP_NETWORK + case ETH_API_NAT: + status = sim_slirp_send((SLIRP*)dev->handle, (char *)packet->msg, (size_t)packet->len, 0); + if ((status == (int)packet->len) || (status == 0)) + status = 0; + else + status = 1; + break; #endif case ETH_API_UDP: status = (((int32)packet->len == sim_write_sock (dev->fd_handle, (char *)packet->msg, (int32)packet->len)) ? 
0 : -1); @@ -3086,6 +3158,7 @@ switch (dev->eth_api) { case ETH_API_TAP: case ETH_API_VDE: case ETH_API_UDP: + case ETH_API_NAT: bpf_used = 0; to_me = 0; eth_packet_trace (dev, data, header->len, "received"); @@ -3130,7 +3203,7 @@ if ((LOOPBACK_SELF_FRAME(dev->physical_addr, data)) || #ifdef USE_READER_THREAD pthread_mutex_unlock (&dev->self_lock); #endif -} + } if (bpf_used ? to_me : (to_me && !from_me)) { if (header->len > ETH_MIN_JUMBO_FRAME) { @@ -3623,6 +3696,13 @@ if (used < max) { ++used; } #endif +#ifdef HAVE_SLIRP_NETWORK +if (used < max) { + sprintf(list[used].name, "%s", "nat:{optional-nat-parameters}"); + sprintf(list[used].desc, "%s", "Integrated NAT (SLiRP) support"); + ++used; + } +#endif if (used < max) { sprintf(list[used].name, "%s", "udp:sourceport:remotehost:remoteport"); @@ -3630,14 +3710,6 @@ if (used < max) { ++used; } -#ifdef USE_SLIRP_NETWORK -if (used < max) { - sprintf(list[used].name, "%s", "nat:device"); - sprintf(list[used].desc, "%s", "Integrated User Mode NAT support"); - ++used; - } -#endif - return used; } @@ -3731,5 +3803,9 @@ fprintf(st, " Peak Write Queue Size: %d\n", dev->write_queue_peak); #endif if (dev->bpf_filter) fprintf(st, " BPF Filter: %s\n", dev->bpf_filter); +#if defined(HAVE_SLIRP_NETWORK) +if (dev->eth_api == ETH_API_NAT) + sim_slirp_show ((SLIRP *)dev->handle, st); +#endif } #endif /* USE_NETWORK */ diff --git a/slirp/simh/config-host.h b/slirp/simh/config-host.h new file mode 100644 index 00000000..00afd8bb --- /dev/null +++ b/slirp/simh/config-host.h @@ -0,0 +1,42 @@ +#ifndef CONFIG_HOST_H +#define CONFIG_HOST_H + +#include +#ifdef _MSC_VER +#include +#else +typedef int SOCKET; +#endif + +typedef int bool; +#include +#include +#define qemu_add_child_watch(pid) +int qemu_setsockopt (int s, int level, int optname, void *optval, int optlen); +int qemu_recv (int s, void *buf, size_t len, int flags); +#ifdef _MSC_VER +#define snprintf _snprintf +#define strcasecmp stricmp +#else +#define CONFIG_IOVEC 1 +#endif +#define register_savevm(p1, p2, p3, p4, p5, p6, p7) +#define unregister_savevm(p1, p2, p3) +#define qemu_put_be16(p1, p2) +#define qemu_put_sbe16(p1, p2) +#define qemu_put_be32(p1, p2) +#define qemu_put_sbe32(p1, p2) +#define qemu_put_byte(p1, p2) +#define qemu_put_sbyte(p1, p2) +#define qemu_put_buffer(p1, p2, p3) + +#define qemu_get_be16(p1) 0 +#define qemu_get_sbe16(p1) 0 +#define qemu_get_be32(p1) 0 +#define qemu_get_sbe32(p1) 0 +#define qemu_get_byte(p1) 0 +#define qemu_get_sbyte(p1) 0 +#define qemu_get_buffer(p1, p2, p3) +#define error_report(...) 
fprintf(stderr, __VA_ARGS__) + +#endif diff --git a/slirp/simh/glib.h b/slirp/simh/glib.h new file mode 100644 index 00000000..00559272 --- /dev/null +++ b/slirp/simh/glib.h @@ -0,0 +1,101 @@ +#ifndef GLIB_H +#define GLIB_H + +#include +#if defined(_WIN32) +#include +#endif + +typedef char gchar; +typedef unsigned int guint; +typedef unsigned short gushort; +typedef void* gpointer; +typedef unsigned long gsize; +typedef const void *gconstpointer; +typedef int gint; +typedef gint gboolean; +typedef struct _GSource {int dummy;} GSource; +typedef struct GPollFD { +#if defined(_WIN32) + SOCKET fd; +#else + gint fd; +#endif + gushort events; + gushort revents; +} GPollFD; +typedef struct _GArray { + gchar *data; + guint len; +} GArray; + +gpointer g_malloc (gsize n_bytes); +gpointer g_malloc0 (gsize n_bytes); +gpointer g_realloc (gpointer mem, gsize n_bytes); +void g_free (gpointer mem); +gchar *g_strdup (const gchar *str); + +typedef enum { + /* Flags */ + G_LOG_FLAG_RECURSION = 1 << 0, + G_LOG_FLAG_FATAL = 1 << 1, + /* Levels */ + G_LOG_LEVEL_ERROR = 1 << 2, + G_LOG_LEVEL_CRITICAL = 1 << 3, + G_LOG_LEVEL_WARNING = 1 << 4, + G_LOG_LEVEL_MESSAGE = 1 << 5, + G_LOG_LEVEL_INFO = 1 << 6, + G_LOG_LEVEL_DEBUG = 1 << 7, + G_LOG_LEVEL_MASK = ~(G_LOG_FLAG_RECURSION | G_LOG_FLAG_FATAL) + } GLogLevelFlags; + +#define GLIB_SYSDEF_POLLIN =1 +#define GLIB_SYSDEF_POLLOUT =4 +#define GLIB_SYSDEF_POLLPRI =2 +#define GLIB_SYSDEF_POLLHUP =16 +#define GLIB_SYSDEF_POLLERR =8 +#define GLIB_SYSDEF_POLLNVAL =32 + +typedef enum { + G_IO_IN GLIB_SYSDEF_POLLIN, // There is data to read. + G_IO_OUT GLIB_SYSDEF_POLLOUT, // Data can be written (without blocking). + G_IO_PRI GLIB_SYSDEF_POLLPRI, // There is urgent data to read. + G_IO_ERR GLIB_SYSDEF_POLLERR, // Error condition. + G_IO_HUP GLIB_SYSDEF_POLLHUP, // Hung up (the connection has been broken, usually for pipes and sockets). + G_IO_NVAL GLIB_SYSDEF_POLLNVAL // Invalid request. The file descriptor is not open. + } GIOCondition; +void g_log (const gchar *log_domain, GLogLevelFlags log_level, const gchar *format, ...); +#if !defined(G_LOG_DOMAIN) +#define G_LOG_DOMAIN ((gchar *)NULL) +#endif +#define g_warning(...) 
g_log (G_LOG_DOMAIN, G_LOG_LEVEL_WARNING, __VA_ARGS__) + +#define g_new(struct_type, n_structs) g_malloc (sizeof(struct_type) * n_structs) + + +#define g_array_append_val(array, data) g_array_append_vals (array, &data, 1) +#define g_array_new(zero_terminated, clear, element_size) g_array_sized_new(zero_terminated, clear, element_size, 0) + +GArray * +g_array_sized_new (gboolean zero_terminated, + gboolean clear, + guint element_size, + guint reserved_size); +gchar * +g_array_free (GArray *array, + gboolean free_segment); + +#define g_array_index(array, type, index) (((type *)(void *)((array)->data)))[index] + +GArray * +g_array_set_size (GArray *array, + guint length); +GArray * +g_array_append_vals (GArray *array, + gconstpointer data, + guint len); +guint +g_array_get_element_size (GArray *array); + + +#endif diff --git a/slirp/simh/glib_qemu_stubs.c b/slirp/simh/glib_qemu_stubs.c new file mode 100644 index 00000000..e3b5243e --- /dev/null +++ b/slirp/simh/glib_qemu_stubs.c @@ -0,0 +1,354 @@ +/* glib_qemu_stubs.c: + ------------------------------------------------------------------------------ + Copyright (c) 2015, Mark Pizzolato + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + Except as contained in this notice, the name of the author shall not be + used in advertising or otherwise to promote the sale, use or other dealings + in this Software without prior written authorization from the author. + + ------------------------------------------------------------------------------ + + This module provides the minimal aspects of glib and qemu which are referenced + by the current qemu SLiRP code and are needed to get SLiRP functionality for + the simh network code. 
+ +*/ + +#include +#include +#include +#include +#ifdef _WIN32 +#include +#include +#endif +#include +#include +#include "glib.h" + +gpointer +g_malloc (gsize n_bytes) +{ +gpointer ret = malloc (n_bytes); + +if (!ret) + exit (errno); +return ret; +} + +gpointer +g_malloc0 (gsize n_bytes) +{ +gpointer ret = calloc (1, n_bytes); + +if (!ret) + exit (errno); +return ret; +} + +gpointer +g_realloc (gpointer mem, gsize n_bytes) +{ +gpointer ret = realloc (mem, n_bytes); + +if (!ret) + exit (errno); +return ret; +} + +void +g_free (gpointer mem) +{ +free (mem); +} + +gchar * +g_strdup (const gchar *str) +{ +gchar *nstr = NULL; + +if (str) { + nstr = (gchar *)malloc (strlen(str)+1); + if (!nstr) + exit (errno); + strcpy (nstr, str); + } +return nstr; +} + +void pstrcpy(char *buf, int buf_size, const char *str) +{ + int c; + char *q = buf; + + if (buf_size <= 0) + return; + + for(;;) { + c = *str++; + if (c == 0 || q >= buf + buf_size - 1) + break; + *q++ = c; + } + *q = '\0'; +} + +int qemu_socket(int domain, int type, int protocol) +{ +fprintf (stderr, "qemu_socket()\r\n"); +return socket (domain, type, protocol); +} + +int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen) +{ +fprintf (stderr, "qemu_accept()\r\n"); +return accept (s, addr, addrlen); +} + +int qemu_setsockopt (int s, int level, int optname, void *optval, int optlen) +{ +fprintf (stderr, "qemu_setsockopt()\r\n"); +return setsockopt ((SOCKET)s, level, optname, (char *)optval, optlen); +} + +int qemu_recv (int s, void *buf, size_t len, int flags) +{ +fprintf (stderr, "qemu_recv()\r\n"); +return recv ((SOCKET)s, buf, len, flags); +} + +int socket_set_nodelay(int fd) +{ + int v = 1; +fprintf (stderr, "socket_set_nodelay()\r\n"); + return setsockopt((SOCKET)fd, IPPROTO_TCP, TCP_NODELAY, (char *)&v, sizeof(v)); +} + +#ifdef _WIN32 + +void qemu_set_nonblock(int fd) +{ +unsigned long non_block = 1; + + ioctlsocket ((SOCKET)fd, FIONBIO, &non_block); /* set nonblocking */ +} +#else +#include +void qemu_set_nonblock(int fd) +{ + int f; +fprintf (stderr, "qemu_set_nonblock()\r\n"); + f = fcntl(fd, F_GETFL); + fcntl(fd, F_SETFL, f | O_NONBLOCK); +} +#endif + +int socket_set_fast_reuse(int fd) +{ + int val = 1, ret; + + ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, + (const char *)&val, sizeof(val)); + + assert(ret == 0); + + return ret; +} + +#include +#ifdef _WIN32 +int64_t qemu_clock_get_ns(int type) +{ +uint64_t now, unixbase; + +unixbase = 116444736; +unixbase *= 1000000000; +GetSystemTimeAsFileTime((FILETIME*)&now); +now -= unixbase; +return now*100; +} + +#else + +int64_t qemu_clock_get_ns(int type) +{ + struct timespec tv; + + clock_gettime(CLOCK_REALTIME, &tv); + return tv.tv_sec * 1000000000LL + tv.tv_nsec; +} +#endif + +void monitor_printf(Monitor *mon, const char *fmt, ...) +{ +va_list arglist; + + va_start (arglist, fmt); + vfprintf ((FILE *)mon, fmt, arglist); + va_end (arglist); +} + +void g_log (const gchar *log_domain, + GLogLevelFlags log_level, + const gchar *format, + ...) +{ +va_list arglist; + + fprintf (stderr, "%s(%X): ", log_domain ? 
log_domain : "", log_level); + va_start (arglist, format); + vfprintf (stderr, format, arglist); + va_end (arglist); +} + +int qemu_chr_fe_write(CharDriverState *s, const uint8_t *buf, int len) +{ +fprintf (stderr, "qemu_chr_fe_write() called\n"); +return 0; +} + +void qemu_notify_event(void) +{ +fprintf (stderr, "qemu_notify_event() called\n"); +} + +#if defined(_MSC_VER) + +struct quehead { + struct quehead *qh_link; + struct quehead *qh_rlink; +}; + +void +slirp_insque(void *a, void *b) +{ + register struct quehead *element = (struct quehead *) a; + register struct quehead *head = (struct quehead *) b; + element->qh_link = head->qh_link; + head->qh_link = (struct quehead *)element; + element->qh_rlink = (struct quehead *)head; + ((struct quehead *)(element->qh_link))->qh_rlink + = (struct quehead *)element; +} + +void +slirp_remque(void *a) +{ + register struct quehead *element = (struct quehead *) a; + ((struct quehead *)(element->qh_link))->qh_rlink = element->qh_rlink; + ((struct quehead *)(element->qh_rlink))->qh_link = element->qh_link; + element->qh_rlink = NULL; +} + +int +inet_aton(const char *arg, struct in_addr *addr) +{ +(*addr).s_addr = inet_addr (arg); +return (*addr).s_addr != INADDR_BROADCAST; +} + +#endif + +/* glib GArray functionality is needed */ + +typedef struct { + gchar *data; + guint len; + guint _element_size; /* element size */ + guint _size; /* allocated element count size */ + gboolean _zero_terminated; + gboolean _clear; +} GArrayInternal; + +GArray * +g_array_sized_new (gboolean zero_terminated, + gboolean clear, + guint element_size, + guint reserved_size) +{ +GArrayInternal *ar = g_malloc (sizeof (*ar)); + +ar->_zero_terminated = zero_terminated ? 1 : 0; +ar->_clear = clear ? 1 : 0; +ar->_element_size = element_size; +ar->_size = reserved_size; +ar->len = 0; +ar->data = clear ? g_malloc0 (element_size*(reserved_size + zero_terminated)) : + g_malloc (element_size*(reserved_size + zero_terminated)); +if (ar->_zero_terminated && !ar->_clear) + memset (ar->data + (ar->len * ar->_element_size), 0, ar->_element_size); +return (GArray *)ar; +} + +gchar * +g_array_free (GArray *array, + gboolean free_segment) +{ +gchar *result = free_segment ? 
NULL : array->data; + +if (free_segment) + free (array->data); +free (array); +return result; +} + +GArray * +g_array_set_size (GArray *array, + guint length) +{ +GArrayInternal *ar = (GArrayInternal *)array; + +if (length > ar->_size) { + ar->data = g_realloc (ar->data, (length + ar->_zero_terminated) * ar->_element_size); + if (ar->_clear) + memset (ar->data + (ar->len * ar->_element_size), 0, (length + ar->_zero_terminated - ar->len) * ar->_element_size); + ar->_size = length; + } +ar->len = length; +if (ar->_zero_terminated) + memset (ar->data + (ar->len * ar->_element_size), 0, ar->_element_size); +return array; +} + +GArray * +g_array_append_vals (GArray *array, + gconstpointer data, + guint len) +{ +GArrayInternal *ar = (GArrayInternal *)array; + +if ((ar->len + len) > ar->_size) { + ar->data = g_realloc (ar->data, (ar->len + len + ar->_zero_terminated) * ar->_element_size); + ar->_size = ar->len + len; + } +memcpy (ar->data + (ar->len * ar->_element_size), data, len * ar->_element_size); +ar->len += len; +if (ar->_zero_terminated) + memset (ar->data + (ar->len * ar->_element_size), 0, ar->_element_size); +return array; +} + +guint +g_array_get_element_size (GArray *array) +{ +GArrayInternal *ar = (GArrayInternal *)array; + +return ar->_element_size; +} diff --git a/slirp/simh/qemu/atomic.h b/slirp/simh/qemu/atomic.h new file mode 100644 index 00000000..bd2c0753 --- /dev/null +++ b/slirp/simh/qemu/atomic.h @@ -0,0 +1,269 @@ +/* + * Simple interface for atomic operations. + * + * Copyright (C) 2013 Red Hat, Inc. + * + * Author: Paolo Bonzini + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef __QEMU_ATOMIC_H +#define __QEMU_ATOMIC_H 1 + +#include "qemu/compiler.h" + +/* For C11 atomic ops */ + +/* Compiler barrier */ +#define barrier() ({ asm volatile("" ::: "memory"); (void)0; }) + +#ifndef __ATOMIC_RELAXED + +/* + * We use GCC builtin if it's available, as that can use mfence on + * 32-bit as well, e.g. if built with -march=pentium-m. However, on + * i386 the spec is buggy, and the implementation followed it until + * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793). + */ +#if defined(__i386__) || defined(__x86_64__) +#if !QEMU_GNUC_PREREQ(4, 4) +#if defined __x86_64__ +#define smp_mb() ({ asm volatile("mfence" ::: "memory"); (void)0; }) +#else +#define smp_mb() ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; }) +#endif +#endif +#endif + + +#ifdef __alpha__ +#define smp_read_barrier_depends() asm volatile("mb":::"memory") +#endif + +#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__) + +/* + * Because of the strongly ordered storage model, wmb() and rmb() are nops + * here (a compiler barrier only). QEMU doesn't do accesses to write-combining + * qemu memory or non-temporal load/stores from C code. + */ +#define smp_wmb() barrier() +#define smp_rmb() barrier() + +/* + * __sync_lock_test_and_set() is documented to be an acquire barrier only, + * but it is a full barrier at the hardware level. Add a compiler barrier + * to make it a full barrier also at the compiler level. + */ +#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i)) + +/* + * Load/store with Java volatile semantics. + */ +#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i)) + +#elif defined(_ARCH_PPC) + +/* + * We use an eieio() for wmb() on powerpc. 
This assumes we don't + * need to order cacheable and non-cacheable stores with respect to + * each other. + * + * smp_mb has the same problem as on x86 for not-very-new GCC + * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011). + */ +#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; }) +#if defined(__powerpc64__) +#define smp_rmb() ({ asm volatile("lwsync" ::: "memory"); (void)0; }) +#else +#define smp_rmb() ({ asm volatile("sync" ::: "memory"); (void)0; }) +#endif +#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; }) + +#endif /* _ARCH_PPC */ + +#endif /* C11 atomics */ + +/* + * For (host) platforms we don't have explicit barrier definitions + * for, we use the gcc __sync_synchronize() primitive to generate a + * full barrier. This should be safe on all platforms, though it may + * be overkill for smp_wmb() and smp_rmb(). + */ +#ifndef smp_mb +#define smp_mb() __sync_synchronize() +#endif + +#ifndef smp_wmb +#ifdef __ATOMIC_RELEASE +/* __atomic_thread_fence does not include a compiler barrier; instead, + * the barrier is part of __atomic_load/__atomic_store's "volatile-like" + * semantics. If smp_wmb() is a no-op, absence of the barrier means that + * the compiler is free to reorder stores on each side of the barrier. + * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends(). + */ +#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); }) +#else +#define smp_wmb() __sync_synchronize() +#endif +#endif + +#ifndef smp_rmb +#ifdef __ATOMIC_ACQUIRE +#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); }) +#else +#define smp_rmb() __sync_synchronize() +#endif +#endif + +#ifndef smp_read_barrier_depends +#ifdef __ATOMIC_CONSUME +#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); }) +#else +#define smp_read_barrier_depends() barrier() +#endif +#endif + +#ifndef atomic_read +#define atomic_read(ptr) (*(__typeof__(*ptr) volatile*) (ptr)) +#endif + +#ifndef atomic_set +#define atomic_set(ptr, i) ((*(__typeof__(*ptr) volatile*) (ptr)) = (i)) +#endif + +/** + * atomic_rcu_read - reads a RCU-protected pointer to a local variable + * into a RCU read-side critical section. The pointer can later be safely + * dereferenced within the critical section. + * + * This ensures that the pointer copy is invariant thorough the whole critical + * section. + * + * Inserts memory barriers on architectures that require them (currently only + * Alpha) and documents which pointers are protected by RCU. + * + * Unless the __ATOMIC_CONSUME memory order is available, atomic_rcu_read also + * includes a compiler barrier to ensure that value-speculative optimizations + * (e.g. VSS: Value Speculation Scheduling) does not perform the data read + * before the pointer read by speculating the value of the pointer. On new + * enough compilers, atomic_load takes care of such concern about + * dependency-breaking optimizations. + * + * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg(). + */ +#ifndef atomic_rcu_read +#ifdef __ATOMIC_CONSUME +#define atomic_rcu_read(ptr) ({ \ + typeof(*ptr) _val; \ + __atomic_load(ptr, &_val, __ATOMIC_CONSUME); \ + _val; \ +}) +#else +#define atomic_rcu_read(ptr) ({ \ + typeof(*ptr) _val = atomic_read(ptr); \ + smp_read_barrier_depends(); \ + _val; \ +}) +#endif +#endif + +/** + * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure + * meant to be read by RCU read-side critical sections. 
+ * + * Documents which pointers will be dereferenced by RCU read-side critical + * sections and adds the required memory barriers on architectures requiring + * them. It also makes sure the compiler does not reorder code initializing the + * data structure before its publication. + * + * Should match atomic_rcu_read(). + */ +#ifndef atomic_rcu_set +#ifdef __ATOMIC_RELEASE +#define atomic_rcu_set(ptr, i) do { \ + typeof(*ptr) _val = (i); \ + __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \ +} while(0) +#else +#define atomic_rcu_set(ptr, i) do { \ + smp_wmb(); \ + atomic_set(ptr, i); \ +} while (0) +#endif +#endif + +/* These have the same semantics as Java volatile variables. + * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html: + * "1. Issue a StoreStore barrier (wmb) before each volatile store." + * 2. Issue a StoreLoad barrier after each volatile store. + * Note that you could instead issue one before each volatile load, but + * this would be slower for typical programs using volatiles in which + * reads greatly outnumber writes. Alternatively, if available, you + * can implement volatile store as an atomic instruction (for example + * XCHG on x86) and omit the barrier. This may be more efficient if + * atomic instructions are cheaper than StoreLoad barriers. + * 3. Issue LoadLoad and LoadStore barriers after each volatile load." + * + * If you prefer to think in terms of "pairing" of memory barriers, + * an atomic_mb_read pairs with an atomic_mb_set. + * + * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq, + * while an atomic_mb_set is a st.rel followed by a memory barrier. + * + * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST + * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough. + * Just always use the barriers manually by the rules above. + */ +#ifndef atomic_mb_read +#define atomic_mb_read(ptr) ({ \ + typeof(*ptr) _val = atomic_read(ptr); \ + smp_rmb(); \ + _val; \ +}) +#endif + +#ifndef atomic_mb_set +#define atomic_mb_set(ptr, i) do { \ + smp_wmb(); \ + atomic_set(ptr, i); \ + smp_mb(); \ +} while (0) +#endif + +#ifndef atomic_xchg +#if defined(__clang__) +#define atomic_xchg(ptr, i) __sync_swap(ptr, i) +#elif defined(__ATOMIC_SEQ_CST) +#define atomic_xchg(ptr, i) ({ \ + typeof(*ptr) _new = (i), _old; \ + __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \ + _old; \ +}) +#else +/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */ +#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i)) +#endif +#endif + +/* Provide shorter names for GCC atomic builtins. */ +#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1) +#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1) +#define atomic_fetch_add __sync_fetch_and_add +#define atomic_fetch_sub __sync_fetch_and_sub +#define atomic_fetch_and __sync_fetch_and_and +#define atomic_fetch_or __sync_fetch_and_or +#define atomic_cmpxchg __sync_val_compare_and_swap + +/* And even shorter names that return void. 
*/ +#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1)) +#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1)) +#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n)) +#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n)) +#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n)) +#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n)) + +#endif diff --git a/slirp/simh/qemu/block/accounting.h b/slirp/simh/qemu/block/accounting.h new file mode 100644 index 00000000..4c406cff --- /dev/null +++ b/slirp/simh/qemu/block/accounting.h @@ -0,0 +1,60 @@ +/* + * QEMU System Emulator block accounting + * + * Copyright (c) 2011 Christoph Hellwig + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef BLOCK_ACCOUNTING_H +#define BLOCK_ACCOUNTING_H + +#include + +#include "qemu/typedefs.h" + +enum BlockAcctType { + BLOCK_ACCT_READ, + BLOCK_ACCT_WRITE, + BLOCK_ACCT_FLUSH, + BLOCK_MAX_IOTYPE, +}; + +typedef struct BlockAcctStats { + uint64_t nr_bytes[BLOCK_MAX_IOTYPE]; + uint64_t nr_ops[BLOCK_MAX_IOTYPE]; + uint64_t total_time_ns[BLOCK_MAX_IOTYPE]; + uint64_t merged[BLOCK_MAX_IOTYPE]; + uint64_t wr_highest_sector; +} BlockAcctStats; + +typedef struct BlockAcctCookie { + int64_t bytes; + int64_t start_time_ns; + enum BlockAcctType type; +} BlockAcctCookie; + +void block_acct_start(BlockAcctStats *stats, BlockAcctCookie *cookie, + int64_t bytes, enum BlockAcctType type); +void block_acct_done(BlockAcctStats *stats, BlockAcctCookie *cookie); +void block_acct_highest_sector(BlockAcctStats *stats, int64_t sector_num, + unsigned int nb_sectors); +void block_acct_merge_done(BlockAcctStats *stats, enum BlockAcctType type, + int num_requests); + +#endif diff --git a/slirp/simh/qemu/block/aio.h b/slirp/simh/qemu/block/aio.h new file mode 100644 index 00000000..400b1b00 --- /dev/null +++ b/slirp/simh/qemu/block/aio.h @@ -0,0 +1,376 @@ +/* + * QEMU aio implementation + * + * Copyright IBM, Corp. 2008 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#ifndef QEMU_AIO_H +#define QEMU_AIO_H + +#include "qemu/typedefs.h" +#include "qemu-common.h" +#include "qemu/queue.h" +#include "qemu/event_notifier.h" +#include "qemu/thread.h" +#include "qemu/rfifolock.h" +#include "qemu/timer.h" + +typedef struct BlockAIOCB BlockAIOCB; +typedef void BlockCompletionFunc(void *opaque, int ret); + +typedef struct AIOCBInfo { + void (*cancel_async)(BlockAIOCB *acb); + AioContext *(*get_aio_context)(BlockAIOCB *acb); + size_t aiocb_size; +} AIOCBInfo; + +struct BlockAIOCB { + const AIOCBInfo *aiocb_info; + BlockDriverState *bs; + BlockCompletionFunc *cb; + void *opaque; + int refcnt; +}; + +void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs, + BlockCompletionFunc *cb, void *opaque); +void qemu_aio_unref(void *p); +void qemu_aio_ref(void *p); + +typedef struct AioHandler AioHandler; +typedef void QEMUBHFunc(void *opaque); +typedef void IOHandler(void *opaque); + +struct AioContext { + GSource source; + + /* Protects all fields from multi-threaded access */ + RFifoLock lock; + + /* The list of registered AIO handlers */ + QLIST_HEAD(, AioHandler) aio_handlers; + + /* This is a simple lock used to protect the aio_handlers list. + * Specifically, it's used to ensure that no callbacks are removed while + * we're walking and dispatching callbacks. + */ + int walking_handlers; + + /* Used to avoid unnecessary event_notifier_set calls in aio_notify; + * accessed with atomic primitives. If this field is 0, everything + * (file descriptors, bottom halves, timers) will be re-evaluated + * before the next blocking poll(), thus the event_notifier_set call + * can be skipped. If it is non-zero, you may need to wake up a + * concurrent aio_poll or the glib main event loop, making + * event_notifier_set necessary. + * + * Bit 0 is reserved for GSource usage of the AioContext, and is 1 + * between a call to aio_ctx_check and the next call to aio_ctx_dispatch. + * Bits 1-31 simply count the number of active calls to aio_poll + * that are in the prepare or poll phase. + * + * The GSource and aio_poll must use a different mechanism because + * there is no certainty that a call to GSource's prepare callback + * (via g_main_context_prepare) is indeed followed by check and + * dispatch. It's not clear whether this would be a bug, but let's + * play safe and allow it---it will just cause extra calls to + * event_notifier_set until the next call to dispatch. + * + * Instead, the aio_poll calls include both the prepare and the + * dispatch phase, hence a simple counter is enough for them. + */ + uint32_t notify_me; + + /* lock to protect between bh's adders and deleter */ + QemuMutex bh_lock; + + /* Anchor of the list of Bottom Halves belonging to the context */ + struct QEMUBH *first_bh; + + /* A simple lock used to protect the first_bh list, and ensure that + * no callbacks are removed while we're walking and dispatching callbacks. + */ + int walking_bh; + + /* Used by aio_notify. + * + * "notified" is used to avoid expensive event_notifier_test_and_clear + * calls. When it is clear, the EventNotifier is clear, or one thread + * is going to clear "notified" before processing more events. False + * positives are possible, i.e. "notified" could be set even though the + * EventNotifier is clear. + * + * Note that event_notifier_set *cannot* be optimized the same way. For + * more information on the problem that would result, see "#ifdef BUG2" + * in the docs/aio_notify_accept.promela formal model. 
+ */ + bool notified; + EventNotifier notifier; + + /* Scheduling this BH forces the event loop it iterate */ + QEMUBH *notify_dummy_bh; + + /* Thread pool for performing work and receiving completion callbacks */ + struct ThreadPool *thread_pool; + + /* TimerLists for calling timers - one per clock type */ + QEMUTimerListGroup tlg; +}; + +/** + * aio_context_new: Allocate a new AioContext. + * + * AioContext provide a mini event-loop that can be waited on synchronously. + * They also provide bottom halves, a service to execute a piece of code + * as soon as possible. + */ +AioContext *aio_context_new(Error **errp); + +/** + * aio_context_ref: + * @ctx: The AioContext to operate on. + * + * Add a reference to an AioContext. + */ +void aio_context_ref(AioContext *ctx); + +/** + * aio_context_unref: + * @ctx: The AioContext to operate on. + * + * Drop a reference to an AioContext. + */ +void aio_context_unref(AioContext *ctx); + +/* Take ownership of the AioContext. If the AioContext will be shared between + * threads, and a thread does not want to be interrupted, it will have to + * take ownership around calls to aio_poll(). Otherwise, aio_poll() + * automatically takes care of calling aio_context_acquire and + * aio_context_release. + * + * Access to timers and BHs from a thread that has not acquired AioContext + * is possible. Access to callbacks for now must be done while the AioContext + * is owned by the thread (FIXME). + */ +void aio_context_acquire(AioContext *ctx); + +/* Relinquish ownership of the AioContext. */ +void aio_context_release(AioContext *ctx); + +/** + * aio_bh_new: Allocate a new bottom half structure. + * + * Bottom halves are lightweight callbacks whose invocation is guaranteed + * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure + * is opaque and must be allocated prior to its use. + */ +QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque); + +/** + * aio_notify: Force processing of pending events. + * + * Similar to signaling a condition variable, aio_notify forces + * aio_wait to exit, so that the next call will re-examine pending events. + * The caller of aio_notify will usually call aio_wait again very soon, + * or go through another iteration of the GLib main loop. Hence, aio_notify + * also has the side effect of recalculating the sets of file descriptors + * that the main loop waits for. + * + * Calling aio_notify is rarely necessary, because for example scheduling + * a bottom half calls it already. + */ +void aio_notify(AioContext *ctx); + +/** + * aio_notify_accept: Acknowledge receiving an aio_notify. + * + * aio_notify() uses an EventNotifier in order to wake up a sleeping + * aio_poll() or g_main_context_iteration(). Calls to aio_notify() are + * usually rare, but the AioContext has to clear the EventNotifier on + * every aio_poll() or g_main_context_iteration() in order to avoid + * busy waiting. This event_notifier_test_and_clear() cannot be done + * using the usual aio_context_set_event_notifier(), because it must + * be done before processing all events (file descriptors, bottom halves, + * timers). + * + * aio_notify_accept() is an optimized event_notifier_test_and_clear() + * that is specific to an AioContext's notifier; it is used internally + * to clear the EventNotifier only if aio_notify() had been called. + */ +void aio_notify_accept(AioContext *ctx); + +/** + * aio_bh_poll: Poll bottom halves for an AioContext. + * + * These are internal functions used by the QEMU main loop. 
+ * And notice that multiple occurrences of aio_bh_poll cannot + * be called concurrently + */ +int aio_bh_poll(AioContext *ctx); + +/** + * qemu_bh_schedule: Schedule a bottom half. + * + * Scheduling a bottom half interrupts the main loop and causes the + * execution of the callback that was passed to qemu_bh_new. + * + * Bottom halves that are scheduled from a bottom half handler are instantly + * invoked. This can create an infinite loop if a bottom half handler + * schedules itself. + * + * @bh: The bottom half to be scheduled. + */ +void qemu_bh_schedule(QEMUBH *bh); + +/** + * qemu_bh_cancel: Cancel execution of a bottom half. + * + * Canceling execution of a bottom half undoes the effect of calls to + * qemu_bh_schedule without freeing its resources yet. While cancellation + * itself is also wait-free and thread-safe, it can of course race with the + * loop that executes bottom halves unless you are holding the iothread + * mutex. This makes it mostly useless if you are not holding the mutex. + * + * @bh: The bottom half to be canceled. + */ +void qemu_bh_cancel(QEMUBH *bh); + +/** + *qemu_bh_delete: Cancel execution of a bottom half and free its resources. + * + * Deleting a bottom half frees the memory that was allocated for it by + * qemu_bh_new. It also implies canceling the bottom half if it was + * scheduled. + * This func is async. The bottom half will do the delete action at the finial + * end. + * + * @bh: The bottom half to be deleted. + */ +void qemu_bh_delete(QEMUBH *bh); + +/* Return whether there are any pending callbacks from the GSource + * attached to the AioContext, before g_poll is invoked. + * + * This is used internally in the implementation of the GSource. + */ +bool aio_prepare(AioContext *ctx); + +/* Return whether there are any pending callbacks from the GSource + * attached to the AioContext, after g_poll is invoked. + * + * This is used internally in the implementation of the GSource. + */ +bool aio_pending(AioContext *ctx); + +/* Dispatch any pending callbacks from the GSource attached to the AioContext. + * + * This is used internally in the implementation of the GSource. + */ +bool aio_dispatch(AioContext *ctx); + +/* Progress in completing AIO work to occur. This can issue new pending + * aio as a result of executing I/O completion or bh callbacks. + * + * Return whether any progress was made by executing AIO or bottom half + * handlers. If @blocking == true, this should always be true except + * if someone called aio_notify. + * + * If there are no pending bottom halves, but there are pending AIO + * operations, it may not be possible to make any progress without + * blocking. If @blocking is true, this function will wait until one + * or more AIO events have completed, to ensure something has moved + * before returning. + */ +bool aio_poll(AioContext *ctx, bool blocking); + +/* Register a file descriptor and associated callbacks. Behaves very similarly + * to qemu_set_fd_handler. Unlike qemu_set_fd_handler, these callbacks will + * be invoked when using aio_poll(). + * + * Code that invokes AIO completion functions should rely on this function + * instead of qemu_set_fd_handler[2]. + */ +void aio_set_fd_handler(AioContext *ctx, + int fd, + IOHandler *io_read, + IOHandler *io_write, + void *opaque); + +/* Register an event notifier and associated callbacks. Behaves very similarly + * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks + * will be invoked when using aio_poll(). 
+ * + * Code that invokes AIO completion functions should rely on this function + * instead of event_notifier_set_handler. + */ +void aio_set_event_notifier(AioContext *ctx, + EventNotifier *notifier, + EventNotifierHandler *io_read); + +/* Return a GSource that lets the main loop poll the file descriptors attached + * to this AioContext. + */ +GSource *aio_get_g_source(AioContext *ctx); + +/* Return the ThreadPool bound to this AioContext */ +struct ThreadPool *aio_get_thread_pool(AioContext *ctx); + +/** + * aio_timer_new: + * @ctx: the aio context + * @type: the clock type + * @scale: the scale + * @cb: the callback to call on timer expiry + * @opaque: the opaque pointer to pass to the callback + * + * Allocate a new timer attached to the context @ctx. + * The function is responsible for memory allocation. + * + * The preferred interface is aio_timer_init. Use that + * unless you really need dynamic memory allocation. + * + * Returns: a pointer to the new timer + */ +static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type, + int scale, + QEMUTimerCB *cb, void *opaque) +{ + return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque); +} + +/** + * aio_timer_init: + * @ctx: the aio context + * @ts: the timer + * @type: the clock type + * @scale: the scale + * @cb: the callback to call on timer expiry + * @opaque: the opaque pointer to pass to the callback + * + * Initialise a new timer attached to the context @ctx. + * The caller is responsible for memory allocation. + */ +static inline void aio_timer_init(AioContext *ctx, + QEMUTimer *ts, QEMUClockType type, + int scale, + QEMUTimerCB *cb, void *opaque) +{ + timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque); +} + +/** + * aio_compute_timeout: + * @ctx: the aio context + * + * Compute the timeout that a blocking aio_poll should use. + */ +int64_t aio_compute_timeout(AioContext *ctx); + +#endif diff --git a/slirp/simh/qemu/block/block.h b/slirp/simh/qemu/block/block.h new file mode 100644 index 00000000..ef673531 --- /dev/null +++ b/slirp/simh/qemu/block/block.h @@ -0,0 +1,619 @@ +#ifndef BLOCK_H +#define BLOCK_H + +#include "block/aio.h" +#include "qemu-common.h" +#include "qemu/option.h" +#include "block/coroutine.h" +#include "block/accounting.h" +#include "qapi/qmp/qobject.h" +#include "qapi-types.h" + +/* block.c */ +typedef struct BlockDriver BlockDriver; +typedef struct BlockJob BlockJob; +typedef struct BdrvChild BdrvChild; +typedef struct BdrvChildRole BdrvChildRole; + +typedef struct BlockDriverInfo { + /* in bytes, 0 if irrelevant */ + int cluster_size; + /* offset at which the VM state can be saved (0 if not possible) */ + int64_t vm_state_offset; + bool is_dirty; + /* + * True if unallocated blocks read back as zeroes. This is equivalent + * to the LBPRZ flag in the SCSI logical block provisioning page. + */ + bool unallocated_blocks_are_zero; + /* + * True if the driver can optimize writing zeroes by unmapping + * sectors. This is equivalent to the BLKDISCARDZEROES ioctl in Linux + * with the difference that in qemu a discard is allowed to silently + * fail. Therefore we have to use bdrv_write_zeroes with the + * BDRV_REQ_MAY_UNMAP flag for an optimized zero write with unmapping. + * After this call the driver has to guarantee that the contents read + * back as zero. It is additionally required that the block device is + * opened with BDRV_O_UNMAP flag for this to work. 
+ */ + bool can_write_zeroes_with_unmap; + /* + * True if this block driver only supports compressed writes + */ + bool needs_compressed_writes; +} BlockDriverInfo; + +typedef struct BlockFragInfo { + uint64_t allocated_clusters; + uint64_t total_clusters; + uint64_t fragmented_clusters; + uint64_t compressed_clusters; +} BlockFragInfo; + +typedef enum { + BDRV_REQ_COPY_ON_READ = 0x1, + BDRV_REQ_ZERO_WRITE = 0x2, + /* The BDRV_REQ_MAY_UNMAP flag is used to indicate that the block driver + * is allowed to optimize a write zeroes request by unmapping (discarding) + * blocks if it is guaranteed that the result will read back as + * zeroes. The flag is only passed to the driver if the block device is + * opened with BDRV_O_UNMAP. + */ + BDRV_REQ_MAY_UNMAP = 0x4, +} BdrvRequestFlags; + +typedef struct BlockSizes { + uint32_t phys; + uint32_t log; +} BlockSizes; + +typedef struct HDGeometry { + uint32_t heads; + uint32_t sectors; + uint32_t cylinders; +} HDGeometry; + +#define BDRV_O_RDWR 0x0002 +#define BDRV_O_SNAPSHOT 0x0008 /* open the file read only and save writes in a snapshot */ +#define BDRV_O_TEMPORARY 0x0010 /* delete the file after use */ +#define BDRV_O_NOCACHE 0x0020 /* do not use the host page cache */ +#define BDRV_O_CACHE_WB 0x0040 /* use write-back caching */ +#define BDRV_O_NATIVE_AIO 0x0080 /* use native AIO instead of the thread pool */ +#define BDRV_O_NO_BACKING 0x0100 /* don't open the backing file */ +#define BDRV_O_NO_FLUSH 0x0200 /* disable flushing on this disk */ +#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */ +#define BDRV_O_INCOMING 0x0800 /* consistency hint for incoming migration */ +#define BDRV_O_CHECK 0x1000 /* open solely for consistency check */ +#define BDRV_O_ALLOW_RDWR 0x2000 /* allow reopen to change from r/o to r/w */ +#define BDRV_O_UNMAP 0x4000 /* execute guest UNMAP/TRIM operations */ +#define BDRV_O_PROTOCOL 0x8000 /* if no block driver is explicitly given: + select an appropriate protocol driver, + ignoring the format layer */ + +#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH) + + +/* Option names of options parsed by the block layer */ + +#define BDRV_OPT_CACHE_WB "cache.writeback" +#define BDRV_OPT_CACHE_DIRECT "cache.direct" +#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush" + + +#define BDRV_SECTOR_BITS 9 +#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS) +#define BDRV_SECTOR_MASK ~(BDRV_SECTOR_SIZE - 1) + +#define BDRV_REQUEST_MAX_SECTORS MIN(SIZE_MAX >> BDRV_SECTOR_BITS, \ + INT_MAX >> BDRV_SECTOR_BITS) + +/* + * Allocation status flags + * BDRV_BLOCK_DATA: data is read from bs->file or another file + * BDRV_BLOCK_ZERO: sectors read as zero + * BDRV_BLOCK_OFFSET_VALID: sector stored in bs->file as raw data + * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this + * layer (as opposed to the backing file) + * BDRV_BLOCK_RAW: used internally to indicate that the request + * was answered by the raw driver and that one + * should look in bs->file directly. + * + * If BDRV_BLOCK_OFFSET_VALID is set, bits 9-62 represent the offset in + * bs->file where sector data can be read from as raw data. + * + * DATA == 0 && ZERO == 0 means that data is read from backing_hd if present. 
+ * + * DATA ZERO OFFSET_VALID + * t t t sectors read as zero, bs->file is zero at offset + * t f t sectors read as valid from bs->file at offset + * f t t sectors preallocated, read as zero, bs->file not + * necessarily zero at offset + * f f t sectors preallocated but read from backing_hd, + * bs->file contains garbage at offset + * t t f sectors preallocated, read as zero, unknown offset + * t f f sectors read from unknown file or offset + * f t f not allocated or unknown offset, read as zero + * f f f not allocated or unknown offset, read from backing_hd + */ +#define BDRV_BLOCK_DATA 0x01 +#define BDRV_BLOCK_ZERO 0x02 +#define BDRV_BLOCK_OFFSET_VALID 0x04 +#define BDRV_BLOCK_RAW 0x08 +#define BDRV_BLOCK_ALLOCATED 0x10 +#define BDRV_BLOCK_OFFSET_MASK BDRV_SECTOR_MASK + +typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue; + +typedef struct BDRVReopenState { + BlockDriverState *bs; + int flags; + QDict *options; + void *opaque; +} BDRVReopenState; + +/* + * Block operation types + */ +typedef enum BlockOpType { + BLOCK_OP_TYPE_BACKUP_SOURCE, + BLOCK_OP_TYPE_BACKUP_TARGET, + BLOCK_OP_TYPE_CHANGE, + BLOCK_OP_TYPE_COMMIT_SOURCE, + BLOCK_OP_TYPE_COMMIT_TARGET, + BLOCK_OP_TYPE_DATAPLANE, + BLOCK_OP_TYPE_DRIVE_DEL, + BLOCK_OP_TYPE_EJECT, + BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, + BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, + BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, + BLOCK_OP_TYPE_MIRROR, + BLOCK_OP_TYPE_RESIZE, + BLOCK_OP_TYPE_STREAM, + BLOCK_OP_TYPE_REPLACE, + BLOCK_OP_TYPE_MAX, +} BlockOpType; + +void bdrv_iostatus_enable(BlockDriverState *bs); +void bdrv_iostatus_reset(BlockDriverState *bs); +void bdrv_iostatus_disable(BlockDriverState *bs); +bool bdrv_iostatus_is_enabled(const BlockDriverState *bs); +void bdrv_iostatus_set_err(BlockDriverState *bs, int error); +void bdrv_info_print(Monitor *mon, const QObject *data); +void bdrv_info(Monitor *mon, QObject **ret_data); +void bdrv_stats_print(Monitor *mon, const QObject *data); +void bdrv_info_stats(Monitor *mon, QObject **ret_data); + +/* disk I/O throttling */ +void bdrv_io_limits_enable(BlockDriverState *bs, const char *group); +void bdrv_io_limits_disable(BlockDriverState *bs); +void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group); + +void bdrv_init(void); +void bdrv_init_with_whitelist(void); +BlockDriver *bdrv_find_protocol(const char *filename, + bool allow_protocol_prefix, + Error **errp); +BlockDriver *bdrv_find_format(const char *format_name); +int bdrv_create(BlockDriver *drv, const char* filename, + QemuOpts *opts, Error **errp); +int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp); +BlockDriverState *bdrv_new_root(void); +BlockDriverState *bdrv_new(void); +void bdrv_make_anon(BlockDriverState *bs); +void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old); +void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top); +int bdrv_parse_cache_flags(const char *mode, int *flags); +int bdrv_parse_discard_flags(const char *mode, int *flags); +int bdrv_open_image(BlockDriverState **pbs, const char *filename, + QDict *options, const char *bdref_key, + BlockDriverState* parent, const BdrvChildRole *child_role, + bool allow_none, Error **errp); +BdrvChild *bdrv_open_child(const char *filename, + QDict *options, const char *bdref_key, + BlockDriverState* parent, + const BdrvChildRole *child_role, + bool allow_none, Error **errp); +void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd); +int bdrv_open_backing_file(BlockDriverState *bs, QDict 
*options, Error **errp); +int bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp); +int bdrv_open(BlockDriverState **pbs, const char *filename, + const char *reference, QDict *options, int flags, Error **errp); +BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, + BlockDriverState *bs, + QDict *options, int flags); +int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp); +int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp); +int bdrv_reopen_prepare(BDRVReopenState *reopen_state, + BlockReopenQueue *queue, Error **errp); +void bdrv_reopen_commit(BDRVReopenState *reopen_state); +void bdrv_reopen_abort(BDRVReopenState *reopen_state); +void bdrv_close(BlockDriverState *bs); +void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify); +int bdrv_read(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors); +int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num, + uint8_t *buf, int nb_sectors); +int bdrv_write(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors); +int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, BdrvRequestFlags flags); +BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, BdrvRequestFlags flags, + BlockCompletionFunc *cb, void *opaque); +int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags); +int bdrv_pread(BlockDriverState *bs, int64_t offset, + void *buf, int count); +int bdrv_pwrite(BlockDriverState *bs, int64_t offset, + const void *buf, int count); +int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov); +int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset, + const void *buf, int count); +int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, QEMUIOVector *qiov); +int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, QEMUIOVector *qiov); +int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, QEMUIOVector *qiov); +/* + * Efficiently zero a region of the disk image. Note that this is a regular + * I/O request like read or write and should have a reasonable size. This + * function is not suitable for zeroing the entire image in a single request + * because it may allocate memory for the entire region. 
+ */ +int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, BdrvRequestFlags flags); +BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, + const char *backing_file); +int bdrv_get_backing_file_depth(BlockDriverState *bs); +void bdrv_refresh_filename(BlockDriverState *bs); +int bdrv_truncate(BlockDriverState *bs, int64_t offset); +int64_t bdrv_nb_sectors(BlockDriverState *bs); +int64_t bdrv_getlength(BlockDriverState *bs); +int64_t bdrv_get_allocated_file_size(BlockDriverState *bs); +void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr); +void bdrv_refresh_limits(BlockDriverState *bs, Error **errp); +int bdrv_commit(BlockDriverState *bs); +int bdrv_commit_all(void); +int bdrv_change_backing_file(BlockDriverState *bs, + const char *backing_file, const char *backing_fmt); +void bdrv_register(BlockDriver *bdrv); +int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top, + BlockDriverState *base, + const char *backing_file_str); +BlockDriverState *bdrv_find_overlay(BlockDriverState *active, + BlockDriverState *bs); +BlockDriverState *bdrv_find_base(BlockDriverState *bs); + + +typedef struct BdrvCheckResult { + int corruptions; + int leaks; + int check_errors; + int corruptions_fixed; + int leaks_fixed; + int64_t image_end_offset; + BlockFragInfo bfi; +} BdrvCheckResult; + +typedef enum { + BDRV_FIX_LEAKS = 1, + BDRV_FIX_ERRORS = 2, +} BdrvCheckMode; + +int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix); + +/* The units of offset and total_work_size may be chosen arbitrarily by the + * block driver; total_work_size may change during the course of the amendment + * operation */ +typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset, + int64_t total_work_size); +int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts, + BlockDriverAmendStatusCB *status_cb); + +/* external snapshots */ +bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs, + BlockDriverState *candidate); +bool bdrv_is_first_non_filter(BlockDriverState *candidate); + +/* check if a named node can be replaced when doing drive-mirror */ +BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs, + const char *node_name, Error **errp); + +/* async block I/O */ +typedef void BlockDriverDirtyHandler(BlockDriverState *bs, int64_t sector, + int sector_num); +BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, + QEMUIOVector *iov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque); +BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, + QEMUIOVector *iov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque); +BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs, + BlockCompletionFunc *cb, void *opaque); +BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + BlockCompletionFunc *cb, void *opaque); +void bdrv_aio_cancel(BlockAIOCB *acb); +void bdrv_aio_cancel_async(BlockAIOCB *acb); + +typedef struct BlockRequest { + /* Fields to be filled by multiwrite caller */ + int64_t sector; + int nb_sectors; + int flags; + QEMUIOVector *qiov; + BlockCompletionFunc *cb; + void *opaque; + + /* Filled by multiwrite implementation */ + int error; +} BlockRequest; + +int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, + int num_reqs); + +/* sg packet commands */ +int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf); +BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, + unsigned long int req, 
void *buf, + BlockCompletionFunc *cb, void *opaque); + +/* Invalidate any cached metadata used by image formats */ +void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp); +void bdrv_invalidate_cache_all(Error **errp); + +/* Ensure contents are flushed to disk. */ +int bdrv_flush(BlockDriverState *bs); +int coroutine_fn bdrv_co_flush(BlockDriverState *bs); +int bdrv_flush_all(void); +void bdrv_close_all(void); +void bdrv_drain(BlockDriverState *bs); +void bdrv_drain_all(void); + +int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors); +int bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors); +int bdrv_has_zero_init_1(BlockDriverState *bs); +int bdrv_has_zero_init(BlockDriverState *bs); +bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs); +bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs); +int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num, + int nb_sectors, int *pnum); +int64_t bdrv_get_block_status_above(BlockDriverState *bs, + BlockDriverState *base, + int64_t sector_num, + int nb_sectors, int *pnum); +int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, + int *pnum); +int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base, + int64_t sector_num, int nb_sectors, int *pnum); + +void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error, + BlockdevOnError on_write_error); +BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read); +BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error); +void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action, + bool is_read, int error); +int bdrv_is_read_only(BlockDriverState *bs); +int bdrv_is_sg(BlockDriverState *bs); +int bdrv_enable_write_cache(BlockDriverState *bs); +void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce); +int bdrv_is_inserted(BlockDriverState *bs); +int bdrv_media_changed(BlockDriverState *bs); +void bdrv_lock_medium(BlockDriverState *bs, bool locked); +void bdrv_eject(BlockDriverState *bs, bool eject_flag); +const char *bdrv_get_format_name(BlockDriverState *bs); +BlockDriverState *bdrv_find_node(const char *node_name); +BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp); +BlockDriverState *bdrv_lookup_bs(const char *device, + const char *node_name, + Error **errp); +bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base); +BlockDriverState *bdrv_next_node(BlockDriverState *bs); +BlockDriverState *bdrv_next(BlockDriverState *bs); +int bdrv_is_encrypted(BlockDriverState *bs); +int bdrv_key_required(BlockDriverState *bs); +int bdrv_set_key(BlockDriverState *bs, const char *key); +void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp); +int bdrv_query_missing_keys(void); +void bdrv_iterate_format(void (*it)(void *opaque, const char *name), + void *opaque); +const char *bdrv_get_node_name(const BlockDriverState *bs); +const char *bdrv_get_device_name(const BlockDriverState *bs); +const char *bdrv_get_device_or_node_name(const BlockDriverState *bs); +int bdrv_get_flags(BlockDriverState *bs); +int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num, + const uint8_t *buf, int nb_sectors); +int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi); +ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs); +void bdrv_round_to_clusters(BlockDriverState *bs, + int64_t sector_num, int nb_sectors, + int64_t *cluster_sector_num, + int *cluster_nb_sectors); + +const char 
*bdrv_get_encrypted_filename(BlockDriverState *bs); +void bdrv_get_backing_filename(BlockDriverState *bs, + char *filename, int filename_size); +void bdrv_get_full_backing_filename(BlockDriverState *bs, + char *dest, size_t sz, Error **errp); +void bdrv_get_full_backing_filename_from_filename(const char *backed, + const char *backing, + char *dest, size_t sz, + Error **errp); +int bdrv_is_snapshot(BlockDriverState *bs); + +int path_has_protocol(const char *path); +int path_is_absolute(const char *path); +void path_combine(char *dest, int dest_size, + const char *base_path, + const char *filename); + +int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); +int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, + int64_t pos, int size); + +int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, + int64_t pos, int size); + +void bdrv_img_create(const char *filename, const char *fmt, + const char *base_filename, const char *base_fmt, + char *options, uint64_t img_size, int flags, + Error **errp, bool quiet); + +/* Returns the alignment in bytes that is required so that no bounce buffer + * is required throughout the stack */ +size_t bdrv_min_mem_align(BlockDriverState *bs); +/* Returns optimal alignment in bytes for bounce buffer */ +size_t bdrv_opt_mem_align(BlockDriverState *bs); +void bdrv_set_guest_block_size(BlockDriverState *bs, int align); +void *qemu_blockalign(BlockDriverState *bs, size_t size); +void *qemu_blockalign0(BlockDriverState *bs, size_t size); +void *qemu_try_blockalign(BlockDriverState *bs, size_t size); +void *qemu_try_blockalign0(BlockDriverState *bs, size_t size); +bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov); + +struct HBitmapIter; +typedef struct BdrvDirtyBitmap BdrvDirtyBitmap; +BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, + uint32_t granularity, + const char *name, + Error **errp); +int bdrv_dirty_bitmap_create_successor(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, + Error **errp); +BdrvDirtyBitmap *bdrv_dirty_bitmap_abdicate(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, + Error **errp); +BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap(BlockDriverState *bs, + BdrvDirtyBitmap *bitmap, + Error **errp); +BdrvDirtyBitmap *bdrv_find_dirty_bitmap(BlockDriverState *bs, + const char *name); +void bdrv_dirty_bitmap_make_anon(BdrvDirtyBitmap *bitmap); +void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap); +void bdrv_disable_dirty_bitmap(BdrvDirtyBitmap *bitmap); +void bdrv_enable_dirty_bitmap(BdrvDirtyBitmap *bitmap); +BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs); +uint32_t bdrv_get_default_bitmap_granularity(BlockDriverState *bs); +uint32_t bdrv_dirty_bitmap_granularity(BdrvDirtyBitmap *bitmap); +bool bdrv_dirty_bitmap_enabled(BdrvDirtyBitmap *bitmap); +bool bdrv_dirty_bitmap_frozen(BdrvDirtyBitmap *bitmap); +DirtyBitmapStatus bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap); +int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector); +void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap, + int64_t cur_sector, int nr_sectors); +void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap, + int64_t cur_sector, int nr_sectors); +void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap); +void bdrv_dirty_iter_init(BdrvDirtyBitmap *bitmap, struct HBitmapIter *hbi); +void bdrv_set_dirty_iter(struct HBitmapIter *hbi, int64_t offset); +int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap); + +void bdrv_enable_copy_on_read(BlockDriverState *bs); 
+void bdrv_disable_copy_on_read(BlockDriverState *bs); + +void bdrv_ref(BlockDriverState *bs); +void bdrv_unref(BlockDriverState *bs); +void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child); + +bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp); +void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason); +void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason); +void bdrv_op_block_all(BlockDriverState *bs, Error *reason); +void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason); +bool bdrv_op_blocker_is_empty(BlockDriverState *bs); + +typedef enum { + BLKDBG_L1_UPDATE, + + BLKDBG_L1_GROW_ALLOC_TABLE, + BLKDBG_L1_GROW_WRITE_TABLE, + BLKDBG_L1_GROW_ACTIVATE_TABLE, + + BLKDBG_L2_LOAD, + BLKDBG_L2_UPDATE, + BLKDBG_L2_UPDATE_COMPRESSED, + BLKDBG_L2_ALLOC_COW_READ, + BLKDBG_L2_ALLOC_WRITE, + + BLKDBG_READ_AIO, + BLKDBG_READ_BACKING_AIO, + BLKDBG_READ_COMPRESSED, + + BLKDBG_WRITE_AIO, + BLKDBG_WRITE_COMPRESSED, + + BLKDBG_VMSTATE_LOAD, + BLKDBG_VMSTATE_SAVE, + + BLKDBG_COW_READ, + BLKDBG_COW_WRITE, + + BLKDBG_REFTABLE_LOAD, + BLKDBG_REFTABLE_GROW, + BLKDBG_REFTABLE_UPDATE, + + BLKDBG_REFBLOCK_LOAD, + BLKDBG_REFBLOCK_UPDATE, + BLKDBG_REFBLOCK_UPDATE_PART, + BLKDBG_REFBLOCK_ALLOC, + BLKDBG_REFBLOCK_ALLOC_HOOKUP, + BLKDBG_REFBLOCK_ALLOC_WRITE, + BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS, + BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE, + BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE, + + BLKDBG_CLUSTER_ALLOC, + BLKDBG_CLUSTER_ALLOC_BYTES, + BLKDBG_CLUSTER_FREE, + + BLKDBG_FLUSH_TO_OS, + BLKDBG_FLUSH_TO_DISK, + + BLKDBG_PWRITEV_RMW_HEAD, + BLKDBG_PWRITEV_RMW_AFTER_HEAD, + BLKDBG_PWRITEV_RMW_TAIL, + BLKDBG_PWRITEV_RMW_AFTER_TAIL, + BLKDBG_PWRITEV, + BLKDBG_PWRITEV_ZERO, + BLKDBG_PWRITEV_DONE, + + BLKDBG_EMPTY_IMAGE_PREPARE, + + BLKDBG_EVENT_MAX, +} BlkDebugEvent; + +#define BLKDBG_EVENT(bs, evt) bdrv_debug_event(bs, evt) +void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event); + +int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event, + const char *tag); +int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag); +int bdrv_debug_resume(BlockDriverState *bs, const char *tag); +bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag); + +/** + * bdrv_get_aio_context: + * + * Returns: the currently bound #AioContext + */ +AioContext *bdrv_get_aio_context(BlockDriverState *bs); + +/** + * bdrv_set_aio_context: + * + * Changes the #AioContext used for fd handlers, timers, and BHs by this + * BlockDriverState and all its children. + * + * This function must be called with iothread lock held. + */ +void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context); +int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz); +int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo); + +void bdrv_io_plug(BlockDriverState *bs); +void bdrv_io_unplug(BlockDriverState *bs); +void bdrv_flush_io_queue(BlockDriverState *bs); + +BlockAcctStats *bdrv_get_stats(BlockDriverState *bs); + +#endif diff --git a/slirp/simh/qemu/block/coroutine.h b/slirp/simh/qemu/block/coroutine.h new file mode 100644 index 00000000..20c027a7 --- /dev/null +++ b/slirp/simh/qemu/block/coroutine.h @@ -0,0 +1,219 @@ +/* + * QEMU coroutine implementation + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Stefan Hajnoczi + * Kevin Wolf + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#ifndef QEMU_COROUTINE_H +#define QEMU_COROUTINE_H + +#include +#include "qemu/typedefs.h" +#include "qemu/queue.h" +#include "qemu/timer.h" + +/** + * Coroutines are a mechanism for stack switching and can be used for + * cooperative userspace threading. These functions provide a simple but + * useful flavor of coroutines that is suitable for writing sequential code, + * rather than callbacks, for operations that need to give up control while + * waiting for events to complete. + * + * These functions are re-entrant and may be used outside the global mutex. + */ + +/** + * Mark a function that executes in coroutine context + * + * Functions that execute in coroutine context cannot be called directly from + * normal functions. In the future it would be nice to enable compiler or + * static checker support for catching such errors. This annotation might make + * it possible and in the meantime it serves as documentation. + * + * For example: + * + * static void coroutine_fn foo(void) { + * .... + * } + */ +#define coroutine_fn + +typedef struct Coroutine Coroutine; + +/** + * Coroutine entry point + * + * When the coroutine is entered for the first time, opaque is passed in as an + * argument. + * + * When this function returns, the coroutine is destroyed automatically and + * execution continues in the caller who last entered the coroutine. + */ +typedef void coroutine_fn CoroutineEntry(void *opaque); + +/** + * Create a new coroutine + * + * Use qemu_coroutine_enter() to actually transfer control to the coroutine. + */ +Coroutine *qemu_coroutine_create(CoroutineEntry *entry); + +/** + * Transfer control to a coroutine + * + * The opaque argument is passed as the argument to the entry point when + * entering the coroutine for the first time. It is subsequently ignored. + */ +void qemu_coroutine_enter(Coroutine *coroutine, void *opaque); + +/** + * Transfer control back to a coroutine's caller + * + * This function does not return until the coroutine is re-entered using + * qemu_coroutine_enter(). + */ +void coroutine_fn qemu_coroutine_yield(void); + +/** + * Get the currently executing coroutine + */ +Coroutine *coroutine_fn qemu_coroutine_self(void); + +/** + * Return whether or not currently inside a coroutine + * + * This can be used to write functions that work both when in coroutine context + * and when not in coroutine context. Note that such functions cannot use the + * coroutine_fn annotation since they work outside coroutine context. + */ +bool qemu_in_coroutine(void); + + + +/** + * CoQueues are a mechanism to queue coroutines in order to continue executing + * them later. They provide the fundamental primitives on which coroutine locks + * are built. + */ +typedef struct CoQueue { + QTAILQ_HEAD(, Coroutine) entries; +} CoQueue; + +/** + * Initialise a CoQueue. This must be called before any other operation is used + * on the CoQueue. + */ +void qemu_co_queue_init(CoQueue *queue); + +/** + * Adds the current coroutine to the CoQueue and transfers control to the + * caller of the coroutine. + */ +void coroutine_fn qemu_co_queue_wait(CoQueue *queue); + +/** + * Restarts the next coroutine in the CoQueue and removes it from the queue. + * + * Returns true if a coroutine was restarted, false if the queue is empty. + */ +bool coroutine_fn qemu_co_queue_next(CoQueue *queue); + +/** + * Restarts all coroutines in the CoQueue and leaves the queue empty. 
+ */ +void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue); + +/** + * Enter the next coroutine in the queue + */ +bool qemu_co_enter_next(CoQueue *queue); + +/** + * Checks if the CoQueue is empty. + */ +bool qemu_co_queue_empty(CoQueue *queue); + + +/** + * Provides a mutex that can be used to synchronise coroutines + */ +typedef struct CoMutex { + bool locked; + CoQueue queue; +} CoMutex; + +/** + * Initialises a CoMutex. This must be called before any other operation is used + * on the CoMutex. + */ +void qemu_co_mutex_init(CoMutex *mutex); + +/** + * Locks the mutex. If the lock cannot be taken immediately, control is + * transferred to the caller of the current coroutine. + */ +void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex); + +/** + * Unlocks the mutex and schedules the next coroutine that was waiting for this + * lock to be run. + */ +void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex); + +typedef struct CoRwlock { + bool writer; + int reader; + CoQueue queue; +} CoRwlock; + +/** + * Initialises a CoRwlock. This must be called before any other operation + * is used on the CoRwlock + */ +void qemu_co_rwlock_init(CoRwlock *lock); + +/** + * Read locks the CoRwlock. If the lock cannot be taken immediately because + * of a parallel writer, control is transferred to the caller of the current + * coroutine. + */ +void qemu_co_rwlock_rdlock(CoRwlock *lock); + +/** + * Write Locks the mutex. If the lock cannot be taken immediately because + * of a parallel reader, control is transferred to the caller of the current + * coroutine. + */ +void qemu_co_rwlock_wrlock(CoRwlock *lock); + +/** + * Unlocks the read/write lock and schedules the next coroutine that was + * waiting for this lock to be run. + */ +void qemu_co_rwlock_unlock(CoRwlock *lock); + +/** + * Yield the coroutine for a given duration + * + * Behaves similarly to co_sleep_ns(), but the sleeping coroutine will be + * resumed when using aio_poll(). + */ +void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type, + int64_t ns); + +/** + * Yield until a file descriptor becomes readable + * + * Note that this function clobbers the handlers for the file descriptor. 
+ */ +void coroutine_fn yield_until_fd_readable(int fd); + +#endif /* QEMU_COROUTINE_H */ diff --git a/slirp/simh/qemu/bswap.h b/slirp/simh/qemu/bswap.h new file mode 100644 index 00000000..9ea899db --- /dev/null +++ b/slirp/simh/qemu/bswap.h @@ -0,0 +1,444 @@ +#define BSWAP_H +#ifndef BSWAP_H +#define BSWAP_H + +#include "config-host.h" +#include +#include +#include +#include "fpu/softfloat.h" + +#ifdef CONFIG_MACHINE_BSWAP_H +# include +# include +# include +#elif defined(__FreeBSD__) +# include +#elif defined(CONFIG_BYTESWAP_H) +# include + +static inline uint16_t bswap16(uint16_t x) +{ + return bswap_16(x); +} + +static inline uint32_t bswap32(uint32_t x) +{ + return bswap_32(x); +} + +static inline uint64_t bswap64(uint64_t x) +{ + return bswap_64(x); +} +# else +static inline uint16_t bswap16(uint16_t x) +{ + return (((x & 0x00ff) << 8) | + ((x & 0xff00) >> 8)); +} + +static inline uint32_t bswap32(uint32_t x) +{ + return (((x & 0x000000ffU) << 24) | + ((x & 0x0000ff00U) << 8) | + ((x & 0x00ff0000U) >> 8) | + ((x & 0xff000000U) >> 24)); +} + +static inline uint64_t bswap64(uint64_t x) +{ + return (((x & 0x00000000000000ffULL) << 56) | + ((x & 0x000000000000ff00ULL) << 40) | + ((x & 0x0000000000ff0000ULL) << 24) | + ((x & 0x00000000ff000000ULL) << 8) | + ((x & 0x000000ff00000000ULL) >> 8) | + ((x & 0x0000ff0000000000ULL) >> 24) | + ((x & 0x00ff000000000000ULL) >> 40) | + ((x & 0xff00000000000000ULL) >> 56)); +} +#endif /* ! CONFIG_MACHINE_BSWAP_H */ + +static inline void bswap16s(uint16_t *s) +{ + *s = bswap16(*s); +} + +static inline void bswap32s(uint32_t *s) +{ + *s = bswap32(*s); +} + +static inline void bswap64s(uint64_t *s) +{ + *s = bswap64(*s); +} + +#if defined(HOST_WORDS_BIGENDIAN) +#define be_bswap(v, size) (v) +#define le_bswap(v, size) glue(bswap, size)(v) +#define be_bswaps(v, size) +#define le_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0) +#else +#define le_bswap(v, size) (v) +#define be_bswap(v, size) glue(bswap, size)(v) +#define le_bswaps(v, size) +#define be_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0) +#endif + +#define CPU_CONVERT(endian, size, type)\ +static inline type endian ## size ## _to_cpu(type v)\ +{\ + return glue(endian, _bswap)(v, size);\ +}\ +\ +static inline type cpu_to_ ## endian ## size(type v)\ +{\ + return glue(endian, _bswap)(v, size);\ +}\ +\ +static inline void endian ## size ## _to_cpus(type *p)\ +{\ + glue(endian, _bswaps)(p, size);\ +}\ +\ +static inline void cpu_to_ ## endian ## size ## s(type *p)\ +{\ + glue(endian, _bswaps)(p, size);\ +}\ +\ +static inline type endian ## size ## _to_cpup(const type *p)\ +{\ + return glue(glue(endian, size), _to_cpu)(*p);\ +}\ +\ +static inline void cpu_to_ ## endian ## size ## w(type *p, type v)\ +{\ + *p = glue(glue(cpu_to_, endian), size)(v);\ +} + +CPU_CONVERT(be, 16, uint16_t) +CPU_CONVERT(be, 32, uint32_t) +CPU_CONVERT(be, 64, uint64_t) + +CPU_CONVERT(le, 16, uint16_t) +CPU_CONVERT(le, 32, uint32_t) +CPU_CONVERT(le, 64, uint64_t) + +/* len must be one of 1, 2, 4 */ +static inline uint32_t qemu_bswap_len(uint32_t value, int len) +{ + return bswap32(value) >> (32 - 8 * len); +} + +/* Unions for reinterpreting between floats and integers. 
*/ + +typedef union { + float32 f; + uint32_t l; +} CPU_FloatU; + +typedef union { + float64 d; +#if defined(HOST_WORDS_BIGENDIAN) + struct { + uint32_t upper; + uint32_t lower; + } l; +#else + struct { + uint32_t lower; + uint32_t upper; + } l; +#endif + uint64_t ll; +} CPU_DoubleU; + +typedef union { + floatx80 d; + struct { + uint64_t lower; + uint16_t upper; + } l; +} CPU_LDoubleU; + +typedef union { + float128 q; +#if defined(HOST_WORDS_BIGENDIAN) + struct { + uint32_t upmost; + uint32_t upper; + uint32_t lower; + uint32_t lowest; + } l; + struct { + uint64_t upper; + uint64_t lower; + } ll; +#else + struct { + uint32_t lowest; + uint32_t lower; + uint32_t upper; + uint32_t upmost; + } l; + struct { + uint64_t lower; + uint64_t upper; + } ll; +#endif +} CPU_QuadU; + +/* unaligned/endian-independent pointer access */ + +/* + * the generic syntax is: + * + * load: ld{type}{sign}{size}{endian}_p(ptr) + * + * store: st{type}{size}{endian}_p(ptr, val) + * + * Note there are small differences with the softmmu access API! + * + * type is: + * (empty): integer access + * f : float access + * + * sign is: + * (empty): for 32 or 64 bit sizes (including floats and doubles) + * u : unsigned + * s : signed + * + * size is: + * b: 8 bits + * w: 16 bits + * l: 32 bits + * q: 64 bits + * + * endian is: + * he : host endian + * be : big endian + * le : little endian + * te : target endian + * (except for byte accesses, which have no endian infix). + * + * The target endian accessors are obviously only available to source + * files which are built per-target; they are defined in cpu-all.h. + * + * In all cases these functions take a host pointer. + * For accessors that take a guest address rather than a + * host address, see the cpu_{ld,st}_* accessors defined in + * cpu_ldst.h. + */ + +static inline int ldub_p(const void *ptr) +{ + return *(uint8_t *)ptr; +} + +static inline int ldsb_p(const void *ptr) +{ + return *(int8_t *)ptr; +} + +static inline void stb_p(void *ptr, uint8_t v) +{ + *(uint8_t *)ptr = v; +} + +/* Any compiler worth its salt will turn these memcpy into native unaligned + operations. Thus we don't need to play games with packed attributes, or + inline byte-by-byte stores. 
*/ + +static inline int lduw_he_p(const void *ptr) +{ + uint16_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline int ldsw_he_p(const void *ptr) +{ + int16_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline void stw_he_p(void *ptr, uint16_t v) +{ + memcpy(ptr, &v, sizeof(v)); +} + +static inline int ldl_he_p(const void *ptr) +{ + int32_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline void stl_he_p(void *ptr, uint32_t v) +{ + memcpy(ptr, &v, sizeof(v)); +} + +static inline uint64_t ldq_he_p(const void *ptr) +{ + uint64_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline void stq_he_p(void *ptr, uint64_t v) +{ + memcpy(ptr, &v, sizeof(v)); +} + +static inline int lduw_le_p(const void *ptr) +{ + return (uint16_t)le_bswap(lduw_he_p(ptr), 16); +} + +static inline int ldsw_le_p(const void *ptr) +{ + return (int16_t)le_bswap(lduw_he_p(ptr), 16); +} + +static inline int ldl_le_p(const void *ptr) +{ + return le_bswap(ldl_he_p(ptr), 32); +} + +static inline uint64_t ldq_le_p(const void *ptr) +{ + return le_bswap(ldq_he_p(ptr), 64); +} + +static inline void stw_le_p(void *ptr, uint16_t v) +{ + stw_he_p(ptr, le_bswap(v, 16)); +} + +static inline void stl_le_p(void *ptr, uint32_t v) +{ + stl_he_p(ptr, le_bswap(v, 32)); +} + +static inline void stq_le_p(void *ptr, uint64_t v) +{ + stq_he_p(ptr, le_bswap(v, 64)); +} + +/* float access */ + +static inline float32 ldfl_le_p(const void *ptr) +{ + CPU_FloatU u; + u.l = ldl_le_p(ptr); + return u.f; +} + +static inline void stfl_le_p(void *ptr, float32 v) +{ + CPU_FloatU u; + u.f = v; + stl_le_p(ptr, u.l); +} + +static inline float64 ldfq_le_p(const void *ptr) +{ + CPU_DoubleU u; + u.ll = ldq_le_p(ptr); + return u.d; +} + +static inline void stfq_le_p(void *ptr, float64 v) +{ + CPU_DoubleU u; + u.d = v; + stq_le_p(ptr, u.ll); +} + +static inline int lduw_be_p(const void *ptr) +{ + return (uint16_t)be_bswap(lduw_he_p(ptr), 16); +} + +static inline int ldsw_be_p(const void *ptr) +{ + return (int16_t)be_bswap(lduw_he_p(ptr), 16); +} + +static inline int ldl_be_p(const void *ptr) +{ + return be_bswap(ldl_he_p(ptr), 32); +} + +static inline uint64_t ldq_be_p(const void *ptr) +{ + return be_bswap(ldq_he_p(ptr), 64); +} + +static inline void stw_be_p(void *ptr, uint16_t v) +{ + stw_he_p(ptr, be_bswap(v, 16)); +} + +static inline void stl_be_p(void *ptr, uint32_t v) +{ + stl_he_p(ptr, be_bswap(v, 32)); +} + +static inline void stq_be_p(void *ptr, uint64_t v) +{ + stq_he_p(ptr, be_bswap(v, 64)); +} + +/* float access */ + +static inline float32 ldfl_be_p(const void *ptr) +{ + CPU_FloatU u; + u.l = ldl_be_p(ptr); + return u.f; +} + +static inline void stfl_be_p(void *ptr, float32 v) +{ + CPU_FloatU u; + u.f = v; + stl_be_p(ptr, u.l); +} + +static inline float64 ldfq_be_p(const void *ptr) +{ + CPU_DoubleU u; + u.ll = ldq_be_p(ptr); + return u.d; +} + +static inline void stfq_be_p(void *ptr, float64 v) +{ + CPU_DoubleU u; + u.d = v; + stq_be_p(ptr, u.ll); +} + +static inline unsigned long leul_to_cpu(unsigned long v) +{ + /* In order to break an include loop between here and + qemu-common.h, don't rely on HOST_LONG_BITS. 
*/ +#if ULONG_MAX == UINT32_MAX + return le_bswap(v, 32); +#elif ULONG_MAX == UINT64_MAX + return le_bswap(v, 64); +#else +# error Unknown sizeof long +#endif +} + +#undef le_bswap +#undef be_bswap +#undef le_bswaps +#undef be_bswaps + +#endif /* BSWAP_H */ diff --git a/slirp/simh/qemu/compiler.h b/slirp/simh/qemu/compiler.h new file mode 100644 index 00000000..93fc210e --- /dev/null +++ b/slirp/simh/qemu/compiler.h @@ -0,0 +1,128 @@ +/* public domain */ + +#ifndef COMPILER_H +#define COMPILER_H + +#include "config-host.h" + +/*---------------------------------------------------------------------------- +| The macro QEMU_GNUC_PREREQ tests for minimum version of the GNU C compiler. +| The code is a copy of SOFTFLOAT_GNUC_PREREQ, see softfloat-macros.h. +*----------------------------------------------------------------------------*/ +#if defined(__GNUC__) && defined(__GNUC_MINOR__) +# define QEMU_GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) +#else +# define QEMU_GNUC_PREREQ(maj, min) 0 +#endif + +#if defined(__GNUC__) +#define QEMU_NORETURN __attribute__ ((__noreturn__)) +#else +#define QEMU_NORETURN +#endif + +#if QEMU_GNUC_PREREQ(3, 4) +#define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) +#else +#define QEMU_WARN_UNUSED_RESULT +#endif + +#if QEMU_GNUC_PREREQ(4, 0) +#define QEMU_SENTINEL __attribute__((sentinel)) +#else +#define QEMU_SENTINEL +#endif + +#if QEMU_GNUC_PREREQ(4, 3) +#define QEMU_ARTIFICIAL __attribute__((always_inline, artificial)) +#else +#define QEMU_ARTIFICIAL +#endif + +#ifdef _MSC_VER +# define PACKED_BEGIN __pragma( pack(push, 1) ) +# define PACKED_END __pragma( pack(pop) ) +# define QEMU_PACKED +#else +# define PACKED_BEGIN +#if defined(_WIN32) +# define PACKED_END __attribute__((gcc_struct, packed)) +# define QEMU_PACKED __attribute__((gcc_struct, packed)) +#else +# define PACKED_END __attribute__((packed)) +# define QEMU_PACKED __attribute__((packed)) +#endif +#endif + +#ifndef glue +#define xglue(x, y) x ## y +#define glue(x, y) xglue(x, y) +#define stringify(s) tostring(s) +#define tostring(s) #s +#endif + +#ifndef likely +#if __GNUC__ < 3 +#define __builtin_expect(x, n) (x) +#endif + +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +#ifndef container_of +#define container_of(ptr, type, member) \ + ((type *) ((char *)(ptr) - offsetof(type, member))) +#endif + +/* Convert from a base type to a parent type, with compile time checking. */ +#ifdef __GNUC__ +#define DO_UPCAST(type, field, dev) ( __extension__ ( { \ + char __attribute__((unused)) offset_must_be_zero[ \ + -offsetof(type, field)]; \ + container_of(dev, type, field);})) +#else +#define DO_UPCAST(type, field, dev) container_of(dev, type, field) +#endif + +#define typeof_field(type, field) typeof(((type *)0)->field) +#define type_check(t1,t2) ((t1*)0 - (t2*)0) + +#ifndef always_inline +#if !((__GNUC__ < 3) || defined(__APPLE__)) +#ifdef __OPTIMIZE__ +#undef inline +#define inline __attribute__ (( always_inline )) __inline__ +#endif +#endif +#else +#undef inline +#define inline always_inline +#endif +#ifdef _MSC_VER +#undef inline +#define inline __inline +#endif + +#define QEMU_BUILD_BUG_ON(x) \ + typedef char glue(qemu_build_bug_on__,__LINE__)[(x)?-1:1] __attribute__((unused)); + +#if defined __GNUC__ +# if !QEMU_GNUC_PREREQ(4, 4) + /* gcc versions before 4.4.x don't support gnu_printf, so use printf. 
*/
+#  define GCC_FMT_ATTR(n, m) __attribute__((format(printf, n, m)))
+# else
+   /* Use gnu_printf when supported (qemu uses standard format strings). */
+#  define GCC_FMT_ATTR(n, m) __attribute__((format(gnu_printf, n, m)))
+#  if defined(_WIN32)
+   /* Map __printf__ to __gnu_printf__ because we want standard format strings
+    * even when MinGW or GLib include files use __printf__. */
+#   define __printf__ __gnu_printf__
+#  endif
+# endif
+#else
+#define GCC_FMT_ATTR(n, m)
+#endif
+
+#endif /* COMPILER_H */
diff --git a/slirp/simh/qemu/config-file.h b/slirp/simh/qemu/config-file.h
new file mode 100644
index 00000000..d4ba20e0
--- /dev/null
+++ b/slirp/simh/qemu/config-file.h
@@ -0,0 +1,33 @@
+#ifndef QEMU_CONFIG_H
+#define QEMU_CONFIG_H
+
+#include <stdio.h>
+#include "qemu/option.h"
+#include "qapi/error.h"
+#include "qapi/qmp/qdict.h"
+
+QemuOptsList *qemu_find_opts(const char *group);
+QemuOptsList *qemu_find_opts_err(const char *group, Error **errp);
+QemuOpts *qemu_find_opts_singleton(const char *group);
+
+void qemu_add_opts(QemuOptsList *list);
+void qemu_add_drive_opts(QemuOptsList *list);
+int qemu_set_option(const char *str);
+int qemu_global_option(const char *str);
+void qemu_add_globals(void);
+
+void qemu_config_write(FILE *fp);
+int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname);
+
+int qemu_read_config_file(const char *filename);
+
+/* Parse QDict options as a replacement for a config file (allowing multiple
+   enumerated (0..(n-1)) configuration "sections") */
+void qemu_config_parse_qdict(QDict *options, QemuOptsList **lists,
+                             Error **errp);
+
+/* Read default QEMU config files
+ */
+int qemu_read_default_config_files(bool userconfig);
+
+#endif /* QEMU_CONFIG_H */
diff --git a/slirp/simh/qemu/error-report.h b/slirp/simh/qemu/error-report.h
new file mode 100644
index 00000000..0a63cfac
--- /dev/null
+++ b/slirp/simh/qemu/error-report.h
@@ -0,0 +1,46 @@
+/*
+ * Error reporting
+ *
+ * Copyright (C) 2010 Red Hat Inc.
+ *
+ * Authors:
+ *  Markus Armbruster ,
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_ERROR_H
+#define QEMU_ERROR_H
+
+#include <stdarg.h>
+#include <stdbool.h>
+#include "qemu/compiler.h"
+
+typedef struct Location {
+    /* all members are private to qemu-error.c */
+    enum { LOC_NONE, LOC_CMDLINE, LOC_FILE } kind;
+    int num;
+    const void *ptr;
+    struct Location *prev;
+} Location;
+
+Location *loc_push_restore(Location *loc);
+Location *loc_push_none(Location *loc);
+Location *loc_pop(Location *loc);
+Location *loc_save(Location *loc);
+void loc_restore(Location *loc);
+void loc_set_none(void);
+void loc_set_cmdline(char **argv, int idx, int cnt);
+void loc_set_file(const char *fname, int lno);
+
+void error_vprintf(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
+void error_printf(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+void error_printf_unless_qmp(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+void error_set_progname(const char *argv0);
+void error_vreport(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
+//void error_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+const char *error_get_progname(void);
+extern bool enable_timestamp_msg;
+
+#endif
diff --git a/slirp/simh/qemu/event_notifier.h b/slirp/simh/qemu/event_notifier.h
new file mode 100644
index 00000000..a6e3d07f
--- /dev/null
+++ b/slirp/simh/qemu/event_notifier.h
@@ -0,0 +1,49 @@
+/*
+ * event notifier support
+ *
+ * Copyright Red Hat, Inc. 2010
+ *
+ * Authors:
+ *  Michael S.
Tsirkin + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_EVENT_NOTIFIER_H +#define QEMU_EVENT_NOTIFIER_H + +#include "qemu-common.h" + +#ifdef _WIN32 +#include +#include +#endif + +struct EventNotifier { +#ifdef _WIN32 + HANDLE event; +#else + int rfd; + int wfd; +#endif +}; + +typedef void EventNotifierHandler(EventNotifier *); + +int event_notifier_init(EventNotifier *, int active); +void event_notifier_cleanup(EventNotifier *); +int event_notifier_set(EventNotifier *); +int event_notifier_test_and_clear(EventNotifier *); +int event_notifier_set_handler(EventNotifier *, EventNotifierHandler *); + +#if 0 +#ifdef CONFIG_POSIX +void event_notifier_init_fd(EventNotifier *, int fd); +int event_notifier_get_fd(EventNotifier *); +#else +HANDLE event_notifier_get_handle(EventNotifier *); +#endif +#endif + +#endif diff --git a/slirp/simh/qemu/exec/cpu-common.h b/slirp/simh/qemu/exec/cpu-common.h new file mode 100644 index 00000000..9fb1d541 --- /dev/null +++ b/slirp/simh/qemu/exec/cpu-common.h @@ -0,0 +1,138 @@ +#ifndef CPU_COMMON_H +#define CPU_COMMON_H 1 + +/* CPU interfaces that are target independent. */ + +#ifndef CONFIG_USER_ONLY +#include "exec/hwaddr.h" +#endif + +#ifndef NEED_CPU_H +#include "exec/poison.h" +#endif + +#include "qemu/bswap.h" +#include "qemu/queue.h" +#include "qemu/fprintf-fn.h" +#include "qemu/typedefs.h" + +/** + * CPUListState: + * @cpu_fprintf: Print function. + * @file: File to print to using @cpu_fprint. + * + * State commonly used for iterating over CPU models. + */ +typedef struct CPUListState { + fprintf_function cpu_fprintf; + FILE *file; +} CPUListState; + +typedef enum MMUAccessType { + MMU_DATA_LOAD = 0, + MMU_DATA_STORE = 1, + MMU_INST_FETCH = 2 +} MMUAccessType; + +#if !defined(CONFIG_USER_ONLY) + +enum device_endian { + DEVICE_NATIVE_ENDIAN, + DEVICE_BIG_ENDIAN, + DEVICE_LITTLE_ENDIAN, +}; + +/* address in the RAM (different from a physical address) */ +#if defined(CONFIG_XEN_BACKEND) +typedef uint64_t ram_addr_t; +# define RAM_ADDR_MAX UINT64_MAX +# define RAM_ADDR_FMT "%" PRIx64 +#else +typedef uintptr_t ram_addr_t; +# define RAM_ADDR_MAX UINTPTR_MAX +# define RAM_ADDR_FMT "%" PRIxPTR +#endif + +extern ram_addr_t ram_size; +ram_addr_t get_current_ram_size(void); + +/* memory API */ + +typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value); +typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr); + +void qemu_ram_remap(ram_addr_t addr, ram_addr_t length); +/* This should not be used by devices. 
*/ +MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr); +void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev); +void qemu_ram_unset_idstr(ram_addr_t addr); + +void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf, + int len, int is_write); +static inline void cpu_physical_memory_read(hwaddr addr, + void *buf, int len) +{ + cpu_physical_memory_rw(addr, buf, len, 0); +} +static inline void cpu_physical_memory_write(hwaddr addr, + const void *buf, int len) +{ + cpu_physical_memory_rw(addr, (void *)buf, len, 1); +} +void *cpu_physical_memory_map(hwaddr addr, + hwaddr *plen, + int is_write); +void cpu_physical_memory_unmap(void *buffer, hwaddr len, + int is_write, hwaddr access_len); +void cpu_register_map_client(QEMUBH *bh); +void cpu_unregister_map_client(QEMUBH *bh); + +bool cpu_physical_memory_is_io(hwaddr phys_addr); + +/* Coalesced MMIO regions are areas where write operations can be reordered. + * This usually implies that write operations are side-effect free. This allows + * batching which can make a major impact on performance when using + * virtualization. + */ +void qemu_flush_coalesced_mmio_buffer(void); + +uint32_t ldub_phys(AddressSpace *as, hwaddr addr); +uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr); +uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr); +uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr); +uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr); +uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr); +uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr); +void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val); +void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val); + +#ifdef NEED_CPU_H +uint32_t lduw_phys(AddressSpace *as, hwaddr addr); +uint32_t ldl_phys(AddressSpace *as, hwaddr addr); +uint64_t ldq_phys(AddressSpace *as, hwaddr addr); +void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val); +void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val); +#endif + +void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr, + const uint8_t *buf, int len); +void cpu_flush_icache_range(hwaddr start, int len); + +extern struct MemoryRegion io_mem_rom; +extern struct MemoryRegion io_mem_notdirty; + +typedef int (RAMBlockIterFunc)(const char *block_name, void *host_addr, + ram_addr_t offset, ram_addr_t length, void *opaque); + +int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque); + +#endif + +#endif /* !CPU_COMMON_H */ diff --git a/slirp/simh/qemu/exec/hwaddr.h b/slirp/simh/qemu/exec/hwaddr.h new file mode 100644 index 00000000..c9eb78fb --- /dev/null +++ b/slirp/simh/qemu/exec/hwaddr.h @@ -0,0 +1,20 @@ +/* Define hwaddr if it exists. */ + +#ifndef HWADDR_H +#define HWADDR_H + +#define HWADDR_BITS 64 +/* hwaddr is the type of a physical address (its size can + be different from 'target_ulong'). 
*/ + +typedef uint64_t hwaddr; +#define HWADDR_MAX UINT64_MAX +#define TARGET_FMT_plx "%016" PRIx64 +#define HWADDR_PRId PRId64 +#define HWADDR_PRIi PRIi64 +#define HWADDR_PRIo PRIo64 +#define HWADDR_PRIu PRIu64 +#define HWADDR_PRIx PRIx64 +#define HWADDR_PRIX PRIX64 + +#endif diff --git a/slirp/simh/qemu/exec/ioport.h b/slirp/simh/qemu/exec/ioport.h new file mode 100644 index 00000000..9673aaf8 --- /dev/null +++ b/slirp/simh/qemu/exec/ioport.h @@ -0,0 +1,80 @@ +/* + * defines ioport related functions + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/************************************************************************** + * IO ports API + */ + +#ifndef IOPORT_H +#define IOPORT_H + +#include "qemu-common.h" +//#include "qom/object.h" +//#include "exec/memory.h" + +typedef uint32_t pio_addr_t; +#define FMT_pioaddr PRIx32 + +#define MAX_IOPORTS (64 * 1024) +#define IOPORTS_MASK (MAX_IOPORTS - 1) + +typedef struct MemoryRegionPortio { + uint32_t offset; + uint32_t len; + unsigned size; + uint32_t (*read)(void *opaque, uint32_t address); + void (*write)(void *opaque, uint32_t address, uint32_t data); + uint32_t base; /* private field */ +} MemoryRegionPortio; + +#define PORTIO_END_OF_LIST() { } + +#ifndef CONFIG_USER_ONLY +extern const MemoryRegionOps unassigned_io_ops; +#endif + +void cpu_outb(pio_addr_t addr, uint8_t val); +void cpu_outw(pio_addr_t addr, uint16_t val); +void cpu_outl(pio_addr_t addr, uint32_t val); +uint8_t cpu_inb(pio_addr_t addr); +uint16_t cpu_inw(pio_addr_t addr); +uint32_t cpu_inl(pio_addr_t addr); + +typedef struct PortioList { + const struct MemoryRegionPortio *ports; + Object *owner; + struct MemoryRegion *address_space; + unsigned nr; + struct MemoryRegion **regions; + void *opaque; + const char *name; + bool flush_coalesced_mmio; +} PortioList; + +void portio_list_init(PortioList *piolist, Object *owner, + const struct MemoryRegionPortio *callbacks, + void *opaque, const char *name); +void portio_list_set_flush_coalesced(PortioList *piolist); +void portio_list_destroy(PortioList *piolist); +void portio_list_add(PortioList *piolist, + struct MemoryRegion *address_space, + uint32_t addr); +void portio_list_del(PortioList *piolist); + +#endif /* IOPORT_H */ diff --git a/slirp/simh/qemu/exec/memattrs.h b/slirp/simh/qemu/exec/memattrs.h new file mode 100644 index 00000000..f8537a8d --- /dev/null +++ b/slirp/simh/qemu/exec/memattrs.h @@ -0,0 +1,49 @@ +/* + * Memory transaction attributes + * + * Copyright (c) 2015 Linaro Limited. + * + * Authors: + * Peter Maydell + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef MEMATTRS_H +#define MEMATTRS_H + +/* Every memory transaction has associated with it a set of + * attributes. 
Some of these are generic (such as the ID of
+ * the bus master); some are specific to a particular kind of
+ * bus (such as the ARM Secure/NonSecure bit). We define them
+ * all as non-overlapping bitfields in a single struct to avoid
+ * confusion if different parts of QEMU used the same bit for
+ * different semantics.
+ */
+typedef struct MemTxAttrs {
+    /* Bus masters which don't specify any attributes will get this
+     * (via the MEMTXATTRS_UNSPECIFIED constant), so that we can
+     * distinguish "all attributes deliberately clear" from
+     * "didn't specify" if necessary.
+     */
+    unsigned int unspecified:1;
+    /* ARM/AMBA: TrustZone Secure access
+     * x86: System Management Mode access
+     */
+    unsigned int secure:1;
+    /* Memory access is usermode (unprivileged) */
+    unsigned int user:1;
+    /* Stream ID (for MSI for example) */
+    unsigned int stream_id:16;
+} MemTxAttrs;
+
+/* Bus masters which don't specify any attributes will get this,
+ * which has all attribute bits clear except the topmost one
+ * (so that we can distinguish "all attributes deliberately clear"
+ * from "didn't specify" if necessary).
+ */
+#define MEMTXATTRS_UNSPECIFIED ((MemTxAttrs) { .unspecified = 1 })
+
+#endif
diff --git a/slirp/simh/qemu/exec/memory.h b/slirp/simh/qemu/exec/memory.h
new file mode 100644
index 00000000..d6873018
--- /dev/null
+++ b/slirp/simh/qemu/exec/memory.h
@@ -0,0 +1,1348 @@
+/*
+ * Physical memory management API
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *  Avi Kivity
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMORY_H
+#define MEMORY_H
+
+#ifndef CONFIG_USER_ONLY
+
+#define DIRTY_MEMORY_VGA       0
+#define DIRTY_MEMORY_CODE      1
+#define DIRTY_MEMORY_MIGRATION 2
+#define DIRTY_MEMORY_NUM       3 /* num of dirty bits */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "exec/cpu-common.h"
+#ifndef CONFIG_USER_ONLY
+#include "exec/hwaddr.h"
+#endif
+#include "exec/memattrs.h"
+#include "qemu/queue.h"
+#include "qemu/int128.h"
+#include "qemu/notify.h"
+#include "qapi/error.h"
+//#include "qom/object.h"
+//#include "qemu/rcu.h"
+
+#define MAX_PHYS_ADDR_SPACE_BITS 62
+#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
+
+#define TYPE_MEMORY_REGION "qemu:memory-region"
+#define MEMORY_REGION(obj) \
+    OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
+
+typedef struct MemoryRegionOps MemoryRegionOps;
+typedef struct MemoryRegionMmio MemoryRegionMmio;
+
+struct MemoryRegionMmio {
+    CPUReadMemoryFunc *read[3];
+    CPUWriteMemoryFunc *write[3];
+};
+
+typedef struct IOMMUTLBEntry IOMMUTLBEntry;
+
+/* See address_space_translate: bit 0 is read, bit 1 is write. */
+typedef enum {
+    IOMMU_NONE = 0,
+    IOMMU_RO   = 1,
+    IOMMU_WO   = 2,
+    IOMMU_RW   = 3,
+} IOMMUAccessFlags;
+
+struct IOMMUTLBEntry {
+    AddressSpace    *target_as;
+    hwaddr           iova;
+    hwaddr           translated_addr;
+    hwaddr           addr_mask;  /* 0xfff = 4k translation */
+    IOMMUAccessFlags perm;
+};
+
+/* New-style MMIO accessors can indicate that the transaction failed.
+ * A zero (MEMTX_OK) response means success; anything else is a failure
+ * of some kind. The memory subsystem will bitwise-OR together results
+ * if it is synthesizing an operation from multiple smaller accesses.
+ */ +#define MEMTX_OK 0 +#define MEMTX_ERROR (1U << 0) /* device returned an error */ +#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */ +typedef uint32_t MemTxResult; + +/* + * Memory region callbacks + */ +struct MemoryRegionOps { + /* Read from the memory region. @addr is relative to @mr; @size is + * in bytes. */ + uint64_t (*read)(void *opaque, + hwaddr addr, + unsigned size); + /* Write to the memory region. @addr is relative to @mr; @size is + * in bytes. */ + void (*write)(void *opaque, + hwaddr addr, + uint64_t data, + unsigned size); + + MemTxResult (*read_with_attrs)(void *opaque, + hwaddr addr, + uint64_t *data, + unsigned size, + MemTxAttrs attrs); + MemTxResult (*write_with_attrs)(void *opaque, + hwaddr addr, + uint64_t data, + unsigned size, + MemTxAttrs attrs); + + enum device_endian endianness; + /* Guest-visible constraints: */ + struct { + /* If nonzero, specify bounds on access sizes beyond which a machine + * check is thrown. + */ + unsigned min_access_size; + unsigned max_access_size; + /* If true, unaligned accesses are supported. Otherwise unaligned + * accesses throw machine checks. + */ + bool unaligned; + /* + * If present, and returns #false, the transaction is not accepted + * by the device (and results in machine dependent behaviour such + * as a machine check exception). + */ + bool (*accepts)(void *opaque, hwaddr addr, + unsigned size, bool is_write); + } valid; + /* Internal implementation constraints: */ + struct { + /* If nonzero, specifies the minimum size implemented. Smaller sizes + * will be rounded upwards and a partial result will be returned. + */ + unsigned min_access_size; + /* If nonzero, specifies the maximum size implemented. Larger sizes + * will be done as a series of accesses with smaller sizes. + */ + unsigned max_access_size; + /* If true, unaligned accesses are supported. Otherwise all accesses + * are converted to (possibly multiple) naturally aligned accesses. + */ + bool unaligned; + } impl; + + /* If .read and .write are not present, old_mmio may be used for + * backwards compatibility with old mmio registration + */ + const MemoryRegionMmio old_mmio; +}; + +typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps; + +struct MemoryRegionIOMMUOps { + /* Return a TLB entry that contains a given address. 
*/ + IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write); +}; + +typedef struct CoalescedMemoryRange CoalescedMemoryRange; +typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd; + +struct MemoryRegion { + Object parent_obj; + /* All fields are private - violators will be prosecuted */ + const MemoryRegionOps *ops; + const MemoryRegionIOMMUOps *iommu_ops; + void *opaque; + MemoryRegion *container; + Int128 size; + hwaddr addr; + void (*destructor)(MemoryRegion *mr); + ram_addr_t ram_addr; + uint64_t align; + bool subpage; + bool terminates; + bool romd_mode; + bool ram; + bool skip_dump; + bool readonly; /* For RAM regions */ + bool enabled; + bool rom_device; + bool warning_printed; /* For reservations */ + bool flush_coalesced_mmio; + bool global_locking; + uint8_t vga_logging_count; + MemoryRegion *alias; + hwaddr alias_offset; + int32_t priority; + bool may_overlap; + QTAILQ_HEAD(subregions, MemoryRegion) subregions; + QTAILQ_ENTRY(MemoryRegion) subregions_link; + QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced; + const char *name; + uint8_t dirty_log_mask; + unsigned ioeventfd_nb; + MemoryRegionIoeventfd *ioeventfds; + NotifierList iommu_notify; +}; + +/** + * MemoryListener: callbacks structure for updates to the physical memory map + * + * Allows a component to adjust to changes in the guest-visible memory map. + * Use with memory_listener_register() and memory_listener_unregister(). + */ +struct MemoryListener { + void (*begin)(MemoryListener *listener); + void (*commit)(MemoryListener *listener); + void (*region_add)(MemoryListener *listener, MemoryRegionSection *section); + void (*region_del)(MemoryListener *listener, MemoryRegionSection *section); + void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section); + void (*log_start)(MemoryListener *listener, MemoryRegionSection *section, + int old, int new); + void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section, + int old, int new); + void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section); + void (*log_global_start)(MemoryListener *listener); + void (*log_global_stop)(MemoryListener *listener); + void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section, + bool match_data, uint64_t data, EventNotifier *e); + void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section, + bool match_data, uint64_t data, EventNotifier *e); + void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section, + hwaddr addr, hwaddr len); + void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section, + hwaddr addr, hwaddr len); + /* Lower = earlier (during add), later (during del) */ + unsigned priority; + AddressSpace *address_space_filter; + QTAILQ_ENTRY(MemoryListener) link; +}; + +/** + * AddressSpace: describes a mapping of addresses to #MemoryRegion objects + */ +struct AddressSpace { + /* All fields are private. */ + struct rcu_head rcu; + char *name; + MemoryRegion *root; + + /* Accessed via RCU. 
*/ + struct FlatView *current_map; + + int ioeventfd_nb; + struct MemoryRegionIoeventfd *ioeventfds; + struct AddressSpaceDispatch *dispatch; + struct AddressSpaceDispatch *next_dispatch; + MemoryListener dispatch_listener; + + QTAILQ_ENTRY(AddressSpace) address_spaces_link; +}; + +/** + * MemoryRegionSection: describes a fragment of a #MemoryRegion + * + * @mr: the region, or %NULL if empty + * @address_space: the address space the region is mapped in + * @offset_within_region: the beginning of the section, relative to @mr's start + * @size: the size of the section; will not exceed @mr's boundaries + * @offset_within_address_space: the address of the first byte of the section + * relative to the region's address space + * @readonly: writes to this section are ignored + */ +struct MemoryRegionSection { + MemoryRegion *mr; + AddressSpace *address_space; + hwaddr offset_within_region; + Int128 size; + hwaddr offset_within_address_space; + bool readonly; +}; + +/** + * memory_region_init: Initialize a memory region + * + * The region typically acts as a container for other memory regions. Use + * memory_region_add_subregion() to add subregions. + * + * @mr: the #MemoryRegion to be initialized + * @owner: the object that tracks the region's reference count + * @name: used for debugging; not visible to the user or ABI + * @size: size of the region; any subregions beyond this size will be clipped + */ +void memory_region_init(MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size); + +/** + * memory_region_ref: Add 1 to a memory region's reference count + * + * Whenever memory regions are accessed outside the BQL, they need to be + * preserved against hot-unplug. MemoryRegions actually do not have their + * own reference count; they piggyback on a QOM object, their "owner". + * This function adds a reference to the owner. + * + * All MemoryRegions must have an owner if they can disappear, even if the + * device they belong to operates exclusively under the BQL. This is because + * the region could be returned at any time by memory_region_find, and this + * is usually under guest control. + * + * @mr: the #MemoryRegion + */ +void memory_region_ref(MemoryRegion *mr); + +/** + * memory_region_unref: Remove 1 to a memory region's reference count + * + * Whenever memory regions are accessed outside the BQL, they need to be + * preserved against hot-unplug. MemoryRegions actually do not have their + * own reference count; they piggyback on a QOM object, their "owner". + * This function removes a reference to the owner and possibly destroys it. + * + * @mr: the #MemoryRegion + */ +void memory_region_unref(MemoryRegion *mr); + +/** + * memory_region_init_io: Initialize an I/O memory region. + * + * Accesses into the region will cause the callbacks in @ops to be called. + * if @size is nonzero, subregions will be clipped to @size. + * + * @mr: the #MemoryRegion to be initialized. + * @owner: the object that tracks the region's reference count + * @ops: a structure containing read and write callbacks to be used when + * I/O is performed on the region. + * @opaque: passed to the read and write callbacks of the @ops structure. + * @name: used for debugging; not visible to the user or ABI + * @size: size of the region. + */ +void memory_region_init_io(MemoryRegion *mr, + struct Object *owner, + const MemoryRegionOps *ops, + void *opaque, + const char *name, + uint64_t size); + +/** + * memory_region_init_ram: Initialize RAM memory region. 
Accesses into the + * region will modify memory directly. + * + * @mr: the #MemoryRegion to be initialized. + * @owner: the object that tracks the region's reference count + * @name: the name of the region. + * @size: size of the region. + * @errp: pointer to Error*, to store an error if it happens. + */ +void memory_region_init_ram(MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size, + Error **errp); + +/** + * memory_region_init_resizeable_ram: Initialize memory region with resizeable + * RAM. Accesses into the region will + * modify memory directly. Only an initial + * portion of this RAM is actually used. + * The used size can change across reboots. + * + * @mr: the #MemoryRegion to be initialized. + * @owner: the object that tracks the region's reference count + * @name: the name of the region. + * @size: used size of the region. + * @max_size: max size of the region. + * @resized: callback to notify owner about used size change. + * @errp: pointer to Error*, to store an error if it happens. + */ +void memory_region_init_resizeable_ram(MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size, + uint64_t max_size, + void (*resized)(const char*, + uint64_t length, + void *host), + Error **errp); +#ifdef __linux__ +/** + * memory_region_init_ram_from_file: Initialize RAM memory region with a + * mmap-ed backend. + * + * @mr: the #MemoryRegion to be initialized. + * @owner: the object that tracks the region's reference count + * @name: the name of the region. + * @size: size of the region. + * @share: %true if memory must be mmaped with the MAP_SHARED flag + * @path: the path in which to allocate the RAM. + * @errp: pointer to Error*, to store an error if it happens. + */ +void memory_region_init_ram_from_file(MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size, + bool share, + const char *path, + Error **errp); +#endif + +/** + * memory_region_init_ram_ptr: Initialize RAM memory region from a + * user-provided pointer. Accesses into the + * region will modify memory directly. + * + * @mr: the #MemoryRegion to be initialized. + * @owner: the object that tracks the region's reference count + * @name: the name of the region. + * @size: size of the region. + * @ptr: memory to be mapped; must contain at least @size bytes. + */ +void memory_region_init_ram_ptr(MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size, + void *ptr); + +/** + * memory_region_init_alias: Initialize a memory region that aliases all or a + * part of another memory region. + * + * @mr: the #MemoryRegion to be initialized. + * @owner: the object that tracks the region's reference count + * @name: used for debugging; not visible to the user or ABI + * @orig: the region to be referenced; @mr will be equivalent to + * @orig between @offset and @offset + @size - 1. + * @offset: start of the section in @orig to be referenced. + * @size: size of the region. + */ +void memory_region_init_alias(MemoryRegion *mr, + struct Object *owner, + const char *name, + MemoryRegion *orig, + hwaddr offset, + uint64_t size); + +/** + * memory_region_init_rom_device: Initialize a ROM memory region. Writes are + * handled via callbacks. + * + * If NULL callbacks pointer is given, then I/O space is not supposed to be + * handled by QEMU itself. Any access via the memory API will cause an abort(). + * + * @mr: the #MemoryRegion to be initialized. 
+ * @owner: the object that tracks the region's reference count + * @ops: callbacks for write access handling. + * @name: the name of the region. + * @size: size of the region. + * @errp: pointer to Error*, to store an error if it happens. + */ +void memory_region_init_rom_device(MemoryRegion *mr, + struct Object *owner, + const MemoryRegionOps *ops, + void *opaque, + const char *name, + uint64_t size, + Error **errp); + +/** + * memory_region_init_reservation: Initialize a memory region that reserves + * I/O space. + * + * A reservation region primariy serves debugging purposes. It claims I/O + * space that is not supposed to be handled by QEMU itself. Any access via + * the memory API will cause an abort(). + * This function is deprecated. Use memory_region_init_io() with NULL + * callbacks instead. + * + * @mr: the #MemoryRegion to be initialized + * @owner: the object that tracks the region's reference count + * @name: used for debugging; not visible to the user or ABI + * @size: size of the region. + */ +static inline void memory_region_init_reservation(MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size) +{ + memory_region_init_io(mr, owner, NULL, mr, name, size); +} + +/** + * memory_region_init_iommu: Initialize a memory region that translates + * addresses + * + * An IOMMU region translates addresses and forwards accesses to a target + * memory region. + * + * @mr: the #MemoryRegion to be initialized + * @owner: the object that tracks the region's reference count + * @ops: a function that translates addresses into the @target region + * @name: used for debugging; not visible to the user or ABI + * @size: size of the region. + */ +void memory_region_init_iommu(MemoryRegion *mr, + struct Object *owner, + const MemoryRegionIOMMUOps *ops, + const char *name, + uint64_t size); + +/** + * memory_region_owner: get a memory region's owner. + * + * @mr: the memory region being queried. + */ +struct Object *memory_region_owner(MemoryRegion *mr); + +/** + * memory_region_size: get a memory region's size. + * + * @mr: the memory region being queried. + */ +uint64_t memory_region_size(MemoryRegion *mr); + +/** + * memory_region_is_ram: check whether a memory region is random access + * + * Returns %true is a memory region is random access. + * + * @mr: the memory region being queried + */ +bool memory_region_is_ram(MemoryRegion *mr); + +/** + * memory_region_is_skip_dump: check whether a memory region should not be + * dumped + * + * Returns %true is a memory region should not be dumped(e.g. VFIO BAR MMAP). + * + * @mr: the memory region being queried + */ +bool memory_region_is_skip_dump(MemoryRegion *mr); + +/** + * memory_region_set_skip_dump: Set skip_dump flag, dump will ignore this memory + * region + * + * @mr: the memory region being queried + */ +void memory_region_set_skip_dump(MemoryRegion *mr); + +/** + * memory_region_is_romd: check whether a memory region is in ROMD mode + * + * Returns %true if a memory region is a ROM device and currently set to allow + * direct reads. + * + * @mr: the memory region being queried + */ +static inline bool memory_region_is_romd(MemoryRegion *mr) +{ + return mr->rom_device && mr->romd_mode; +} + +/** + * memory_region_is_iommu: check whether a memory region is an iommu + * + * Returns %true is a memory region is an iommu. + * + * @mr: the memory region being queried + */ +bool memory_region_is_iommu(MemoryRegion *mr); + +/** + * memory_region_notify_iommu: notify a change in an IOMMU translation entry. 
+ * + * @mr: the memory region that was changed + * @entry: the new entry in the IOMMU translation table. The entry + * replaces all old entries for the same virtual I/O address range. + * Deleted entries have .@perm == 0. + */ +void memory_region_notify_iommu(MemoryRegion *mr, + IOMMUTLBEntry entry); + +/** + * memory_region_register_iommu_notifier: register a notifier for changes to + * IOMMU translation entries. + * + * @mr: the memory region to observe + * @n: the notifier to be added; the notifier receives a pointer to an + * #IOMMUTLBEntry as the opaque value; the pointer ceases to be + * valid on exit from the notifier. + */ +void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n); + +/** + * memory_region_unregister_iommu_notifier: unregister a notifier for + * changes to IOMMU translation entries. + * + * @n: the notifier to be removed. + */ +void memory_region_unregister_iommu_notifier(Notifier *n); + +/** + * memory_region_name: get a memory region's name + * + * Returns the string that was used to initialize the memory region. + * + * @mr: the memory region being queried + */ +const char *memory_region_name(const MemoryRegion *mr); + +/** + * memory_region_is_logging: return whether a memory region is logging writes + * + * Returns %true if the memory region is logging writes for the given client + * + * @mr: the memory region being queried + * @client: the client being queried + */ +bool memory_region_is_logging(MemoryRegion *mr, uint8_t client); + +/** + * memory_region_get_dirty_log_mask: return the clients for which a + * memory region is logging writes. + * + * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants + * are the bit indices. + * + * @mr: the memory region being queried + */ +uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr); + +/** + * memory_region_is_rom: check whether a memory region is ROM + * + * Returns %true is a memory region is read-only memory. + * + * @mr: the memory region being queried + */ +bool memory_region_is_rom(MemoryRegion *mr); + +/** + * memory_region_get_fd: Get a file descriptor backing a RAM memory region. + * + * Returns a file descriptor backing a file-based RAM memory region, + * or -1 if the region is not a file-based RAM memory region. + * + * @mr: the RAM or alias memory region being queried. + */ +int memory_region_get_fd(MemoryRegion *mr); + +/** + * memory_region_get_ram_ptr: Get a pointer into a RAM memory region. + * + * Returns a host pointer to a RAM memory region (created with + * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with + * care. + * + * @mr: the memory region being queried. + */ +void *memory_region_get_ram_ptr(MemoryRegion *mr); + +/* memory_region_ram_resize: Resize a RAM region. + * + * Only legal before guest might have detected the memory size: e.g. on + * incoming migration, or right after reset. + * + * @mr: a memory region created with @memory_region_init_resizeable_ram. + * @newsize: the new size the region + * @errp: pointer to Error*, to store an error if it happens. + */ +void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, + Error **errp); + +/** + * memory_region_set_log: Turn dirty logging on or off for a region. + * + * Turns dirty logging on or off for a specified client (display, migration). + * Only meaningful for RAM regions. + * + * @mr: the memory region being updated. + * @log: whether dirty logging is to be enabled or disabled. + * @client: the user of the logging information; %DIRTY_MEMORY_VGA only. 
+ */ +void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client); + +/** + * memory_region_get_dirty: Check whether a range of bytes is dirty + * for a specified client. + * + * Checks whether a range of bytes has been written to since the last + * call to memory_region_reset_dirty() with the same @client. Dirty logging + * must be enabled. + * + * @mr: the memory region being queried. + * @addr: the address (relative to the start of the region) being queried. + * @size: the size of the range being queried. + * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or + * %DIRTY_MEMORY_VGA. + */ +bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr, + hwaddr size, unsigned client); + +/** + * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region. + * + * Marks a range of bytes as dirty, after it has been dirtied outside + * guest code. + * + * @mr: the memory region being dirtied. + * @addr: the address (relative to the start of the region) being dirtied. + * @size: size of the range being dirtied. + */ +void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, + hwaddr size); + +/** + * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty + * for a specified client. It clears them. + * + * Checks whether a range of bytes has been written to since the last + * call to memory_region_reset_dirty() with the same @client. Dirty logging + * must be enabled. + * + * @mr: the memory region being queried. + * @addr: the address (relative to the start of the region) being queried. + * @size: the size of the range being queried. + * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or + * %DIRTY_MEMORY_VGA. + */ +bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr, + hwaddr size, unsigned client); +/** + * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with + * any external TLBs (e.g. kvm) + * + * Flushes dirty information from accelerators such as kvm and vhost-net + * and makes it available to users of the memory API. + * + * @mr: the region being flushed. + */ +void memory_region_sync_dirty_bitmap(MemoryRegion *mr); + +/** + * memory_region_reset_dirty: Mark a range of pages as clean, for a specified + * client. + * + * Marks a range of pages as no longer dirty. + * + * @mr: the region being updated. + * @addr: the start of the subrange being cleaned. + * @size: the size of the subrange being cleaned. + * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or + * %DIRTY_MEMORY_VGA. + */ +void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, + hwaddr size, unsigned client); + +/** + * memory_region_set_readonly: Turn a memory region read-only (or read-write) + * + * Allows a memory region to be marked as read-only (turning it into a ROM). + * only useful on RAM regions. + * + * @mr: the region being updated. + * @readonly: whether rhe region is to be ROM or RAM. + */ +void memory_region_set_readonly(MemoryRegion *mr, bool readonly); + +/** + * memory_region_rom_device_set_romd: enable/disable ROMD mode + * + * Allows a ROM device (initialized with memory_region_init_rom_device() to + * set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the + * device is mapped to guest memory and satisfies read access directly. + * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function. + * Writes are always handled by the #MemoryRegion.write function. 
+ * + * @mr: the memory region to be updated + * @romd_mode: %true to put the region into ROMD mode + */ +void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode); + +/** + * memory_region_set_coalescing: Enable memory coalescing for the region. + * + * Enabled writes to a region to be queued for later processing. MMIO ->write + * callbacks may be delayed until a non-coalesced MMIO is issued. + * Only useful for IO regions. Roughly similar to write-combining hardware. + * + * @mr: the memory region to be write coalesced + */ +void memory_region_set_coalescing(MemoryRegion *mr); + +/** + * memory_region_add_coalescing: Enable memory coalescing for a sub-range of + * a region. + * + * Like memory_region_set_coalescing(), but works on a sub-range of a region. + * Multiple calls can be issued coalesced disjoint ranges. + * + * @mr: the memory region to be updated. + * @offset: the start of the range within the region to be coalesced. + * @size: the size of the subrange to be coalesced. + */ +void memory_region_add_coalescing(MemoryRegion *mr, + hwaddr offset, + uint64_t size); + +/** + * memory_region_clear_coalescing: Disable MMIO coalescing for the region. + * + * Disables any coalescing caused by memory_region_set_coalescing() or + * memory_region_add_coalescing(). Roughly equivalent to uncacheble memory + * hardware. + * + * @mr: the memory region to be updated. + */ +void memory_region_clear_coalescing(MemoryRegion *mr); + +/** + * memory_region_set_flush_coalesced: Enforce memory coalescing flush before + * accesses. + * + * Ensure that pending coalesced MMIO request are flushed before the memory + * region is accessed. This property is automatically enabled for all regions + * passed to memory_region_set_coalescing() and memory_region_add_coalescing(). + * + * @mr: the memory region to be updated. + */ +void memory_region_set_flush_coalesced(MemoryRegion *mr); + +/** + * memory_region_clear_flush_coalesced: Disable memory coalescing flush before + * accesses. + * + * Clear the automatic coalesced MMIO flushing enabled via + * memory_region_set_flush_coalesced. Note that this service has no effect on + * memory regions that have MMIO coalescing enabled for themselves. For them, + * automatic flushing will stop once coalescing is disabled. + * + * @mr: the memory region to be updated. + */ +void memory_region_clear_flush_coalesced(MemoryRegion *mr); + +/** + * memory_region_set_global_locking: Declares the access processing requires + * QEMU's global lock. + * + * When this is invoked, accesses to the memory region will be processed while + * holding the global lock of QEMU. This is the default behavior of memory + * regions. + * + * @mr: the memory region to be updated. + */ +void memory_region_set_global_locking(MemoryRegion *mr); + +/** + * memory_region_clear_global_locking: Declares that access processing does + * not depend on the QEMU global lock. + * + * By clearing this property, accesses to the memory region will be processed + * outside of QEMU's global lock (unless the lock is held on when issuing the + * access request). In this case, the device model implementing the access + * handlers is responsible for synchronization of concurrency. + * + * @mr: the memory region to be updated. + */ +void memory_region_clear_global_locking(MemoryRegion *mr); + +/** + * memory_region_add_eventfd: Request an eventfd to be triggered when a word + * is written to a location. 
+ * + * Marks a word in an IO region (initialized with memory_region_init_io()) + * as a trigger for an eventfd event. The I/O callback will not be called. + * The caller must be prepared to handle failure (that is, take the required + * action if the callback _is_ called). + * + * @mr: the memory region being updated. + * @addr: the address within @mr that is to be monitored + * @size: the size of the access to trigger the eventfd + * @match_data: whether to match against @data, instead of just @addr + * @data: the data to match against the guest write + * @fd: the eventfd to be triggered when @addr, @size, and @data all match. + **/ +void memory_region_add_eventfd(MemoryRegion *mr, + hwaddr addr, + unsigned size, + bool match_data, + uint64_t data, + EventNotifier *e); + +/** + * memory_region_del_eventfd: Cancel an eventfd. + * + * Cancels an eventfd trigger requested by a previous + * memory_region_add_eventfd() call. + * + * @mr: the memory region being updated. + * @addr: the address within @mr that is to be monitored + * @size: the size of the access to trigger the eventfd + * @match_data: whether to match against @data, instead of just @addr + * @data: the data to match against the guest write + * @fd: the eventfd to be triggered when @addr, @size, and @data all match. + */ +void memory_region_del_eventfd(MemoryRegion *mr, + hwaddr addr, + unsigned size, + bool match_data, + uint64_t data, + EventNotifier *e); + +/** + * memory_region_add_subregion: Add a subregion to a container. + * + * Adds a subregion at @offset. The subregion may not overlap with other + * subregions (except for those explicitly marked as overlapping). A region + * may only be added once as a subregion (unless removed with + * memory_region_del_subregion()); use memory_region_init_alias() if you + * want a region to be a subregion in multiple locations. + * + * @mr: the region to contain the new subregion; must be a container + * initialized with memory_region_init(). + * @offset: the offset relative to @mr where @subregion is added. + * @subregion: the subregion to be added. + */ +void memory_region_add_subregion(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion); +/** + * memory_region_add_subregion_overlap: Add a subregion to a container + * with overlap. + * + * Adds a subregion at @offset. The subregion may overlap with other + * subregions. Conflicts are resolved by having a higher @priority hide a + * lower @priority. Subregions without priority are taken as @priority 0. + * A region may only be added once as a subregion (unless removed with + * memory_region_del_subregion()); use memory_region_init_alias() if you + * want a region to be a subregion in multiple locations. + * + * @mr: the region to contain the new subregion; must be a container + * initialized with memory_region_init(). + * @offset: the offset relative to @mr where @subregion is added. + * @subregion: the subregion to be added. + * @priority: used for resolving overlaps; highest priority wins. + */ +void memory_region_add_subregion_overlap(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion, + int priority); + +/** + * memory_region_get_ram_addr: Get the ram address associated with a memory + * region + * + * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen + * code is being reworked. + */ +ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr); + +uint64_t memory_region_get_alignment(const MemoryRegion *mr); +/** + * memory_region_del_subregion: Remove a subregion. 
+ * + * Removes a subregion from its container. + * + * @mr: the container to be updated. + * @subregion: the region being removed; must be a current subregion of @mr. + */ +void memory_region_del_subregion(MemoryRegion *mr, + MemoryRegion *subregion); + +/* + * memory_region_set_enabled: dynamically enable or disable a region + * + * Enables or disables a memory region. A disabled memory region + * ignores all accesses to itself and its subregions. It does not + * obscure sibling subregions with lower priority - it simply behaves as + * if it was removed from the hierarchy. + * + * Regions default to being enabled. + * + * @mr: the region to be updated + * @enabled: whether to enable or disable the region + */ +void memory_region_set_enabled(MemoryRegion *mr, bool enabled); + +/* + * memory_region_set_address: dynamically update the address of a region + * + * Dynamically updates the address of a region, relative to its container. + * May be used on regions are currently part of a memory hierarchy. + * + * @mr: the region to be updated + * @addr: new address, relative to container region + */ +void memory_region_set_address(MemoryRegion *mr, hwaddr addr); + +/* + * memory_region_set_size: dynamically update the size of a region. + * + * Dynamically updates the size of a region. + * + * @mr: the region to be updated + * @size: used size of the region. + */ +void memory_region_set_size(MemoryRegion *mr, uint64_t size); + +/* + * memory_region_set_alias_offset: dynamically update a memory alias's offset + * + * Dynamically updates the offset into the target region that an alias points + * to, as if the fourth argument to memory_region_init_alias() has changed. + * + * @mr: the #MemoryRegion to be updated; should be an alias. + * @offset: the new offset into the target memory region + */ +void memory_region_set_alias_offset(MemoryRegion *mr, + hwaddr offset); + +/** + * memory_region_present: checks if an address relative to a @container + * translates into #MemoryRegion within @container + * + * Answer whether a #MemoryRegion within @container covers the address + * @addr. + * + * @container: a #MemoryRegion within which @addr is a relative address + * @addr: the area within @container to be searched + */ +bool memory_region_present(MemoryRegion *container, hwaddr addr); + +/** + * memory_region_is_mapped: returns true if #MemoryRegion is mapped + * into any address space. + * + * @mr: a #MemoryRegion which should be checked if it's mapped + */ +bool memory_region_is_mapped(MemoryRegion *mr); + +/** + * memory_region_find: translate an address/size relative to a + * MemoryRegion into a #MemoryRegionSection. + * + * Locates the first #MemoryRegion within @mr that overlaps the range + * given by @addr and @size. + * + * Returns a #MemoryRegionSection that describes a contiguous overlap. + * It will have the following characteristics: + * .@size = 0 iff no overlap was found + * .@mr is non-%NULL iff an overlap was found + * + * Remember that in the return value the @offset_within_region is + * relative to the returned region (in the .@mr field), not to the + * @mr argument. + * + * Similarly, the .@offset_within_address_space is relative to the + * address space that contains both regions, the passed and the + * returned one. 
However, in the special case where the @mr argument + * has no container (and thus is the root of the address space), the + * following will hold: + * .@offset_within_address_space >= @addr + * .@offset_within_address_space + .@size <= @addr + @size + * + * @mr: a MemoryRegion within which @addr is a relative address + * @addr: start of the area within @as to be searched + * @size: size of the area to be searched + */ +MemoryRegionSection memory_region_find(MemoryRegion *mr, + hwaddr addr, uint64_t size); + +/** + * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory + * + * Synchronizes the dirty page log for an entire address space. + * @as: the address space that contains the memory being synchronized + */ +void address_space_sync_dirty_bitmap(AddressSpace *as); + +/** + * memory_region_transaction_begin: Start a transaction. + * + * During a transaction, changes will be accumulated and made visible + * only when the transaction ends (is committed). + */ +void memory_region_transaction_begin(void); + +/** + * memory_region_transaction_commit: Commit a transaction and make changes + * visible to the guest. + */ +void memory_region_transaction_commit(void); + +/** + * memory_listener_register: register callbacks to be called when memory + * sections are mapped or unmapped into an address + * space + * + * @listener: an object containing the callbacks to be called + * @filter: if non-%NULL, only regions in this address space will be observed + */ +void memory_listener_register(MemoryListener *listener, AddressSpace *filter); + +/** + * memory_listener_unregister: undo the effect of memory_listener_register() + * + * @listener: an object containing the callbacks to be removed + */ +void memory_listener_unregister(MemoryListener *listener); + +/** + * memory_global_dirty_log_start: begin dirty logging for all regions + */ +void memory_global_dirty_log_start(void); + +/** + * memory_global_dirty_log_stop: end dirty logging for all regions + */ +void memory_global_dirty_log_stop(void); + +void mtree_info(fprintf_function mon_printf, void *f); + +/** + * memory_region_dispatch_read: perform a read directly to the specified + * MemoryRegion. + * + * @mr: #MemoryRegion to access + * @addr: address within that region + * @pval: pointer to uint64_t which the data is written to + * @size: size of the access in bytes + * @attrs: memory transaction attributes to use for the access + */ +MemTxResult memory_region_dispatch_read(MemoryRegion *mr, + hwaddr addr, + uint64_t *pval, + unsigned size, + MemTxAttrs attrs); +/** + * memory_region_dispatch_write: perform a write directly to the specified + * MemoryRegion. + * + * @mr: #MemoryRegion to access + * @addr: address within that region + * @data: data to write + * @size: size of the access in bytes + * @attrs: memory transaction attributes to use for the access + */ +MemTxResult memory_region_dispatch_write(MemoryRegion *mr, + hwaddr addr, + uint64_t data, + unsigned size, + MemTxAttrs attrs); + +/** + * address_space_init: initializes an address space + * + * @as: an uninitialized #AddressSpace + * @root: a #MemoryRegion that routes addresses for the address space + * @name: an address space name. The name is only used for debugging + * output. + */ +void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name); + + +/** + * address_space_destroy: destroy an address space + * + * Releases all resources associated with an address space. 
After an address space + * is destroyed, its root memory region (given by address_space_init()) may be destroyed + * as well. + * + * @as: address space to be destroyed + */ +void address_space_destroy(AddressSpace *as); + +/** + * address_space_rw: read from or write to an address space. + * + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @attrs: memory transaction attributes + * @buf: buffer with the data transferred + * @is_write: indicates the transfer direction + */ +MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, uint8_t *buf, + int len, bool is_write); + +/** + * address_space_write: write to address space. + * + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @attrs: memory transaction attributes + * @buf: buffer with the data transferred + */ +MemTxResult address_space_write(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, + const uint8_t *buf, int len); + +/** + * address_space_read: read from an address space. + * + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @attrs: memory transaction attributes + * @buf: buffer with the data transferred + */ +MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, + uint8_t *buf, int len); + +/** + * address_space_ld*: load from an address space + * address_space_st*: store to an address space + * + * These functions perform a load or store of the byte, word, + * longword or quad to the specified address within the AddressSpace. + * The _le suffixed functions treat the data as little endian; + * _be indicates big endian; no suffix indicates "same endianness + * as guest CPU". + * + * The "guest CPU endianness" accessors are deprecated for use outside + * target-* code; devices should be CPU-agnostic and use either the LE + * or the BE accessors. 
+ * + * @as #AddressSpace to be accessed + * @addr: address within that address space + * @val: data value, for stores + * @attrs: memory transaction attributes + * @result: location to write the success/failure of the transaction; + * if NULL, this information is discarded + */ +uint32_t address_space_ldub(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val, + MemTxAttrs attrs, MemTxResult *result); + +#ifdef NEED_CPU_H +uint32_t address_space_lduw(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint32_t address_space_ldl(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +uint64_t address_space_ldq(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val, + MemTxAttrs attrs, MemTxResult *result); +void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val, + MemTxAttrs attrs, MemTxResult *result); +#endif + +/* address_space_translate: translate an address range into an address space + * into a MemoryRegion and an address range into that section. Should be + * called from an RCU critical section, to avoid that the last reference + * to the returned region disappears after address_space_translate returns. + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @xlat: pointer to address within the returned memory region section's + * #MemoryRegion. + * @len: pointer to length + * @is_write: indicates the transfer direction + */ +MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, + hwaddr *xlat, hwaddr *len, + bool is_write); + +/* address_space_access_valid: check for validity of accessing an address + * space range + * + * Check whether memory is assigned to the given address space range, and + * access is permitted by any IOMMU regions that are active for the address + * space. 
+ * + * For now, addr and len should be aligned to a page size. This limitation + * will be lifted in the future. + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @len: length of the area to be checked + * @is_write: indicates the transfer direction + */ +bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write); + +/* address_space_map: map a physical memory region into a host virtual address + * + * May map a subset of the requested range, given by and returned in @plen. + * May return %NULL if resources needed to perform the mapping are exhausted. + * Use only for reads OR writes - not for read-modify-write operations. + * Use cpu_register_map_client() to know when retrying the map operation is + * likely to succeed. + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @plen: pointer to length of buffer; updated on return + * @is_write: indicates the transfer direction + */ +void *address_space_map(AddressSpace *as, hwaddr addr, + hwaddr *plen, bool is_write); + +/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map() + * + * Will also mark the memory as dirty if @is_write == %true. @access_len gives + * the amount of memory that was actually read or written by the caller. + * + * @as: #AddressSpace used + * @addr: address within that address space + * @len: buffer length as returned by address_space_map() + * @access_len: amount of data actually transferred + * @is_write: indicates the transfer direction + */ +void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, + int is_write, hwaddr access_len); + + +#endif + +#endif diff --git a/slirp/simh/qemu/exec/poison.h b/slirp/simh/qemu/exec/poison.h new file mode 100644 index 00000000..a4b1eca2 --- /dev/null +++ b/slirp/simh/qemu/exec/poison.h @@ -0,0 +1,62 @@ +/* Poison identifiers that should not be used when building + target independent device code. 
*/ + +#ifndef HW_POISON_H +#define HW_POISON_H +#ifdef __GNUC__ + +#pragma GCC poison TARGET_I386 +#pragma GCC poison TARGET_X86_64 +#pragma GCC poison TARGET_ALPHA +#pragma GCC poison TARGET_ARM +#pragma GCC poison TARGET_CRIS +#pragma GCC poison TARGET_LM32 +#pragma GCC poison TARGET_M68K +#pragma GCC poison TARGET_MIPS +#pragma GCC poison TARGET_MIPS64 +#pragma GCC poison TARGET_OPENRISC +#pragma GCC poison TARGET_PPC +#pragma GCC poison TARGET_PPCEMB +#pragma GCC poison TARGET_PPC64 +#pragma GCC poison TARGET_ABI32 +#pragma GCC poison TARGET_SH4 +#pragma GCC poison TARGET_SPARC +#pragma GCC poison TARGET_SPARC64 + +#pragma GCC poison TARGET_WORDS_BIGENDIAN +#pragma GCC poison BSWAP_NEEDED + +#pragma GCC poison TARGET_LONG_BITS +#pragma GCC poison TARGET_FMT_lx +#pragma GCC poison TARGET_FMT_ld + +#pragma GCC poison TARGET_PAGE_SIZE +#pragma GCC poison TARGET_PAGE_MASK +#pragma GCC poison TARGET_PAGE_BITS +#pragma GCC poison TARGET_PAGE_ALIGN + +#pragma GCC poison CPUArchState + +#pragma GCC poison lduw_phys +#pragma GCC poison ldl_phys +#pragma GCC poison ldq_phys +#pragma GCC poison stl_phys_notdirty +#pragma GCC poison stw_phys +#pragma GCC poison stl_phys +#pragma GCC poison stq_phys + +#pragma GCC poison CPU_INTERRUPT_HARD +#pragma GCC poison CPU_INTERRUPT_EXITTB +#pragma GCC poison CPU_INTERRUPT_HALT +#pragma GCC poison CPU_INTERRUPT_DEBUG +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_0 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_1 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_2 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_3 +#pragma GCC poison CPU_INTERRUPT_TGT_EXT_4 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_0 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_1 +#pragma GCC poison CPU_INTERRUPT_TGT_INT_2 + +#endif +#endif diff --git a/slirp/simh/qemu/fprintf-fn.h b/slirp/simh/qemu/fprintf-fn.h new file mode 100644 index 00000000..9ddc90f1 --- /dev/null +++ b/slirp/simh/qemu/fprintf-fn.h @@ -0,0 +1,17 @@ +/* + * Typedef for fprintf-alike function pointers. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_FPRINTF_FN_H +#define QEMU_FPRINTF_FN_H 1 + +#include "qemu/compiler.h" +#include + +typedef int (*fprintf_function)(FILE *f, const char *fmt, ...) + GCC_FMT_ATTR(2, 3); + +#endif diff --git a/slirp/simh/qemu/fpu/softfloat.h b/slirp/simh/qemu/fpu/softfloat.h new file mode 100644 index 00000000..ded34eb0 --- /dev/null +++ b/slirp/simh/qemu/fpu/softfloat.h @@ -0,0 +1,762 @@ +/* + * QEMU float support + * + * The code in this source file is derived from release 2a of the SoftFloat + * IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and + * some later contributions) are provided under that license, as detailed below. + * It has subsequently been modified by contributors to the QEMU Project, + * so some portions are provided under: + * the SoftFloat-2a license + * the BSD license + * GPL-v2-or-later + * + * Any future contributions to this file after December 1st 2014 will be + * taken to be licensed under the Softfloat-2a license unless specifically + * indicated otherwise. + */ + +/* +=============================================================================== +This C header file is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2a. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. 
Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these four paragraphs for those parts of +this code that are retained. + +=============================================================================== +*/ + +/* BSD licensing: + * Copyright (c) 2006, Fabrice Bellard + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its contributors + * may be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Portions of this work are licensed under the terms of the GNU GPL, + * version 2 or later. See the COPYING file in the top-level directory. + */ + +#ifndef SOFTFLOAT_H +#define SOFTFLOAT_H + +#if defined(CONFIG_SOLARIS) && defined(CONFIG_NEEDS_LIBSUNMATH) +#include +#endif + +#include +#include "config-host.h" +#include "qemu/osdep.h" + +/*---------------------------------------------------------------------------- +| Each of the following `typedef's defines the most convenient type that holds +| integers of at least as many bits as specified. For example, `uint8' should +| be the most convenient type that can hold unsigned integers of as many as +| 8 bits. The `flag' type must be able to hold either a 0 or 1. 
For most +| implementations of C, `flag', `uint8', and `int8' should all be `typedef'ed +| to the same as `int'. +*----------------------------------------------------------------------------*/ +typedef uint8_t flag; +typedef uint8_t uint8; +typedef int8_t int8; +typedef unsigned int uint32; +typedef signed int int32; +typedef uint64_t uint64; +typedef int64_t int64; + +#define LIT64( a ) a##LL + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point ordering relations +*----------------------------------------------------------------------------*/ +enum { + float_relation_less = -1, + float_relation_equal = 0, + float_relation_greater = 1, + float_relation_unordered = 2 +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point types. +*----------------------------------------------------------------------------*/ +/* Use structures for soft-float types. This prevents accidentally mixing + them with native int/float types. A sufficiently clever compiler and + sane ABI should be able to see though these structs. However + x86/gcc 3.x seems to struggle a bit, so leave them disabled by default. */ +//#define USE_SOFTFLOAT_STRUCT_TYPES +#ifdef USE_SOFTFLOAT_STRUCT_TYPES +typedef struct { + uint16_t v; +} float16; +#define float16_val(x) (((float16)(x)).v) +#define make_float16(x) __extension__ ({ float16 f16_val = {x}; f16_val; }) +#define const_float16(x) { x } +typedef struct { + uint32_t v; +} float32; +/* The cast ensures an error if the wrong type is passed. */ +#define float32_val(x) (((float32)(x)).v) +#define make_float32(x) __extension__ ({ float32 f32_val = {x}; f32_val; }) +#define const_float32(x) { x } +typedef struct { + uint64_t v; +} float64; +#define float64_val(x) (((float64)(x)).v) +#define make_float64(x) __extension__ ({ float64 f64_val = {x}; f64_val; }) +#define const_float64(x) { x } +#else +typedef uint16_t float16; +typedef uint32_t float32; +typedef uint64_t float64; +#define float16_val(x) (x) +#define float32_val(x) (x) +#define float64_val(x) (x) +#define make_float16(x) (x) +#define make_float32(x) (x) +#define make_float64(x) (x) +#define const_float16(x) (x) +#define const_float32(x) (x) +#define const_float64(x) (x) +#endif +typedef struct { + uint64_t low; + uint16_t high; +} floatx80; +#define make_floatx80(exp, mant) ((floatx80) { mant, exp }) +#define make_floatx80_init(exp, mant) { .low = mant, .high = exp } +typedef struct { +#ifdef HOST_WORDS_BIGENDIAN + uint64_t high, low; +#else + uint64_t low, high; +#endif +} float128; +#define make_float128(high_, low_) ((float128) { .high = high_, .low = low_ }) +#define make_float128_init(high_, low_) { .high = high_, .low = low_ } + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point underflow tininess-detection mode. +*----------------------------------------------------------------------------*/ +enum { + float_tininess_after_rounding = 0, + float_tininess_before_rounding = 1 +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point rounding mode. 
+*----------------------------------------------------------------------------*/ +enum { + float_round_nearest_even = 0, + float_round_down = 1, + float_round_up = 2, + float_round_to_zero = 3, + float_round_ties_away = 4, +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point exception flags. +*----------------------------------------------------------------------------*/ +enum { + float_flag_invalid = 1, + float_flag_divbyzero = 4, + float_flag_overflow = 8, + float_flag_underflow = 16, + float_flag_inexact = 32, + float_flag_input_denormal = 64, + float_flag_output_denormal = 128 +}; + +typedef struct float_status { + signed char float_detect_tininess; + signed char float_rounding_mode; + signed char float_exception_flags; + signed char floatx80_rounding_precision; + /* should denormalised results go to zero and set the inexact flag? */ + flag flush_to_zero; + /* should denormalised inputs go to zero and set the input_denormal flag? */ + flag flush_inputs_to_zero; + flag default_nan_mode; +} float_status; + +static inline void set_float_detect_tininess(int val, float_status *status) +{ + status->float_detect_tininess = val; +} +static inline void set_float_rounding_mode(int val, float_status *status) +{ + status->float_rounding_mode = val; +} +static inline void set_float_exception_flags(int val, float_status *status) +{ + status->float_exception_flags = val; +} +static inline void set_floatx80_rounding_precision(int val, + float_status *status) +{ + status->floatx80_rounding_precision = val; +} +static inline void set_flush_to_zero(flag val, float_status *status) +{ + status->flush_to_zero = val; +} +static inline void set_flush_inputs_to_zero(flag val, float_status *status) +{ + status->flush_inputs_to_zero = val; +} +static inline void set_default_nan_mode(flag val, float_status *status) +{ + status->default_nan_mode = val; +} +static inline int get_float_detect_tininess(float_status *status) +{ + return status->float_detect_tininess; +} +static inline int get_float_rounding_mode(float_status *status) +{ + return status->float_rounding_mode; +} +static inline int get_float_exception_flags(float_status *status) +{ + return status->float_exception_flags; +} +static inline int get_floatx80_rounding_precision(float_status *status) +{ + return status->floatx80_rounding_precision; +} +static inline flag get_flush_to_zero(float_status *status) +{ + return status->flush_to_zero; +} +static inline flag get_flush_inputs_to_zero(float_status *status) +{ + return status->flush_inputs_to_zero; +} +static inline flag get_default_nan_mode(float_status *status) +{ + return status->default_nan_mode; +} + +/*---------------------------------------------------------------------------- +| Routine to raise any or all of the software IEC/IEEE floating-point +| exception flags. +*----------------------------------------------------------------------------*/ +void float_raise(int8 flags, float_status *status); + +/*---------------------------------------------------------------------------- +| If `a' is denormal and we are in flush-to-zero mode then set the +| input-denormal exception and return zero. Otherwise just return the value. 
+*----------------------------------------------------------------------------*/ +float32 float32_squash_input_denormal(float32 a, float_status *status); +float64 float64_squash_input_denormal(float64 a, float_status *status); + +/*---------------------------------------------------------------------------- +| Options to indicate which negations to perform in float*_muladd() +| Using these differs from negating an input or output before calling +| the muladd function in that this means that a NaN doesn't have its +| sign bit inverted before it is propagated. +| We also support halving the result before rounding, as a special +| case to support the ARM fused-sqrt-step instruction FRSQRTS. +*----------------------------------------------------------------------------*/ +enum { + float_muladd_negate_c = 1, + float_muladd_negate_product = 2, + float_muladd_negate_result = 4, + float_muladd_halve_result = 8, +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE integer-to-floating-point conversion routines. +*----------------------------------------------------------------------------*/ +float32 int32_to_float32(int32_t, float_status *status); +float64 int32_to_float64(int32_t, float_status *status); +float32 uint32_to_float32(uint32_t, float_status *status); +float64 uint32_to_float64(uint32_t, float_status *status); +floatx80 int32_to_floatx80(int32_t, float_status *status); +float128 int32_to_float128(int32_t, float_status *status); +float32 int64_to_float32(int64_t, float_status *status); +float64 int64_to_float64(int64_t, float_status *status); +floatx80 int64_to_floatx80(int64_t, float_status *status); +float128 int64_to_float128(int64_t, float_status *status); +float32 uint64_to_float32(uint64_t, float_status *status); +float64 uint64_to_float64(uint64_t, float_status *status); +float128 uint64_to_float128(uint64_t, float_status *status); + +/* We provide the int16 versions for symmetry of API with float-to-int */ +static inline float32 int16_to_float32(int16_t v, float_status *status) +{ + return int32_to_float32(v, status); +} + +static inline float32 uint16_to_float32(uint16_t v, float_status *status) +{ + return uint32_to_float32(v, status); +} + +static inline float64 int16_to_float64(int16_t v, float_status *status) +{ + return int32_to_float64(v, status); +} + +static inline float64 uint16_to_float64(uint16_t v, float_status *status) +{ + return uint32_to_float64(v, status); +} + +/*---------------------------------------------------------------------------- +| Software half-precision conversion routines. +*----------------------------------------------------------------------------*/ +float16 float32_to_float16(float32, flag, float_status *status); +float32 float16_to_float32(float16, flag, float_status *status); +float16 float64_to_float16(float64 a, flag ieee, float_status *status); +float64 float16_to_float64(float16 a, flag ieee, float_status *status); + +/*---------------------------------------------------------------------------- +| Software half-precision operations. +*----------------------------------------------------------------------------*/ +int float16_is_quiet_nan( float16 ); +int float16_is_signaling_nan( float16 ); +float16 float16_maybe_silence_nan( float16 ); + +static inline int float16_is_any_nan(float16 a) +{ + return ((float16_val(a) & ~0x8000) > 0x7c00); +} + +/*---------------------------------------------------------------------------- +| The pattern for a default generated half-precision NaN. 
+*----------------------------------------------------------------------------*/ +extern const float16 float16_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE single-precision conversion routines. +*----------------------------------------------------------------------------*/ +int_fast16_t float32_to_int16(float32, float_status *status); +uint_fast16_t float32_to_uint16(float32, float_status *status); +int_fast16_t float32_to_int16_round_to_zero(float32, float_status *status); +uint_fast16_t float32_to_uint16_round_to_zero(float32, float_status *status); +int32 float32_to_int32(float32, float_status *status); +int32 float32_to_int32_round_to_zero(float32, float_status *status); +uint32 float32_to_uint32(float32, float_status *status); +uint32 float32_to_uint32_round_to_zero(float32, float_status *status); +int64 float32_to_int64(float32, float_status *status); +uint64 float32_to_uint64(float32, float_status *status); +uint64 float32_to_uint64_round_to_zero(float32, float_status *status); +int64 float32_to_int64_round_to_zero(float32, float_status *status); +float64 float32_to_float64(float32, float_status *status); +floatx80 float32_to_floatx80(float32, float_status *status); +float128 float32_to_float128(float32, float_status *status); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE single-precision operations. +*----------------------------------------------------------------------------*/ +float32 float32_round_to_int(float32, float_status *status); +float32 float32_add(float32, float32, float_status *status); +float32 float32_sub(float32, float32, float_status *status); +float32 float32_mul(float32, float32, float_status *status); +float32 float32_div(float32, float32, float_status *status); +float32 float32_rem(float32, float32, float_status *status); +float32 float32_muladd(float32, float32, float32, int, float_status *status); +float32 float32_sqrt(float32, float_status *status); +float32 float32_exp2(float32, float_status *status); +float32 float32_log2(float32, float_status *status); +int float32_eq(float32, float32, float_status *status); +int float32_le(float32, float32, float_status *status); +int float32_lt(float32, float32, float_status *status); +int float32_unordered(float32, float32, float_status *status); +int float32_eq_quiet(float32, float32, float_status *status); +int float32_le_quiet(float32, float32, float_status *status); +int float32_lt_quiet(float32, float32, float_status *status); +int float32_unordered_quiet(float32, float32, float_status *status); +int float32_compare(float32, float32, float_status *status); +int float32_compare_quiet(float32, float32, float_status *status); +float32 float32_min(float32, float32, float_status *status); +float32 float32_max(float32, float32, float_status *status); +float32 float32_minnum(float32, float32, float_status *status); +float32 float32_maxnum(float32, float32, float_status *status); +float32 float32_minnummag(float32, float32, float_status *status); +float32 float32_maxnummag(float32, float32, float_status *status); +int float32_is_quiet_nan( float32 ); +int float32_is_signaling_nan( float32 ); +float32 float32_maybe_silence_nan( float32 ); +float32 float32_scalbn(float32, int, float_status *status); + +static inline float32 float32_abs(float32 a) +{ + /* Note that abs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. 
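+ * (Equivalently, it simply clears the sign bit, bit 31, of the raw encoding.)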
+ */ + return make_float32(float32_val(a) & 0x7fffffff); +} + +static inline float32 float32_chs(float32 a) +{ + /* Note that chs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. + */ + return make_float32(float32_val(a) ^ 0x80000000); +} + +static inline int float32_is_infinity(float32 a) +{ + return (float32_val(a) & 0x7fffffff) == 0x7f800000; +} + +static inline int float32_is_neg(float32 a) +{ + return float32_val(a) >> 31; +} + +static inline int float32_is_zero(float32 a) +{ + return (float32_val(a) & 0x7fffffff) == 0; +} + +static inline int float32_is_any_nan(float32 a) +{ + return ((float32_val(a) & ~(1 << 31)) > 0x7f800000UL); +} + +static inline int float32_is_zero_or_denormal(float32 a) +{ + return (float32_val(a) & 0x7f800000) == 0; +} + +static inline float32 float32_set_sign(float32 a, int sign) +{ + return make_float32((float32_val(a) & 0x7fffffff) | (sign << 31)); +} + +#define float32_zero make_float32(0) +#define float32_one make_float32(0x3f800000) +#define float32_ln2 make_float32(0x3f317218) +#define float32_pi make_float32(0x40490fdb) +#define float32_half make_float32(0x3f000000) +#define float32_infinity make_float32(0x7f800000) + + +/*---------------------------------------------------------------------------- +| The pattern for a default generated single-precision NaN. +*----------------------------------------------------------------------------*/ +extern const float32 float32_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE double-precision conversion routines. +*----------------------------------------------------------------------------*/ +int_fast16_t float64_to_int16(float64, float_status *status); +uint_fast16_t float64_to_uint16(float64, float_status *status); +int_fast16_t float64_to_int16_round_to_zero(float64, float_status *status); +uint_fast16_t float64_to_uint16_round_to_zero(float64, float_status *status); +int32 float64_to_int32(float64, float_status *status); +int32 float64_to_int32_round_to_zero(float64, float_status *status); +uint32 float64_to_uint32(float64, float_status *status); +uint32 float64_to_uint32_round_to_zero(float64, float_status *status); +int64 float64_to_int64(float64, float_status *status); +int64 float64_to_int64_round_to_zero(float64, float_status *status); +uint64 float64_to_uint64(float64 a, float_status *status); +uint64 float64_to_uint64_round_to_zero(float64 a, float_status *status); +float32 float64_to_float32(float64, float_status *status); +floatx80 float64_to_floatx80(float64, float_status *status); +float128 float64_to_float128(float64, float_status *status); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE double-precision operations. 
+*----------------------------------------------------------------------------*/ +float64 float64_round_to_int(float64, float_status *status); +float64 float64_trunc_to_int(float64, float_status *status); +float64 float64_add(float64, float64, float_status *status); +float64 float64_sub(float64, float64, float_status *status); +float64 float64_mul(float64, float64, float_status *status); +float64 float64_div(float64, float64, float_status *status); +float64 float64_rem(float64, float64, float_status *status); +float64 float64_muladd(float64, float64, float64, int, float_status *status); +float64 float64_sqrt(float64, float_status *status); +float64 float64_log2(float64, float_status *status); +int float64_eq(float64, float64, float_status *status); +int float64_le(float64, float64, float_status *status); +int float64_lt(float64, float64, float_status *status); +int float64_unordered(float64, float64, float_status *status); +int float64_eq_quiet(float64, float64, float_status *status); +int float64_le_quiet(float64, float64, float_status *status); +int float64_lt_quiet(float64, float64, float_status *status); +int float64_unordered_quiet(float64, float64, float_status *status); +int float64_compare(float64, float64, float_status *status); +int float64_compare_quiet(float64, float64, float_status *status); +float64 float64_min(float64, float64, float_status *status); +float64 float64_max(float64, float64, float_status *status); +float64 float64_minnum(float64, float64, float_status *status); +float64 float64_maxnum(float64, float64, float_status *status); +float64 float64_minnummag(float64, float64, float_status *status); +float64 float64_maxnummag(float64, float64, float_status *status); +int float64_is_quiet_nan( float64 a ); +int float64_is_signaling_nan( float64 ); +float64 float64_maybe_silence_nan( float64 ); +float64 float64_scalbn(float64, int, float_status *status); + +static inline float64 float64_abs(float64 a) +{ + /* Note that abs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. + */ + return make_float64(float64_val(a) & 0x7fffffffffffffffLL); +} + +static inline float64 float64_chs(float64 a) +{ + /* Note that chs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. + */ + return make_float64(float64_val(a) ^ 0x8000000000000000LL); +} + +static inline int float64_is_infinity(float64 a) +{ + return (float64_val(a) & 0x7fffffffffffffffLL ) == 0x7ff0000000000000LL; +} + +static inline int float64_is_neg(float64 a) +{ + return float64_val(a) >> 63; +} + +static inline int float64_is_zero(float64 a) +{ + return (float64_val(a) & 0x7fffffffffffffffLL) == 0; +} + +static inline int float64_is_any_nan(float64 a) +{ + return ((float64_val(a) & ~(1ULL << 63)) > 0x7ff0000000000000ULL); +} + +static inline int float64_is_zero_or_denormal(float64 a) +{ + return (float64_val(a) & 0x7ff0000000000000LL) == 0; +} + +static inline float64 float64_set_sign(float64 a, int sign) +{ + return make_float64((float64_val(a) & 0x7fffffffffffffffULL) + | ((int64_t)sign << 63)); +} + +#define float64_zero make_float64(0) +#define float64_one make_float64(0x3ff0000000000000LL) +#define float64_ln2 make_float64(0x3fe62e42fefa39efLL) +#define float64_pi make_float64(0x400921fb54442d18LL) +#define float64_half make_float64(0x3fe0000000000000LL) +#define float64_infinity make_float64(0x7ff0000000000000LL) + +/*---------------------------------------------------------------------------- +| The pattern for a default generated double-precision NaN. 
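+| (On most targets this is the quiet NaN 0x7FF8000000000000, but the exact
+| pattern is target-dependent, so treat that value as illustrative only.)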
+*----------------------------------------------------------------------------*/ +extern const float64 float64_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE extended double-precision conversion routines. +*----------------------------------------------------------------------------*/ +int32 floatx80_to_int32(floatx80, float_status *status); +int32 floatx80_to_int32_round_to_zero(floatx80, float_status *status); +int64 floatx80_to_int64(floatx80, float_status *status); +int64 floatx80_to_int64_round_to_zero(floatx80, float_status *status); +float32 floatx80_to_float32(floatx80, float_status *status); +float64 floatx80_to_float64(floatx80, float_status *status); +float128 floatx80_to_float128(floatx80, float_status *status); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE extended double-precision operations. +*----------------------------------------------------------------------------*/ +floatx80 floatx80_round_to_int(floatx80, float_status *status); +floatx80 floatx80_add(floatx80, floatx80, float_status *status); +floatx80 floatx80_sub(floatx80, floatx80, float_status *status); +floatx80 floatx80_mul(floatx80, floatx80, float_status *status); +floatx80 floatx80_div(floatx80, floatx80, float_status *status); +floatx80 floatx80_rem(floatx80, floatx80, float_status *status); +floatx80 floatx80_sqrt(floatx80, float_status *status); +int floatx80_eq(floatx80, floatx80, float_status *status); +int floatx80_le(floatx80, floatx80, float_status *status); +int floatx80_lt(floatx80, floatx80, float_status *status); +int floatx80_unordered(floatx80, floatx80, float_status *status); +int floatx80_eq_quiet(floatx80, floatx80, float_status *status); +int floatx80_le_quiet(floatx80, floatx80, float_status *status); +int floatx80_lt_quiet(floatx80, floatx80, float_status *status); +int floatx80_unordered_quiet(floatx80, floatx80, float_status *status); +int floatx80_compare(floatx80, floatx80, float_status *status); +int floatx80_compare_quiet(floatx80, floatx80, float_status *status); +int floatx80_is_quiet_nan( floatx80 ); +int floatx80_is_signaling_nan( floatx80 ); +floatx80 floatx80_maybe_silence_nan( floatx80 ); +floatx80 floatx80_scalbn(floatx80, int, float_status *status); + +static inline floatx80 floatx80_abs(floatx80 a) +{ + a.high &= 0x7fff; + return a; +} + +static inline floatx80 floatx80_chs(floatx80 a) +{ + a.high ^= 0x8000; + return a; +} + +static inline int floatx80_is_infinity(floatx80 a) +{ + return (a.high & 0x7fff) == 0x7fff && a.low == 0x8000000000000000LL; +} + +static inline int floatx80_is_neg(floatx80 a) +{ + return a.high >> 15; +} + +static inline int floatx80_is_zero(floatx80 a) +{ + return (a.high & 0x7fff) == 0 && a.low == 0; +} + +static inline int floatx80_is_zero_or_denormal(floatx80 a) +{ + return (a.high & 0x7fff) == 0; +} + +static inline int floatx80_is_any_nan(floatx80 a) +{ + return ((a.high & 0x7fff) == 0x7fff) && (a.low<<1); +} + +#define floatx80_zero make_floatx80(0x0000, 0x0000000000000000LL) +#define floatx80_one make_floatx80(0x3fff, 0x8000000000000000LL) +#define floatx80_ln2 make_floatx80(0x3ffe, 0xb17217f7d1cf79acLL) +#define floatx80_pi make_floatx80(0x4000, 0xc90fdaa22168c235LL) +#define floatx80_half make_floatx80(0x3ffe, 0x8000000000000000LL) +#define floatx80_infinity make_floatx80(0x7fff, 0x8000000000000000LL) + +/*---------------------------------------------------------------------------- +| The pattern for a default 
generated extended double-precision NaN. +*----------------------------------------------------------------------------*/ +extern const floatx80 floatx80_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE quadruple-precision conversion routines. +*----------------------------------------------------------------------------*/ +int32 float128_to_int32(float128, float_status *status); +int32 float128_to_int32_round_to_zero(float128, float_status *status); +int64 float128_to_int64(float128, float_status *status); +int64 float128_to_int64_round_to_zero(float128, float_status *status); +float32 float128_to_float32(float128, float_status *status); +float64 float128_to_float64(float128, float_status *status); +floatx80 float128_to_floatx80(float128, float_status *status); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE quadruple-precision operations. +*----------------------------------------------------------------------------*/ +float128 float128_round_to_int(float128, float_status *status); +float128 float128_add(float128, float128, float_status *status); +float128 float128_sub(float128, float128, float_status *status); +float128 float128_mul(float128, float128, float_status *status); +float128 float128_div(float128, float128, float_status *status); +float128 float128_rem(float128, float128, float_status *status); +float128 float128_sqrt(float128, float_status *status); +int float128_eq(float128, float128, float_status *status); +int float128_le(float128, float128, float_status *status); +int float128_lt(float128, float128, float_status *status); +int float128_unordered(float128, float128, float_status *status); +int float128_eq_quiet(float128, float128, float_status *status); +int float128_le_quiet(float128, float128, float_status *status); +int float128_lt_quiet(float128, float128, float_status *status); +int float128_unordered_quiet(float128, float128, float_status *status); +int float128_compare(float128, float128, float_status *status); +int float128_compare_quiet(float128, float128, float_status *status); +int float128_is_quiet_nan( float128 ); +int float128_is_signaling_nan( float128 ); +float128 float128_maybe_silence_nan( float128 ); +float128 float128_scalbn(float128, int, float_status *status); + +static inline float128 float128_abs(float128 a) +{ + a.high &= 0x7fffffffffffffffLL; + return a; +} + +static inline float128 float128_chs(float128 a) +{ + a.high ^= 0x8000000000000000LL; + return a; +} + +static inline int float128_is_infinity(float128 a) +{ + return (a.high & 0x7fffffffffffffffLL) == 0x7fff000000000000LL && a.low == 0; +} + +static inline int float128_is_neg(float128 a) +{ + return a.high >> 63; +} + +static inline int float128_is_zero(float128 a) +{ + return (a.high & 0x7fffffffffffffffLL) == 0 && a.low == 0; +} + +static inline int float128_is_zero_or_denormal(float128 a) +{ + return (a.high & 0x7fff000000000000LL) == 0; +} + +static inline int float128_is_any_nan(float128 a) +{ + return ((a.high >> 48) & 0x7fff) == 0x7fff && + ((a.low != 0) || ((a.high & 0xffffffffffffLL) != 0)); +} + +#define float128_zero make_float128(0, 0) + +/*---------------------------------------------------------------------------- +| The pattern for a default generated quadruple-precision NaN. 
+*----------------------------------------------------------------------------*/ +extern const float128 float128_default_nan; + +#endif /* !SOFTFLOAT_H */ diff --git a/slirp/simh/qemu/glib-compat.h b/slirp/simh/qemu/glib-compat.h new file mode 100644 index 00000000..318e0003 --- /dev/null +++ b/slirp/simh/qemu/glib-compat.h @@ -0,0 +1,168 @@ +/* + * GLIB Compatibility Functions + * + * Copyright IBM, Corp. 2013 + * + * Authors: + * Anthony Liguori + * Michael Tokarev + * Paolo Bonzini + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_GLIB_COMPAT_H +#define QEMU_GLIB_COMPAT_H + +#include + +/* GLIB version compatibility flags */ +#if !GLIB_CHECK_VERSION(2, 26, 0) +#define G_TIME_SPAN_SECOND (G_GINT64_CONSTANT(1000000)) +#endif + +#if !GLIB_CHECK_VERSION(2, 28, 0) +static inline gint64 qemu_g_get_monotonic_time(void) +{ + /* g_get_monotonic_time() is best-effort so we can use the wall clock as a + * fallback. + */ + + GTimeVal time; + g_get_current_time(&time); + + return time.tv_sec * G_TIME_SPAN_SECOND + time.tv_usec; +} +/* work around distro backports of this interface */ +#define g_get_monotonic_time() qemu_g_get_monotonic_time() +#endif + +#ifdef _WIN32 +/* + * g_poll has a problem on Windows when using + * timeouts < 10ms, so use wrapper. + */ +#define g_poll(fds, nfds, timeout) g_poll_fixed(fds, nfds, timeout) +gint g_poll_fixed(GPollFD *fds, guint nfds, gint timeout); +#endif + +#if !GLIB_CHECK_VERSION(2, 31, 0) +/* before glib-2.31, GMutex and GCond was dynamic-only (there was a separate + * GStaticMutex, but it didn't work with condition variables). + * + * Our implementation uses GOnce to fake a static implementation that does + * not require separate initialization. + * We need to rename the types to avoid passing our CompatGMutex/CompatGCond + * by mistake to a function that expects GMutex/GCond. However, for ease + * of use we keep the GLib function names. GLib uses macros for the + * implementation, we use inline functions instead and undefine the macros. 
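+ *
+ * A minimal usage sketch of the wrappers below (illustrative, not part of
+ * the original header):
+ *
+ *   CompatGMutex lock;
+ *   g_mutex_init(&lock);
+ *   g_mutex_lock(&lock);
+ *   ... critical section ...
+ *   g_mutex_unlock(&lock);
+ *   g_mutex_clear(&lock);
+ *
+ * The first g_mutex_lock() call allocates the real GMutex through g_once(),
+ * so nothing beyond g_mutex_init() is needed before first use.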
+ */ + +typedef struct CompatGMutex { + GOnce once; +} CompatGMutex; + +typedef struct CompatGCond { + GOnce once; +} CompatGCond; + +static inline gpointer do_g_mutex_new(gpointer unused) +{ + return (gpointer) g_mutex_new(); +} + +static inline void g_mutex_init(CompatGMutex *mutex) +{ + mutex->once = (GOnce) G_ONCE_INIT; +} + +static inline void g_mutex_clear(CompatGMutex *mutex) +{ + g_assert(mutex->once.status != G_ONCE_STATUS_PROGRESS); + if (mutex->once.retval) { + g_mutex_free((GMutex *) mutex->once.retval); + } + mutex->once = (GOnce) G_ONCE_INIT; +} + +static inline void (g_mutex_lock)(CompatGMutex *mutex) +{ + g_once(&mutex->once, do_g_mutex_new, NULL); + g_mutex_lock((GMutex *) mutex->once.retval); +} +#undef g_mutex_lock + +static inline gboolean (g_mutex_trylock)(CompatGMutex *mutex) +{ + g_once(&mutex->once, do_g_mutex_new, NULL); + return g_mutex_trylock((GMutex *) mutex->once.retval); +} +#undef g_mutex_trylock + + +static inline void (g_mutex_unlock)(CompatGMutex *mutex) +{ + g_mutex_unlock((GMutex *) mutex->once.retval); +} +#undef g_mutex_unlock + +static inline gpointer do_g_cond_new(gpointer unused) +{ + return (gpointer) g_cond_new(); +} + +static inline void g_cond_init(CompatGCond *cond) +{ + cond->once = (GOnce) G_ONCE_INIT; +} + +static inline void g_cond_clear(CompatGCond *cond) +{ + g_assert(cond->once.status != G_ONCE_STATUS_PROGRESS); + if (cond->once.retval) { + g_cond_free((GCond *) cond->once.retval); + } + cond->once = (GOnce) G_ONCE_INIT; +} + +static inline void (g_cond_wait)(CompatGCond *cond, CompatGMutex *mutex) +{ + g_assert(mutex->once.status != G_ONCE_STATUS_PROGRESS); + g_once(&cond->once, do_g_cond_new, NULL); + g_cond_wait((GCond *) cond->once.retval, (GMutex *) mutex->once.retval); +} +#undef g_cond_wait + +static inline void (g_cond_broadcast)(CompatGCond *cond) +{ + g_once(&cond->once, do_g_cond_new, NULL); + g_cond_broadcast((GCond *) cond->once.retval); +} +#undef g_cond_broadcast + +static inline void (g_cond_signal)(CompatGCond *cond) +{ + g_once(&cond->once, do_g_cond_new, NULL); + g_cond_signal((GCond *) cond->once.retval); +} +#undef g_cond_signal + + +/* before 2.31 there was no g_thread_new() */ +static inline GThread *g_thread_new(const char *name, + GThreadFunc func, gpointer data) +{ + GThread *thread = g_thread_create(func, data, TRUE, NULL); + if (!thread) { + g_error("creating thread"); + } + return thread; +} +#else +#define CompatGMutex GMutex +#define CompatGCond GCond +#endif /* glib 2.31 */ + +#endif diff --git a/slirp/simh/qemu/host-utils.h b/slirp/simh/qemu/host-utils.h new file mode 100644 index 00000000..e69de29b diff --git a/slirp/simh/qemu/hw/hw.h b/slirp/simh/qemu/hw/hw.h new file mode 100644 index 00000000..0b5a73b7 --- /dev/null +++ b/slirp/simh/qemu/hw/hw.h @@ -0,0 +1,71 @@ +/* Declarations for use by hardware emulation. 
*/ +#if 0 +#ifndef QEMU_HW_H +#define QEMU_HW_H + +#include "qemu-common.h" + +#if !defined(CONFIG_USER_ONLY) && !defined(NEED_CPU_H) +#include "exec/cpu-common.h" +#endif + +#include "exec/ioport.h" +#include "hw/irq.h" +#include "block/aio.h" +#include "migration/vmstate.h" +#include "qemu/log.h" + +#ifdef NEED_CPU_H +#if TARGET_LONG_BITS == 64 +#define qemu_put_betl qemu_put_be64 +#define qemu_get_betl qemu_get_be64 +#define qemu_put_betls qemu_put_be64s +#define qemu_get_betls qemu_get_be64s +#define qemu_put_sbetl qemu_put_sbe64 +#define qemu_get_sbetl qemu_get_sbe64 +#define qemu_put_sbetls qemu_put_sbe64s +#define qemu_get_sbetls qemu_get_sbe64s +#else +#define qemu_put_betl qemu_put_be32 +#define qemu_get_betl qemu_get_be32 +#define qemu_put_betls qemu_put_be32s +#define qemu_get_betls qemu_get_be32s +#define qemu_put_sbetl qemu_put_sbe32 +#define qemu_get_sbetl qemu_get_sbe32 +#define qemu_put_sbetls qemu_put_sbe32s +#define qemu_get_sbetls qemu_get_sbe32s +#endif +#endif + +typedef void QEMUResetHandler(void *opaque); + +void qemu_register_reset(QEMUResetHandler *func, void *opaque); +void qemu_unregister_reset(QEMUResetHandler *func, void *opaque); + +#ifdef NEED_CPU_H +#if TARGET_LONG_BITS == 64 +#define VMSTATE_UINTTL_V(_f, _s, _v) \ + VMSTATE_UINT64_V(_f, _s, _v) +#define VMSTATE_UINTTL_EQUAL_V(_f, _s, _v) \ + VMSTATE_UINT64_EQUAL_V(_f, _s, _v) +#define VMSTATE_UINTTL_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_UINT64_ARRAY_V(_f, _s, _n, _v) +#else +#define VMSTATE_UINTTL_V(_f, _s, _v) \ + VMSTATE_UINT32_V(_f, _s, _v) +#define VMSTATE_UINTTL_EQUAL_V(_f, _s, _v) \ + VMSTATE_UINT32_EQUAL_V(_f, _s, _v) +#define VMSTATE_UINTTL_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_UINT32_ARRAY_V(_f, _s, _n, _v) +#endif +#define VMSTATE_UINTTL(_f, _s) \ + VMSTATE_UINTTL_V(_f, _s, 0) +#define VMSTATE_UINTTL_EQUAL(_f, _s) \ + VMSTATE_UINTTL_EQUAL_V(_f, _s, 0) +#define VMSTATE_UINTTL_ARRAY(_f, _s, _n) \ + VMSTATE_UINTTL_ARRAY_V(_f, _s, _n, 0) + +#endif + +#endif +#endif \ No newline at end of file diff --git a/slirp/simh/qemu/int128.h b/slirp/simh/qemu/int128.h new file mode 100644 index 00000000..fb782aad --- /dev/null +++ b/slirp/simh/qemu/int128.h @@ -0,0 +1,149 @@ +#ifndef INT128_H +#define INT128_H + +#include +#include +#include + +typedef struct Int128 Int128; + +struct Int128 { + uint64_t lo; + int64_t hi; +}; + +static inline Int128 int128_make64(uint64_t a) +{ + return (Int128) { a, 0 }; +} + +static inline uint64_t int128_get64(Int128 a) +{ + assert(!a.hi); + return a.lo; +} + +static inline Int128 int128_zero(void) +{ + return int128_make64(0); +} + +static inline Int128 int128_one(void) +{ + return int128_make64(1); +} + +static inline Int128 int128_2_64(void) +{ + return (Int128) { 0, 1 }; +} + +static inline Int128 int128_exts64(int64_t a) +{ + return (Int128) { .lo = a, .hi = (a < 0) ? -1 : 0 }; +} + +static inline Int128 int128_and(Int128 a, Int128 b) +{ + return (Int128) { a.lo & b.lo, a.hi & b.hi }; +} + +static inline Int128 int128_rshift(Int128 a, int n) +{ + int64_t h; + if (!n) { + return a; + } + h = a.hi >> (n & 63); + if (n >= 64) { + return (Int128) { h, h >> 63 }; + } else { + return (Int128) { (a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h }; + } +} + +static inline Int128 int128_add(Int128 a, Int128 b) +{ + uint64_t lo = a.lo + b.lo; + + /* a.lo <= a.lo + b.lo < a.lo + k (k is the base, 2^64). Hence, + * a.lo + b.lo >= k implies 0 <= lo = a.lo + b.lo - k < a.lo. + * Similarly, a.lo + b.lo < k implies a.lo <= lo = a.lo + b.lo < k. + * + * So the carry is lo < a.lo. 
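+ *
+ * Worked example (hypothetical values): if a.lo == 2^64 - 1 and b.lo == 2,
+ * then lo wraps around to 1, which is less than a.lo, so a carry of 1 is
+ * added into the high half below.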
+ */ + return (Int128) { lo, (uint64_t)a.hi + b.hi + (lo < a.lo) }; +} + +static inline Int128 int128_neg(Int128 a) +{ + uint64_t lo = -a.lo; + return (Int128) { lo, ~(uint64_t)a.hi + !lo }; +} + +static inline Int128 int128_sub(Int128 a, Int128 b) +{ + return (Int128){ a.lo - b.lo, (uint64_t)a.hi - b.hi - (a.lo < b.lo) }; +} + +static inline bool int128_nonneg(Int128 a) +{ + return a.hi >= 0; +} + +static inline bool int128_eq(Int128 a, Int128 b) +{ + return a.lo == b.lo && a.hi == b.hi; +} + +static inline bool int128_ne(Int128 a, Int128 b) +{ + return !int128_eq(a, b); +} + +static inline bool int128_ge(Int128 a, Int128 b) +{ + return a.hi > b.hi || (a.hi == b.hi && a.lo >= b.lo); +} + +static inline bool int128_lt(Int128 a, Int128 b) +{ + return !int128_ge(a, b); +} + +static inline bool int128_le(Int128 a, Int128 b) +{ + return int128_ge(b, a); +} + +static inline bool int128_gt(Int128 a, Int128 b) +{ + return !int128_le(a, b); +} + +static inline bool int128_nz(Int128 a) +{ + return a.lo || a.hi; +} + +static inline Int128 int128_min(Int128 a, Int128 b) +{ + return int128_le(a, b) ? a : b; +} + +static inline Int128 int128_max(Int128 a, Int128 b) +{ + return int128_ge(a, b) ? a : b; +} + +static inline void int128_addto(Int128 *a, Int128 b) +{ + *a = int128_add(*a, b); +} + +static inline void int128_subfrom(Int128 *a, Int128 b) +{ + *a = int128_sub(*a, b); +} + +#endif diff --git a/slirp/simh/qemu/main-loop.h b/slirp/simh/qemu/main-loop.h new file mode 100644 index 00000000..e5ca20b4 --- /dev/null +++ b/slirp/simh/qemu/main-loop.h @@ -0,0 +1,273 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef QEMU_MAIN_LOOP_H +#define QEMU_MAIN_LOOP_H 1 + +#include "block/aio.h" + +#define SIG_IPI SIGUSR1 + +/** + * qemu_init_main_loop: Set up the process so that it can run the main loop. + * + * This includes setting up signal handlers. It should be called before + * any other threads are created. In addition, threads other than the + * main one should block signals that are trapped by the main loop. + * For simplicity, you can consider these signals to be safe: SIGUSR1, + * SIGUSR2, thread signals (SIGFPE, SIGILL, SIGSEGV, SIGBUS) and real-time + * signals if available. Remember that Windows in practice does not have + * signals, though. + * + * In the case of QEMU tools, this will also start/initialize timers. 
+ */ +int qemu_init_main_loop(Error **errp); + +/** + * main_loop_wait: Run one iteration of the main loop. + * + * If @nonblocking is true, poll for events, otherwise suspend until + * one actually occurs. The main loop usually consists of a loop that + * repeatedly calls main_loop_wait(false). + * + * Main loop services include file descriptor callbacks, bottom halves + * and timers (defined in qemu/timer.h). Bottom halves are similar to timers + * that execute immediately, but have a lower overhead and scheduling them + * is wait-free, thread-safe and signal-safe. + * + * It is sometimes useful to put a whole program in a coroutine. In this + * case, the coroutine actually should be started from within the main loop, + * so that the main loop can run whenever the coroutine yields. To do this, + * you can use a bottom half to enter the coroutine as soon as the main loop + * starts: + * + * void enter_co_bh(void *opaque) { + * QEMUCoroutine *co = opaque; + * qemu_coroutine_enter(co, NULL); + * } + * + * ... + * QEMUCoroutine *co = qemu_coroutine_create(coroutine_entry); + * QEMUBH *start_bh = qemu_bh_new(enter_co_bh, co); + * qemu_bh_schedule(start_bh); + * while (...) { + * main_loop_wait(false); + * } + * + * (In the future we may provide a wrapper for this). + * + * @nonblocking: Whether the caller should block until an event occurs. + */ +int main_loop_wait(int nonblocking); + +/** + * qemu_get_aio_context: Return the main loop's AioContext + */ +AioContext *qemu_get_aio_context(void); + +/** + * qemu_notify_event: Force processing of pending events. + * + * Similar to signaling a condition variable, qemu_notify_event forces + * main_loop_wait to look at pending events and exit. The caller of + * main_loop_wait will usually call it again very soon, so qemu_notify_event + * also has the side effect of recalculating the sets of file descriptors + * that the main loop waits for. + * + * Calling qemu_notify_event is rarely necessary, because main loop + * services (bottom halves and timers) call it themselves. + */ +void qemu_notify_event(void); + +#ifdef _WIN32 +/* return TRUE if no sleep should be done afterwards */ +typedef int PollingFunc(void *opaque); + +/** + * qemu_add_polling_cb: Register a Windows-specific polling callback + * + * Currently, under Windows some events are polled rather than waited for. + * Polling callbacks do not ensure that @func is called timely, because + * the main loop might wait for an arbitrarily long time. If possible, + * you should instead create a separate thread that does a blocking poll + * and set a Win32 event object. The event can then be passed to + * qemu_add_wait_object. + * + * Polling callbacks really have nothing Windows specific in them, but + * as they are a hack and are currently not necessary under POSIX systems, + * they are only available when QEMU is running under Windows. + * + * @func: The function that does the polling, and returns 1 to force + * immediate completion of main_loop_wait. + * @opaque: A pointer-size value that is passed to @func. + */ +int qemu_add_polling_cb(PollingFunc *func, void *opaque); + +/** + * qemu_del_polling_cb: Unregister a Windows-specific polling callback + * + * This function removes a callback that was registered with + * qemu_add_polling_cb. + * + * @func: The function that was passed to qemu_add_polling_cb. + * @opaque: A pointer-size value that was passed to qemu_add_polling_cb. 
+ */ +void qemu_del_polling_cb(PollingFunc *func, void *opaque); + +/* Wait objects handling */ +typedef void WaitObjectFunc(void *opaque); + +/** + * qemu_add_wait_object: Register a callback for a Windows handle + * + * Under Windows, the iohandler mechanism can only be used with sockets. + * QEMU must use the WaitForMultipleObjects API to wait on other handles. + * This function registers a #HANDLE with QEMU, so that it will be included + * in the main loop's calls to WaitForMultipleObjects. When the handle + * is in a signaled state, QEMU will call @func. + * + * @handle: The Windows handle to be observed. + * @func: A function to be called when @handle is in a signaled state. + * @opaque: A pointer-size value that is passed to @func. + */ +int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque); + +/** + * qemu_del_wait_object: Unregister a callback for a Windows handle + * + * This function removes a callback that was registered with + * qemu_add_wait_object. + * + * @func: The function that was passed to qemu_add_wait_object. + * @opaque: A pointer-size value that was passed to qemu_add_wait_object. + */ +void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque); +#endif + +/* async I/O support */ + +typedef void IOReadHandler(void *opaque, const uint8_t *buf, int size); +typedef int IOCanReadHandler(void *opaque); + +/** + * qemu_set_fd_handler: Register a file descriptor with the main loop + * + * This function tells the main loop to wake up whenever one of the + * following conditions is true: + * + * 1) if @fd_write is not %NULL, when the file descriptor is writable; + * + * 2) if @fd_read is not %NULL, when the file descriptor is readable. + * + * The callbacks that are set up by qemu_set_fd_handler are level-triggered. + * If @fd_read does not read from @fd, or @fd_write does not write to @fd + * until its buffers are full, they will be called again on the next + * iteration. + * + * @fd: The file descriptor to be observed. Under Windows it must be + * a #SOCKET. + * + * @fd_read: A level-triggered callback that is fired if @fd is readable + * at the beginning of a main loop iteration, or if it becomes readable + * during one. + * + * @fd_write: A level-triggered callback that is fired when @fd is writable + * at the beginning of a main loop iteration, or if it becomes writable + * during one. + * + * @opaque: A pointer-sized value that is passed to @fd_read and @fd_write. + */ +void qemu_set_fd_handler(int fd, + IOHandler *fd_read, + IOHandler *fd_write, + void *opaque); + +GSource *iohandler_get_g_source(void); +#ifdef CONFIG_POSIX +/** + * qemu_add_child_watch: Register a child process for reaping. + * + * Under POSIX systems, a parent process must read the exit status of + * its child processes using waitpid, or the operating system will not + * free some of the resources attached to that process. + * + * This function directs the QEMU main loop to observe a child process + * and call waitpid as soon as it exits; the watch is then removed + * automatically. It is useful whenever QEMU forks a child process + * but will find out about its termination by other means such as a + * "broken pipe". + * + * @pid: The pid that QEMU should observe. + */ +int qemu_add_child_watch(pid_t pid); +#endif + +/** + * qemu_mutex_iothread_locked: Return lock status of the main loop mutex. + * + * The main loop mutex is the coarsest lock in QEMU, and as such it + * must always be taken outside other locks. 
This function helps + * functions take different paths depending on whether the current + * thread is running within the main loop mutex. + */ +bool qemu_mutex_iothread_locked(void); + +/** + * qemu_mutex_lock_iothread: Lock the main loop mutex. + * + * This function locks the main loop mutex. The mutex is taken by + * qemu_init_main_loop and always taken except while waiting on + * external events (such as with select). The mutex should be taken + * by threads other than the main loop thread when calling + * qemu_bh_new(), qemu_set_fd_handler() and basically all other + * functions documented in this file. + * + * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread + * is a no-op there. + */ +void qemu_mutex_lock_iothread(void); + +/** + * qemu_mutex_unlock_iothread: Unlock the main loop mutex. + * + * This function unlocks the main loop mutex. The mutex is taken by + * qemu_init_main_loop and always taken except while waiting on + * external events (such as with select). The mutex should be unlocked + * as soon as possible by threads other than the main loop thread, + * because it prevents the main loop from processing callbacks, + * including timers and bottom halves. + * + * NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread + * is a no-op there. + */ +void qemu_mutex_unlock_iothread(void); + +/* internal interfaces */ + +void qemu_fd_register(int fd); + +QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque); +void qemu_bh_schedule_idle(QEMUBH *bh); + +#endif diff --git a/slirp/simh/qemu/module.h b/slirp/simh/qemu/module.h new file mode 100644 index 00000000..72d94984 --- /dev/null +++ b/slirp/simh/qemu/module.h @@ -0,0 +1,61 @@ +/* + * QEMU Module Infrastructure + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_MODULE_H +#define QEMU_MODULE_H + +#include "qemu/osdep.h" + +#define DSO_STAMP_FUN glue(qemu_stamp, CONFIG_STAMP) +#define DSO_STAMP_FUN_STR stringify(DSO_STAMP_FUN) + +#ifdef BUILD_DSO +void DSO_STAMP_FUN(void); +/* This is a dummy symbol to identify a loaded DSO as a QEMU module, so we can + * distinguish "version mismatch" from "not a QEMU module", when the stamp + * check fails during module loading */ +void qemu_module_dummy(void); + +#define module_init(function, type) \ +static void __attribute__((constructor)) do_qemu_init_ ## function(void) \ +{ \ + register_dso_module_init(function, type); \ +} +#else +/* This should not be used directly. Use block_init etc. instead. 
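+ * For instance (illustrative, with a hypothetical function name):
+ *   static void my_block_driver_init(void) { ... }
+ *   block_init(my_block_driver_init);
+ * registers my_block_driver_init() so that it is run later by
+ * module_call_init(MODULE_INIT_BLOCK).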
*/ +#define module_init(function, type) \ +static void __attribute__((constructor)) do_qemu_init_ ## function(void) \ +{ \ + register_module_init(function, type); \ +} +#endif + +typedef enum { + MODULE_INIT_BLOCK, + MODULE_INIT_MACHINE, + MODULE_INIT_QAPI, + MODULE_INIT_QOM, + MODULE_INIT_MAX +} module_init_type; + +#define block_init(function) module_init(function, MODULE_INIT_BLOCK) +#define machine_init(function) module_init(function, MODULE_INIT_MACHINE) +#define qapi_init(function) module_init(function, MODULE_INIT_QAPI) +#define type_init(function) module_init(function, MODULE_INIT_QOM) + +void register_module_init(void (*fn)(void), module_init_type type); +void register_dso_module_init(void (*fn)(void), module_init_type type); + +void module_call_init(module_init_type type); + +#endif diff --git a/slirp/simh/qemu/monitor/monitor.h b/slirp/simh/qemu/monitor/monitor.h new file mode 100644 index 00000000..9ae92b93 --- /dev/null +++ b/slirp/simh/qemu/monitor/monitor.h @@ -0,0 +1,56 @@ +#ifndef MONITOR_H +#define MONITOR_H + +#include "qemu-common.h" +#include "qapi/qmp/qdict.h" +#include "block/block.h" +//#include "qemu/readline.h" + +extern Monitor *cur_mon; + +/* flags for monitor_init */ +#define MONITOR_IS_DEFAULT 0x01 +#define MONITOR_USE_READLINE 0x02 +#define MONITOR_USE_CONTROL 0x04 +#define MONITOR_USE_PRETTY 0x08 + +bool monitor_cur_is_qmp(void); + +void monitor_init(CharDriverState *chr, int flags); + +int monitor_suspend(Monitor *mon); +void monitor_resume(Monitor *mon); + +int monitor_read_bdrv_key_start(Monitor *mon, BlockDriverState *bs, + BlockCompletionFunc *completion_cb, + void *opaque); +int monitor_read_block_device_key(Monitor *mon, const char *device, + BlockCompletionFunc *completion_cb, + void *opaque); + +int monitor_get_fd(Monitor *mon, const char *fdname, Error **errp); +int monitor_fd_param(Monitor *mon, const char *fdname, Error **errp); + +void monitor_vprintf(Monitor *mon, const char *fmt, va_list ap) + GCC_FMT_ATTR(2, 0); +void monitor_printf(Monitor *mon, const char *fmt, ...) GCC_FMT_ATTR(2, 3); +void monitor_flush(Monitor *mon); +int monitor_set_cpu(int cpu_index); +int monitor_get_cpu_index(void); + +void monitor_read_command(Monitor *mon, int show_prompt); +//int monitor_read_password(Monitor *mon, ReadLineFunc *readline_func, +// void *opaque); + +void object_add(const char *type, const char *id, const QDict *qdict, + Visitor *v, Error **errp); + +AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id, + bool has_opaque, const char *opaque, + Error **errp); +int monitor_fdset_get_fd(int64_t fdset_id, int flags); +int monitor_fdset_dup_fd_add(int64_t fdset_id, int dup_fd); +void monitor_fdset_dup_fd_remove(int dup_fd); +int monitor_fdset_dup_fd_find(int dup_fd); + +#endif /* !MONITOR_H */ diff --git a/slirp/simh/qemu/notify.h b/slirp/simh/qemu/notify.h new file mode 100644 index 00000000..a3d73e4b --- /dev/null +++ b/slirp/simh/qemu/notify.h @@ -0,0 +1,72 @@ +/* + * Notifier lists + * + * Copyright IBM, Corp. 2010 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#ifndef QEMU_NOTIFY_H +#define QEMU_NOTIFY_H + +#include "qemu/queue.h" + +typedef struct Notifier Notifier; + +struct Notifier +{ + void (*notify)(Notifier *notifier, void *data); + QLIST_ENTRY(Notifier) node; +}; + +typedef struct NotifierList +{ + QLIST_HEAD(, Notifier) notifiers; +} NotifierList; + +#define NOTIFIER_LIST_INITIALIZER(head) \ + { QLIST_HEAD_INITIALIZER((head).notifiers) } + +void notifier_list_init(NotifierList *list); + +void notifier_list_add(NotifierList *list, Notifier *notifier); + +void notifier_remove(Notifier *notifier); + +void notifier_list_notify(NotifierList *list, void *data); + +/* Same as Notifier but allows .notify() to return errors */ +typedef struct NotifierWithReturn NotifierWithReturn; + +struct NotifierWithReturn { + /** + * Return 0 on success (next notifier will be invoked), otherwise + * notifier_with_return_list_notify() will stop and return the value. + */ + int (*notify)(NotifierWithReturn *notifier, void *data); + QLIST_ENTRY(NotifierWithReturn) node; +}; + +typedef struct NotifierWithReturnList { + QLIST_HEAD(, NotifierWithReturn) notifiers; +} NotifierWithReturnList; + +#define NOTIFIER_WITH_RETURN_LIST_INITIALIZER(head) \ + { QLIST_HEAD_INITIALIZER((head).notifiers) } + +void notifier_with_return_list_init(NotifierWithReturnList *list); + +void notifier_with_return_list_add(NotifierWithReturnList *list, + NotifierWithReturn *notifier); + +void notifier_with_return_remove(NotifierWithReturn *notifier); + +int notifier_with_return_list_notify(NotifierWithReturnList *list, + void *data); + +#endif diff --git a/slirp/simh/qemu/option.h b/slirp/simh/qemu/option.h new file mode 100644 index 00000000..71f5f27e --- /dev/null +++ b/slirp/simh/qemu/option.h @@ -0,0 +1,138 @@ +/* + * Commandline option parsing functions + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2009 Kevin Wolf + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifndef QEMU_OPTIONS_H +#define QEMU_OPTIONS_H + +#include +#include "qemu/queue.h" +#include "qapi/error.h" +#include "qapi/qmp/qdict.h" +#include "qemu/typedefs.h" + +const char *get_opt_name(char *buf, int buf_size, const char *p, char delim); +const char *get_opt_value(char *buf, int buf_size, const char *p); +int get_next_param_value(char *buf, int buf_size, + const char *tag, const char **pstr); +int get_param_value(char *buf, int buf_size, + const char *tag, const char *str); + + +void parse_option_size(const char *name, const char *value, + uint64_t *ret, Error **errp); +bool has_help_option(const char *param); +bool is_valid_option_list(const char *param); + +enum QemuOptType { + QEMU_OPT_STRING = 0, /* no parsing (use string as-is) */ + QEMU_OPT_BOOL, /* on/off */ + QEMU_OPT_NUMBER, /* simple number */ + QEMU_OPT_SIZE, /* size, accepts (K)ilo, (M)ega, (G)iga, (T)era postfix */ +}; + +typedef struct QemuOptDesc { + const char *name; + enum QemuOptType type; + const char *help; + const char *def_value_str; +} QemuOptDesc; + +struct QemuOptsList { + const char *name; + const char *implied_opt_name; + bool merge_lists; /* Merge multiple uses of option into a single list? */ + QTAILQ_HEAD(, QemuOpts) head; + QemuOptDesc desc[]; +}; + +const char *qemu_opt_get(QemuOpts *opts, const char *name); +char *qemu_opt_get_del(QemuOpts *opts, const char *name); +/** + * qemu_opt_has_help_opt: + * @opts: options to search for a help request + * + * Check whether the options specified by @opts include one of the + * standard strings which indicate that the user is asking for a + * list of the valid values for a command line option (as defined + * by is_help_option()). + * + * Returns: true if @opts includes 'help' or equivalent. + */ +bool qemu_opt_has_help_opt(QemuOpts *opts); +QemuOpt *qemu_opt_find(QemuOpts *opts, const char *name); +bool qemu_opt_get_bool(QemuOpts *opts, const char *name, bool defval); +uint64_t qemu_opt_get_number(QemuOpts *opts, const char *name, uint64_t defval); +uint64_t qemu_opt_get_size(QemuOpts *opts, const char *name, uint64_t defval); +bool qemu_opt_get_bool_del(QemuOpts *opts, const char *name, bool defval); +uint64_t qemu_opt_get_number_del(QemuOpts *opts, const char *name, + uint64_t defval); +uint64_t qemu_opt_get_size_del(QemuOpts *opts, const char *name, + uint64_t defval); +int qemu_opt_unset(QemuOpts *opts, const char *name); +void qemu_opt_set(QemuOpts *opts, const char *name, const char *value, + Error **errp); +void qemu_opt_set_bool(QemuOpts *opts, const char *name, bool val, + Error **errp); +void qemu_opt_set_number(QemuOpts *opts, const char *name, int64_t val, + Error **errp); +typedef int (*qemu_opt_loopfunc)(void *opaque, + const char *name, const char *value, + Error **errp); +int qemu_opt_foreach(QemuOpts *opts, qemu_opt_loopfunc func, void *opaque, + Error **errp); + +QemuOpts *qemu_opts_find(QemuOptsList *list, const char *id); +QemuOpts *qemu_opts_create(QemuOptsList *list, const char *id, + int fail_if_exists, Error **errp); +void qemu_opts_reset(QemuOptsList *list); +void qemu_opts_loc_restore(QemuOpts *opts); +void qemu_opts_set(QemuOptsList *list, const char *id, + const char *name, const char *value, Error **errp); +const char *qemu_opts_id(QemuOpts *opts); +void qemu_opts_set_id(QemuOpts *opts, char *id); +void qemu_opts_del(QemuOpts *opts); +void qemu_opts_validate(QemuOpts *opts, const QemuOptDesc *desc, Error **errp); +void qemu_opts_do_parse(QemuOpts *opts, const char *params, + const char *firstname, Error **errp); +QemuOpts 
*qemu_opts_parse_noisily(QemuOptsList *list, const char *params, + bool permit_abbrev); +QemuOpts *qemu_opts_parse(QemuOptsList *list, const char *params, + bool permit_abbrev, Error **errp); +void qemu_opts_set_defaults(QemuOptsList *list, const char *params, + int permit_abbrev); +QemuOpts *qemu_opts_from_qdict(QemuOptsList *list, const QDict *qdict, + Error **errp); +QDict *qemu_opts_to_qdict(QemuOpts *opts, QDict *qdict); +void qemu_opts_absorb_qdict(QemuOpts *opts, QDict *qdict, Error **errp); + +typedef int (*qemu_opts_loopfunc)(void *opaque, QemuOpts *opts, Error **errp); +int qemu_opts_foreach(QemuOptsList *list, qemu_opts_loopfunc func, + void *opaque, Error **errp); +void qemu_opts_print(QemuOpts *opts, const char *sep); +void qemu_opts_print_help(QemuOptsList *list); +void qemu_opts_free(QemuOptsList *list); +QemuOptsList *qemu_opts_append(QemuOptsList *dst, QemuOptsList *list); + +#endif diff --git a/slirp/simh/qemu/osdep.h b/slirp/simh/qemu/osdep.h new file mode 100644 index 00000000..dd789ff2 --- /dev/null +++ b/slirp/simh/qemu/osdep.h @@ -0,0 +1,291 @@ +/* + * OS includes and handling of OS dependencies + * + * This header exists to pull in some common system headers that + * most code in QEMU will want, and to fix up some possible issues with + * it (missing defines, Windows weirdness, and so on). + * + * To avoid getting into possible circular include dependencies, this + * file should not include any other QEMU headers, with the exceptions + * of config-host.h, compiler.h, os-posix.h and os-win32.h, all of which + * are doing a similar job to this file and are under similar constraints. + * + * This header also contains prototypes for functions defined in + * os-*.c and util/oslib-*.c; those would probably be better split + * out into separate header files. + * + * In an ideal world this header would contain only: + * (1) things which everybody needs + * (2) things without which code would work on most platforms but + * fail to compile or misbehave on a minority of host OSes + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef QEMU_OSDEP_H +#define QEMU_OSDEP_H + +#include "config-host.h" +#include "qemu/compiler.h" +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef _MSC_VER +#include +#endif +#include +#include +/* Put unistd.h before time.h as that triggers localtime_r/gmtime_r + * function availability on recentish Mingw-w64 platforms. 
*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __OpenBSD__ +#include +#endif + +#ifndef _WIN32 +#include +#else +#define WIFEXITED(x) 1 +#define WEXITSTATUS(x) (x) +#endif + +#ifdef _WIN32 +#include "sysemu/os-win32.h" +#endif + +#ifdef CONFIG_POSIX +#include "sysemu/os-posix.h" +#endif + +#if defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10 +/* [u]int_fast*_t not in */ +typedef unsigned char uint_fast8_t; +typedef unsigned int uint_fast16_t; +typedef signed int int_fast16_t; +#endif + +#ifndef O_LARGEFILE +#define O_LARGEFILE 0 +#endif +#ifndef O_BINARY +#define O_BINARY 0 +#endif +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif +#ifndef ENOMEDIUM +#define ENOMEDIUM ENODEV +#endif +#if !defined(ENOTSUP) +#define ENOTSUP 4096 +#endif +#if !defined(ECANCELED) +#define ECANCELED 4097 +#endif +#if !defined(EMEDIUMTYPE) +#define EMEDIUMTYPE 4098 +#endif +#ifndef TIME_MAX +#define TIME_MAX LONG_MAX +#endif + +#ifndef MIN +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif + +/* Minimum function that returns zero only iff both values are zero. + * Intended for use with unsigned values only. */ +#ifndef MIN_NON_ZERO +#define MIN_NON_ZERO(a, b) (((a) != 0 && (a) < (b)) ? (a) : (b)) +#endif + +#ifndef ROUND_UP +#define ROUND_UP(n,d) (((n) + (d) - 1) & -(d)) +#endif + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +int qemu_daemon(int nochdir, int noclose); +void *qemu_try_memalign(size_t alignment, size_t size); +void *qemu_memalign(size_t alignment, size_t size); +void *qemu_anon_ram_alloc(size_t size, uint64_t *align); +void qemu_vfree(void *ptr); +void qemu_anon_ram_free(void *ptr, size_t size); + +#define QEMU_MADV_INVALID -1 + +#if defined(CONFIG_MADVISE) + +#define QEMU_MADV_WILLNEED MADV_WILLNEED +#define QEMU_MADV_DONTNEED MADV_DONTNEED +#ifdef MADV_DONTFORK +#define QEMU_MADV_DONTFORK MADV_DONTFORK +#else +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#endif +#ifdef MADV_MERGEABLE +#define QEMU_MADV_MERGEABLE MADV_MERGEABLE +#else +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#endif +#ifdef MADV_UNMERGEABLE +#define QEMU_MADV_UNMERGEABLE MADV_UNMERGEABLE +#else +#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID +#endif +#ifdef MADV_DODUMP +#define QEMU_MADV_DODUMP MADV_DODUMP +#else +#define QEMU_MADV_DODUMP QEMU_MADV_INVALID +#endif +#ifdef MADV_DONTDUMP +#define QEMU_MADV_DONTDUMP MADV_DONTDUMP +#else +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID +#endif +#ifdef MADV_HUGEPAGE +#define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE +#else +#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID +#endif + +#elif defined(CONFIG_POSIX_MADVISE) + +#define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED +#define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_DODUMP QEMU_MADV_INVALID +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID +#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID + +#else /* no-op */ + +#define QEMU_MADV_WILLNEED QEMU_MADV_INVALID +#define QEMU_MADV_DONTNEED QEMU_MADV_INVALID +#define QEMU_MADV_DONTFORK QEMU_MADV_INVALID +#define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID +#define QEMU_MADV_DODUMP QEMU_MADV_INVALID +#define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID 
+#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
+
+#endif
+
+int qemu_madvise(void *addr, size_t len, int advice);
+
+int qemu_open(const char *name, int flags, ...);
+int qemu_close(int fd);
+
+#if defined(__HAIKU__) && defined(__i386__)
+#define FMT_pid "%ld"
+#elif defined(WIN64)
+#define FMT_pid "%" PRId64
+#else
+#define FMT_pid "%d"
+#endif
+
+int qemu_create_pidfile(const char *filename);
+int qemu_get_thread_id(void);
+
+#ifndef CONFIG_IOVEC
+struct iovec {
+ void *iov_base;
+ int iov_len;
+};
+/*
+ * Use the same value as Linux for now.
+ */
+#define IOV_MAX 1024
+
+ssize_t readv(int fd, const struct iovec *iov, int iov_cnt);
+ssize_t writev(int fd, const struct iovec *iov, int iov_cnt);
+#else
+#include <sys/uio.h>
+#endif
+
+#ifdef _WIN32
+static inline void qemu_timersub(const struct timeval *val1,
+ const struct timeval *val2,
+ struct timeval *res)
+{
+ res->tv_sec = val1->tv_sec - val2->tv_sec;
+ if (val1->tv_usec < val2->tv_usec) {
+ res->tv_sec--;
+ res->tv_usec = val1->tv_usec - val2->tv_usec + 1000 * 1000;
+ } else {
+ res->tv_usec = val1->tv_usec - val2->tv_usec;
+ }
+}
+#else
+#define qemu_timersub timersub
+#endif
+
+void qemu_set_cloexec(int fd);
+
+void qemu_set_version(const char *);
+const char *qemu_get_version(void);
+
+void fips_set_state(bool requested);
+bool fips_get_state(void);
+
+/* Return a dynamically allocated pathname denoting a file or directory that is
+ * appropriate for storing local state.
+ *
+ * @relative_pathname need not start with a directory separator; one will be
+ * added automatically.
+ *
+ * The caller is responsible for releasing the value returned with g_free()
+ * after use.
+ */
+char *qemu_get_local_state_pathname(const char *relative_pathname);
+
+/* Find program directory, and save it for later usage with
+ * qemu_get_exec_dir().
+ * Try OS specific API first, if not working, parse from argv0. */
+void qemu_init_exec_dir(const char *argv0);
+
+/* Get the saved exec dir.
+ * Caller needs to release the returned string by g_free() */
+char *qemu_get_exec_dir(void);
+
+/**
+ * qemu_getauxval:
+ * @type: the auxiliary vector key to lookup
+ *
+ * Search the auxiliary vector for @type, returning the value
+ * or 0 if @type is not present.
+ */
+unsigned long qemu_getauxval(unsigned long type);
+
+void qemu_set_tty_echo(int fd, bool echo);
+
+void os_mem_prealloc(int fd, char *area, size_t sz);
+
+int qemu_read_password(char *buf, int buf_size);
+
+#endif
diff --git a/slirp/simh/qemu/qapi-types.h b/slirp/simh/qemu/qapi-types.h
new file mode 100644
index 00000000..3ffc54bd
--- /dev/null
+++ b/slirp/simh/qemu/qapi-types.h
@@ -0,0 +1,5607 @@
+/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
+
+/*
+ * schema-defined QAPI types
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ * + */ + +#ifndef QAPI_TYPES_H +#define QAPI_TYPES_H + +#include +#include +#include "qapi/qmp/qobject.h" + +#ifndef QAPI_TYPES_BUILTIN +#define QAPI_TYPES_BUILTIN + + +typedef struct anyList anyList; + +struct anyList { + union { + QObject *value; + uint64_t padding; + }; + anyList *next; +}; + +void qapi_free_anyList(anyList *obj); + +typedef struct boolList boolList; + +struct boolList { + union { + bool value; + uint64_t padding; + }; + boolList *next; +}; + +void qapi_free_boolList(boolList *obj); + +typedef struct int16List int16List; + +struct int16List { + union { + int16_t value; + uint64_t padding; + }; + int16List *next; +}; + +void qapi_free_int16List(int16List *obj); + +typedef struct int32List int32List; + +struct int32List { + union { + int32_t value; + uint64_t padding; + }; + int32List *next; +}; + +void qapi_free_int32List(int32List *obj); + +typedef struct int64List int64List; + +struct int64List { + union { + int64_t value; + uint64_t padding; + }; + int64List *next; +}; + +void qapi_free_int64List(int64List *obj); + +typedef struct int8List int8List; + +struct int8List { + union { + int8_t value; + uint64_t padding; + }; + int8List *next; +}; + +void qapi_free_int8List(int8List *obj); + +typedef struct intList intList; + +struct intList { + union { + int64_t value; + uint64_t padding; + }; + intList *next; +}; + +void qapi_free_intList(intList *obj); + +typedef struct numberList numberList; + +struct numberList { + union { + double value; + uint64_t padding; + }; + numberList *next; +}; + +void qapi_free_numberList(numberList *obj); + +typedef struct sizeList sizeList; + +struct sizeList { + union { + uint64_t value; + uint64_t padding; + }; + sizeList *next; +}; + +void qapi_free_sizeList(sizeList *obj); + +typedef struct strList strList; + +struct strList { + union { + char *value; + uint64_t padding; + }; + strList *next; +}; + +void qapi_free_strList(strList *obj); + +typedef struct uint16List uint16List; + +struct uint16List { + union { + uint16_t value; + uint64_t padding; + }; + uint16List *next; +}; + +void qapi_free_uint16List(uint16List *obj); + +typedef struct uint32List uint32List; + +struct uint32List { + union { + uint32_t value; + uint64_t padding; + }; + uint32List *next; +}; + +void qapi_free_uint32List(uint32List *obj); + +typedef struct uint64List uint64List; + +struct uint64List { + union { + uint64_t value; + uint64_t padding; + }; + uint64List *next; +}; + +void qapi_free_uint64List(uint64List *obj); + +typedef struct uint8List uint8List; + +struct uint8List { + union { + uint8_t value; + uint64_t padding; + }; + uint8List *next; +}; + +void qapi_free_uint8List(uint8List *obj); + +#endif /* QAPI_TYPES_BUILTIN */ + + +typedef struct ACPIOSTInfo ACPIOSTInfo; + +typedef struct ACPIOSTInfoList ACPIOSTInfoList; + +typedef enum ACPISlotType { + ACPI_SLOT_TYPE_DIMM = 0, + ACPI_SLOT_TYPE_MAX = 1, +} ACPISlotType; + +extern const char *const ACPISlotType_lookup[]; + +typedef struct ACPISlotTypeList ACPISlotTypeList; + +typedef struct Abort Abort; + +typedef struct AbortList AbortList; + +typedef struct AcpiTableOptions AcpiTableOptions; + +typedef struct AcpiTableOptionsList AcpiTableOptionsList; + +typedef struct AddfdInfo AddfdInfo; + +typedef struct AddfdInfoList AddfdInfoList; + +typedef struct BalloonInfo BalloonInfo; + +typedef struct BalloonInfoList BalloonInfoList; + +typedef enum BiosAtaTranslation { + BIOS_ATA_TRANSLATION_AUTO = 0, + BIOS_ATA_TRANSLATION_NONE = 1, + BIOS_ATA_TRANSLATION_LBA = 2, + BIOS_ATA_TRANSLATION_LARGE = 3, + 
BIOS_ATA_TRANSLATION_RECHS = 4, + BIOS_ATA_TRANSLATION_MAX = 5, +} BiosAtaTranslation; + +extern const char *const BiosAtaTranslation_lookup[]; + +typedef struct BiosAtaTranslationList BiosAtaTranslationList; + +typedef enum BlkdebugEvent { + BLKDEBUG_EVENT_L1_UPDATE = 0, + BLKDEBUG_EVENT_L1_GROW_ALLOC_TABLE = 1, + BLKDEBUG_EVENT_L1_GROW_WRITE_TABLE = 2, + BLKDEBUG_EVENT_L1_GROW_ACTIVATE_TABLE = 3, + BLKDEBUG_EVENT_L2_LOAD = 4, + BLKDEBUG_EVENT_L2_UPDATE = 5, + BLKDEBUG_EVENT_L2_UPDATE_COMPRESSED = 6, + BLKDEBUG_EVENT_L2_ALLOC_COW_READ = 7, + BLKDEBUG_EVENT_L2_ALLOC_WRITE = 8, + BLKDEBUG_EVENT_READ_AIO = 9, + BLKDEBUG_EVENT_READ_BACKING_AIO = 10, + BLKDEBUG_EVENT_READ_COMPRESSED = 11, + BLKDEBUG_EVENT_WRITE_AIO = 12, + BLKDEBUG_EVENT_WRITE_COMPRESSED = 13, + BLKDEBUG_EVENT_VMSTATE_LOAD = 14, + BLKDEBUG_EVENT_VMSTATE_SAVE = 15, + BLKDEBUG_EVENT_COW_READ = 16, + BLKDEBUG_EVENT_COW_WRITE = 17, + BLKDEBUG_EVENT_REFTABLE_LOAD = 18, + BLKDEBUG_EVENT_REFTABLE_GROW = 19, + BLKDEBUG_EVENT_REFTABLE_UPDATE = 20, + BLKDEBUG_EVENT_REFBLOCK_LOAD = 21, + BLKDEBUG_EVENT_REFBLOCK_UPDATE = 22, + BLKDEBUG_EVENT_REFBLOCK_UPDATE_PART = 23, + BLKDEBUG_EVENT_REFBLOCK_ALLOC = 24, + BLKDEBUG_EVENT_REFBLOCK_ALLOC_HOOKUP = 25, + BLKDEBUG_EVENT_REFBLOCK_ALLOC_WRITE = 26, + BLKDEBUG_EVENT_REFBLOCK_ALLOC_WRITE_BLOCKS = 27, + BLKDEBUG_EVENT_REFBLOCK_ALLOC_WRITE_TABLE = 28, + BLKDEBUG_EVENT_REFBLOCK_ALLOC_SWITCH_TABLE = 29, + BLKDEBUG_EVENT_CLUSTER_ALLOC = 30, + BLKDEBUG_EVENT_CLUSTER_ALLOC_BYTES = 31, + BLKDEBUG_EVENT_CLUSTER_FREE = 32, + BLKDEBUG_EVENT_FLUSH_TO_OS = 33, + BLKDEBUG_EVENT_FLUSH_TO_DISK = 34, + BLKDEBUG_EVENT_PWRITEV_RMW_HEAD = 35, + BLKDEBUG_EVENT_PWRITEV_RMW_AFTER_HEAD = 36, + BLKDEBUG_EVENT_PWRITEV_RMW_TAIL = 37, + BLKDEBUG_EVENT_PWRITEV_RMW_AFTER_TAIL = 38, + BLKDEBUG_EVENT_PWRITEV = 39, + BLKDEBUG_EVENT_PWRITEV_ZERO = 40, + BLKDEBUG_EVENT_PWRITEV_DONE = 41, + BLKDEBUG_EVENT_EMPTY_IMAGE_PREPARE = 42, + BLKDEBUG_EVENT_MAX = 43, +} BlkdebugEvent; + +extern const char *const BlkdebugEvent_lookup[]; + +typedef struct BlkdebugEventList BlkdebugEventList; + +typedef struct BlkdebugInjectErrorOptions BlkdebugInjectErrorOptions; + +typedef struct BlkdebugInjectErrorOptionsList BlkdebugInjectErrorOptionsList; + +typedef struct BlkdebugSetStateOptions BlkdebugSetStateOptions; + +typedef struct BlkdebugSetStateOptionsList BlkdebugSetStateOptionsList; + +typedef struct BlockDeviceInfo BlockDeviceInfo; + +typedef struct BlockDeviceInfoList BlockDeviceInfoList; + +typedef enum BlockDeviceIoStatus { + BLOCK_DEVICE_IO_STATUS_OK = 0, + BLOCK_DEVICE_IO_STATUS_FAILED = 1, + BLOCK_DEVICE_IO_STATUS_NOSPACE = 2, + BLOCK_DEVICE_IO_STATUS_MAX = 3, +} BlockDeviceIoStatus; + +extern const char *const BlockDeviceIoStatus_lookup[]; + +typedef struct BlockDeviceIoStatusList BlockDeviceIoStatusList; + +typedef struct BlockDeviceMapEntry BlockDeviceMapEntry; + +typedef struct BlockDeviceMapEntryList BlockDeviceMapEntryList; + +typedef struct BlockDeviceStats BlockDeviceStats; + +typedef struct BlockDeviceStatsList BlockDeviceStatsList; + +typedef struct BlockDirtyBitmap BlockDirtyBitmap; + +typedef struct BlockDirtyBitmapAdd BlockDirtyBitmapAdd; + +typedef struct BlockDirtyBitmapAddList BlockDirtyBitmapAddList; + +typedef struct BlockDirtyBitmapList BlockDirtyBitmapList; + +typedef struct BlockDirtyInfo BlockDirtyInfo; + +typedef struct BlockDirtyInfoList BlockDirtyInfoList; + +typedef enum BlockErrorAction { + BLOCK_ERROR_ACTION_IGNORE = 0, + BLOCK_ERROR_ACTION_REPORT = 1, + BLOCK_ERROR_ACTION_STOP = 2, + BLOCK_ERROR_ACTION_MAX = 
3, +} BlockErrorAction; + +extern const char *const BlockErrorAction_lookup[]; + +typedef struct BlockErrorActionList BlockErrorActionList; + +typedef struct BlockInfo BlockInfo; + +typedef struct BlockInfoList BlockInfoList; + +typedef struct BlockJobInfo BlockJobInfo; + +typedef struct BlockJobInfoList BlockJobInfoList; + +typedef enum BlockJobType { + BLOCK_JOB_TYPE_COMMIT = 0, + BLOCK_JOB_TYPE_STREAM = 1, + BLOCK_JOB_TYPE_MIRROR = 2, + BLOCK_JOB_TYPE_BACKUP = 3, + BLOCK_JOB_TYPE_MAX = 4, +} BlockJobType; + +extern const char *const BlockJobType_lookup[]; + +typedef struct BlockJobTypeList BlockJobTypeList; + +typedef struct BlockStats BlockStats; + +typedef struct BlockStatsList BlockStatsList; + +typedef enum BlockdevAioOptions { + BLOCKDEV_AIO_OPTIONS_THREADS = 0, + BLOCKDEV_AIO_OPTIONS_NATIVE = 1, + BLOCKDEV_AIO_OPTIONS_MAX = 2, +} BlockdevAioOptions; + +extern const char *const BlockdevAioOptions_lookup[]; + +typedef struct BlockdevAioOptionsList BlockdevAioOptionsList; + +typedef struct BlockdevBackup BlockdevBackup; + +typedef struct BlockdevBackupList BlockdevBackupList; + +typedef struct BlockdevCacheInfo BlockdevCacheInfo; + +typedef struct BlockdevCacheInfoList BlockdevCacheInfoList; + +typedef struct BlockdevCacheOptions BlockdevCacheOptions; + +typedef struct BlockdevCacheOptionsList BlockdevCacheOptionsList; + +typedef enum BlockdevDetectZeroesOptions { + BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF = 0, + BLOCKDEV_DETECT_ZEROES_OPTIONS_ON = 1, + BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP = 2, + BLOCKDEV_DETECT_ZEROES_OPTIONS_MAX = 3, +} BlockdevDetectZeroesOptions; + +extern const char *const BlockdevDetectZeroesOptions_lookup[]; + +typedef struct BlockdevDetectZeroesOptionsList BlockdevDetectZeroesOptionsList; + +typedef enum BlockdevDiscardOptions { + BLOCKDEV_DISCARD_OPTIONS_IGNORE = 0, + BLOCKDEV_DISCARD_OPTIONS_UNMAP = 1, + BLOCKDEV_DISCARD_OPTIONS_MAX = 2, +} BlockdevDiscardOptions; + +extern const char *const BlockdevDiscardOptions_lookup[]; + +typedef struct BlockdevDiscardOptionsList BlockdevDiscardOptionsList; + +typedef enum BlockdevDriver { + BLOCKDEV_DRIVER_ARCHIPELAGO = 0, + BLOCKDEV_DRIVER_BLKDEBUG = 1, + BLOCKDEV_DRIVER_BLKVERIFY = 2, + BLOCKDEV_DRIVER_BOCHS = 3, + BLOCKDEV_DRIVER_CLOOP = 4, + BLOCKDEV_DRIVER_DMG = 5, + BLOCKDEV_DRIVER_FILE = 6, + BLOCKDEV_DRIVER_FTP = 7, + BLOCKDEV_DRIVER_FTPS = 8, + BLOCKDEV_DRIVER_HOST_CDROM = 9, + BLOCKDEV_DRIVER_HOST_DEVICE = 10, + BLOCKDEV_DRIVER_HOST_FLOPPY = 11, + BLOCKDEV_DRIVER_HTTP = 12, + BLOCKDEV_DRIVER_HTTPS = 13, + BLOCKDEV_DRIVER_NULL_AIO = 14, + BLOCKDEV_DRIVER_NULL_CO = 15, + BLOCKDEV_DRIVER_PARALLELS = 16, + BLOCKDEV_DRIVER_QCOW = 17, + BLOCKDEV_DRIVER_QCOW2 = 18, + BLOCKDEV_DRIVER_QED = 19, + BLOCKDEV_DRIVER_QUORUM = 20, + BLOCKDEV_DRIVER_RAW = 21, + BLOCKDEV_DRIVER_TFTP = 22, + BLOCKDEV_DRIVER_VDI = 23, + BLOCKDEV_DRIVER_VHDX = 24, + BLOCKDEV_DRIVER_VMDK = 25, + BLOCKDEV_DRIVER_VPC = 26, + BLOCKDEV_DRIVER_VVFAT = 27, + BLOCKDEV_DRIVER_MAX = 28, +} BlockdevDriver; + +extern const char *const BlockdevDriver_lookup[]; + +typedef struct BlockdevDriverList BlockdevDriverList; + +typedef enum BlockdevOnError { + BLOCKDEV_ON_ERROR_REPORT = 0, + BLOCKDEV_ON_ERROR_IGNORE = 1, + BLOCKDEV_ON_ERROR_ENOSPC = 2, + BLOCKDEV_ON_ERROR_STOP = 3, + BLOCKDEV_ON_ERROR_MAX = 4, +} BlockdevOnError; + +extern const char *const BlockdevOnError_lookup[]; + +typedef struct BlockdevOnErrorList BlockdevOnErrorList; + +typedef struct BlockdevOptions BlockdevOptions; + +typedef struct BlockdevOptionsArchipelago BlockdevOptionsArchipelago; + 
+typedef struct BlockdevOptionsArchipelagoList BlockdevOptionsArchipelagoList; + +typedef struct BlockdevOptionsBase BlockdevOptionsBase; + +typedef struct BlockdevOptionsBaseList BlockdevOptionsBaseList; + +typedef struct BlockdevOptionsBlkdebug BlockdevOptionsBlkdebug; + +typedef struct BlockdevOptionsBlkdebugList BlockdevOptionsBlkdebugList; + +typedef struct BlockdevOptionsBlkverify BlockdevOptionsBlkverify; + +typedef struct BlockdevOptionsBlkverifyList BlockdevOptionsBlkverifyList; + +typedef struct BlockdevOptionsFile BlockdevOptionsFile; + +typedef struct BlockdevOptionsFileList BlockdevOptionsFileList; + +typedef struct BlockdevOptionsGenericCOWFormat BlockdevOptionsGenericCOWFormat; + +typedef struct BlockdevOptionsGenericCOWFormatList BlockdevOptionsGenericCOWFormatList; + +typedef struct BlockdevOptionsGenericFormat BlockdevOptionsGenericFormat; + +typedef struct BlockdevOptionsGenericFormatList BlockdevOptionsGenericFormatList; + +typedef struct BlockdevOptionsList BlockdevOptionsList; + +typedef struct BlockdevOptionsNull BlockdevOptionsNull; + +typedef struct BlockdevOptionsNullList BlockdevOptionsNullList; + +typedef struct BlockdevOptionsQcow2 BlockdevOptionsQcow2; + +typedef struct BlockdevOptionsQcow2List BlockdevOptionsQcow2List; + +typedef struct BlockdevOptionsQuorum BlockdevOptionsQuorum; + +typedef struct BlockdevOptionsQuorumList BlockdevOptionsQuorumList; + +typedef struct BlockdevOptionsVVFAT BlockdevOptionsVVFAT; + +typedef struct BlockdevOptionsVVFATList BlockdevOptionsVVFATList; + +typedef struct BlockdevRef BlockdevRef; + +typedef enum BlockdevRefKind { + BLOCKDEV_REF_KIND_DEFINITION = 0, + BLOCKDEV_REF_KIND_REFERENCE = 1, + BLOCKDEV_REF_KIND_MAX = 2, +} BlockdevRefKind; + +extern const char *const BlockdevRefKind_lookup[]; + +typedef struct BlockdevRefList BlockdevRefList; + +typedef struct BlockdevSnapshot BlockdevSnapshot; + +typedef struct BlockdevSnapshotInternal BlockdevSnapshotInternal; + +typedef struct BlockdevSnapshotInternalList BlockdevSnapshotInternalList; + +typedef struct BlockdevSnapshotList BlockdevSnapshotList; + +typedef struct ChardevBackend ChardevBackend; + +typedef struct ChardevBackendInfo ChardevBackendInfo; + +typedef struct ChardevBackendInfoList ChardevBackendInfoList; + +typedef enum ChardevBackendKind { + CHARDEV_BACKEND_KIND_FILE = 0, + CHARDEV_BACKEND_KIND_SERIAL = 1, + CHARDEV_BACKEND_KIND_PARALLEL = 2, + CHARDEV_BACKEND_KIND_PIPE = 3, + CHARDEV_BACKEND_KIND_SOCKET = 4, + CHARDEV_BACKEND_KIND_UDP = 5, + CHARDEV_BACKEND_KIND_PTY = 6, + CHARDEV_BACKEND_KIND_NULL = 7, + CHARDEV_BACKEND_KIND_MUX = 8, + CHARDEV_BACKEND_KIND_MSMOUSE = 9, + CHARDEV_BACKEND_KIND_BRAILLE = 10, + CHARDEV_BACKEND_KIND_TESTDEV = 11, + CHARDEV_BACKEND_KIND_STDIO = 12, + CHARDEV_BACKEND_KIND_CONSOLE = 13, + CHARDEV_BACKEND_KIND_SPICEVMC = 14, + CHARDEV_BACKEND_KIND_SPICEPORT = 15, + CHARDEV_BACKEND_KIND_VC = 16, + CHARDEV_BACKEND_KIND_RINGBUF = 17, + CHARDEV_BACKEND_KIND_MEMORY = 18, + CHARDEV_BACKEND_KIND_MAX = 19, +} ChardevBackendKind; + +extern const char *const ChardevBackendKind_lookup[]; + +typedef struct ChardevBackendList ChardevBackendList; + +typedef struct ChardevDummy ChardevDummy; + +typedef struct ChardevDummyList ChardevDummyList; + +typedef struct ChardevFile ChardevFile; + +typedef struct ChardevFileList ChardevFileList; + +typedef struct ChardevHostdev ChardevHostdev; + +typedef struct ChardevHostdevList ChardevHostdevList; + +typedef struct ChardevInfo ChardevInfo; + +typedef struct ChardevInfoList ChardevInfoList; + +typedef struct 
ChardevMux ChardevMux; + +typedef struct ChardevMuxList ChardevMuxList; + +typedef struct ChardevReturn ChardevReturn; + +typedef struct ChardevReturnList ChardevReturnList; + +typedef struct ChardevRingbuf ChardevRingbuf; + +typedef struct ChardevRingbufList ChardevRingbufList; + +typedef struct ChardevSocket ChardevSocket; + +typedef struct ChardevSocketList ChardevSocketList; + +typedef struct ChardevSpiceChannel ChardevSpiceChannel; + +typedef struct ChardevSpiceChannelList ChardevSpiceChannelList; + +typedef struct ChardevSpicePort ChardevSpicePort; + +typedef struct ChardevSpicePortList ChardevSpicePortList; + +typedef struct ChardevStdio ChardevStdio; + +typedef struct ChardevStdioList ChardevStdioList; + +typedef struct ChardevUdp ChardevUdp; + +typedef struct ChardevUdpList ChardevUdpList; + +typedef struct ChardevVC ChardevVC; + +typedef struct ChardevVCList ChardevVCList; + +typedef struct CommandInfo CommandInfo; + +typedef struct CommandInfoList CommandInfoList; + +typedef struct CommandLineOptionInfo CommandLineOptionInfo; + +typedef struct CommandLineOptionInfoList CommandLineOptionInfoList; + +typedef struct CommandLineParameterInfo CommandLineParameterInfo; + +typedef struct CommandLineParameterInfoList CommandLineParameterInfoList; + +typedef enum CommandLineParameterType { + COMMAND_LINE_PARAMETER_TYPE_STRING = 0, + COMMAND_LINE_PARAMETER_TYPE_BOOLEAN = 1, + COMMAND_LINE_PARAMETER_TYPE_NUMBER = 2, + COMMAND_LINE_PARAMETER_TYPE_SIZE = 3, + COMMAND_LINE_PARAMETER_TYPE_MAX = 4, +} CommandLineParameterType; + +extern const char *const CommandLineParameterType_lookup[]; + +typedef struct CommandLineParameterTypeList CommandLineParameterTypeList; + +typedef struct CpuDefinitionInfo CpuDefinitionInfo; + +typedef struct CpuDefinitionInfoList CpuDefinitionInfoList; + +typedef struct CpuInfo CpuInfo; + +typedef struct CpuInfoList CpuInfoList; + +typedef enum DataFormat { + DATA_FORMAT_UTF8 = 0, + DATA_FORMAT_BASE64 = 1, + DATA_FORMAT_MAX = 2, +} DataFormat; + +extern const char *const DataFormat_lookup[]; + +typedef struct DataFormatList DataFormatList; + +typedef struct DevicePropertyInfo DevicePropertyInfo; + +typedef struct DevicePropertyInfoList DevicePropertyInfoList; + +typedef enum DirtyBitmapStatus { + DIRTY_BITMAP_STATUS_ACTIVE = 0, + DIRTY_BITMAP_STATUS_DISABLED = 1, + DIRTY_BITMAP_STATUS_FROZEN = 2, + DIRTY_BITMAP_STATUS_MAX = 3, +} DirtyBitmapStatus; + +extern const char *const DirtyBitmapStatus_lookup[]; + +typedef struct DirtyBitmapStatusList DirtyBitmapStatusList; + +typedef struct DriveBackup DriveBackup; + +typedef struct DriveBackupList DriveBackupList; + +typedef struct DumpGuestMemoryCapability DumpGuestMemoryCapability; + +typedef struct DumpGuestMemoryCapabilityList DumpGuestMemoryCapabilityList; + +typedef enum DumpGuestMemoryFormat { + DUMP_GUEST_MEMORY_FORMAT_ELF = 0, + DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB = 1, + DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO = 2, + DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY = 3, + DUMP_GUEST_MEMORY_FORMAT_MAX = 4, +} DumpGuestMemoryFormat; + +extern const char *const DumpGuestMemoryFormat_lookup[]; + +typedef struct DumpGuestMemoryFormatList DumpGuestMemoryFormatList; + +typedef enum ErrorClass { + ERROR_CLASS_GENERIC_ERROR = 0, + ERROR_CLASS_COMMAND_NOT_FOUND = 1, + ERROR_CLASS_DEVICE_ENCRYPTED = 2, + ERROR_CLASS_DEVICE_NOT_ACTIVE = 3, + ERROR_CLASS_DEVICE_NOT_FOUND = 4, + ERROR_CLASS_KVM_MISSING_CAP = 5, + ERROR_CLASS_MAX = 6, +} ErrorClass; + +extern const char *const ErrorClass_lookup[]; + +typedef struct ErrorClassList ErrorClassList; 
+ +typedef struct EventInfo EventInfo; + +typedef struct EventInfoList EventInfoList; + +typedef struct FdsetFdInfo FdsetFdInfo; + +typedef struct FdsetFdInfoList FdsetFdInfoList; + +typedef struct FdsetInfo FdsetInfo; + +typedef struct FdsetInfoList FdsetInfoList; + +typedef enum GuestPanicAction { + GUEST_PANIC_ACTION_PAUSE = 0, + GUEST_PANIC_ACTION_MAX = 1, +} GuestPanicAction; + +extern const char *const GuestPanicAction_lookup[]; + +typedef struct GuestPanicActionList GuestPanicActionList; + +typedef enum HostMemPolicy { + HOST_MEM_POLICY_DEFAULT = 0, + HOST_MEM_POLICY_PREFERRED = 1, + HOST_MEM_POLICY_BIND = 2, + HOST_MEM_POLICY_INTERLEAVE = 3, + HOST_MEM_POLICY_MAX = 4, +} HostMemPolicy; + +extern const char *const HostMemPolicy_lookup[]; + +typedef struct HostMemPolicyList HostMemPolicyList; + +typedef struct IOThreadInfo IOThreadInfo; + +typedef struct IOThreadInfoList IOThreadInfoList; + +typedef struct ImageCheck ImageCheck; + +typedef struct ImageCheckList ImageCheckList; + +typedef struct ImageInfo ImageInfo; + +typedef struct ImageInfoList ImageInfoList; + +typedef struct ImageInfoSpecific ImageInfoSpecific; + +typedef enum ImageInfoSpecificKind { + IMAGE_INFO_SPECIFIC_KIND_QCOW2 = 0, + IMAGE_INFO_SPECIFIC_KIND_VMDK = 1, + IMAGE_INFO_SPECIFIC_KIND_MAX = 2, +} ImageInfoSpecificKind; + +extern const char *const ImageInfoSpecificKind_lookup[]; + +typedef struct ImageInfoSpecificList ImageInfoSpecificList; + +typedef struct ImageInfoSpecificQCow2 ImageInfoSpecificQCow2; + +typedef struct ImageInfoSpecificQCow2List ImageInfoSpecificQCow2List; + +typedef struct ImageInfoSpecificVmdk ImageInfoSpecificVmdk; + +typedef struct ImageInfoSpecificVmdkList ImageInfoSpecificVmdkList; + +typedef struct InetSocketAddress InetSocketAddress; + +typedef struct InetSocketAddressList InetSocketAddressList; + +typedef enum InputAxis { + INPUT_AXIS_X = 0, + INPUT_AXIS_Y = 1, + INPUT_AXIS_MAX = 2, +} InputAxis; + +extern const char *const InputAxis_lookup[]; + +typedef struct InputAxisList InputAxisList; + +typedef struct InputBtnEvent InputBtnEvent; + +typedef struct InputBtnEventList InputBtnEventList; + +typedef enum InputButton { + INPUT_BUTTON_LEFT = 0, + INPUT_BUTTON_MIDDLE = 1, + INPUT_BUTTON_RIGHT = 2, + INPUT_BUTTON_WHEEL_UP = 3, + INPUT_BUTTON_WHEEL_DOWN = 4, + INPUT_BUTTON_MAX = 5, +} InputButton; + +extern const char *const InputButton_lookup[]; + +typedef struct InputButtonList InputButtonList; + +typedef struct InputEvent InputEvent; + +typedef enum InputEventKind { + INPUT_EVENT_KIND_KEY = 0, + INPUT_EVENT_KIND_BTN = 1, + INPUT_EVENT_KIND_REL = 2, + INPUT_EVENT_KIND_ABS = 3, + INPUT_EVENT_KIND_MAX = 4, +} InputEventKind; + +extern const char *const InputEventKind_lookup[]; + +typedef struct InputEventList InputEventList; + +typedef struct InputKeyEvent InputKeyEvent; + +typedef struct InputKeyEventList InputKeyEventList; + +typedef struct InputMoveEvent InputMoveEvent; + +typedef struct InputMoveEventList InputMoveEventList; + +typedef enum IoOperationType { + IO_OPERATION_TYPE_READ = 0, + IO_OPERATION_TYPE_WRITE = 1, + IO_OPERATION_TYPE_MAX = 2, +} IoOperationType; + +extern const char *const IoOperationType_lookup[]; + +typedef struct IoOperationTypeList IoOperationTypeList; + +typedef enum JSONType { + JSON_TYPE_STRING = 0, + JSON_TYPE_NUMBER = 1, + JSON_TYPE_INT = 2, + JSON_TYPE_BOOLEAN = 3, + JSON_TYPE_NULL = 4, + JSON_TYPE_OBJECT = 5, + JSON_TYPE_ARRAY = 6, + JSON_TYPE_VALUE = 7, + JSON_TYPE_MAX = 8, +} JSONType; + +extern const char *const JSONType_lookup[]; + +typedef struct 
JSONTypeList JSONTypeList; + +typedef struct KeyValue KeyValue; + +typedef enum KeyValueKind { + KEY_VALUE_KIND_NUMBER = 0, + KEY_VALUE_KIND_QCODE = 1, + KEY_VALUE_KIND_MAX = 2, +} KeyValueKind; + +extern const char *const KeyValueKind_lookup[]; + +typedef struct KeyValueList KeyValueList; + +typedef struct KvmInfo KvmInfo; + +typedef struct KvmInfoList KvmInfoList; + +typedef enum LostTickPolicy { + LOST_TICK_POLICY_DISCARD = 0, + LOST_TICK_POLICY_DELAY = 1, + LOST_TICK_POLICY_MERGE = 2, + LOST_TICK_POLICY_SLEW = 3, + LOST_TICK_POLICY_MAX = 4, +} LostTickPolicy; + +extern const char *const LostTickPolicy_lookup[]; + +typedef struct LostTickPolicyList LostTickPolicyList; + +typedef struct MachineInfo MachineInfo; + +typedef struct MachineInfoList MachineInfoList; + +typedef struct Memdev Memdev; + +typedef struct MemdevList MemdevList; + +typedef struct MemoryDeviceInfo MemoryDeviceInfo; + +typedef enum MemoryDeviceInfoKind { + MEMORY_DEVICE_INFO_KIND_DIMM = 0, + MEMORY_DEVICE_INFO_KIND_MAX = 1, +} MemoryDeviceInfoKind; + +extern const char *const MemoryDeviceInfoKind_lookup[]; + +typedef struct MemoryDeviceInfoList MemoryDeviceInfoList; + +typedef enum MigrationCapability { + MIGRATION_CAPABILITY_XBZRLE = 0, + MIGRATION_CAPABILITY_RDMA_PIN_ALL = 1, + MIGRATION_CAPABILITY_AUTO_CONVERGE = 2, + MIGRATION_CAPABILITY_ZERO_BLOCKS = 3, + MIGRATION_CAPABILITY_COMPRESS = 4, + MIGRATION_CAPABILITY_EVENTS = 5, + MIGRATION_CAPABILITY_MAX = 6, +} MigrationCapability; + +extern const char *const MigrationCapability_lookup[]; + +typedef struct MigrationCapabilityList MigrationCapabilityList; + +typedef struct MigrationCapabilityStatus MigrationCapabilityStatus; + +typedef struct MigrationCapabilityStatusList MigrationCapabilityStatusList; + +typedef struct MigrationInfo MigrationInfo; + +typedef struct MigrationInfoList MigrationInfoList; + +typedef enum MigrationParameter { + MIGRATION_PARAMETER_COMPRESS_LEVEL = 0, + MIGRATION_PARAMETER_COMPRESS_THREADS = 1, + MIGRATION_PARAMETER_DECOMPRESS_THREADS = 2, + MIGRATION_PARAMETER_MAX = 3, +} MigrationParameter; + +extern const char *const MigrationParameter_lookup[]; + +typedef struct MigrationParameterList MigrationParameterList; + +typedef struct MigrationParameters MigrationParameters; + +typedef struct MigrationParametersList MigrationParametersList; + +typedef struct MigrationStats MigrationStats; + +typedef struct MigrationStatsList MigrationStatsList; + +typedef enum MigrationStatus { + MIGRATION_STATUS_NONE = 0, + MIGRATION_STATUS_SETUP = 1, + MIGRATION_STATUS_CANCELLING = 2, + MIGRATION_STATUS_CANCELLED = 3, + MIGRATION_STATUS_ACTIVE = 4, + MIGRATION_STATUS_COMPLETED = 5, + MIGRATION_STATUS_FAILED = 6, + MIGRATION_STATUS_MAX = 7, +} MigrationStatus; + +extern const char *const MigrationStatus_lookup[]; + +typedef struct MigrationStatusList MigrationStatusList; + +typedef enum MirrorSyncMode { + MIRROR_SYNC_MODE_TOP = 0, + MIRROR_SYNC_MODE_FULL = 1, + MIRROR_SYNC_MODE_NONE = 2, + MIRROR_SYNC_MODE_INCREMENTAL = 3, + MIRROR_SYNC_MODE_MAX = 4, +} MirrorSyncMode; + +extern const char *const MirrorSyncMode_lookup[]; + +typedef struct MirrorSyncModeList MirrorSyncModeList; + +typedef struct MouseInfo MouseInfo; + +typedef struct MouseInfoList MouseInfoList; + +typedef struct NameInfo NameInfo; + +typedef struct NameInfoList NameInfoList; + +typedef struct NetClientOptions NetClientOptions; + +typedef enum NetClientOptionsKind { + NET_CLIENT_OPTIONS_KIND_NONE = 0, + NET_CLIENT_OPTIONS_KIND_NIC = 1, + NET_CLIENT_OPTIONS_KIND_USER = 2, + 
NET_CLIENT_OPTIONS_KIND_TAP = 3, + NET_CLIENT_OPTIONS_KIND_L2TPV3 = 4, + NET_CLIENT_OPTIONS_KIND_SOCKET = 5, + NET_CLIENT_OPTIONS_KIND_VDE = 6, + NET_CLIENT_OPTIONS_KIND_DUMP = 7, + NET_CLIENT_OPTIONS_KIND_BRIDGE = 8, + NET_CLIENT_OPTIONS_KIND_HUBPORT = 9, + NET_CLIENT_OPTIONS_KIND_NETMAP = 10, + NET_CLIENT_OPTIONS_KIND_VHOST_USER = 11, + NET_CLIENT_OPTIONS_KIND_MAX = 12, +} NetClientOptionsKind; + +extern const char *const NetClientOptionsKind_lookup[]; + +typedef struct NetClientOptionsList NetClientOptionsList; + +typedef struct NetLegacy NetLegacy; + +typedef struct NetLegacyList NetLegacyList; + +typedef struct NetLegacyNicOptions NetLegacyNicOptions; + +typedef struct NetLegacyNicOptionsList NetLegacyNicOptionsList; + +typedef struct Netdev Netdev; + +typedef struct NetdevBridgeOptions NetdevBridgeOptions; + +typedef struct NetdevBridgeOptionsList NetdevBridgeOptionsList; + +typedef struct NetdevDumpOptions NetdevDumpOptions; + +typedef struct NetdevDumpOptionsList NetdevDumpOptionsList; + +typedef struct NetdevHubPortOptions NetdevHubPortOptions; + +typedef struct NetdevHubPortOptionsList NetdevHubPortOptionsList; + +typedef struct NetdevL2TPv3Options NetdevL2TPv3Options; + +typedef struct NetdevL2TPv3OptionsList NetdevL2TPv3OptionsList; + +typedef struct NetdevList NetdevList; + +typedef struct NetdevNetmapOptions NetdevNetmapOptions; + +typedef struct NetdevNetmapOptionsList NetdevNetmapOptionsList; + +typedef struct NetdevNoneOptions NetdevNoneOptions; + +typedef struct NetdevNoneOptionsList NetdevNoneOptionsList; + +typedef struct NetdevSocketOptions NetdevSocketOptions; + +typedef struct NetdevSocketOptionsList NetdevSocketOptionsList; + +typedef struct NetdevTapOptions NetdevTapOptions; + +typedef struct NetdevTapOptionsList NetdevTapOptionsList; + +typedef struct NetdevUserOptions NetdevUserOptions; + +typedef struct NetdevUserOptionsList NetdevUserOptionsList; + +typedef struct NetdevVdeOptions NetdevVdeOptions; + +typedef struct NetdevVdeOptionsList NetdevVdeOptionsList; + +typedef struct NetdevVhostUserOptions NetdevVhostUserOptions; + +typedef struct NetdevVhostUserOptionsList NetdevVhostUserOptionsList; + +typedef enum NetworkAddressFamily { + NETWORK_ADDRESS_FAMILY_IPV4 = 0, + NETWORK_ADDRESS_FAMILY_IPV6 = 1, + NETWORK_ADDRESS_FAMILY_UNIX = 2, + NETWORK_ADDRESS_FAMILY_UNKNOWN = 3, + NETWORK_ADDRESS_FAMILY_MAX = 4, +} NetworkAddressFamily; + +extern const char *const NetworkAddressFamily_lookup[]; + +typedef struct NetworkAddressFamilyList NetworkAddressFamilyList; + +typedef enum NewImageMode { + NEW_IMAGE_MODE_EXISTING = 0, + NEW_IMAGE_MODE_ABSOLUTE_PATHS = 1, + NEW_IMAGE_MODE_MAX = 2, +} NewImageMode; + +extern const char *const NewImageMode_lookup[]; + +typedef struct NewImageModeList NewImageModeList; + +typedef struct NumaNodeOptions NumaNodeOptions; + +typedef struct NumaNodeOptionsList NumaNodeOptionsList; + +typedef struct NumaOptions NumaOptions; + +typedef enum NumaOptionsKind { + NUMA_OPTIONS_KIND_NODE = 0, + NUMA_OPTIONS_KIND_MAX = 1, +} NumaOptionsKind; + +extern const char *const NumaOptionsKind_lookup[]; + +typedef struct NumaOptionsList NumaOptionsList; + +typedef struct ObjectPropertyInfo ObjectPropertyInfo; + +typedef struct ObjectPropertyInfoList ObjectPropertyInfoList; + +typedef struct ObjectTypeInfo ObjectTypeInfo; + +typedef struct ObjectTypeInfoList ObjectTypeInfoList; + +typedef enum OnOffAuto { + ON_OFF_AUTO_AUTO = 0, + ON_OFF_AUTO_ON = 1, + ON_OFF_AUTO_OFF = 2, + ON_OFF_AUTO_MAX = 3, +} OnOffAuto; + +extern const char *const 
OnOffAuto_lookup[]; + +typedef struct OnOffAutoList OnOffAutoList; + +typedef struct PCDIMMDeviceInfo PCDIMMDeviceInfo; + +typedef struct PCDIMMDeviceInfoList PCDIMMDeviceInfoList; + +typedef struct PciBridgeInfo PciBridgeInfo; + +typedef struct PciBridgeInfoList PciBridgeInfoList; + +typedef struct PciBusInfo PciBusInfo; + +typedef struct PciBusInfoList PciBusInfoList; + +typedef struct PciDeviceClass PciDeviceClass; + +typedef struct PciDeviceClassList PciDeviceClassList; + +typedef struct PciDeviceId PciDeviceId; + +typedef struct PciDeviceIdList PciDeviceIdList; + +typedef struct PciDeviceInfo PciDeviceInfo; + +typedef struct PciDeviceInfoList PciDeviceInfoList; + +typedef struct PciInfo PciInfo; + +typedef struct PciInfoList PciInfoList; + +typedef struct PciMemoryRange PciMemoryRange; + +typedef struct PciMemoryRangeList PciMemoryRangeList; + +typedef struct PciMemoryRegion PciMemoryRegion; + +typedef struct PciMemoryRegionList PciMemoryRegionList; + +typedef enum PreallocMode { + PREALLOC_MODE_OFF = 0, + PREALLOC_MODE_METADATA = 1, + PREALLOC_MODE_FALLOC = 2, + PREALLOC_MODE_FULL = 3, + PREALLOC_MODE_MAX = 4, +} PreallocMode; + +extern const char *const PreallocMode_lookup[]; + +typedef struct PreallocModeList PreallocModeList; + +typedef enum QCryptoTLSCredsEndpoint { + QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT = 0, + QCRYPTO_TLS_CREDS_ENDPOINT_SERVER = 1, + QCRYPTO_TLS_CREDS_ENDPOINT_MAX = 2, +} QCryptoTLSCredsEndpoint; + +extern const char *const QCryptoTLSCredsEndpoint_lookup[]; + +typedef struct QCryptoTLSCredsEndpointList QCryptoTLSCredsEndpointList; + +typedef enum QKeyCode { + Q_KEY_CODE_UNMAPPED = 0, + Q_KEY_CODE_SHIFT = 1, + Q_KEY_CODE_SHIFT_R = 2, + Q_KEY_CODE_ALT = 3, + Q_KEY_CODE_ALT_R = 4, + Q_KEY_CODE_ALTGR = 5, + Q_KEY_CODE_ALTGR_R = 6, + Q_KEY_CODE_CTRL = 7, + Q_KEY_CODE_CTRL_R = 8, + Q_KEY_CODE_MENU = 9, + Q_KEY_CODE_ESC = 10, + Q_KEY_CODE_1 = 11, + Q_KEY_CODE_2 = 12, + Q_KEY_CODE_3 = 13, + Q_KEY_CODE_4 = 14, + Q_KEY_CODE_5 = 15, + Q_KEY_CODE_6 = 16, + Q_KEY_CODE_7 = 17, + Q_KEY_CODE_8 = 18, + Q_KEY_CODE_9 = 19, + Q_KEY_CODE_0 = 20, + Q_KEY_CODE_MINUS = 21, + Q_KEY_CODE_EQUAL = 22, + Q_KEY_CODE_BACKSPACE = 23, + Q_KEY_CODE_TAB = 24, + Q_KEY_CODE_Q = 25, + Q_KEY_CODE_W = 26, + Q_KEY_CODE_E = 27, + Q_KEY_CODE_R = 28, + Q_KEY_CODE_T = 29, + Q_KEY_CODE_Y = 30, + Q_KEY_CODE_U = 31, + Q_KEY_CODE_I = 32, + Q_KEY_CODE_O = 33, + Q_KEY_CODE_P = 34, + Q_KEY_CODE_BRACKET_LEFT = 35, + Q_KEY_CODE_BRACKET_RIGHT = 36, + Q_KEY_CODE_RET = 37, + Q_KEY_CODE_A = 38, + Q_KEY_CODE_S = 39, + Q_KEY_CODE_D = 40, + Q_KEY_CODE_F = 41, + Q_KEY_CODE_G = 42, + Q_KEY_CODE_H = 43, + Q_KEY_CODE_J = 44, + Q_KEY_CODE_K = 45, + Q_KEY_CODE_L = 46, + Q_KEY_CODE_SEMICOLON = 47, + Q_KEY_CODE_APOSTROPHE = 48, + Q_KEY_CODE_GRAVE_ACCENT = 49, + Q_KEY_CODE_BACKSLASH = 50, + Q_KEY_CODE_Z = 51, + Q_KEY_CODE_X = 52, + Q_KEY_CODE_C = 53, + Q_KEY_CODE_V = 54, + Q_KEY_CODE_B = 55, + Q_KEY_CODE_N = 56, + Q_KEY_CODE_M = 57, + Q_KEY_CODE_COMMA = 58, + Q_KEY_CODE_DOT = 59, + Q_KEY_CODE_SLASH = 60, + Q_KEY_CODE_ASTERISK = 61, + Q_KEY_CODE_SPC = 62, + Q_KEY_CODE_CAPS_LOCK = 63, + Q_KEY_CODE_F1 = 64, + Q_KEY_CODE_F2 = 65, + Q_KEY_CODE_F3 = 66, + Q_KEY_CODE_F4 = 67, + Q_KEY_CODE_F5 = 68, + Q_KEY_CODE_F6 = 69, + Q_KEY_CODE_F7 = 70, + Q_KEY_CODE_F8 = 71, + Q_KEY_CODE_F9 = 72, + Q_KEY_CODE_F10 = 73, + Q_KEY_CODE_NUM_LOCK = 74, + Q_KEY_CODE_SCROLL_LOCK = 75, + Q_KEY_CODE_KP_DIVIDE = 76, + Q_KEY_CODE_KP_MULTIPLY = 77, + Q_KEY_CODE_KP_SUBTRACT = 78, + Q_KEY_CODE_KP_ADD = 79, + Q_KEY_CODE_KP_ENTER = 80, + Q_KEY_CODE_KP_DECIMAL = 81, + 
Q_KEY_CODE_SYSRQ = 82, + Q_KEY_CODE_KP_0 = 83, + Q_KEY_CODE_KP_1 = 84, + Q_KEY_CODE_KP_2 = 85, + Q_KEY_CODE_KP_3 = 86, + Q_KEY_CODE_KP_4 = 87, + Q_KEY_CODE_KP_5 = 88, + Q_KEY_CODE_KP_6 = 89, + Q_KEY_CODE_KP_7 = 90, + Q_KEY_CODE_KP_8 = 91, + Q_KEY_CODE_KP_9 = 92, + Q_KEY_CODE_LESS = 93, + Q_KEY_CODE_F11 = 94, + Q_KEY_CODE_F12 = 95, + Q_KEY_CODE_PRINT = 96, + Q_KEY_CODE_HOME = 97, + Q_KEY_CODE_PGUP = 98, + Q_KEY_CODE_PGDN = 99, + Q_KEY_CODE_END = 100, + Q_KEY_CODE_LEFT = 101, + Q_KEY_CODE_UP = 102, + Q_KEY_CODE_DOWN = 103, + Q_KEY_CODE_RIGHT = 104, + Q_KEY_CODE_INSERT = 105, + Q_KEY_CODE_DELETE = 106, + Q_KEY_CODE_STOP = 107, + Q_KEY_CODE_AGAIN = 108, + Q_KEY_CODE_PROPS = 109, + Q_KEY_CODE_UNDO = 110, + Q_KEY_CODE_FRONT = 111, + Q_KEY_CODE_COPY = 112, + Q_KEY_CODE_OPEN = 113, + Q_KEY_CODE_PASTE = 114, + Q_KEY_CODE_FIND = 115, + Q_KEY_CODE_CUT = 116, + Q_KEY_CODE_LF = 117, + Q_KEY_CODE_HELP = 118, + Q_KEY_CODE_META_L = 119, + Q_KEY_CODE_META_R = 120, + Q_KEY_CODE_COMPOSE = 121, + Q_KEY_CODE_PAUSE = 122, + Q_KEY_CODE_RO = 123, + Q_KEY_CODE_KP_COMMA = 124, + Q_KEY_CODE_MAX = 125, +} QKeyCode; + +extern const char *const QKeyCode_lookup[]; + +typedef struct QKeyCodeList QKeyCodeList; + +typedef struct Qcow2OverlapCheckFlags Qcow2OverlapCheckFlags; + +typedef struct Qcow2OverlapCheckFlagsList Qcow2OverlapCheckFlagsList; + +typedef enum Qcow2OverlapCheckMode { + QCOW2_OVERLAP_CHECK_MODE_NONE = 0, + QCOW2_OVERLAP_CHECK_MODE_CONSTANT = 1, + QCOW2_OVERLAP_CHECK_MODE_CACHED = 2, + QCOW2_OVERLAP_CHECK_MODE_ALL = 3, + QCOW2_OVERLAP_CHECK_MODE_MAX = 4, +} Qcow2OverlapCheckMode; + +extern const char *const Qcow2OverlapCheckMode_lookup[]; + +typedef struct Qcow2OverlapCheckModeList Qcow2OverlapCheckModeList; + +typedef struct Qcow2OverlapChecks Qcow2OverlapChecks; + +typedef enum Qcow2OverlapChecksKind { + QCOW2_OVERLAP_CHECKS_KIND_FLAGS = 0, + QCOW2_OVERLAP_CHECKS_KIND_MODE = 1, + QCOW2_OVERLAP_CHECKS_KIND_MAX = 2, +} Qcow2OverlapChecksKind; + +extern const char *const Qcow2OverlapChecksKind_lookup[]; + +typedef struct Qcow2OverlapChecksList Qcow2OverlapChecksList; + +typedef enum QuorumReadPattern { + QUORUM_READ_PATTERN_QUORUM = 0, + QUORUM_READ_PATTERN_FIFO = 1, + QUORUM_READ_PATTERN_MAX = 2, +} QuorumReadPattern; + +extern const char *const QuorumReadPattern_lookup[]; + +typedef struct QuorumReadPatternList QuorumReadPatternList; + +typedef struct RockerOfDpaFlow RockerOfDpaFlow; + +typedef struct RockerOfDpaFlowAction RockerOfDpaFlowAction; + +typedef struct RockerOfDpaFlowActionList RockerOfDpaFlowActionList; + +typedef struct RockerOfDpaFlowKey RockerOfDpaFlowKey; + +typedef struct RockerOfDpaFlowKeyList RockerOfDpaFlowKeyList; + +typedef struct RockerOfDpaFlowList RockerOfDpaFlowList; + +typedef struct RockerOfDpaFlowMask RockerOfDpaFlowMask; + +typedef struct RockerOfDpaFlowMaskList RockerOfDpaFlowMaskList; + +typedef struct RockerOfDpaGroup RockerOfDpaGroup; + +typedef struct RockerOfDpaGroupList RockerOfDpaGroupList; + +typedef struct RockerPort RockerPort; + +typedef enum RockerPortAutoneg { + ROCKER_PORT_AUTONEG_OFF = 0, + ROCKER_PORT_AUTONEG_ON = 1, + ROCKER_PORT_AUTONEG_MAX = 2, +} RockerPortAutoneg; + +extern const char *const RockerPortAutoneg_lookup[]; + +typedef struct RockerPortAutonegList RockerPortAutonegList; + +typedef enum RockerPortDuplex { + ROCKER_PORT_DUPLEX_HALF = 0, + ROCKER_PORT_DUPLEX_FULL = 1, + ROCKER_PORT_DUPLEX_MAX = 2, +} RockerPortDuplex; + +extern const char *const RockerPortDuplex_lookup[]; + +typedef struct RockerPortDuplexList RockerPortDuplexList; + +typedef 
struct RockerPortList RockerPortList; + +typedef struct RockerSwitch RockerSwitch; + +typedef struct RockerSwitchList RockerSwitchList; + +typedef enum RunState { + RUN_STATE_DEBUG = 0, + RUN_STATE_INMIGRATE = 1, + RUN_STATE_INTERNAL_ERROR = 2, + RUN_STATE_IO_ERROR = 3, + RUN_STATE_PAUSED = 4, + RUN_STATE_POSTMIGRATE = 5, + RUN_STATE_PRELAUNCH = 6, + RUN_STATE_FINISH_MIGRATE = 7, + RUN_STATE_RESTORE_VM = 8, + RUN_STATE_RUNNING = 9, + RUN_STATE_SAVE_VM = 10, + RUN_STATE_SHUTDOWN = 11, + RUN_STATE_SUSPENDED = 12, + RUN_STATE_WATCHDOG = 13, + RUN_STATE_GUEST_PANICKED = 14, + RUN_STATE_MAX = 15, +} RunState; + +extern const char *const RunState_lookup[]; + +typedef struct RunStateList RunStateList; + +typedef struct RxFilterInfo RxFilterInfo; + +typedef struct RxFilterInfoList RxFilterInfoList; + +typedef enum RxState { + RX_STATE_NORMAL = 0, + RX_STATE_NONE = 1, + RX_STATE_ALL = 2, + RX_STATE_MAX = 3, +} RxState; + +extern const char *const RxState_lookup[]; + +typedef struct RxStateList RxStateList; + +typedef struct SchemaInfo SchemaInfo; + +typedef struct SchemaInfoAlternate SchemaInfoAlternate; + +typedef struct SchemaInfoAlternateList SchemaInfoAlternateList; + +typedef struct SchemaInfoAlternateMember SchemaInfoAlternateMember; + +typedef struct SchemaInfoAlternateMemberList SchemaInfoAlternateMemberList; + +typedef struct SchemaInfoArray SchemaInfoArray; + +typedef struct SchemaInfoArrayList SchemaInfoArrayList; + +typedef struct SchemaInfoBase SchemaInfoBase; + +typedef struct SchemaInfoBaseList SchemaInfoBaseList; + +typedef struct SchemaInfoBuiltin SchemaInfoBuiltin; + +typedef struct SchemaInfoBuiltinList SchemaInfoBuiltinList; + +typedef struct SchemaInfoCommand SchemaInfoCommand; + +typedef struct SchemaInfoCommandList SchemaInfoCommandList; + +typedef struct SchemaInfoEnum SchemaInfoEnum; + +typedef struct SchemaInfoEnumList SchemaInfoEnumList; + +typedef struct SchemaInfoEvent SchemaInfoEvent; + +typedef struct SchemaInfoEventList SchemaInfoEventList; + +typedef struct SchemaInfoList SchemaInfoList; + +typedef struct SchemaInfoObject SchemaInfoObject; + +typedef struct SchemaInfoObjectList SchemaInfoObjectList; + +typedef struct SchemaInfoObjectMember SchemaInfoObjectMember; + +typedef struct SchemaInfoObjectMemberList SchemaInfoObjectMemberList; + +typedef struct SchemaInfoObjectVariant SchemaInfoObjectVariant; + +typedef struct SchemaInfoObjectVariantList SchemaInfoObjectVariantList; + +typedef enum SchemaMetaType { + SCHEMA_META_TYPE_BUILTIN = 0, + SCHEMA_META_TYPE_ENUM = 1, + SCHEMA_META_TYPE_ARRAY = 2, + SCHEMA_META_TYPE_OBJECT = 3, + SCHEMA_META_TYPE_ALTERNATE = 4, + SCHEMA_META_TYPE_COMMAND = 5, + SCHEMA_META_TYPE_EVENT = 6, + SCHEMA_META_TYPE_MAX = 7, +} SchemaMetaType; + +extern const char *const SchemaMetaType_lookup[]; + +typedef struct SchemaMetaTypeList SchemaMetaTypeList; + +typedef struct SnapshotInfo SnapshotInfo; + +typedef struct SnapshotInfoList SnapshotInfoList; + +typedef struct SocketAddress SocketAddress; + +typedef enum SocketAddressKind { + SOCKET_ADDRESS_KIND_INET = 0, + SOCKET_ADDRESS_KIND_UNIX = 1, + SOCKET_ADDRESS_KIND_FD = 2, + SOCKET_ADDRESS_KIND_MAX = 3, +} SocketAddressKind; + +extern const char *const SocketAddressKind_lookup[]; + +typedef struct SocketAddressList SocketAddressList; + +typedef struct SpiceBasicInfo SpiceBasicInfo; + +typedef struct SpiceBasicInfoList SpiceBasicInfoList; + +typedef struct SpiceChannel SpiceChannel; + +typedef struct SpiceChannelList SpiceChannelList; + +typedef struct SpiceInfo SpiceInfo; + +typedef struct 
SpiceInfoList SpiceInfoList; + +typedef enum SpiceQueryMouseMode { + SPICE_QUERY_MOUSE_MODE_CLIENT = 0, + SPICE_QUERY_MOUSE_MODE_SERVER = 1, + SPICE_QUERY_MOUSE_MODE_UNKNOWN = 2, + SPICE_QUERY_MOUSE_MODE_MAX = 3, +} SpiceQueryMouseMode; + +extern const char *const SpiceQueryMouseMode_lookup[]; + +typedef struct SpiceQueryMouseModeList SpiceQueryMouseModeList; + +typedef struct SpiceServerInfo SpiceServerInfo; + +typedef struct SpiceServerInfoList SpiceServerInfoList; + +typedef struct StatusInfo StatusInfo; + +typedef struct StatusInfoList StatusInfoList; + +typedef struct String String; + +typedef struct StringList StringList; + +typedef struct TPMInfo TPMInfo; + +typedef struct TPMInfoList TPMInfoList; + +typedef struct TPMPassthroughOptions TPMPassthroughOptions; + +typedef struct TPMPassthroughOptionsList TPMPassthroughOptionsList; + +typedef struct TargetInfo TargetInfo; + +typedef struct TargetInfoList TargetInfoList; + +typedef enum TpmModel { + TPM_MODEL_TPM_TIS = 0, + TPM_MODEL_MAX = 1, +} TpmModel; + +extern const char *const TpmModel_lookup[]; + +typedef struct TpmModelList TpmModelList; + +typedef enum TpmType { + TPM_TYPE_PASSTHROUGH = 0, + TPM_TYPE_MAX = 1, +} TpmType; + +extern const char *const TpmType_lookup[]; + +typedef struct TpmTypeList TpmTypeList; + +typedef struct TpmTypeOptions TpmTypeOptions; + +typedef enum TpmTypeOptionsKind { + TPM_TYPE_OPTIONS_KIND_PASSTHROUGH = 0, + TPM_TYPE_OPTIONS_KIND_MAX = 1, +} TpmTypeOptionsKind; + +extern const char *const TpmTypeOptionsKind_lookup[]; + +typedef struct TpmTypeOptionsList TpmTypeOptionsList; + +typedef struct TraceEventInfo TraceEventInfo; + +typedef struct TraceEventInfoList TraceEventInfoList; + +typedef enum TraceEventState { + TRACE_EVENT_STATE_UNAVAILABLE = 0, + TRACE_EVENT_STATE_DISABLED = 1, + TRACE_EVENT_STATE_ENABLED = 2, + TRACE_EVENT_STATE_MAX = 3, +} TraceEventState; + +extern const char *const TraceEventState_lookup[]; + +typedef struct TraceEventStateList TraceEventStateList; + +typedef struct TransactionAction TransactionAction; + +typedef enum TransactionActionKind { + TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC = 0, + TRANSACTION_ACTION_KIND_DRIVE_BACKUP = 1, + TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP = 2, + TRANSACTION_ACTION_KIND_ABORT = 3, + TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC = 4, + TRANSACTION_ACTION_KIND_MAX = 5, +} TransactionActionKind; + +extern const char *const TransactionActionKind_lookup[]; + +typedef struct TransactionActionList TransactionActionList; + +typedef struct UnixSocketAddress UnixSocketAddress; + +typedef struct UnixSocketAddressList UnixSocketAddressList; + +typedef struct UuidInfo UuidInfo; + +typedef struct UuidInfoList UuidInfoList; + +typedef struct VersionInfo VersionInfo; + +typedef struct VersionInfoList VersionInfoList; + +typedef struct VersionTriple VersionTriple; + +typedef struct VersionTripleList VersionTripleList; + +typedef struct VncBasicInfo VncBasicInfo; + +typedef struct VncBasicInfoList VncBasicInfoList; + +typedef struct VncClientInfo VncClientInfo; + +typedef struct VncClientInfoList VncClientInfoList; + +typedef struct VncInfo VncInfo; + +typedef struct VncInfo2 VncInfo2; + +typedef struct VncInfo2List VncInfo2List; + +typedef struct VncInfoList VncInfoList; + +typedef enum VncPrimaryAuth { + VNC_PRIMARY_AUTH_NONE = 0, + VNC_PRIMARY_AUTH_VNC = 1, + VNC_PRIMARY_AUTH_RA2 = 2, + VNC_PRIMARY_AUTH_RA2NE = 3, + VNC_PRIMARY_AUTH_TIGHT = 4, + VNC_PRIMARY_AUTH_ULTRA = 5, + VNC_PRIMARY_AUTH_TLS = 6, + VNC_PRIMARY_AUTH_VENCRYPT = 7, + 
VNC_PRIMARY_AUTH_SASL = 8, + VNC_PRIMARY_AUTH_MAX = 9, +} VncPrimaryAuth; + +extern const char *const VncPrimaryAuth_lookup[]; + +typedef struct VncPrimaryAuthList VncPrimaryAuthList; + +typedef struct VncServerInfo VncServerInfo; + +typedef struct VncServerInfoList VncServerInfoList; + +typedef enum VncVencryptSubAuth { + VNC_VENCRYPT_SUB_AUTH_PLAIN = 0, + VNC_VENCRYPT_SUB_AUTH_TLS_NONE = 1, + VNC_VENCRYPT_SUB_AUTH_X509_NONE = 2, + VNC_VENCRYPT_SUB_AUTH_TLS_VNC = 3, + VNC_VENCRYPT_SUB_AUTH_X509_VNC = 4, + VNC_VENCRYPT_SUB_AUTH_TLS_PLAIN = 5, + VNC_VENCRYPT_SUB_AUTH_X509_PLAIN = 6, + VNC_VENCRYPT_SUB_AUTH_TLS_SASL = 7, + VNC_VENCRYPT_SUB_AUTH_X509_SASL = 8, + VNC_VENCRYPT_SUB_AUTH_MAX = 9, +} VncVencryptSubAuth; + +extern const char *const VncVencryptSubAuth_lookup[]; + +typedef struct VncVencryptSubAuthList VncVencryptSubAuthList; + +typedef enum WatchdogExpirationAction { + WATCHDOG_EXPIRATION_ACTION_RESET = 0, + WATCHDOG_EXPIRATION_ACTION_SHUTDOWN = 1, + WATCHDOG_EXPIRATION_ACTION_POWEROFF = 2, + WATCHDOG_EXPIRATION_ACTION_PAUSE = 3, + WATCHDOG_EXPIRATION_ACTION_DEBUG = 4, + WATCHDOG_EXPIRATION_ACTION_NONE = 5, + WATCHDOG_EXPIRATION_ACTION_INJECT_NMI = 6, + WATCHDOG_EXPIRATION_ACTION_MAX = 7, +} WatchdogExpirationAction; + +extern const char *const WatchdogExpirationAction_lookup[]; + +typedef struct WatchdogExpirationActionList WatchdogExpirationActionList; + +typedef struct X86CPUFeatureWordInfo X86CPUFeatureWordInfo; + +typedef struct X86CPUFeatureWordInfoList X86CPUFeatureWordInfoList; + +typedef enum X86CPURegister32 { + X86_CPU_REGISTER32_EAX = 0, + X86_CPU_REGISTER32_EBX = 1, + X86_CPU_REGISTER32_ECX = 2, + X86_CPU_REGISTER32_EDX = 3, + X86_CPU_REGISTER32_ESP = 4, + X86_CPU_REGISTER32_EBP = 5, + X86_CPU_REGISTER32_ESI = 6, + X86_CPU_REGISTER32_EDI = 7, + X86_CPU_REGISTER32_MAX = 8, +} X86CPURegister32; + +extern const char *const X86CPURegister32_lookup[]; + +typedef struct X86CPURegister32List X86CPURegister32List; + +typedef struct XBZRLECacheStats XBZRLECacheStats; + +typedef struct XBZRLECacheStatsList XBZRLECacheStatsList; + +struct ACPIOSTInfo { + bool has_device; + char *device; + char *slot; + ACPISlotType slot_type; + int64_t source; + int64_t status; +}; + +void qapi_free_ACPIOSTInfo(ACPIOSTInfo *obj); + +struct ACPIOSTInfoList { + union { + ACPIOSTInfo *value; + uint64_t padding; + }; + ACPIOSTInfoList *next; +}; + +void qapi_free_ACPIOSTInfoList(ACPIOSTInfoList *obj); + +struct ACPISlotTypeList { + union { + ACPISlotType value; + uint64_t padding; + }; + ACPISlotTypeList *next; +}; + +void qapi_free_ACPISlotTypeList(ACPISlotTypeList *obj); + +struct Abort { + char qapi_dummy_field_for_empty_struct; +}; + +void qapi_free_Abort(Abort *obj); + +struct AbortList { + union { + Abort *value; + uint64_t padding; + }; + AbortList *next; +}; + +void qapi_free_AbortList(AbortList *obj); + +struct AcpiTableOptions { + bool has_sig; + char *sig; + bool has_rev; + uint8_t rev; + bool has_oem_id; + char *oem_id; + bool has_oem_table_id; + char *oem_table_id; + bool has_oem_rev; + uint32_t oem_rev; + bool has_asl_compiler_id; + char *asl_compiler_id; + bool has_asl_compiler_rev; + uint32_t asl_compiler_rev; + bool has_file; + char *file; + bool has_data; + char *data; +}; + +void qapi_free_AcpiTableOptions(AcpiTableOptions *obj); + +struct AcpiTableOptionsList { + union { + AcpiTableOptions *value; + uint64_t padding; + }; + AcpiTableOptionsList *next; +}; + +void qapi_free_AcpiTableOptionsList(AcpiTableOptionsList *obj); + +struct AddfdInfo { + int64_t fdset_id; + int64_t fd; +}; + 
+void qapi_free_AddfdInfo(AddfdInfo *obj); + +struct AddfdInfoList { + union { + AddfdInfo *value; + uint64_t padding; + }; + AddfdInfoList *next; +}; + +void qapi_free_AddfdInfoList(AddfdInfoList *obj); + +struct BalloonInfo { + int64_t actual; +}; + +void qapi_free_BalloonInfo(BalloonInfo *obj); + +struct BalloonInfoList { + union { + BalloonInfo *value; + uint64_t padding; + }; + BalloonInfoList *next; +}; + +void qapi_free_BalloonInfoList(BalloonInfoList *obj); + +struct BiosAtaTranslationList { + union { + BiosAtaTranslation value; + uint64_t padding; + }; + BiosAtaTranslationList *next; +}; + +void qapi_free_BiosAtaTranslationList(BiosAtaTranslationList *obj); + +struct BlkdebugEventList { + union { + BlkdebugEvent value; + uint64_t padding; + }; + BlkdebugEventList *next; +}; + +void qapi_free_BlkdebugEventList(BlkdebugEventList *obj); + +struct BlkdebugInjectErrorOptions { + BlkdebugEvent event; + bool has_state; + int64_t state; + bool has_q_errno; + int64_t q_errno; + bool has_sector; + int64_t sector; + bool has_once; + bool once; + bool has_immediately; + bool immediately; +}; + +void qapi_free_BlkdebugInjectErrorOptions(BlkdebugInjectErrorOptions *obj); + +struct BlkdebugInjectErrorOptionsList { + union { + BlkdebugInjectErrorOptions *value; + uint64_t padding; + }; + BlkdebugInjectErrorOptionsList *next; +}; + +void qapi_free_BlkdebugInjectErrorOptionsList(BlkdebugInjectErrorOptionsList *obj); + +struct BlkdebugSetStateOptions { + BlkdebugEvent event; + bool has_state; + int64_t state; + int64_t new_state; +}; + +void qapi_free_BlkdebugSetStateOptions(BlkdebugSetStateOptions *obj); + +struct BlkdebugSetStateOptionsList { + union { + BlkdebugSetStateOptions *value; + uint64_t padding; + }; + BlkdebugSetStateOptionsList *next; +}; + +void qapi_free_BlkdebugSetStateOptionsList(BlkdebugSetStateOptionsList *obj); + +struct BlockDeviceInfo { + char *file; + bool has_node_name; + char *node_name; + bool ro; + char *drv; + bool has_backing_file; + char *backing_file; + int64_t backing_file_depth; + bool encrypted; + bool encryption_key_missing; + BlockdevDetectZeroesOptions detect_zeroes; + int64_t bps; + int64_t bps_rd; + int64_t bps_wr; + int64_t iops; + int64_t iops_rd; + int64_t iops_wr; + ImageInfo *image; + bool has_bps_max; + int64_t bps_max; + bool has_bps_rd_max; + int64_t bps_rd_max; + bool has_bps_wr_max; + int64_t bps_wr_max; + bool has_iops_max; + int64_t iops_max; + bool has_iops_rd_max; + int64_t iops_rd_max; + bool has_iops_wr_max; + int64_t iops_wr_max; + bool has_iops_size; + int64_t iops_size; + bool has_group; + char *group; + BlockdevCacheInfo *cache; + int64_t write_threshold; +}; + +void qapi_free_BlockDeviceInfo(BlockDeviceInfo *obj); + +struct BlockDeviceInfoList { + union { + BlockDeviceInfo *value; + uint64_t padding; + }; + BlockDeviceInfoList *next; +}; + +void qapi_free_BlockDeviceInfoList(BlockDeviceInfoList *obj); + +struct BlockDeviceIoStatusList { + union { + BlockDeviceIoStatus value; + uint64_t padding; + }; + BlockDeviceIoStatusList *next; +}; + +void qapi_free_BlockDeviceIoStatusList(BlockDeviceIoStatusList *obj); + +struct BlockDeviceMapEntry { + int64_t start; + int64_t length; + int64_t depth; + bool zero; + bool data; + bool has_offset; + int64_t offset; +}; + +void qapi_free_BlockDeviceMapEntry(BlockDeviceMapEntry *obj); + +struct BlockDeviceMapEntryList { + union { + BlockDeviceMapEntry *value; + uint64_t padding; + }; + BlockDeviceMapEntryList *next; +}; + +void qapi_free_BlockDeviceMapEntryList(BlockDeviceMapEntryList *obj); + +struct 
BlockDeviceStats { + int64_t rd_bytes; + int64_t wr_bytes; + int64_t rd_operations; + int64_t wr_operations; + int64_t flush_operations; + int64_t flush_total_time_ns; + int64_t wr_total_time_ns; + int64_t rd_total_time_ns; + int64_t wr_highest_offset; + int64_t rd_merged; + int64_t wr_merged; +}; + +void qapi_free_BlockDeviceStats(BlockDeviceStats *obj); + +struct BlockDeviceStatsList { + union { + BlockDeviceStats *value; + uint64_t padding; + }; + BlockDeviceStatsList *next; +}; + +void qapi_free_BlockDeviceStatsList(BlockDeviceStatsList *obj); + +struct BlockDirtyBitmap { + char *node; + char *name; +}; + +void qapi_free_BlockDirtyBitmap(BlockDirtyBitmap *obj); + +struct BlockDirtyBitmapAdd { + char *node; + char *name; + bool has_granularity; + uint32_t granularity; +}; + +void qapi_free_BlockDirtyBitmapAdd(BlockDirtyBitmapAdd *obj); + +struct BlockDirtyBitmapAddList { + union { + BlockDirtyBitmapAdd *value; + uint64_t padding; + }; + BlockDirtyBitmapAddList *next; +}; + +void qapi_free_BlockDirtyBitmapAddList(BlockDirtyBitmapAddList *obj); + +struct BlockDirtyBitmapList { + union { + BlockDirtyBitmap *value; + uint64_t padding; + }; + BlockDirtyBitmapList *next; +}; + +void qapi_free_BlockDirtyBitmapList(BlockDirtyBitmapList *obj); + +struct BlockDirtyInfo { + bool has_name; + char *name; + int64_t count; + uint32_t granularity; + DirtyBitmapStatus status; +}; + +void qapi_free_BlockDirtyInfo(BlockDirtyInfo *obj); + +struct BlockDirtyInfoList { + union { + BlockDirtyInfo *value; + uint64_t padding; + }; + BlockDirtyInfoList *next; +}; + +void qapi_free_BlockDirtyInfoList(BlockDirtyInfoList *obj); + +struct BlockErrorActionList { + union { + BlockErrorAction value; + uint64_t padding; + }; + BlockErrorActionList *next; +}; + +void qapi_free_BlockErrorActionList(BlockErrorActionList *obj); + +struct BlockInfo { + char *device; + char *type; + bool removable; + bool locked; + bool has_inserted; + BlockDeviceInfo *inserted; + bool has_tray_open; + bool tray_open; + bool has_io_status; + BlockDeviceIoStatus io_status; + bool has_dirty_bitmaps; + BlockDirtyInfoList *dirty_bitmaps; +}; + +void qapi_free_BlockInfo(BlockInfo *obj); + +struct BlockInfoList { + union { + BlockInfo *value; + uint64_t padding; + }; + BlockInfoList *next; +}; + +void qapi_free_BlockInfoList(BlockInfoList *obj); + +struct BlockJobInfo { + char *type; + char *device; + int64_t len; + int64_t offset; + bool busy; + bool paused; + int64_t speed; + BlockDeviceIoStatus io_status; + bool ready; +}; + +void qapi_free_BlockJobInfo(BlockJobInfo *obj); + +struct BlockJobInfoList { + union { + BlockJobInfo *value; + uint64_t padding; + }; + BlockJobInfoList *next; +}; + +void qapi_free_BlockJobInfoList(BlockJobInfoList *obj); + +struct BlockJobTypeList { + union { + BlockJobType value; + uint64_t padding; + }; + BlockJobTypeList *next; +}; + +void qapi_free_BlockJobTypeList(BlockJobTypeList *obj); + +struct BlockStats { + bool has_device; + char *device; + bool has_node_name; + char *node_name; + BlockDeviceStats *stats; + bool has_parent; + BlockStats *parent; + bool has_backing; + BlockStats *backing; +}; + +void qapi_free_BlockStats(BlockStats *obj); + +struct BlockStatsList { + union { + BlockStats *value; + uint64_t padding; + }; + BlockStatsList *next; +}; + +void qapi_free_BlockStatsList(BlockStatsList *obj); + +struct BlockdevAioOptionsList { + union { + BlockdevAioOptions value; + uint64_t padding; + }; + BlockdevAioOptionsList *next; +}; + +void qapi_free_BlockdevAioOptionsList(BlockdevAioOptionsList *obj); + 
+struct BlockdevBackup { + char *device; + char *target; + MirrorSyncMode sync; + bool has_speed; + int64_t speed; + bool has_on_source_error; + BlockdevOnError on_source_error; + bool has_on_target_error; + BlockdevOnError on_target_error; +}; + +void qapi_free_BlockdevBackup(BlockdevBackup *obj); + +struct BlockdevBackupList { + union { + BlockdevBackup *value; + uint64_t padding; + }; + BlockdevBackupList *next; +}; + +void qapi_free_BlockdevBackupList(BlockdevBackupList *obj); + +struct BlockdevCacheInfo { + bool writeback; + bool direct; + bool no_flush; +}; + +void qapi_free_BlockdevCacheInfo(BlockdevCacheInfo *obj); + +struct BlockdevCacheInfoList { + union { + BlockdevCacheInfo *value; + uint64_t padding; + }; + BlockdevCacheInfoList *next; +}; + +void qapi_free_BlockdevCacheInfoList(BlockdevCacheInfoList *obj); + +struct BlockdevCacheOptions { + bool has_writeback; + bool writeback; + bool has_direct; + bool direct; + bool has_no_flush; + bool no_flush; +}; + +void qapi_free_BlockdevCacheOptions(BlockdevCacheOptions *obj); + +struct BlockdevCacheOptionsList { + union { + BlockdevCacheOptions *value; + uint64_t padding; + }; + BlockdevCacheOptionsList *next; +}; + +void qapi_free_BlockdevCacheOptionsList(BlockdevCacheOptionsList *obj); + +struct BlockdevDetectZeroesOptionsList { + union { + BlockdevDetectZeroesOptions value; + uint64_t padding; + }; + BlockdevDetectZeroesOptionsList *next; +}; + +void qapi_free_BlockdevDetectZeroesOptionsList(BlockdevDetectZeroesOptionsList *obj); + +struct BlockdevDiscardOptionsList { + union { + BlockdevDiscardOptions value; + uint64_t padding; + }; + BlockdevDiscardOptionsList *next; +}; + +void qapi_free_BlockdevDiscardOptionsList(BlockdevDiscardOptionsList *obj); + +struct BlockdevDriverList { + union { + BlockdevDriver value; + uint64_t padding; + }; + BlockdevDriverList *next; +}; + +void qapi_free_BlockdevDriverList(BlockdevDriverList *obj); + +struct BlockdevOnErrorList { + union { + BlockdevOnError value; + uint64_t padding; + }; + BlockdevOnErrorList *next; +}; + +void qapi_free_BlockdevOnErrorList(BlockdevOnErrorList *obj); + +struct BlockdevOptions { + /* Members inherited from BlockdevOptionsBase: */ + BlockdevDriver driver; + bool has_id; + char *id; + bool has_node_name; + char *node_name; + bool has_discard; + BlockdevDiscardOptions discard; + bool has_cache; + BlockdevCacheOptions *cache; + bool has_aio; + BlockdevAioOptions aio; + bool has_rerror; + BlockdevOnError rerror; + bool has_werror; + BlockdevOnError werror; + bool has_read_only; + bool read_only; + bool has_detect_zeroes; + BlockdevDetectZeroesOptions detect_zeroes; + /* Own members: */ + union { /* union tag is @driver */ + void *data; + BlockdevOptionsArchipelago *archipelago; + BlockdevOptionsBlkdebug *blkdebug; + BlockdevOptionsBlkverify *blkverify; + BlockdevOptionsGenericFormat *bochs; + BlockdevOptionsGenericFormat *cloop; + BlockdevOptionsGenericFormat *dmg; + BlockdevOptionsFile *file; + BlockdevOptionsFile *ftp; + BlockdevOptionsFile *ftps; + BlockdevOptionsFile *host_cdrom; + BlockdevOptionsFile *host_device; + BlockdevOptionsFile *host_floppy; + BlockdevOptionsFile *http; + BlockdevOptionsFile *https; + BlockdevOptionsNull *null_aio; + BlockdevOptionsNull *null_co; + BlockdevOptionsGenericFormat *parallels; + BlockdevOptionsQcow2 *qcow2; + BlockdevOptionsGenericCOWFormat *qcow; + BlockdevOptionsGenericCOWFormat *qed; + BlockdevOptionsQuorum *quorum; + BlockdevOptionsGenericFormat *raw; + BlockdevOptionsFile *tftp; + BlockdevOptionsGenericFormat *vdi; + 
BlockdevOptionsGenericFormat *vhdx; + BlockdevOptionsGenericCOWFormat *vmdk; + BlockdevOptionsGenericFormat *vpc; + BlockdevOptionsVVFAT *vvfat; + }; +}; + +void qapi_free_BlockdevOptions(BlockdevOptions *obj); + +struct BlockdevOptionsArchipelago { + char *volume; + bool has_mport; + int64_t mport; + bool has_vport; + int64_t vport; + bool has_segment; + char *segment; +}; + +void qapi_free_BlockdevOptionsArchipelago(BlockdevOptionsArchipelago *obj); + +struct BlockdevOptionsArchipelagoList { + union { + BlockdevOptionsArchipelago *value; + uint64_t padding; + }; + BlockdevOptionsArchipelagoList *next; +}; + +void qapi_free_BlockdevOptionsArchipelagoList(BlockdevOptionsArchipelagoList *obj); + +struct BlockdevOptionsBase { + BlockdevDriver driver; + bool has_id; + char *id; + bool has_node_name; + char *node_name; + bool has_discard; + BlockdevDiscardOptions discard; + bool has_cache; + BlockdevCacheOptions *cache; + bool has_aio; + BlockdevAioOptions aio; + bool has_rerror; + BlockdevOnError rerror; + bool has_werror; + BlockdevOnError werror; + bool has_read_only; + bool read_only; + bool has_detect_zeroes; + BlockdevDetectZeroesOptions detect_zeroes; +}; + +void qapi_free_BlockdevOptionsBase(BlockdevOptionsBase *obj); + +struct BlockdevOptionsBaseList { + union { + BlockdevOptionsBase *value; + uint64_t padding; + }; + BlockdevOptionsBaseList *next; +}; + +void qapi_free_BlockdevOptionsBaseList(BlockdevOptionsBaseList *obj); + +struct BlockdevOptionsBlkdebug { + BlockdevRef *image; + bool has_config; + char *config; + bool has_align; + int64_t align; + bool has_inject_error; + BlkdebugInjectErrorOptionsList *inject_error; + bool has_set_state; + BlkdebugSetStateOptionsList *set_state; +}; + +void qapi_free_BlockdevOptionsBlkdebug(BlockdevOptionsBlkdebug *obj); + +struct BlockdevOptionsBlkdebugList { + union { + BlockdevOptionsBlkdebug *value; + uint64_t padding; + }; + BlockdevOptionsBlkdebugList *next; +}; + +void qapi_free_BlockdevOptionsBlkdebugList(BlockdevOptionsBlkdebugList *obj); + +struct BlockdevOptionsBlkverify { + BlockdevRef *test; + BlockdevRef *raw; +}; + +void qapi_free_BlockdevOptionsBlkverify(BlockdevOptionsBlkverify *obj); + +struct BlockdevOptionsBlkverifyList { + union { + BlockdevOptionsBlkverify *value; + uint64_t padding; + }; + BlockdevOptionsBlkverifyList *next; +}; + +void qapi_free_BlockdevOptionsBlkverifyList(BlockdevOptionsBlkverifyList *obj); + +struct BlockdevOptionsFile { + char *filename; +}; + +void qapi_free_BlockdevOptionsFile(BlockdevOptionsFile *obj); + +struct BlockdevOptionsFileList { + union { + BlockdevOptionsFile *value; + uint64_t padding; + }; + BlockdevOptionsFileList *next; +}; + +void qapi_free_BlockdevOptionsFileList(BlockdevOptionsFileList *obj); + +struct BlockdevOptionsGenericCOWFormat { + BlockdevOptionsGenericFormat *base; + bool has_backing; + BlockdevRef *backing; +}; + +void qapi_free_BlockdevOptionsGenericCOWFormat(BlockdevOptionsGenericCOWFormat *obj); + +struct BlockdevOptionsGenericCOWFormatList { + union { + BlockdevOptionsGenericCOWFormat *value; + uint64_t padding; + }; + BlockdevOptionsGenericCOWFormatList *next; +}; + +void qapi_free_BlockdevOptionsGenericCOWFormatList(BlockdevOptionsGenericCOWFormatList *obj); + +struct BlockdevOptionsGenericFormat { + BlockdevRef *file; +}; + +void qapi_free_BlockdevOptionsGenericFormat(BlockdevOptionsGenericFormat *obj); + +struct BlockdevOptionsGenericFormatList { + union { + BlockdevOptionsGenericFormat *value; + uint64_t padding; + }; + BlockdevOptionsGenericFormatList *next; +}; + 
+void qapi_free_BlockdevOptionsGenericFormatList(BlockdevOptionsGenericFormatList *obj); + +struct BlockdevOptionsList { + union { + BlockdevOptions *value; + uint64_t padding; + }; + BlockdevOptionsList *next; +}; + +void qapi_free_BlockdevOptionsList(BlockdevOptionsList *obj); + +struct BlockdevOptionsNull { + bool has_size; + int64_t size; + bool has_latency_ns; + uint64_t latency_ns; +}; + +void qapi_free_BlockdevOptionsNull(BlockdevOptionsNull *obj); + +struct BlockdevOptionsNullList { + union { + BlockdevOptionsNull *value; + uint64_t padding; + }; + BlockdevOptionsNullList *next; +}; + +void qapi_free_BlockdevOptionsNullList(BlockdevOptionsNullList *obj); + +struct BlockdevOptionsQcow2 { + BlockdevOptionsGenericCOWFormat *base; + bool has_lazy_refcounts; + bool lazy_refcounts; + bool has_pass_discard_request; + bool pass_discard_request; + bool has_pass_discard_snapshot; + bool pass_discard_snapshot; + bool has_pass_discard_other; + bool pass_discard_other; + bool has_overlap_check; + Qcow2OverlapChecks *overlap_check; + bool has_cache_size; + int64_t cache_size; + bool has_l2_cache_size; + int64_t l2_cache_size; + bool has_refcount_cache_size; + int64_t refcount_cache_size; + bool has_cache_clean_interval; + int64_t cache_clean_interval; +}; + +void qapi_free_BlockdevOptionsQcow2(BlockdevOptionsQcow2 *obj); + +struct BlockdevOptionsQcow2List { + union { + BlockdevOptionsQcow2 *value; + uint64_t padding; + }; + BlockdevOptionsQcow2List *next; +}; + +void qapi_free_BlockdevOptionsQcow2List(BlockdevOptionsQcow2List *obj); + +struct BlockdevOptionsQuorum { + bool has_blkverify; + bool blkverify; + BlockdevRefList *children; + int64_t vote_threshold; + bool has_rewrite_corrupted; + bool rewrite_corrupted; + bool has_read_pattern; + QuorumReadPattern read_pattern; +}; + +void qapi_free_BlockdevOptionsQuorum(BlockdevOptionsQuorum *obj); + +struct BlockdevOptionsQuorumList { + union { + BlockdevOptionsQuorum *value; + uint64_t padding; + }; + BlockdevOptionsQuorumList *next; +}; + +void qapi_free_BlockdevOptionsQuorumList(BlockdevOptionsQuorumList *obj); + +struct BlockdevOptionsVVFAT { + char *dir; + bool has_fat_type; + int64_t fat_type; + bool has_floppy; + bool floppy; + bool has_label; + char *label; + bool has_rw; + bool rw; +}; + +void qapi_free_BlockdevOptionsVVFAT(BlockdevOptionsVVFAT *obj); + +struct BlockdevOptionsVVFATList { + union { + BlockdevOptionsVVFAT *value; + uint64_t padding; + }; + BlockdevOptionsVVFATList *next; +}; + +void qapi_free_BlockdevOptionsVVFATList(BlockdevOptionsVVFATList *obj); + +struct BlockdevRef { + BlockdevRefKind kind; + union { /* union tag is @kind */ + void *data; + BlockdevOptions *definition; + char *reference; + }; +}; + +extern const int BlockdevRef_qtypes[]; + +void qapi_free_BlockdevRef(BlockdevRef *obj); + +struct BlockdevRefList { + union { + BlockdevRef *value; + uint64_t padding; + }; + BlockdevRefList *next; +}; + +void qapi_free_BlockdevRefList(BlockdevRefList *obj); + +struct BlockdevSnapshot { + bool has_device; + char *device; + bool has_node_name; + char *node_name; + char *snapshot_file; + bool has_snapshot_node_name; + char *snapshot_node_name; + bool has_format; + char *format; + bool has_mode; + NewImageMode mode; +}; + +void qapi_free_BlockdevSnapshot(BlockdevSnapshot *obj); + +struct BlockdevSnapshotInternal { + char *device; + char *name; +}; + +void qapi_free_BlockdevSnapshotInternal(BlockdevSnapshotInternal *obj); + +struct BlockdevSnapshotInternalList { + union { + BlockdevSnapshotInternal *value; + uint64_t padding; + 
}; + BlockdevSnapshotInternalList *next; +}; + +void qapi_free_BlockdevSnapshotInternalList(BlockdevSnapshotInternalList *obj); + +struct BlockdevSnapshotList { + union { + BlockdevSnapshot *value; + uint64_t padding; + }; + BlockdevSnapshotList *next; +}; + +void qapi_free_BlockdevSnapshotList(BlockdevSnapshotList *obj); + +struct ChardevBackend { + ChardevBackendKind kind; + union { /* union tag is @kind */ + void *data; + ChardevFile *file; + ChardevHostdev *serial; + ChardevHostdev *parallel; + ChardevHostdev *pipe; + ChardevSocket *socket; + ChardevUdp *udp; + ChardevDummy *pty; + ChardevDummy *null; + ChardevMux *mux; + ChardevDummy *msmouse; + ChardevDummy *braille; + ChardevDummy *testdev; + ChardevStdio *stdio; + ChardevDummy *console; + ChardevSpiceChannel *spicevmc; + ChardevSpicePort *spiceport; + ChardevVC *vc; + ChardevRingbuf *ringbuf; + ChardevRingbuf *memory; + }; +}; + +void qapi_free_ChardevBackend(ChardevBackend *obj); + +struct ChardevBackendInfo { + char *name; +}; + +void qapi_free_ChardevBackendInfo(ChardevBackendInfo *obj); + +struct ChardevBackendInfoList { + union { + ChardevBackendInfo *value; + uint64_t padding; + }; + ChardevBackendInfoList *next; +}; + +void qapi_free_ChardevBackendInfoList(ChardevBackendInfoList *obj); + +struct ChardevBackendList { + union { + ChardevBackend *value; + uint64_t padding; + }; + ChardevBackendList *next; +}; + +void qapi_free_ChardevBackendList(ChardevBackendList *obj); + +struct ChardevDummy { + char qapi_dummy_field_for_empty_struct; +}; + +void qapi_free_ChardevDummy(ChardevDummy *obj); + +struct ChardevDummyList { + union { + ChardevDummy *value; + uint64_t padding; + }; + ChardevDummyList *next; +}; + +void qapi_free_ChardevDummyList(ChardevDummyList *obj); + +struct ChardevFile { + bool has_in; + char *in; + char *out; +}; + +void qapi_free_ChardevFile(ChardevFile *obj); + +struct ChardevFileList { + union { + ChardevFile *value; + uint64_t padding; + }; + ChardevFileList *next; +}; + +void qapi_free_ChardevFileList(ChardevFileList *obj); + +struct ChardevHostdev { + char *device; +}; + +void qapi_free_ChardevHostdev(ChardevHostdev *obj); + +struct ChardevHostdevList { + union { + ChardevHostdev *value; + uint64_t padding; + }; + ChardevHostdevList *next; +}; + +void qapi_free_ChardevHostdevList(ChardevHostdevList *obj); + +struct ChardevInfo { + char *label; + char *filename; + bool frontend_open; +}; + +void qapi_free_ChardevInfo(ChardevInfo *obj); + +struct ChardevInfoList { + union { + ChardevInfo *value; + uint64_t padding; + }; + ChardevInfoList *next; +}; + +void qapi_free_ChardevInfoList(ChardevInfoList *obj); + +struct ChardevMux { + char *chardev; +}; + +void qapi_free_ChardevMux(ChardevMux *obj); + +struct ChardevMuxList { + union { + ChardevMux *value; + uint64_t padding; + }; + ChardevMuxList *next; +}; + +void qapi_free_ChardevMuxList(ChardevMuxList *obj); + +struct ChardevReturn { + bool has_pty; + char *pty; +}; + +void qapi_free_ChardevReturn(ChardevReturn *obj); + +struct ChardevReturnList { + union { + ChardevReturn *value; + uint64_t padding; + }; + ChardevReturnList *next; +}; + +void qapi_free_ChardevReturnList(ChardevReturnList *obj); + +struct ChardevRingbuf { + bool has_size; + int64_t size; +}; + +void qapi_free_ChardevRingbuf(ChardevRingbuf *obj); + +struct ChardevRingbufList { + union { + ChardevRingbuf *value; + uint64_t padding; + }; + ChardevRingbufList *next; +}; + +void qapi_free_ChardevRingbufList(ChardevRingbufList *obj); + +struct ChardevSocket { + SocketAddress *addr; + bool 
has_server; + bool server; + bool has_wait; + bool wait; + bool has_nodelay; + bool nodelay; + bool has_telnet; + bool telnet; + bool has_reconnect; + int64_t reconnect; +}; + +void qapi_free_ChardevSocket(ChardevSocket *obj); + +struct ChardevSocketList { + union { + ChardevSocket *value; + uint64_t padding; + }; + ChardevSocketList *next; +}; + +void qapi_free_ChardevSocketList(ChardevSocketList *obj); + +struct ChardevSpiceChannel { + char *type; +}; + +void qapi_free_ChardevSpiceChannel(ChardevSpiceChannel *obj); + +struct ChardevSpiceChannelList { + union { + ChardevSpiceChannel *value; + uint64_t padding; + }; + ChardevSpiceChannelList *next; +}; + +void qapi_free_ChardevSpiceChannelList(ChardevSpiceChannelList *obj); + +struct ChardevSpicePort { + char *fqdn; +}; + +void qapi_free_ChardevSpicePort(ChardevSpicePort *obj); + +struct ChardevSpicePortList { + union { + ChardevSpicePort *value; + uint64_t padding; + }; + ChardevSpicePortList *next; +}; + +void qapi_free_ChardevSpicePortList(ChardevSpicePortList *obj); + +struct ChardevStdio { + bool has_signal; + bool signal; +}; + +void qapi_free_ChardevStdio(ChardevStdio *obj); + +struct ChardevStdioList { + union { + ChardevStdio *value; + uint64_t padding; + }; + ChardevStdioList *next; +}; + +void qapi_free_ChardevStdioList(ChardevStdioList *obj); + +struct ChardevUdp { + SocketAddress *remote; + bool has_local; + SocketAddress *local; +}; + +void qapi_free_ChardevUdp(ChardevUdp *obj); + +struct ChardevUdpList { + union { + ChardevUdp *value; + uint64_t padding; + }; + ChardevUdpList *next; +}; + +void qapi_free_ChardevUdpList(ChardevUdpList *obj); + +struct ChardevVC { + bool has_width; + int64_t width; + bool has_height; + int64_t height; + bool has_cols; + int64_t cols; + bool has_rows; + int64_t rows; +}; + +void qapi_free_ChardevVC(ChardevVC *obj); + +struct ChardevVCList { + union { + ChardevVC *value; + uint64_t padding; + }; + ChardevVCList *next; +}; + +void qapi_free_ChardevVCList(ChardevVCList *obj); + +struct CommandInfo { + char *name; +}; + +void qapi_free_CommandInfo(CommandInfo *obj); + +struct CommandInfoList { + union { + CommandInfo *value; + uint64_t padding; + }; + CommandInfoList *next; +}; + +void qapi_free_CommandInfoList(CommandInfoList *obj); + +struct CommandLineOptionInfo { + char *option; + CommandLineParameterInfoList *parameters; +}; + +void qapi_free_CommandLineOptionInfo(CommandLineOptionInfo *obj); + +struct CommandLineOptionInfoList { + union { + CommandLineOptionInfo *value; + uint64_t padding; + }; + CommandLineOptionInfoList *next; +}; + +void qapi_free_CommandLineOptionInfoList(CommandLineOptionInfoList *obj); + +struct CommandLineParameterInfo { + char *name; + CommandLineParameterType type; + bool has_help; + char *help; + bool has_q_default; + char *q_default; +}; + +void qapi_free_CommandLineParameterInfo(CommandLineParameterInfo *obj); + +struct CommandLineParameterInfoList { + union { + CommandLineParameterInfo *value; + uint64_t padding; + }; + CommandLineParameterInfoList *next; +}; + +void qapi_free_CommandLineParameterInfoList(CommandLineParameterInfoList *obj); + +struct CommandLineParameterTypeList { + union { + CommandLineParameterType value; + uint64_t padding; + }; + CommandLineParameterTypeList *next; +}; + +void qapi_free_CommandLineParameterTypeList(CommandLineParameterTypeList *obj); + +struct CpuDefinitionInfo { + char *name; +}; + +void qapi_free_CpuDefinitionInfo(CpuDefinitionInfo *obj); + +struct CpuDefinitionInfoList { + union { + CpuDefinitionInfo *value; + uint64_t 
padding; + }; + CpuDefinitionInfoList *next; +}; + +void qapi_free_CpuDefinitionInfoList(CpuDefinitionInfoList *obj); + +struct CpuInfo { + int64_t CPU; + bool current; + bool halted; + char *qom_path; + bool has_pc; + int64_t pc; + bool has_nip; + int64_t nip; + bool has_npc; + int64_t npc; + bool has_PC; + int64_t PC; + int64_t thread_id; +}; + +void qapi_free_CpuInfo(CpuInfo *obj); + +struct CpuInfoList { + union { + CpuInfo *value; + uint64_t padding; + }; + CpuInfoList *next; +}; + +void qapi_free_CpuInfoList(CpuInfoList *obj); + +struct DataFormatList { + union { + DataFormat value; + uint64_t padding; + }; + DataFormatList *next; +}; + +void qapi_free_DataFormatList(DataFormatList *obj); + +struct DevicePropertyInfo { + char *name; + char *type; + bool has_description; + char *description; +}; + +void qapi_free_DevicePropertyInfo(DevicePropertyInfo *obj); + +struct DevicePropertyInfoList { + union { + DevicePropertyInfo *value; + uint64_t padding; + }; + DevicePropertyInfoList *next; +}; + +void qapi_free_DevicePropertyInfoList(DevicePropertyInfoList *obj); + +struct DirtyBitmapStatusList { + union { + DirtyBitmapStatus value; + uint64_t padding; + }; + DirtyBitmapStatusList *next; +}; + +void qapi_free_DirtyBitmapStatusList(DirtyBitmapStatusList *obj); + +struct DriveBackup { + char *device; + char *target; + bool has_format; + char *format; + MirrorSyncMode sync; + bool has_mode; + NewImageMode mode; + bool has_speed; + int64_t speed; + bool has_bitmap; + char *bitmap; + bool has_on_source_error; + BlockdevOnError on_source_error; + bool has_on_target_error; + BlockdevOnError on_target_error; +}; + +void qapi_free_DriveBackup(DriveBackup *obj); + +struct DriveBackupList { + union { + DriveBackup *value; + uint64_t padding; + }; + DriveBackupList *next; +}; + +void qapi_free_DriveBackupList(DriveBackupList *obj); + +struct DumpGuestMemoryCapability { + DumpGuestMemoryFormatList *formats; +}; + +void qapi_free_DumpGuestMemoryCapability(DumpGuestMemoryCapability *obj); + +struct DumpGuestMemoryCapabilityList { + union { + DumpGuestMemoryCapability *value; + uint64_t padding; + }; + DumpGuestMemoryCapabilityList *next; +}; + +void qapi_free_DumpGuestMemoryCapabilityList(DumpGuestMemoryCapabilityList *obj); + +struct DumpGuestMemoryFormatList { + union { + DumpGuestMemoryFormat value; + uint64_t padding; + }; + DumpGuestMemoryFormatList *next; +}; + +void qapi_free_DumpGuestMemoryFormatList(DumpGuestMemoryFormatList *obj); + +struct ErrorClassList { + union { + ErrorClass value; + uint64_t padding; + }; + ErrorClassList *next; +}; + +void qapi_free_ErrorClassList(ErrorClassList *obj); + +struct EventInfo { + char *name; +}; + +void qapi_free_EventInfo(EventInfo *obj); + +struct EventInfoList { + union { + EventInfo *value; + uint64_t padding; + }; + EventInfoList *next; +}; + +void qapi_free_EventInfoList(EventInfoList *obj); + +struct FdsetFdInfo { + int64_t fd; + bool has_opaque; + char *opaque; +}; + +void qapi_free_FdsetFdInfo(FdsetFdInfo *obj); + +struct FdsetFdInfoList { + union { + FdsetFdInfo *value; + uint64_t padding; + }; + FdsetFdInfoList *next; +}; + +void qapi_free_FdsetFdInfoList(FdsetFdInfoList *obj); + +struct FdsetInfo { + int64_t fdset_id; + FdsetFdInfoList *fds; +}; + +void qapi_free_FdsetInfo(FdsetInfo *obj); + +struct FdsetInfoList { + union { + FdsetInfo *value; + uint64_t padding; + }; + FdsetInfoList *next; +}; + +void qapi_free_FdsetInfoList(FdsetInfoList *obj); + +struct GuestPanicActionList { + union { + GuestPanicAction value; + uint64_t padding; + }; + 
GuestPanicActionList *next; +}; + +void qapi_free_GuestPanicActionList(GuestPanicActionList *obj); + +struct HostMemPolicyList { + union { + HostMemPolicy value; + uint64_t padding; + }; + HostMemPolicyList *next; +}; + +void qapi_free_HostMemPolicyList(HostMemPolicyList *obj); + +struct IOThreadInfo { + char *id; + int64_t thread_id; +}; + +void qapi_free_IOThreadInfo(IOThreadInfo *obj); + +struct IOThreadInfoList { + union { + IOThreadInfo *value; + uint64_t padding; + }; + IOThreadInfoList *next; +}; + +void qapi_free_IOThreadInfoList(IOThreadInfoList *obj); + +struct ImageCheck { + char *filename; + char *format; + int64_t check_errors; + bool has_image_end_offset; + int64_t image_end_offset; + bool has_corruptions; + int64_t corruptions; + bool has_leaks; + int64_t leaks; + bool has_corruptions_fixed; + int64_t corruptions_fixed; + bool has_leaks_fixed; + int64_t leaks_fixed; + bool has_total_clusters; + int64_t total_clusters; + bool has_allocated_clusters; + int64_t allocated_clusters; + bool has_fragmented_clusters; + int64_t fragmented_clusters; + bool has_compressed_clusters; + int64_t compressed_clusters; +}; + +void qapi_free_ImageCheck(ImageCheck *obj); + +struct ImageCheckList { + union { + ImageCheck *value; + uint64_t padding; + }; + ImageCheckList *next; +}; + +void qapi_free_ImageCheckList(ImageCheckList *obj); + +struct ImageInfo { + char *filename; + char *format; + bool has_dirty_flag; + bool dirty_flag; + bool has_actual_size; + int64_t actual_size; + int64_t virtual_size; + bool has_cluster_size; + int64_t cluster_size; + bool has_encrypted; + bool encrypted; + bool has_compressed; + bool compressed; + bool has_backing_filename; + char *backing_filename; + bool has_full_backing_filename; + char *full_backing_filename; + bool has_backing_filename_format; + char *backing_filename_format; + bool has_snapshots; + SnapshotInfoList *snapshots; + bool has_backing_image; + ImageInfo *backing_image; + bool has_format_specific; + ImageInfoSpecific *format_specific; +}; + +void qapi_free_ImageInfo(ImageInfo *obj); + +struct ImageInfoList { + union { + ImageInfo *value; + uint64_t padding; + }; + ImageInfoList *next; +}; + +void qapi_free_ImageInfoList(ImageInfoList *obj); + +struct ImageInfoSpecific { + ImageInfoSpecificKind kind; + union { /* union tag is @kind */ + void *data; + ImageInfoSpecificQCow2 *qcow2; + ImageInfoSpecificVmdk *vmdk; + }; +}; + +void qapi_free_ImageInfoSpecific(ImageInfoSpecific *obj); + +struct ImageInfoSpecificList { + union { + ImageInfoSpecific *value; + uint64_t padding; + }; + ImageInfoSpecificList *next; +}; + +void qapi_free_ImageInfoSpecificList(ImageInfoSpecificList *obj); + +struct ImageInfoSpecificQCow2 { + char *compat; + bool has_lazy_refcounts; + bool lazy_refcounts; + bool has_corrupt; + bool corrupt; + int64_t refcount_bits; +}; + +void qapi_free_ImageInfoSpecificQCow2(ImageInfoSpecificQCow2 *obj); + +struct ImageInfoSpecificQCow2List { + union { + ImageInfoSpecificQCow2 *value; + uint64_t padding; + }; + ImageInfoSpecificQCow2List *next; +}; + +void qapi_free_ImageInfoSpecificQCow2List(ImageInfoSpecificQCow2List *obj); + +struct ImageInfoSpecificVmdk { + char *create_type; + int64_t cid; + int64_t parent_cid; + ImageInfoList *extents; +}; + +void qapi_free_ImageInfoSpecificVmdk(ImageInfoSpecificVmdk *obj); + +struct ImageInfoSpecificVmdkList { + union { + ImageInfoSpecificVmdk *value; + uint64_t padding; + }; + ImageInfoSpecificVmdkList *next; +}; + +void qapi_free_ImageInfoSpecificVmdkList(ImageInfoSpecificVmdkList *obj); + +struct 
InetSocketAddress { + char *host; + char *port; + bool has_to; + uint16_t to; + bool has_ipv4; + bool ipv4; + bool has_ipv6; + bool ipv6; +}; + +void qapi_free_InetSocketAddress(InetSocketAddress *obj); + +struct InetSocketAddressList { + union { + InetSocketAddress *value; + uint64_t padding; + }; + InetSocketAddressList *next; +}; + +void qapi_free_InetSocketAddressList(InetSocketAddressList *obj); + +struct InputAxisList { + union { + InputAxis value; + uint64_t padding; + }; + InputAxisList *next; +}; + +void qapi_free_InputAxisList(InputAxisList *obj); + +struct InputBtnEvent { + InputButton button; + bool down; +}; + +void qapi_free_InputBtnEvent(InputBtnEvent *obj); + +struct InputBtnEventList { + union { + InputBtnEvent *value; + uint64_t padding; + }; + InputBtnEventList *next; +}; + +void qapi_free_InputBtnEventList(InputBtnEventList *obj); + +struct InputButtonList { + union { + InputButton value; + uint64_t padding; + }; + InputButtonList *next; +}; + +void qapi_free_InputButtonList(InputButtonList *obj); + +struct InputEvent { + InputEventKind kind; + union { /* union tag is @kind */ + void *data; + InputKeyEvent *key; + InputBtnEvent *btn; + InputMoveEvent *rel; + InputMoveEvent *abs; + }; +}; + +void qapi_free_InputEvent(InputEvent *obj); + +struct InputEventList { + union { + InputEvent *value; + uint64_t padding; + }; + InputEventList *next; +}; + +void qapi_free_InputEventList(InputEventList *obj); + +struct InputKeyEvent { + KeyValue *key; + bool down; +}; + +void qapi_free_InputKeyEvent(InputKeyEvent *obj); + +struct InputKeyEventList { + union { + InputKeyEvent *value; + uint64_t padding; + }; + InputKeyEventList *next; +}; + +void qapi_free_InputKeyEventList(InputKeyEventList *obj); + +struct InputMoveEvent { + InputAxis axis; + int64_t value; +}; + +void qapi_free_InputMoveEvent(InputMoveEvent *obj); + +struct InputMoveEventList { + union { + InputMoveEvent *value; + uint64_t padding; + }; + InputMoveEventList *next; +}; + +void qapi_free_InputMoveEventList(InputMoveEventList *obj); + +struct IoOperationTypeList { + union { + IoOperationType value; + uint64_t padding; + }; + IoOperationTypeList *next; +}; + +void qapi_free_IoOperationTypeList(IoOperationTypeList *obj); + +struct JSONTypeList { + union { + JSONType value; + uint64_t padding; + }; + JSONTypeList *next; +}; + +void qapi_free_JSONTypeList(JSONTypeList *obj); + +struct KeyValue { + KeyValueKind kind; + union { /* union tag is @kind */ + void *data; + int64_t number; + QKeyCode qcode; + }; +}; + +void qapi_free_KeyValue(KeyValue *obj); + +struct KeyValueList { + union { + KeyValue *value; + uint64_t padding; + }; + KeyValueList *next; +}; + +void qapi_free_KeyValueList(KeyValueList *obj); + +struct KvmInfo { + bool enabled; + bool present; +}; + +void qapi_free_KvmInfo(KvmInfo *obj); + +struct KvmInfoList { + union { + KvmInfo *value; + uint64_t padding; + }; + KvmInfoList *next; +}; + +void qapi_free_KvmInfoList(KvmInfoList *obj); + +struct LostTickPolicyList { + union { + LostTickPolicy value; + uint64_t padding; + }; + LostTickPolicyList *next; +}; + +void qapi_free_LostTickPolicyList(LostTickPolicyList *obj); + +struct MachineInfo { + char *name; + bool has_alias; + char *alias; + bool has_is_default; + bool is_default; + int64_t cpu_max; +}; + +void qapi_free_MachineInfo(MachineInfo *obj); + +struct MachineInfoList { + union { + MachineInfo *value; + uint64_t padding; + }; + MachineInfoList *next; +}; + +void qapi_free_MachineInfoList(MachineInfoList *obj); + +struct Memdev { + uint64_t size; + bool 
merge; + bool dump; + bool prealloc; + uint16List *host_nodes; + HostMemPolicy policy; +}; + +void qapi_free_Memdev(Memdev *obj); + +struct MemdevList { + union { + Memdev *value; + uint64_t padding; + }; + MemdevList *next; +}; + +void qapi_free_MemdevList(MemdevList *obj); + +struct MemoryDeviceInfo { + MemoryDeviceInfoKind kind; + union { /* union tag is @kind */ + void *data; + PCDIMMDeviceInfo *dimm; + }; +}; + +void qapi_free_MemoryDeviceInfo(MemoryDeviceInfo *obj); + +struct MemoryDeviceInfoList { + union { + MemoryDeviceInfo *value; + uint64_t padding; + }; + MemoryDeviceInfoList *next; +}; + +void qapi_free_MemoryDeviceInfoList(MemoryDeviceInfoList *obj); + +struct MigrationCapabilityList { + union { + MigrationCapability value; + uint64_t padding; + }; + MigrationCapabilityList *next; +}; + +void qapi_free_MigrationCapabilityList(MigrationCapabilityList *obj); + +struct MigrationCapabilityStatus { + MigrationCapability capability; + bool state; +}; + +void qapi_free_MigrationCapabilityStatus(MigrationCapabilityStatus *obj); + +struct MigrationCapabilityStatusList { + union { + MigrationCapabilityStatus *value; + uint64_t padding; + }; + MigrationCapabilityStatusList *next; +}; + +void qapi_free_MigrationCapabilityStatusList(MigrationCapabilityStatusList *obj); + +struct MigrationInfo { + bool has_status; + MigrationStatus status; + bool has_ram; + MigrationStats *ram; + bool has_disk; + MigrationStats *disk; + bool has_xbzrle_cache; + XBZRLECacheStats *xbzrle_cache; + bool has_total_time; + int64_t total_time; + bool has_expected_downtime; + int64_t expected_downtime; + bool has_downtime; + int64_t downtime; + bool has_setup_time; + int64_t setup_time; +}; + +void qapi_free_MigrationInfo(MigrationInfo *obj); + +struct MigrationInfoList { + union { + MigrationInfo *value; + uint64_t padding; + }; + MigrationInfoList *next; +}; + +void qapi_free_MigrationInfoList(MigrationInfoList *obj); + +struct MigrationParameterList { + union { + MigrationParameter value; + uint64_t padding; + }; + MigrationParameterList *next; +}; + +void qapi_free_MigrationParameterList(MigrationParameterList *obj); + +struct MigrationParameters { + int64_t compress_level; + int64_t compress_threads; + int64_t decompress_threads; +}; + +void qapi_free_MigrationParameters(MigrationParameters *obj); + +struct MigrationParametersList { + union { + MigrationParameters *value; + uint64_t padding; + }; + MigrationParametersList *next; +}; + +void qapi_free_MigrationParametersList(MigrationParametersList *obj); + +struct MigrationStats { + int64_t transferred; + int64_t remaining; + int64_t total; + int64_t duplicate; + int64_t skipped; + int64_t normal; + int64_t normal_bytes; + int64_t dirty_pages_rate; + double mbps; + int64_t dirty_sync_count; +}; + +void qapi_free_MigrationStats(MigrationStats *obj); + +struct MigrationStatsList { + union { + MigrationStats *value; + uint64_t padding; + }; + MigrationStatsList *next; +}; + +void qapi_free_MigrationStatsList(MigrationStatsList *obj); + +struct MigrationStatusList { + union { + MigrationStatus value; + uint64_t padding; + }; + MigrationStatusList *next; +}; + +void qapi_free_MigrationStatusList(MigrationStatusList *obj); + +struct MirrorSyncModeList { + union { + MirrorSyncMode value; + uint64_t padding; + }; + MirrorSyncModeList *next; +}; + +void qapi_free_MirrorSyncModeList(MirrorSyncModeList *obj); + +struct MouseInfo { + char *name; + int64_t index; + bool current; + bool absolute; +}; + +void qapi_free_MouseInfo(MouseInfo *obj); + +struct MouseInfoList { + 
union { + MouseInfo *value; + uint64_t padding; + }; + MouseInfoList *next; +}; + +void qapi_free_MouseInfoList(MouseInfoList *obj); + +struct NameInfo { + bool has_name; + char *name; +}; + +void qapi_free_NameInfo(NameInfo *obj); + +struct NameInfoList { + union { + NameInfo *value; + uint64_t padding; + }; + NameInfoList *next; +}; + +void qapi_free_NameInfoList(NameInfoList *obj); + +struct NetClientOptions { + NetClientOptionsKind kind; + union { /* union tag is @kind */ + void *data; + NetdevNoneOptions *none; + NetLegacyNicOptions *nic; + NetdevUserOptions *user; + NetdevTapOptions *tap; + NetdevL2TPv3Options *l2tpv3; + NetdevSocketOptions *socket; + NetdevVdeOptions *vde; + NetdevDumpOptions *dump; + NetdevBridgeOptions *bridge; + NetdevHubPortOptions *hubport; + NetdevNetmapOptions *netmap; + NetdevVhostUserOptions *vhost_user; + }; +}; + +void qapi_free_NetClientOptions(NetClientOptions *obj); + +struct NetClientOptionsList { + union { + NetClientOptions *value; + uint64_t padding; + }; + NetClientOptionsList *next; +}; + +void qapi_free_NetClientOptionsList(NetClientOptionsList *obj); + +struct NetLegacy { + bool has_vlan; + int32_t vlan; + bool has_id; + char *id; + bool has_name; + char *name; + NetClientOptions *opts; +}; + +void qapi_free_NetLegacy(NetLegacy *obj); + +struct NetLegacyList { + union { + NetLegacy *value; + uint64_t padding; + }; + NetLegacyList *next; +}; + +void qapi_free_NetLegacyList(NetLegacyList *obj); + +struct NetLegacyNicOptions { + bool has_netdev; + char *netdev; + bool has_macaddr; + char *macaddr; + bool has_model; + char *model; + bool has_addr; + char *addr; + bool has_vectors; + uint32_t vectors; +}; + +void qapi_free_NetLegacyNicOptions(NetLegacyNicOptions *obj); + +struct NetLegacyNicOptionsList { + union { + NetLegacyNicOptions *value; + uint64_t padding; + }; + NetLegacyNicOptionsList *next; +}; + +void qapi_free_NetLegacyNicOptionsList(NetLegacyNicOptionsList *obj); + +struct Netdev { + char *id; + NetClientOptions *opts; +}; + +void qapi_free_Netdev(Netdev *obj); + +struct NetdevBridgeOptions { + bool has_br; + char *br; + bool has_helper; + char *helper; +}; + +void qapi_free_NetdevBridgeOptions(NetdevBridgeOptions *obj); + +struct NetdevBridgeOptionsList { + union { + NetdevBridgeOptions *value; + uint64_t padding; + }; + NetdevBridgeOptionsList *next; +}; + +void qapi_free_NetdevBridgeOptionsList(NetdevBridgeOptionsList *obj); + +struct NetdevDumpOptions { + bool has_len; + uint64_t len; + bool has_file; + char *file; +}; + +void qapi_free_NetdevDumpOptions(NetdevDumpOptions *obj); + +struct NetdevDumpOptionsList { + union { + NetdevDumpOptions *value; + uint64_t padding; + }; + NetdevDumpOptionsList *next; +}; + +void qapi_free_NetdevDumpOptionsList(NetdevDumpOptionsList *obj); + +struct NetdevHubPortOptions { + int32_t hubid; +}; + +void qapi_free_NetdevHubPortOptions(NetdevHubPortOptions *obj); + +struct NetdevHubPortOptionsList { + union { + NetdevHubPortOptions *value; + uint64_t padding; + }; + NetdevHubPortOptionsList *next; +}; + +void qapi_free_NetdevHubPortOptionsList(NetdevHubPortOptionsList *obj); + +struct NetdevL2TPv3Options { + char *src; + char *dst; + bool has_srcport; + char *srcport; + bool has_dstport; + char *dstport; + bool has_ipv6; + bool ipv6; + bool has_udp; + bool udp; + bool has_cookie64; + bool cookie64; + bool has_counter; + bool counter; + bool has_pincounter; + bool pincounter; + bool has_txcookie; + uint64_t txcookie; + bool has_rxcookie; + uint64_t rxcookie; + uint32_t txsession; + bool has_rxsession; 
+ uint32_t rxsession; + bool has_offset; + uint32_t offset; +}; + +void qapi_free_NetdevL2TPv3Options(NetdevL2TPv3Options *obj); + +struct NetdevL2TPv3OptionsList { + union { + NetdevL2TPv3Options *value; + uint64_t padding; + }; + NetdevL2TPv3OptionsList *next; +}; + +void qapi_free_NetdevL2TPv3OptionsList(NetdevL2TPv3OptionsList *obj); + +struct NetdevList { + union { + Netdev *value; + uint64_t padding; + }; + NetdevList *next; +}; + +void qapi_free_NetdevList(NetdevList *obj); + +struct NetdevNetmapOptions { + char *ifname; + bool has_devname; + char *devname; +}; + +void qapi_free_NetdevNetmapOptions(NetdevNetmapOptions *obj); + +struct NetdevNetmapOptionsList { + union { + NetdevNetmapOptions *value; + uint64_t padding; + }; + NetdevNetmapOptionsList *next; +}; + +void qapi_free_NetdevNetmapOptionsList(NetdevNetmapOptionsList *obj); + +struct NetdevNoneOptions { + char qapi_dummy_field_for_empty_struct; +}; + +void qapi_free_NetdevNoneOptions(NetdevNoneOptions *obj); + +struct NetdevNoneOptionsList { + union { + NetdevNoneOptions *value; + uint64_t padding; + }; + NetdevNoneOptionsList *next; +}; + +void qapi_free_NetdevNoneOptionsList(NetdevNoneOptionsList *obj); + +struct NetdevSocketOptions { + bool has_fd; + char *fd; + bool has_listen; + char *listen; + bool has_connect; + char *connect; + bool has_mcast; + char *mcast; + bool has_localaddr; + char *localaddr; + bool has_udp; + char *udp; +}; + +void qapi_free_NetdevSocketOptions(NetdevSocketOptions *obj); + +struct NetdevSocketOptionsList { + union { + NetdevSocketOptions *value; + uint64_t padding; + }; + NetdevSocketOptionsList *next; +}; + +void qapi_free_NetdevSocketOptionsList(NetdevSocketOptionsList *obj); + +struct NetdevTapOptions { + bool has_ifname; + char *ifname; + bool has_fd; + char *fd; + bool has_fds; + char *fds; + bool has_script; + char *script; + bool has_downscript; + char *downscript; + bool has_helper; + char *helper; + bool has_sndbuf; + uint64_t sndbuf; + bool has_vnet_hdr; + bool vnet_hdr; + bool has_vhost; + bool vhost; + bool has_vhostfd; + char *vhostfd; + bool has_vhostfds; + char *vhostfds; + bool has_vhostforce; + bool vhostforce; + bool has_queues; + uint32_t queues; +}; + +void qapi_free_NetdevTapOptions(NetdevTapOptions *obj); + +struct NetdevTapOptionsList { + union { + NetdevTapOptions *value; + uint64_t padding; + }; + NetdevTapOptionsList *next; +}; + +void qapi_free_NetdevTapOptionsList(NetdevTapOptionsList *obj); + +struct NetdevUserOptions { + bool has_hostname; + char *hostname; + bool has_q_restrict; + bool q_restrict; + bool has_ip; + char *ip; + bool has_net; + char *net; + bool has_host; + char *host; + bool has_tftp; + char *tftp; + bool has_bootfile; + char *bootfile; + bool has_dhcpstart; + char *dhcpstart; + bool has_dns; + char *dns; + bool has_dnssearch; + StringList *dnssearch; + bool has_smb; + char *smb; + bool has_smbserver; + char *smbserver; + bool has_hostfwd; + StringList *hostfwd; + bool has_guestfwd; + StringList *guestfwd; +}; + +void qapi_free_NetdevUserOptions(NetdevUserOptions *obj); + +struct NetdevUserOptionsList { + union { + NetdevUserOptions *value; + uint64_t padding; + }; + NetdevUserOptionsList *next; +}; + +void qapi_free_NetdevUserOptionsList(NetdevUserOptionsList *obj); + +struct NetdevVdeOptions { + bool has_sock; + char *sock; + bool has_port; + uint16_t port; + bool has_group; + char *group; + bool has_mode; + uint16_t mode; +}; + +void qapi_free_NetdevVdeOptions(NetdevVdeOptions *obj); + +struct NetdevVdeOptionsList { + union { + NetdevVdeOptions 
*value; + uint64_t padding; + }; + NetdevVdeOptionsList *next; +}; + +void qapi_free_NetdevVdeOptionsList(NetdevVdeOptionsList *obj); + +struct NetdevVhostUserOptions { + char *chardev; + bool has_vhostforce; + bool vhostforce; +}; + +void qapi_free_NetdevVhostUserOptions(NetdevVhostUserOptions *obj); + +struct NetdevVhostUserOptionsList { + union { + NetdevVhostUserOptions *value; + uint64_t padding; + }; + NetdevVhostUserOptionsList *next; +}; + +void qapi_free_NetdevVhostUserOptionsList(NetdevVhostUserOptionsList *obj); + +struct NetworkAddressFamilyList { + union { + NetworkAddressFamily value; + uint64_t padding; + }; + NetworkAddressFamilyList *next; +}; + +void qapi_free_NetworkAddressFamilyList(NetworkAddressFamilyList *obj); + +struct NewImageModeList { + union { + NewImageMode value; + uint64_t padding; + }; + NewImageModeList *next; +}; + +void qapi_free_NewImageModeList(NewImageModeList *obj); + +struct NumaNodeOptions { + bool has_nodeid; + uint16_t nodeid; + bool has_cpus; + uint16List *cpus; + bool has_mem; + uint64_t mem; + bool has_memdev; + char *memdev; +}; + +void qapi_free_NumaNodeOptions(NumaNodeOptions *obj); + +struct NumaNodeOptionsList { + union { + NumaNodeOptions *value; + uint64_t padding; + }; + NumaNodeOptionsList *next; +}; + +void qapi_free_NumaNodeOptionsList(NumaNodeOptionsList *obj); + +struct NumaOptions { + NumaOptionsKind kind; + union { /* union tag is @kind */ + void *data; + NumaNodeOptions *node; + }; +}; + +void qapi_free_NumaOptions(NumaOptions *obj); + +struct NumaOptionsList { + union { + NumaOptions *value; + uint64_t padding; + }; + NumaOptionsList *next; +}; + +void qapi_free_NumaOptionsList(NumaOptionsList *obj); + +struct ObjectPropertyInfo { + char *name; + char *type; +}; + +void qapi_free_ObjectPropertyInfo(ObjectPropertyInfo *obj); + +struct ObjectPropertyInfoList { + union { + ObjectPropertyInfo *value; + uint64_t padding; + }; + ObjectPropertyInfoList *next; +}; + +void qapi_free_ObjectPropertyInfoList(ObjectPropertyInfoList *obj); + +struct ObjectTypeInfo { + char *name; +}; + +void qapi_free_ObjectTypeInfo(ObjectTypeInfo *obj); + +struct ObjectTypeInfoList { + union { + ObjectTypeInfo *value; + uint64_t padding; + }; + ObjectTypeInfoList *next; +}; + +void qapi_free_ObjectTypeInfoList(ObjectTypeInfoList *obj); + +struct OnOffAutoList { + union { + OnOffAuto value; + uint64_t padding; + }; + OnOffAutoList *next; +}; + +void qapi_free_OnOffAutoList(OnOffAutoList *obj); + +struct PCDIMMDeviceInfo { + bool has_id; + char *id; + int64_t addr; + int64_t size; + int64_t slot; + int64_t node; + char *memdev; + bool hotplugged; + bool hotpluggable; +}; + +void qapi_free_PCDIMMDeviceInfo(PCDIMMDeviceInfo *obj); + +struct PCDIMMDeviceInfoList { + union { + PCDIMMDeviceInfo *value; + uint64_t padding; + }; + PCDIMMDeviceInfoList *next; +}; + +void qapi_free_PCDIMMDeviceInfoList(PCDIMMDeviceInfoList *obj); + +struct PciBridgeInfo { + PciBusInfo *bus; + bool has_devices; + PciDeviceInfoList *devices; +}; + +void qapi_free_PciBridgeInfo(PciBridgeInfo *obj); + +struct PciBridgeInfoList { + union { + PciBridgeInfo *value; + uint64_t padding; + }; + PciBridgeInfoList *next; +}; + +void qapi_free_PciBridgeInfoList(PciBridgeInfoList *obj); + +struct PciBusInfo { + int64_t number; + int64_t secondary; + int64_t subordinate; + PciMemoryRange *io_range; + PciMemoryRange *memory_range; + PciMemoryRange *prefetchable_range; +}; + +void qapi_free_PciBusInfo(PciBusInfo *obj); + +struct PciBusInfoList { + union { + PciBusInfo *value; + uint64_t padding; + }; 
+ PciBusInfoList *next; +}; + +void qapi_free_PciBusInfoList(PciBusInfoList *obj); + +struct PciDeviceClass { + bool has_desc; + char *desc; + int64_t q_class; +}; + +void qapi_free_PciDeviceClass(PciDeviceClass *obj); + +struct PciDeviceClassList { + union { + PciDeviceClass *value; + uint64_t padding; + }; + PciDeviceClassList *next; +}; + +void qapi_free_PciDeviceClassList(PciDeviceClassList *obj); + +struct PciDeviceId { + int64_t device; + int64_t vendor; +}; + +void qapi_free_PciDeviceId(PciDeviceId *obj); + +struct PciDeviceIdList { + union { + PciDeviceId *value; + uint64_t padding; + }; + PciDeviceIdList *next; +}; + +void qapi_free_PciDeviceIdList(PciDeviceIdList *obj); + +struct PciDeviceInfo { + int64_t bus; + int64_t slot; + int64_t function; + PciDeviceClass *class_info; + PciDeviceId *id; + bool has_irq; + int64_t irq; + char *qdev_id; + bool has_pci_bridge; + PciBridgeInfo *pci_bridge; + PciMemoryRegionList *regions; +}; + +void qapi_free_PciDeviceInfo(PciDeviceInfo *obj); + +struct PciDeviceInfoList { + union { + PciDeviceInfo *value; + uint64_t padding; + }; + PciDeviceInfoList *next; +}; + +void qapi_free_PciDeviceInfoList(PciDeviceInfoList *obj); + +struct PciInfo { + int64_t bus; + PciDeviceInfoList *devices; +}; + +void qapi_free_PciInfo(PciInfo *obj); + +struct PciInfoList { + union { + PciInfo *value; + uint64_t padding; + }; + PciInfoList *next; +}; + +void qapi_free_PciInfoList(PciInfoList *obj); + +struct PciMemoryRange { + int64_t base; + int64_t limit; +}; + +void qapi_free_PciMemoryRange(PciMemoryRange *obj); + +struct PciMemoryRangeList { + union { + PciMemoryRange *value; + uint64_t padding; + }; + PciMemoryRangeList *next; +}; + +void qapi_free_PciMemoryRangeList(PciMemoryRangeList *obj); + +struct PciMemoryRegion { + int64_t bar; + char *type; + int64_t address; + int64_t size; + bool has_prefetch; + bool prefetch; + bool has_mem_type_64; + bool mem_type_64; +}; + +void qapi_free_PciMemoryRegion(PciMemoryRegion *obj); + +struct PciMemoryRegionList { + union { + PciMemoryRegion *value; + uint64_t padding; + }; + PciMemoryRegionList *next; +}; + +void qapi_free_PciMemoryRegionList(PciMemoryRegionList *obj); + +struct PreallocModeList { + union { + PreallocMode value; + uint64_t padding; + }; + PreallocModeList *next; +}; + +void qapi_free_PreallocModeList(PreallocModeList *obj); + +struct QCryptoTLSCredsEndpointList { + union { + QCryptoTLSCredsEndpoint value; + uint64_t padding; + }; + QCryptoTLSCredsEndpointList *next; +}; + +void qapi_free_QCryptoTLSCredsEndpointList(QCryptoTLSCredsEndpointList *obj); + +struct QKeyCodeList { + union { + QKeyCode value; + uint64_t padding; + }; + QKeyCodeList *next; +}; + +void qapi_free_QKeyCodeList(QKeyCodeList *obj); + +struct Qcow2OverlapCheckFlags { + bool has_q_template; + Qcow2OverlapCheckMode q_template; + bool has_main_header; + bool main_header; + bool has_active_l1; + bool active_l1; + bool has_active_l2; + bool active_l2; + bool has_refcount_table; + bool refcount_table; + bool has_refcount_block; + bool refcount_block; + bool has_snapshot_table; + bool snapshot_table; + bool has_inactive_l1; + bool inactive_l1; + bool has_inactive_l2; + bool inactive_l2; +}; + +void qapi_free_Qcow2OverlapCheckFlags(Qcow2OverlapCheckFlags *obj); + +struct Qcow2OverlapCheckFlagsList { + union { + Qcow2OverlapCheckFlags *value; + uint64_t padding; + }; + Qcow2OverlapCheckFlagsList *next; +}; + +void qapi_free_Qcow2OverlapCheckFlagsList(Qcow2OverlapCheckFlagsList *obj); + +struct Qcow2OverlapCheckModeList { + union { + 
Qcow2OverlapCheckMode value; + uint64_t padding; + }; + Qcow2OverlapCheckModeList *next; +}; + +void qapi_free_Qcow2OverlapCheckModeList(Qcow2OverlapCheckModeList *obj); + +struct Qcow2OverlapChecks { + Qcow2OverlapChecksKind kind; + union { /* union tag is @kind */ + void *data; + Qcow2OverlapCheckFlags *flags; + Qcow2OverlapCheckMode mode; + }; +}; + +extern const int Qcow2OverlapChecks_qtypes[]; + +void qapi_free_Qcow2OverlapChecks(Qcow2OverlapChecks *obj); + +struct Qcow2OverlapChecksList { + union { + Qcow2OverlapChecks *value; + uint64_t padding; + }; + Qcow2OverlapChecksList *next; +}; + +void qapi_free_Qcow2OverlapChecksList(Qcow2OverlapChecksList *obj); + +struct QuorumReadPatternList { + union { + QuorumReadPattern value; + uint64_t padding; + }; + QuorumReadPatternList *next; +}; + +void qapi_free_QuorumReadPatternList(QuorumReadPatternList *obj); + +struct RockerOfDpaFlow { + uint64_t cookie; + uint64_t hits; + RockerOfDpaFlowKey *key; + RockerOfDpaFlowMask *mask; + RockerOfDpaFlowAction *action; +}; + +void qapi_free_RockerOfDpaFlow(RockerOfDpaFlow *obj); + +struct RockerOfDpaFlowAction { + bool has_goto_tbl; + uint32_t goto_tbl; + bool has_group_id; + uint32_t group_id; + bool has_tunnel_lport; + uint32_t tunnel_lport; + bool has_vlan_id; + uint16_t vlan_id; + bool has_new_vlan_id; + uint16_t new_vlan_id; + bool has_out_pport; + uint32_t out_pport; +}; + +void qapi_free_RockerOfDpaFlowAction(RockerOfDpaFlowAction *obj); + +struct RockerOfDpaFlowActionList { + union { + RockerOfDpaFlowAction *value; + uint64_t padding; + }; + RockerOfDpaFlowActionList *next; +}; + +void qapi_free_RockerOfDpaFlowActionList(RockerOfDpaFlowActionList *obj); + +struct RockerOfDpaFlowKey { + uint32_t priority; + uint32_t tbl_id; + bool has_in_pport; + uint32_t in_pport; + bool has_tunnel_id; + uint32_t tunnel_id; + bool has_vlan_id; + uint16_t vlan_id; + bool has_eth_type; + uint16_t eth_type; + bool has_eth_src; + char *eth_src; + bool has_eth_dst; + char *eth_dst; + bool has_ip_proto; + uint8_t ip_proto; + bool has_ip_tos; + uint8_t ip_tos; + bool has_ip_dst; + char *ip_dst; +}; + +void qapi_free_RockerOfDpaFlowKey(RockerOfDpaFlowKey *obj); + +struct RockerOfDpaFlowKeyList { + union { + RockerOfDpaFlowKey *value; + uint64_t padding; + }; + RockerOfDpaFlowKeyList *next; +}; + +void qapi_free_RockerOfDpaFlowKeyList(RockerOfDpaFlowKeyList *obj); + +struct RockerOfDpaFlowList { + union { + RockerOfDpaFlow *value; + uint64_t padding; + }; + RockerOfDpaFlowList *next; +}; + +void qapi_free_RockerOfDpaFlowList(RockerOfDpaFlowList *obj); + +struct RockerOfDpaFlowMask { + bool has_in_pport; + uint32_t in_pport; + bool has_tunnel_id; + uint32_t tunnel_id; + bool has_vlan_id; + uint16_t vlan_id; + bool has_eth_src; + char *eth_src; + bool has_eth_dst; + char *eth_dst; + bool has_ip_proto; + uint8_t ip_proto; + bool has_ip_tos; + uint8_t ip_tos; +}; + +void qapi_free_RockerOfDpaFlowMask(RockerOfDpaFlowMask *obj); + +struct RockerOfDpaFlowMaskList { + union { + RockerOfDpaFlowMask *value; + uint64_t padding; + }; + RockerOfDpaFlowMaskList *next; +}; + +void qapi_free_RockerOfDpaFlowMaskList(RockerOfDpaFlowMaskList *obj); + +struct RockerOfDpaGroup { + uint32_t id; + uint8_t type; + bool has_vlan_id; + uint16_t vlan_id; + bool has_pport; + uint32_t pport; + bool has_index; + uint32_t index; + bool has_out_pport; + uint32_t out_pport; + bool has_group_id; + uint32_t group_id; + bool has_set_vlan_id; + uint16_t set_vlan_id; + bool has_pop_vlan; + uint8_t pop_vlan; + bool has_group_ids; + uint32List *group_ids; + 
bool has_set_eth_src; + char *set_eth_src; + bool has_set_eth_dst; + char *set_eth_dst; + bool has_ttl_check; + uint8_t ttl_check; +}; + +void qapi_free_RockerOfDpaGroup(RockerOfDpaGroup *obj); + +struct RockerOfDpaGroupList { + union { + RockerOfDpaGroup *value; + uint64_t padding; + }; + RockerOfDpaGroupList *next; +}; + +void qapi_free_RockerOfDpaGroupList(RockerOfDpaGroupList *obj); + +struct RockerPort { + char *name; + bool enabled; + bool link_up; + uint32_t speed; + RockerPortDuplex duplex; + RockerPortAutoneg autoneg; +}; + +void qapi_free_RockerPort(RockerPort *obj); + +struct RockerPortAutonegList { + union { + RockerPortAutoneg value; + uint64_t padding; + }; + RockerPortAutonegList *next; +}; + +void qapi_free_RockerPortAutonegList(RockerPortAutonegList *obj); + +struct RockerPortDuplexList { + union { + RockerPortDuplex value; + uint64_t padding; + }; + RockerPortDuplexList *next; +}; + +void qapi_free_RockerPortDuplexList(RockerPortDuplexList *obj); + +struct RockerPortList { + union { + RockerPort *value; + uint64_t padding; + }; + RockerPortList *next; +}; + +void qapi_free_RockerPortList(RockerPortList *obj); + +struct RockerSwitch { + char *name; + uint64_t id; + uint32_t ports; +}; + +void qapi_free_RockerSwitch(RockerSwitch *obj); + +struct RockerSwitchList { + union { + RockerSwitch *value; + uint64_t padding; + }; + RockerSwitchList *next; +}; + +void qapi_free_RockerSwitchList(RockerSwitchList *obj); + +struct RunStateList { + union { + RunState value; + uint64_t padding; + }; + RunStateList *next; +}; + +void qapi_free_RunStateList(RunStateList *obj); + +struct RxFilterInfo { + char *name; + bool promiscuous; + RxState multicast; + RxState unicast; + RxState vlan; + bool broadcast_allowed; + bool multicast_overflow; + bool unicast_overflow; + char *main_mac; + intList *vlan_table; + strList *unicast_table; + strList *multicast_table; +}; + +void qapi_free_RxFilterInfo(RxFilterInfo *obj); + +struct RxFilterInfoList { + union { + RxFilterInfo *value; + uint64_t padding; + }; + RxFilterInfoList *next; +}; + +void qapi_free_RxFilterInfoList(RxFilterInfoList *obj); + +struct RxStateList { + union { + RxState value; + uint64_t padding; + }; + RxStateList *next; +}; + +void qapi_free_RxStateList(RxStateList *obj); + +struct SchemaInfo { + /* Members inherited from SchemaInfoBase: */ + char *name; + SchemaMetaType meta_type; + /* Own members: */ + union { /* union tag is @meta_type */ + void *data; + SchemaInfoBuiltin *builtin; + SchemaInfoEnum *q_enum; + SchemaInfoArray *array; + SchemaInfoObject *object; + SchemaInfoAlternate *alternate; + SchemaInfoCommand *command; + SchemaInfoEvent *event; + }; +}; + +void qapi_free_SchemaInfo(SchemaInfo *obj); + +struct SchemaInfoAlternate { + SchemaInfoAlternateMemberList *members; +}; + +void qapi_free_SchemaInfoAlternate(SchemaInfoAlternate *obj); + +struct SchemaInfoAlternateList { + union { + SchemaInfoAlternate *value; + uint64_t padding; + }; + SchemaInfoAlternateList *next; +}; + +void qapi_free_SchemaInfoAlternateList(SchemaInfoAlternateList *obj); + +struct SchemaInfoAlternateMember { + char *type; +}; + +void qapi_free_SchemaInfoAlternateMember(SchemaInfoAlternateMember *obj); + +struct SchemaInfoAlternateMemberList { + union { + SchemaInfoAlternateMember *value; + uint64_t padding; + }; + SchemaInfoAlternateMemberList *next; +}; + +void qapi_free_SchemaInfoAlternateMemberList(SchemaInfoAlternateMemberList *obj); + +struct SchemaInfoArray { + char *element_type; +}; + +void qapi_free_SchemaInfoArray(SchemaInfoArray *obj); 
+ +struct SchemaInfoArrayList { + union { + SchemaInfoArray *value; + uint64_t padding; + }; + SchemaInfoArrayList *next; +}; + +void qapi_free_SchemaInfoArrayList(SchemaInfoArrayList *obj); + +struct SchemaInfoBase { + char *name; + SchemaMetaType meta_type; +}; + +void qapi_free_SchemaInfoBase(SchemaInfoBase *obj); + +struct SchemaInfoBaseList { + union { + SchemaInfoBase *value; + uint64_t padding; + }; + SchemaInfoBaseList *next; +}; + +void qapi_free_SchemaInfoBaseList(SchemaInfoBaseList *obj); + +struct SchemaInfoBuiltin { + JSONType json_type; +}; + +void qapi_free_SchemaInfoBuiltin(SchemaInfoBuiltin *obj); + +struct SchemaInfoBuiltinList { + union { + SchemaInfoBuiltin *value; + uint64_t padding; + }; + SchemaInfoBuiltinList *next; +}; + +void qapi_free_SchemaInfoBuiltinList(SchemaInfoBuiltinList *obj); + +struct SchemaInfoCommand { + char *arg_type; + char *ret_type; +}; + +void qapi_free_SchemaInfoCommand(SchemaInfoCommand *obj); + +struct SchemaInfoCommandList { + union { + SchemaInfoCommand *value; + uint64_t padding; + }; + SchemaInfoCommandList *next; +}; + +void qapi_free_SchemaInfoCommandList(SchemaInfoCommandList *obj); + +struct SchemaInfoEnum { + strList *values; +}; + +void qapi_free_SchemaInfoEnum(SchemaInfoEnum *obj); + +struct SchemaInfoEnumList { + union { + SchemaInfoEnum *value; + uint64_t padding; + }; + SchemaInfoEnumList *next; +}; + +void qapi_free_SchemaInfoEnumList(SchemaInfoEnumList *obj); + +struct SchemaInfoEvent { + char *arg_type; +}; + +void qapi_free_SchemaInfoEvent(SchemaInfoEvent *obj); + +struct SchemaInfoEventList { + union { + SchemaInfoEvent *value; + uint64_t padding; + }; + SchemaInfoEventList *next; +}; + +void qapi_free_SchemaInfoEventList(SchemaInfoEventList *obj); + +struct SchemaInfoList { + union { + SchemaInfo *value; + uint64_t padding; + }; + SchemaInfoList *next; +}; + +void qapi_free_SchemaInfoList(SchemaInfoList *obj); + +struct SchemaInfoObject { + SchemaInfoObjectMemberList *members; + bool has_tag; + char *tag; + bool has_variants; + SchemaInfoObjectVariantList *variants; +}; + +void qapi_free_SchemaInfoObject(SchemaInfoObject *obj); + +struct SchemaInfoObjectList { + union { + SchemaInfoObject *value; + uint64_t padding; + }; + SchemaInfoObjectList *next; +}; + +void qapi_free_SchemaInfoObjectList(SchemaInfoObjectList *obj); + +struct SchemaInfoObjectMember { + char *name; + char *type; + bool has_q_default; + QObject *q_default; +}; + +void qapi_free_SchemaInfoObjectMember(SchemaInfoObjectMember *obj); + +struct SchemaInfoObjectMemberList { + union { + SchemaInfoObjectMember *value; + uint64_t padding; + }; + SchemaInfoObjectMemberList *next; +}; + +void qapi_free_SchemaInfoObjectMemberList(SchemaInfoObjectMemberList *obj); + +struct SchemaInfoObjectVariant { + char *q_case; + char *type; +}; + +void qapi_free_SchemaInfoObjectVariant(SchemaInfoObjectVariant *obj); + +struct SchemaInfoObjectVariantList { + union { + SchemaInfoObjectVariant *value; + uint64_t padding; + }; + SchemaInfoObjectVariantList *next; +}; + +void qapi_free_SchemaInfoObjectVariantList(SchemaInfoObjectVariantList *obj); + +struct SchemaMetaTypeList { + union { + SchemaMetaType value; + uint64_t padding; + }; + SchemaMetaTypeList *next; +}; + +void qapi_free_SchemaMetaTypeList(SchemaMetaTypeList *obj); + +struct SnapshotInfo { + char *id; + char *name; + int64_t vm_state_size; + int64_t date_sec; + int64_t date_nsec; + int64_t vm_clock_sec; + int64_t vm_clock_nsec; +}; + +void qapi_free_SnapshotInfo(SnapshotInfo *obj); + +struct SnapshotInfoList { + union { 
+ SnapshotInfo *value; + uint64_t padding; + }; + SnapshotInfoList *next; +}; + +void qapi_free_SnapshotInfoList(SnapshotInfoList *obj); + +struct SocketAddress { + SocketAddressKind kind; + union { /* union tag is @kind */ + void *data; + InetSocketAddress *inet; + UnixSocketAddress *q_unix; + String *fd; + }; +}; + +void qapi_free_SocketAddress(SocketAddress *obj); + +struct SocketAddressList { + union { + SocketAddress *value; + uint64_t padding; + }; + SocketAddressList *next; +}; + +void qapi_free_SocketAddressList(SocketAddressList *obj); + +struct SpiceBasicInfo { + char *host; + char *port; + NetworkAddressFamily family; +}; + +void qapi_free_SpiceBasicInfo(SpiceBasicInfo *obj); + +struct SpiceBasicInfoList { + union { + SpiceBasicInfo *value; + uint64_t padding; + }; + SpiceBasicInfoList *next; +}; + +void qapi_free_SpiceBasicInfoList(SpiceBasicInfoList *obj); + +struct SpiceChannel { + SpiceBasicInfo *base; + int64_t connection_id; + int64_t channel_type; + int64_t channel_id; + bool tls; +}; + +void qapi_free_SpiceChannel(SpiceChannel *obj); + +struct SpiceChannelList { + union { + SpiceChannel *value; + uint64_t padding; + }; + SpiceChannelList *next; +}; + +void qapi_free_SpiceChannelList(SpiceChannelList *obj); + +struct SpiceInfo { + bool enabled; + bool migrated; + bool has_host; + char *host; + bool has_port; + int64_t port; + bool has_tls_port; + int64_t tls_port; + bool has_auth; + char *auth; + bool has_compiled_version; + char *compiled_version; + SpiceQueryMouseMode mouse_mode; + bool has_channels; + SpiceChannelList *channels; +}; + +void qapi_free_SpiceInfo(SpiceInfo *obj); + +struct SpiceInfoList { + union { + SpiceInfo *value; + uint64_t padding; + }; + SpiceInfoList *next; +}; + +void qapi_free_SpiceInfoList(SpiceInfoList *obj); + +struct SpiceQueryMouseModeList { + union { + SpiceQueryMouseMode value; + uint64_t padding; + }; + SpiceQueryMouseModeList *next; +}; + +void qapi_free_SpiceQueryMouseModeList(SpiceQueryMouseModeList *obj); + +struct SpiceServerInfo { + SpiceBasicInfo *base; + bool has_auth; + char *auth; +}; + +void qapi_free_SpiceServerInfo(SpiceServerInfo *obj); + +struct SpiceServerInfoList { + union { + SpiceServerInfo *value; + uint64_t padding; + }; + SpiceServerInfoList *next; +}; + +void qapi_free_SpiceServerInfoList(SpiceServerInfoList *obj); + +struct StatusInfo { + bool running; + bool singlestep; + RunState status; +}; + +void qapi_free_StatusInfo(StatusInfo *obj); + +struct StatusInfoList { + union { + StatusInfo *value; + uint64_t padding; + }; + StatusInfoList *next; +}; + +void qapi_free_StatusInfoList(StatusInfoList *obj); + +struct String { + char *str; +}; + +void qapi_free_String(String *obj); + +struct StringList { + union { + String *value; + uint64_t padding; + }; + StringList *next; +}; + +void qapi_free_StringList(StringList *obj); + +struct TPMInfo { + char *id; + TpmModel model; + TpmTypeOptions *options; +}; + +void qapi_free_TPMInfo(TPMInfo *obj); + +struct TPMInfoList { + union { + TPMInfo *value; + uint64_t padding; + }; + TPMInfoList *next; +}; + +void qapi_free_TPMInfoList(TPMInfoList *obj); + +struct TPMPassthroughOptions { + bool has_path; + char *path; + bool has_cancel_path; + char *cancel_path; +}; + +void qapi_free_TPMPassthroughOptions(TPMPassthroughOptions *obj); + +struct TPMPassthroughOptionsList { + union { + TPMPassthroughOptions *value; + uint64_t padding; + }; + TPMPassthroughOptionsList *next; +}; + +void qapi_free_TPMPassthroughOptionsList(TPMPassthroughOptionsList *obj); + +struct TargetInfo { + char 
*arch; +}; + +void qapi_free_TargetInfo(TargetInfo *obj); + +struct TargetInfoList { + union { + TargetInfo *value; + uint64_t padding; + }; + TargetInfoList *next; +}; + +void qapi_free_TargetInfoList(TargetInfoList *obj); + +struct TpmModelList { + union { + TpmModel value; + uint64_t padding; + }; + TpmModelList *next; +}; + +void qapi_free_TpmModelList(TpmModelList *obj); + +struct TpmTypeList { + union { + TpmType value; + uint64_t padding; + }; + TpmTypeList *next; +}; + +void qapi_free_TpmTypeList(TpmTypeList *obj); + +struct TpmTypeOptions { + TpmTypeOptionsKind kind; + union { /* union tag is @kind */ + void *data; + TPMPassthroughOptions *passthrough; + }; +}; + +void qapi_free_TpmTypeOptions(TpmTypeOptions *obj); + +struct TpmTypeOptionsList { + union { + TpmTypeOptions *value; + uint64_t padding; + }; + TpmTypeOptionsList *next; +}; + +void qapi_free_TpmTypeOptionsList(TpmTypeOptionsList *obj); + +struct TraceEventInfo { + char *name; + TraceEventState state; +}; + +void qapi_free_TraceEventInfo(TraceEventInfo *obj); + +struct TraceEventInfoList { + union { + TraceEventInfo *value; + uint64_t padding; + }; + TraceEventInfoList *next; +}; + +void qapi_free_TraceEventInfoList(TraceEventInfoList *obj); + +struct TraceEventStateList { + union { + TraceEventState value; + uint64_t padding; + }; + TraceEventStateList *next; +}; + +void qapi_free_TraceEventStateList(TraceEventStateList *obj); + +struct TransactionAction { + TransactionActionKind kind; + union { /* union tag is @kind */ + void *data; + BlockdevSnapshot *blockdev_snapshot_sync; + DriveBackup *drive_backup; + BlockdevBackup *blockdev_backup; + Abort *abort; + BlockdevSnapshotInternal *blockdev_snapshot_internal_sync; + }; +}; + +void qapi_free_TransactionAction(TransactionAction *obj); + +struct TransactionActionList { + union { + TransactionAction *value; + uint64_t padding; + }; + TransactionActionList *next; +}; + +void qapi_free_TransactionActionList(TransactionActionList *obj); + +struct UnixSocketAddress { + char *path; +}; + +void qapi_free_UnixSocketAddress(UnixSocketAddress *obj); + +struct UnixSocketAddressList { + union { + UnixSocketAddress *value; + uint64_t padding; + }; + UnixSocketAddressList *next; +}; + +void qapi_free_UnixSocketAddressList(UnixSocketAddressList *obj); + +struct UuidInfo { + char *UUID; +}; + +void qapi_free_UuidInfo(UuidInfo *obj); + +struct UuidInfoList { + union { + UuidInfo *value; + uint64_t padding; + }; + UuidInfoList *next; +}; + +void qapi_free_UuidInfoList(UuidInfoList *obj); + +struct VersionInfo { + VersionTriple *qemu; + char *package; +}; + +void qapi_free_VersionInfo(VersionInfo *obj); + +struct VersionInfoList { + union { + VersionInfo *value; + uint64_t padding; + }; + VersionInfoList *next; +}; + +void qapi_free_VersionInfoList(VersionInfoList *obj); + +struct VersionTriple { + int64_t major; + int64_t minor; + int64_t micro; +}; + +void qapi_free_VersionTriple(VersionTriple *obj); + +struct VersionTripleList { + union { + VersionTriple *value; + uint64_t padding; + }; + VersionTripleList *next; +}; + +void qapi_free_VersionTripleList(VersionTripleList *obj); + +struct VncBasicInfo { + char *host; + char *service; + NetworkAddressFamily family; + bool websocket; +}; + +void qapi_free_VncBasicInfo(VncBasicInfo *obj); + +struct VncBasicInfoList { + union { + VncBasicInfo *value; + uint64_t padding; + }; + VncBasicInfoList *next; +}; + +void qapi_free_VncBasicInfoList(VncBasicInfoList *obj); + +struct VncClientInfo { + VncBasicInfo *base; + bool has_x509_dname; + char 
*x509_dname; + bool has_sasl_username; + char *sasl_username; +}; + +void qapi_free_VncClientInfo(VncClientInfo *obj); + +struct VncClientInfoList { + union { + VncClientInfo *value; + uint64_t padding; + }; + VncClientInfoList *next; +}; + +void qapi_free_VncClientInfoList(VncClientInfoList *obj); + +struct VncInfo { + bool enabled; + bool has_host; + char *host; + bool has_family; + NetworkAddressFamily family; + bool has_service; + char *service; + bool has_auth; + char *auth; + bool has_clients; + VncClientInfoList *clients; +}; + +void qapi_free_VncInfo(VncInfo *obj); + +struct VncInfo2 { + char *id; + VncBasicInfoList *server; + VncClientInfoList *clients; + VncPrimaryAuth auth; + bool has_vencrypt; + VncVencryptSubAuth vencrypt; + bool has_display; + char *display; +}; + +void qapi_free_VncInfo2(VncInfo2 *obj); + +struct VncInfo2List { + union { + VncInfo2 *value; + uint64_t padding; + }; + VncInfo2List *next; +}; + +void qapi_free_VncInfo2List(VncInfo2List *obj); + +struct VncInfoList { + union { + VncInfo *value; + uint64_t padding; + }; + VncInfoList *next; +}; + +void qapi_free_VncInfoList(VncInfoList *obj); + +struct VncPrimaryAuthList { + union { + VncPrimaryAuth value; + uint64_t padding; + }; + VncPrimaryAuthList *next; +}; + +void qapi_free_VncPrimaryAuthList(VncPrimaryAuthList *obj); + +struct VncServerInfo { + VncBasicInfo *base; + bool has_auth; + char *auth; +}; + +void qapi_free_VncServerInfo(VncServerInfo *obj); + +struct VncServerInfoList { + union { + VncServerInfo *value; + uint64_t padding; + }; + VncServerInfoList *next; +}; + +void qapi_free_VncServerInfoList(VncServerInfoList *obj); + +struct VncVencryptSubAuthList { + union { + VncVencryptSubAuth value; + uint64_t padding; + }; + VncVencryptSubAuthList *next; +}; + +void qapi_free_VncVencryptSubAuthList(VncVencryptSubAuthList *obj); + +struct WatchdogExpirationActionList { + union { + WatchdogExpirationAction value; + uint64_t padding; + }; + WatchdogExpirationActionList *next; +}; + +void qapi_free_WatchdogExpirationActionList(WatchdogExpirationActionList *obj); + +struct X86CPUFeatureWordInfo { + int64_t cpuid_input_eax; + bool has_cpuid_input_ecx; + int64_t cpuid_input_ecx; + X86CPURegister32 cpuid_register; + int64_t features; +}; + +void qapi_free_X86CPUFeatureWordInfo(X86CPUFeatureWordInfo *obj); + +struct X86CPUFeatureWordInfoList { + union { + X86CPUFeatureWordInfo *value; + uint64_t padding; + }; + X86CPUFeatureWordInfoList *next; +}; + +void qapi_free_X86CPUFeatureWordInfoList(X86CPUFeatureWordInfoList *obj); + +struct X86CPURegister32List { + union { + X86CPURegister32 value; + uint64_t padding; + }; + X86CPURegister32List *next; +}; + +void qapi_free_X86CPURegister32List(X86CPURegister32List *obj); + +struct XBZRLECacheStats { + int64_t cache_size; + int64_t bytes; + int64_t pages; + int64_t cache_miss; + double cache_miss_rate; + int64_t overflow; +}; + +void qapi_free_XBZRLECacheStats(XBZRLECacheStats *obj); + +struct XBZRLECacheStatsList { + union { + XBZRLECacheStats *value; + uint64_t padding; + }; + XBZRLECacheStatsList *next; +}; + +void qapi_free_XBZRLECacheStatsList(XBZRLECacheStatsList *obj); + +#endif diff --git a/slirp/simh/qemu/qapi/dealloc-visitor.h b/slirp/simh/qemu/qapi/dealloc-visitor.h new file mode 100644 index 00000000..cf4c36d2 --- /dev/null +++ b/slirp/simh/qemu/qapi/dealloc-visitor.h @@ -0,0 +1,26 @@ +/* + * Dealloc Visitor + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Michael Roth + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. 
+ * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QAPI_DEALLOC_VISITOR_H +#define QAPI_DEALLOC_VISITOR_H + +#include "qapi/visitor.h" + +typedef struct QapiDeallocVisitor QapiDeallocVisitor; + +QapiDeallocVisitor *qapi_dealloc_visitor_new(void); +void qapi_dealloc_visitor_cleanup(QapiDeallocVisitor *d); + +Visitor *qapi_dealloc_get_visitor(QapiDeallocVisitor *v); + +#endif diff --git a/slirp/simh/qemu/qapi/error.h b/slirp/simh/qemu/qapi/error.h new file mode 100644 index 00000000..c69dddbb --- /dev/null +++ b/slirp/simh/qemu/qapi/error.h @@ -0,0 +1,220 @@ +/* + * QEMU Error Objects + * + * Copyright IBM, Corp. 2011 + * Copyright (C) 2011-2015 Red Hat, Inc. + * + * Authors: + * Anthony Liguori + * Markus Armbruster + * + * This work is licensed under the terms of the GNU LGPL, version 2. See + * the COPYING.LIB file in the top-level directory. + */ + +/* + * Error reporting system loosely patterned after Glib's GError. + * + * Create an error: + * error_setg(&err, "situation normal, all fouled up"); + * + * Report an error to stderr: + * error_report_err(err); + * This frees the error object. + * + * Report an error somewhere else: + * const char *msg = error_get_pretty(err); + * do with msg what needs to be done... + * error_free(err); + * + * Handle an error without reporting it (just for completeness): + * error_free(err); + * + * Pass an existing error to the caller: + * error_propagate(errp, err); + * where Error **errp is a parameter, by convention the last one. + * + * Create a new error and pass it to the caller: + * error_setg(errp, "situation normal, all fouled up"); + * + * Call a function and receive an error from it: + * Error *err = NULL; + * foo(arg, &err); + * if (err) { + * handle the error... + * } + * + * Call a function ignoring errors: + * foo(arg, NULL); + * + * Call a function aborting on errors: + * foo(arg, &error_abort); + * + * Call a function treating errors as fatal: + * foo(arg, &error_fatal); + * + * Receive an error and pass it on to the caller: + * Error *err = NULL; + * foo(arg, &err); + * if (err) { + * handle the error... + * error_propagate(errp, err); + * } + * where Error **errp is a parameter, by convention the last one. + * + * Do *not* "optimize" this to + * foo(arg, errp); + * if (*errp) { // WRONG! + * handle the error... + * } + * because errp may be NULL! + * + * But when all you do with the error is pass it on, please use + * foo(arg, errp); + * for readability. + */ + +#ifndef ERROR_H +#define ERROR_H + +#include "qemu/compiler.h" +#include "qapi-types.h" +#include + +/* + * Opaque error object. + */ +typedef struct Error Error; + +/* + * Get @err's human-readable error message. + */ +const char *error_get_pretty(Error *err); + +/* + * Get @err's error class. + * Note: use of error classes other than ERROR_CLASS_GENERIC_ERROR is + * strongly discouraged. + */ +ErrorClass error_get_class(const Error *err); + +/* + * Create a new error object and assign it to *@errp. + * If @errp is NULL, the error is ignored. Don't bother creating one + * then. + * If @errp is &error_abort, print a suitable message and abort(). + * If @errp is &error_fatal, print a suitable message and exit(1). + * If @errp is anything else, *@errp must be NULL. + * The new error's class is ERROR_CLASS_GENERIC_ERROR, and its + * human-readable error message is made from printf-style @fmt, ... + */ +#define error_setg(errp, fmt, ...) 
\ + error_setg_internal((errp), __FILE__, __LINE__, __func__, \ + (fmt), ## __VA_ARGS__) +void error_setg_internal(Error **errp, + const char *src, int line, const char *func, + const char *fmt, ...) + GCC_FMT_ATTR(5, 6); + +/* + * Just like error_setg(), with @os_error info added to the message. + * If @os_error is non-zero, ": " + strerror(os_error) is appended to + * the human-readable error message. + */ +#define error_setg_errno(errp, os_error, fmt, ...) \ + error_setg_errno_internal((errp), __FILE__, __LINE__, __func__, \ + (os_error), (fmt), ## __VA_ARGS__) +void error_setg_errno_internal(Error **errp, + const char *fname, int line, const char *func, + int os_error, const char *fmt, ...) + GCC_FMT_ATTR(6, 7); + +#ifdef _WIN32 +/* + * Just like error_setg(), with @win32_error info added to the message. + * If @win32_error is non-zero, ": " + g_win32_error_message(win32_err) + * is appended to the human-readable error message. + */ +#define error_setg_win32(errp, win32_err, fmt, ...) \ + error_setg_win32_internal((errp), __FILE__, __LINE__, __func__, \ + (win32_err), (fmt), ## __VA_ARGS__) +void error_setg_win32_internal(Error **errp, + const char *src, int line, const char *func, + int win32_err, const char *fmt, ...) + GCC_FMT_ATTR(6, 7); +#endif + +/* + * Propagate error object (if any) from @local_err to @dst_errp. + * If @local_err is NULL, do nothing (because there's nothing to + * propagate). + * Else, if @dst_errp is NULL, errors are being ignored. Free the + * error object. + * Else, if @dst_errp is &error_abort, print a suitable message and + * abort(). + * Else, if @dst_errp is &error_fatal, print a suitable message and + * exit(1). + * Else, if @dst_errp already contains an error, ignore this one: free + * the error object. + * Else, move the error object from @local_err to *@dst_errp. + * On return, @local_err is invalid. + */ +void error_propagate(Error **dst_errp, Error *local_err); + +/** + * Append a printf-style human-readable explanation to an existing error. + * May be called multiple times, and safe if @errp is NULL. + */ +void error_append_hint(Error **errp, const char *fmt, ...) + GCC_FMT_ATTR(2, 3); + +/* + * Convenience function to report open() failure. + */ +#define error_setg_file_open(errp, os_errno, filename) \ + error_setg_file_open_internal((errp), __FILE__, __LINE__, __func__, \ + (os_errno), (filename)) +void error_setg_file_open_internal(Error **errp, + const char *src, int line, const char *func, + int os_errno, const char *filename); + +/* + * Return an exact copy of @err. + */ +Error *error_copy(const Error *err); + +/* + * Free @err. + * @err may be NULL. + */ +void error_free(Error *err); + +/* + * Convenience function to error_report() and free @err. + */ +void error_report_err(Error *); + +/* + * Just like error_setg(), except you get to specify the error class. + * Note: use of error classes other than ERROR_CLASS_GENERIC_ERROR is + * strongly discouraged. + */ +#define error_set(errp, err_class, fmt, ...) \ + error_set_internal((errp), __FILE__, __LINE__, __func__, \ + (err_class), (fmt), ## __VA_ARGS__) +void error_set_internal(Error **errp, + const char *src, int line, const char *func, + ErrorClass err_class, const char *fmt, ...) + GCC_FMT_ATTR(6, 7); + +/* + * Pass to error_setg() & friends to abort() on error. + */ +extern Error *error_abort; + +/* + * Pass to error_setg() & friends to exit(1) on error. 
+ */ +extern Error *error_fatal; + +#endif diff --git a/slirp/simh/qemu/qapi/opts-visitor.h b/slirp/simh/qemu/qapi/opts-visitor.h new file mode 100644 index 00000000..fd48c14e --- /dev/null +++ b/slirp/simh/qemu/qapi/opts-visitor.h @@ -0,0 +1,37 @@ +/* + * Options Visitor + * + * Copyright Red Hat, Inc. 2012 + * + * Author: Laszlo Ersek + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef OPTS_VISITOR_H +#define OPTS_VISITOR_H + +#include "qapi/visitor.h" +#include "qemu/option.h" + +/* Inclusive upper bound on the size of any flattened range. This is a safety + * (= anti-annoyance) measure; wrong ranges should not cause long startup + * delays nor exhaust virtual memory. + */ +#define OPTS_VISITOR_RANGE_MAX 65536 + +typedef struct OptsVisitor OptsVisitor; + +/* Contrarily to qemu-option.c::parse_option_number(), OptsVisitor's "int" + * parser relies on strtoll() instead of strtoull(). Consequences: + * - string representations of negative numbers yield negative values, + * - values below INT64_MIN or LLONG_MIN are rejected, + * - values above INT64_MAX or LLONG_MAX are rejected. + */ +OptsVisitor *opts_visitor_new(const QemuOpts *opts); +void opts_visitor_cleanup(OptsVisitor *nv); +Visitor *opts_get_visitor(OptsVisitor *nv); + +#endif diff --git a/slirp/simh/qemu/qapi/qmp-event.h b/slirp/simh/qemu/qapi/qmp-event.h new file mode 100644 index 00000000..8a8ffb57 --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp-event.h @@ -0,0 +1,27 @@ +/* + * QMP Event related + * + * Copyright (c) 2014 Wenchao Xia + * + * Authors: + * Wenchao Xia + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QMP_EVENT_H +#define QMP_EVENT_H + +#include "qapi/error.h" +#include "qapi/qmp/qdict.h" + +typedef void (*QMPEventFuncEmit)(unsigned event, QDict *dict, Error **errp); + +void qmp_event_set_func_emit(QMPEventFuncEmit emit); + +QMPEventFuncEmit qmp_event_get_func_emit(void); + +QDict *qmp_event_build_dict(const char *event_name); +#endif diff --git a/slirp/simh/qemu/qapi/qmp-input-visitor.h b/slirp/simh/qemu/qapi/qmp-input-visitor.h new file mode 100644 index 00000000..3ed499cc --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp-input-visitor.h @@ -0,0 +1,29 @@ +/* + * Input Visitor + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QMP_INPUT_VISITOR_H +#define QMP_INPUT_VISITOR_H + +#include "qapi/visitor.h" +#include "qapi/qmp/qobject.h" + +typedef struct QmpInputVisitor QmpInputVisitor; + +QmpInputVisitor *qmp_input_visitor_new(QObject *obj); +QmpInputVisitor *qmp_input_visitor_new_strict(QObject *obj); + +void qmp_input_visitor_cleanup(QmpInputVisitor *v); + +Visitor *qmp_input_get_visitor(QmpInputVisitor *v); + +#endif diff --git a/slirp/simh/qemu/qapi/qmp-output-visitor.h b/slirp/simh/qemu/qapi/qmp-output-visitor.h new file mode 100644 index 00000000..22667706 --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp-output-visitor.h @@ -0,0 +1,28 @@ +/* + * Output Visitor + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
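
The error.h header added above documents the Error conventions used throughout the imported SLiRP/QAPI code: error_setg() to create an error, error_propagate() to hand it upward, error_report_err() to print and free it. A minimal sketch of that pattern follows, assuming the matching error implementation in slirp/simh is compiled and linked; open_device and setup are invented names for illustration.

  /* minimal sketch of the Error ** convention documented in error.h */
  #include "qapi/error.h"

  /* callee: reports failure through the conventional trailing Error ** */
  static void open_device(const char *name, Error **errp)
  {
      if (name == NULL) {
          error_setg(errp, "no device name given");
          return;
      }
      /* ... real work would go here ... */
  }

  /* caller: receive the error locally, then pass it on */
  static void setup(Error **errp)
  {
      Error *local_err = NULL;

      open_device(NULL, &local_err);
      if (local_err) {
          error_propagate(errp, local_err);
          return;
      }
  }

  int main(void)
  {
      Error *err = NULL;

      setup(&err);
      if (err) {
          error_report_err(err);   /* prints the message and frees err */
          return 1;
      }
      return 0;
  }
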
+ * + */ + +#ifndef QMP_OUTPUT_VISITOR_H +#define QMP_OUTPUT_VISITOR_H + +#include "qapi/visitor.h" +#include "qapi/qmp/qobject.h" + +typedef struct QmpOutputVisitor QmpOutputVisitor; + +QmpOutputVisitor *qmp_output_visitor_new(void); +void qmp_output_visitor_cleanup(QmpOutputVisitor *v); + +QObject *qmp_output_get_qobject(QmpOutputVisitor *v); +Visitor *qmp_output_get_visitor(QmpOutputVisitor *v); + +#endif diff --git a/slirp/simh/qemu/qapi/qmp/dispatch.h b/slirp/simh/qemu/qapi/qmp/dispatch.h new file mode 100644 index 00000000..e389697f --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/dispatch.h @@ -0,0 +1,58 @@ +/* + * Core Definitions for QAPI/QMP Dispatch + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QMP_CORE_H +#define QMP_CORE_H + +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qdict.h" +#include "qapi/error.h" + +typedef void (QmpCommandFunc)(QDict *, QObject **, Error **); + +typedef enum QmpCommandType +{ + QCT_NORMAL, +} QmpCommandType; + +typedef enum QmpCommandOptions +{ + QCO_NO_OPTIONS = 0x0, + QCO_NO_SUCCESS_RESP = 0x1, +} QmpCommandOptions; + +typedef struct QmpCommand +{ + const char *name; + QmpCommandType type; + QmpCommandFunc *fn; + QmpCommandOptions options; + QTAILQ_ENTRY(QmpCommand) node; + bool enabled; +} QmpCommand; + +void qmp_register_command(const char *name, QmpCommandFunc *fn, + QmpCommandOptions options); +QmpCommand *qmp_find_command(const char *name); +QObject *qmp_dispatch(QObject *request); +void qmp_disable_command(const char *name); +void qmp_enable_command(const char *name); +bool qmp_command_is_enabled(const QmpCommand *cmd); +const char *qmp_command_name(const QmpCommand *cmd); +bool qmp_has_success_response(const QmpCommand *cmd); +QObject *qmp_build_error_object(Error *err); +typedef void (*qmp_cmd_callback_fn)(QmpCommand *cmd, void *opaque); +void qmp_for_each_command(qmp_cmd_callback_fn fn, void *opaque); + +#endif + diff --git a/slirp/simh/qemu/qapi/qmp/json-lexer.h b/slirp/simh/qemu/qapi/qmp/json-lexer.h new file mode 100644 index 00000000..cdff0460 --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/json-lexer.h @@ -0,0 +1,51 @@ +/* + * JSON lexer + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#ifndef QEMU_JSON_LEXER_H +#define QEMU_JSON_LEXER_H + +#include "qapi/qmp/qstring.h" +#include "qapi/qmp/qlist.h" + +typedef enum json_token_type { + JSON_OPERATOR = 100, + JSON_INTEGER, + JSON_FLOAT, + JSON_KEYWORD, + JSON_STRING, + JSON_ESCAPE, + JSON_SKIP, + JSON_ERROR, +} JSONTokenType; + +typedef struct JSONLexer JSONLexer; + +typedef void (JSONLexerEmitter)(JSONLexer *, QString *, JSONTokenType, int x, int y); + +struct JSONLexer +{ + JSONLexerEmitter *emit; + int state; + QString *token; + int x, y; +}; + +void json_lexer_init(JSONLexer *lexer, JSONLexerEmitter func); + +int json_lexer_feed(JSONLexer *lexer, const char *buffer, size_t size); + +int json_lexer_flush(JSONLexer *lexer); + +void json_lexer_destroy(JSONLexer *lexer); + +#endif diff --git a/slirp/simh/qemu/qapi/qmp/json-parser.h b/slirp/simh/qemu/qapi/qmp/json-parser.h new file mode 100644 index 00000000..44d88f34 --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/json-parser.h @@ -0,0 +1,24 @@ +/* + * JSON Parser + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QEMU_JSON_PARSER_H +#define QEMU_JSON_PARSER_H + +#include "qemu-common.h" +#include "qapi/qmp/qlist.h" +#include "qapi/error.h" + +QObject *json_parser_parse(QList *tokens, va_list *ap); +QObject *json_parser_parse_err(QList *tokens, va_list *ap, Error **errp); + +#endif diff --git a/slirp/simh/qemu/qapi/qmp/json-streamer.h b/slirp/simh/qemu/qapi/qmp/json-streamer.h new file mode 100644 index 00000000..823f7d7f --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/json-streamer.h @@ -0,0 +1,40 @@ +/* + * JSON streaming support + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QEMU_JSON_STREAMER_H +#define QEMU_JSON_STREAMER_H + +#include "qapi/qmp/qlist.h" +#include "qapi/qmp/json-lexer.h" + +typedef struct JSONMessageParser +{ + void (*emit)(struct JSONMessageParser *parser, QList *tokens); + JSONLexer lexer; + int brace_count; + int bracket_count; + QList *tokens; + uint64_t token_size; +} JSONMessageParser; + +void json_message_parser_init(JSONMessageParser *parser, + void (*func)(JSONMessageParser *, QList *)); + +int json_message_parser_feed(JSONMessageParser *parser, + const char *buffer, size_t size); + +int json_message_parser_flush(JSONMessageParser *parser); + +void json_message_parser_destroy(JSONMessageParser *parser); + +#endif diff --git a/slirp/simh/qemu/qapi/qmp/qbool.h b/slirp/simh/qemu/qapi/qmp/qbool.h new file mode 100644 index 00000000..4aa6be3b --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/qbool.h @@ -0,0 +1,29 @@ +/* + * QBool Module + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
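
json-lexer.h, json-streamer.h and json-parser.h above form a pipeline: the streamer feeds lexer tokens to a callback once a complete JSON value has been seen, and the callback hands the token list to json_parser_parse(). The sketch below shows that wiring under the assumption that the slirp/simh JSON and qobject sources are linked; the emit callback and the sample input string are illustrative.

  #include <stdio.h>
  #include <string.h>
  #include "qapi/qmp/json-streamer.h"
  #include "qapi/qmp/json-parser.h"
  #include "qapi/qmp/qjson.h"
  #include "qapi/qmp/qstring.h"

  /* invoked by the streamer once a complete JSON value has been tokenized */
  static void emit(JSONMessageParser *parser, QList *tokens)
  {
      QObject *obj = json_parser_parse(tokens, NULL);  /* NULL: no interpolation args */

      (void)parser;
      if (obj) {
          QString *s = qobject_to_json(obj);
          printf("parsed: %s\n", qstring_get_str(s));
          QDECREF(s);
          qobject_decref(obj);
      }
  }

  int main(void)
  {
      JSONMessageParser parser;
      const char *input = "{ \"execute\": \"query-status\" }";

      json_message_parser_init(&parser, emit);
      json_message_parser_feed(&parser, input, strlen(input));
      json_message_parser_flush(&parser);
      json_message_parser_destroy(&parser);
      return 0;
  }
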
+ * + */ + +#ifndef QBOOL_H +#define QBOOL_H + +#include +#include "qapi/qmp/qobject.h" + +typedef struct QBool { + QObject_HEAD; + bool value; +} QBool; + +QBool *qbool_from_bool(bool value); +bool qbool_get_bool(const QBool *qb); +QBool *qobject_to_qbool(const QObject *obj); + +#endif /* QBOOL_H */ diff --git a/slirp/simh/qemu/qapi/qmp/qdict.h b/slirp/simh/qemu/qapi/qmp/qdict.h new file mode 100644 index 00000000..a37f4c15 --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/qdict.h @@ -0,0 +1,80 @@ +/* + * QDict Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#ifndef QDICT_H +#define QDICT_H + +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qlist.h" +#include "qemu/queue.h" +#include +#include + +#define QDICT_BUCKET_MAX 512 + +typedef struct QDictEntry { + char *key; + QObject *value; + QLIST_ENTRY(QDictEntry) next; +} QDictEntry; + +typedef struct QDict { + QObject_HEAD; + size_t size; + QLIST_HEAD(,QDictEntry) table[QDICT_BUCKET_MAX]; +} QDict; + +/* Object API */ +QDict *qdict_new(void); +const char *qdict_entry_key(const QDictEntry *entry); +QObject *qdict_entry_value(const QDictEntry *entry); +size_t qdict_size(const QDict *qdict); +void qdict_put_obj(QDict *qdict, const char *key, QObject *value); +void qdict_del(QDict *qdict, const char *key); +int qdict_haskey(const QDict *qdict, const char *key); +QObject *qdict_get(const QDict *qdict, const char *key); +QDict *qobject_to_qdict(const QObject *obj); +void qdict_iter(const QDict *qdict, + void (*iter)(const char *key, QObject *obj, void *opaque), + void *opaque); +const QDictEntry *qdict_first(const QDict *qdict); +const QDictEntry *qdict_next(const QDict *qdict, const QDictEntry *entry); + +/* Helper to qdict_put_obj(), accepts any object */ +#define qdict_put(qdict, key, obj) \ + qdict_put_obj(qdict, key, QOBJECT(obj)) + +/* High level helpers */ +double qdict_get_double(const QDict *qdict, const char *key); +int64_t qdict_get_int(const QDict *qdict, const char *key); +bool qdict_get_bool(const QDict *qdict, const char *key); +QList *qdict_get_qlist(const QDict *qdict, const char *key); +QDict *qdict_get_qdict(const QDict *qdict, const char *key); +const char *qdict_get_str(const QDict *qdict, const char *key); +int64_t qdict_get_try_int(const QDict *qdict, const char *key, + int64_t def_value); +bool qdict_get_try_bool(const QDict *qdict, const char *key, bool def_value); +const char *qdict_get_try_str(const QDict *qdict, const char *key); + +void qdict_copy_default(QDict *dst, QDict *src, const char *key); +void qdict_set_default_str(QDict *dst, const char *key, const char *val); + +QDict *qdict_clone_shallow(const QDict *src); +void qdict_flatten(QDict *qdict); + +void qdict_extract_subqdict(QDict *src, QDict **dst, const char *start); +void qdict_array_split(QDict *src, QList **dst); +int qdict_array_entries(QDict *src, const char *subqdict); + +void qdict_join(QDict *dest, QDict *src, bool overwrite); + +#endif /* QDICT_H */ diff --git a/slirp/simh/qemu/qapi/qmp/qerror.h b/slirp/simh/qemu/qapi/qmp/qerror.h new file mode 100644 index 00000000..842b27ae --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/qerror.h @@ -0,0 +1,109 @@ +/* + * QError Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. 
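
qdict.h above is the dictionary type the QMP/JSON code uses for structured data. A short usage sketch follows, assuming the slirp/simh qdict/qstring/qint sources are linked; the keys and values are arbitrary examples.

  #include <stdio.h>
  #include <inttypes.h>
  #include "qapi/qmp/qdict.h"
  #include "qapi/qmp/qint.h"
  #include "qapi/qmp/qstring.h"

  int main(void)
  {
      QDict *dict = qdict_new();

      /* qdict_put() wraps qdict_put_obj() and takes ownership of the value */
      qdict_put(dict, "device", qstring_from_str("xq"));
      qdict_put(dict, "port", qint_from_int(2323));

      if (qdict_haskey(dict, "device"))
          printf("device=%s port=%" PRId64 "\n",
                 qdict_get_str(dict, "device"),
                 qdict_get_int(dict, "port"));

      QDECREF(dict);   /* dropping the dictionary frees its entries too */
      return 0;
  }
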
+ * See the COPYING.LIB file in the top-level directory. + */ +#ifndef QERROR_H +#define QERROR_H + +/* + * These macros will go away, please don't use in new code, and do not + * add new ones! + */ +#define QERR_BASE_NOT_FOUND \ + "Base '%s' not found" + +#define QERR_BLOCK_JOB_NOT_READY \ + "The active block job for device '%s' cannot be completed" + +#define QERR_BUS_NO_HOTPLUG \ + "Bus '%s' does not support hotplugging" + +#define QERR_DEVICE_HAS_NO_MEDIUM \ + "Device '%s' has no medium" + +#define QERR_DEVICE_INIT_FAILED \ + "Device '%s' could not be initialized" + +#define QERR_DEVICE_IN_USE \ + "Device '%s' is in use" + +#define QERR_DEVICE_NO_HOTPLUG \ + "Device '%s' does not support hotplugging" + +#define QERR_FD_NOT_FOUND \ + "File descriptor named '%s' not found" + +#define QERR_FD_NOT_SUPPLIED \ + "No file descriptor supplied via SCM_RIGHTS" + +#define QERR_FEATURE_DISABLED \ + "The feature '%s' is not enabled" + +#define QERR_INVALID_BLOCK_FORMAT \ + "Invalid block format '%s'" + +#define QERR_INVALID_PARAMETER \ + "Invalid parameter '%s'" + +#define QERR_INVALID_PARAMETER_TYPE \ + "Invalid parameter type for '%s', expected: %s" + +#define QERR_INVALID_PARAMETER_VALUE \ + "Parameter '%s' expects %s" + +#define QERR_INVALID_PASSWORD \ + "Password incorrect" + +#define QERR_IO_ERROR \ + "An IO error has occurred" + +#define QERR_JSON_PARSING \ + "Invalid JSON syntax" + +#define QERR_MIGRATION_ACTIVE \ + "There's a migration process in progress" + +#define QERR_MISSING_PARAMETER \ + "Parameter '%s' is missing" + +#define QERR_PERMISSION_DENIED \ + "Insufficient permission to perform this operation" + +#define QERR_PROPERTY_VALUE_BAD \ + "Property '%s.%s' doesn't take value '%s'" + +#define QERR_PROPERTY_VALUE_OUT_OF_RANGE \ + "Property %s.%s doesn't take value %" PRId64 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" + +#define QERR_QGA_COMMAND_FAILED \ + "Guest agent command failed, error was '%s'" + +#define QERR_QMP_BAD_INPUT_OBJECT \ + "Expected '%s' in QMP input" + +#define QERR_QMP_BAD_INPUT_OBJECT_MEMBER \ + "QMP input object member '%s' expects '%s'" + +#define QERR_QMP_EXTRA_MEMBER \ + "QMP input object member '%s' is unexpected" + +#define QERR_SET_PASSWD_FAILED \ + "Could not set password" + +#define QERR_UNDEFINED_ERROR \ + "An undefined error has occurred" + +#define QERR_UNKNOWN_BLOCK_FORMAT_FEATURE \ + "'%s' uses a %s feature which is not supported by this qemu version: %s" + +#define QERR_UNSUPPORTED \ + "this feature or command is not currently supported" + +#endif /* QERROR_H */ diff --git a/slirp/simh/qemu/qapi/qmp/qfloat.h b/slirp/simh/qemu/qapi/qmp/qfloat.h new file mode 100644 index 00000000..a8658443 --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/qfloat.h @@ -0,0 +1,29 @@ +/* + * QFloat Module + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#ifndef QFLOAT_H +#define QFLOAT_H + +#include +#include "qapi/qmp/qobject.h" + +typedef struct QFloat { + QObject_HEAD; + double value; +} QFloat; + +QFloat *qfloat_from_double(double value); +double qfloat_get_double(const QFloat *qi); +QFloat *qobject_to_qfloat(const QObject *obj); + +#endif /* QFLOAT_H */ diff --git a/slirp/simh/qemu/qapi/qmp/qint.h b/slirp/simh/qemu/qapi/qmp/qint.h new file mode 100644 index 00000000..48a41b0f --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/qint.h @@ -0,0 +1,28 @@ +/* + * QInt Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#ifndef QINT_H +#define QINT_H + +#include +#include "qapi/qmp/qobject.h" + +typedef struct QInt { + QObject_HEAD; + int64_t value; +} QInt; + +QInt *qint_from_int(int64_t value); +int64_t qint_get_int(const QInt *qi); +QInt *qobject_to_qint(const QObject *obj); + +#endif /* QINT_H */ diff --git a/slirp/simh/qemu/qapi/qmp/qjson.h b/slirp/simh/qemu/qapi/qmp/qjson.h new file mode 100644 index 00000000..e0a05410 --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/qjson.h @@ -0,0 +1,29 @@ +/* + * QObject JSON integration + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QJSON_H +#define QJSON_H + +#include +#include "qemu/compiler.h" +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qstring.h" + +QObject *qobject_from_json(const char *string); +QObject *qobject_from_jsonf(const char *string, ...); +QObject *qobject_from_jsonv(const char *string, va_list *ap); + +QString *qobject_to_json(const QObject *obj); +QString *qobject_to_json_pretty(const QObject *obj); + +#endif /* QJSON_H */ diff --git a/slirp/simh/qemu/qapi/qmp/qlist.h b/slirp/simh/qemu/qapi/qmp/qlist.h new file mode 100644 index 00000000..6cc4831d --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/qlist.h @@ -0,0 +1,63 @@ +/* + * QList Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
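
qjson.h above converts between JSON text and the QObject types. The round trip below is a sketch assuming the slirp/simh JSON and qobject sources are linked; the literal JSON string is just an example.

  #include <stdio.h>
  #include "qapi/qmp/qjson.h"
  #include "qapi/qmp/qdict.h"
  #include "qapi/qmp/qstring.h"

  int main(void)
  {
      QObject *obj = qobject_from_json("{ \"name\": \"xq\", \"throttle\": 10 }");
      QDict *dict = qobject_to_qdict(obj);     /* NULL unless obj is a dictionary */

      if (dict) {
          printf("name=%s\n", qdict_get_str(dict, "name"));

          QString *out = qobject_to_json(obj); /* strong reference, we must release it */
          printf("%s\n", qstring_get_str(out));
          QDECREF(out);
      }
      qobject_decref(obj);                     /* safe even if the parse failed (NULL) */
      return 0;
  }
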
+ */ + +#ifndef QLIST_H +#define QLIST_H + +#include "qapi/qmp/qobject.h" +#include "qemu/queue.h" + +typedef struct QListEntry { + QObject *value; + QTAILQ_ENTRY(QListEntry) next; +} QListEntry; + +typedef struct QList { + QObject_HEAD; + QTAILQ_HEAD(,QListEntry) head; +} QList; + +#define qlist_append(qlist, obj) \ + qlist_append_obj(qlist, QOBJECT(obj)) + +#define QLIST_FOREACH_ENTRY(qlist, var) \ + for ((var) = ((qlist)->head.tqh_first); \ + (var); \ + (var) = ((var)->next.tqe_next)) + +static inline QObject *qlist_entry_obj(const QListEntry *entry) +{ + return entry->value; +} + +QList *qlist_new(void); +QList *qlist_copy(QList *src); +void qlist_append_obj(QList *qlist, QObject *obj); +void qlist_iter(const QList *qlist, + void (*iter)(QObject *obj, void *opaque), void *opaque); +QObject *qlist_pop(QList *qlist); +QObject *qlist_peek(QList *qlist); +int qlist_empty(const QList *qlist); +size_t qlist_size(const QList *qlist); +QList *qobject_to_qlist(const QObject *obj); + +static inline const QListEntry *qlist_first(const QList *qlist) +{ + return QTAILQ_FIRST(&qlist->head); +} + +static inline const QListEntry *qlist_next(const QListEntry *entry) +{ + return QTAILQ_NEXT(entry, next); +} + +#endif /* QLIST_H */ diff --git a/slirp/simh/qemu/qapi/qmp/qobject.h b/slirp/simh/qemu/qapi/qmp/qobject.h new file mode 100644 index 00000000..260d2ed3 --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/qobject.h @@ -0,0 +1,121 @@ +/* + * QEMU Object Model. + * + * Based on ideas by Avi Kivity + * + * Copyright (C) 2009, 2015 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + * QObject Reference Counts Terminology + * ------------------------------------ + * + * - Returning references: A function that returns an object may + * return it as either a weak or a strong reference. If the reference + * is strong, you are responsible for calling QDECREF() on the reference + * when you are done. + * + * If the reference is weak, the owner of the reference may free it at + * any time in the future. Before storing the reference anywhere, you + * should call QINCREF() to make the reference strong. + * + * - Transferring ownership: when you transfer ownership of a reference + * by calling a function, you are no longer responsible for calling + * QDECREF() when the reference is no longer needed. In other words, + * when the function returns you must behave as if the reference to the + * passed object was weak. + */ +#ifndef QOBJECT_H +#define QOBJECT_H + +#include +#include + +typedef enum { + QTYPE_NONE, /* sentinel value, no QObject has this type code */ + QTYPE_QNULL, + QTYPE_QINT, + QTYPE_QSTRING, + QTYPE_QDICT, + QTYPE_QLIST, + QTYPE_QFLOAT, + QTYPE_QBOOL, + QTYPE_MAX, +} qtype_code; + +struct QObject; + +typedef struct QType { + qtype_code code; + void (*destroy)(struct QObject *); +} QType; + +typedef struct QObject { + const QType *type; + size_t refcnt; +} QObject; + +/* Objects definitions must include this */ +#define QObject_HEAD \ + QObject base + +/* Get the 'base' part of an object */ +#define QOBJECT(obj) (&(obj)->base) + +/* High-level interface for qobject_incref() */ +#define QINCREF(obj) \ + qobject_incref(QOBJECT(obj)) + +/* High-level interface for qobject_decref() */ +#define QDECREF(obj) \ + qobject_decref(obj ? 
QOBJECT(obj) : NULL) + +/* Initialize an object to default values */ +#define QOBJECT_INIT(obj, qtype_type) \ + obj->base.refcnt = 1; \ + obj->base.type = qtype_type + +/** + * qobject_incref(): Increment QObject's reference count + */ +static inline void qobject_incref(QObject *obj) +{ + if (obj) + obj->refcnt++; +} + +/** + * qobject_decref(): Decrement QObject's reference count, deallocate + * when it reaches zero + */ +static inline void qobject_decref(QObject *obj) +{ + if (obj && --obj->refcnt == 0) { + assert(obj->type != NULL); + assert(obj->type->destroy != NULL); + obj->type->destroy(obj); + } +} + +/** + * qobject_type(): Return the QObject's type + */ +static inline qtype_code qobject_type(const QObject *obj) +{ + assert(obj->type != NULL); + return obj->type->code; +} + +extern QObject qnull_; + +static inline QObject *qnull(void) +{ + qobject_incref(&qnull_); + return &qnull_; +} + +#endif /* QOBJECT_H */ diff --git a/slirp/simh/qemu/qapi/qmp/qstring.h b/slirp/simh/qemu/qapi/qmp/qstring.h new file mode 100644 index 00000000..1bc36661 --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/qstring.h @@ -0,0 +1,36 @@ +/* + * QString Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#ifndef QSTRING_H +#define QSTRING_H + +#include +#include "qapi/qmp/qobject.h" + +typedef struct QString { + QObject_HEAD; + char *string; + size_t length; + size_t capacity; +} QString; + +QString *qstring_new(void); +QString *qstring_from_str(const char *str); +QString *qstring_from_substr(const char *str, int start, int end); +size_t qstring_get_length(const QString *qstring); +const char *qstring_get_str(const QString *qstring); +void qstring_append_int(QString *qstring, int64_t value); +void qstring_append(QString *qstring, const char *str); +void qstring_append_chr(QString *qstring, int c); +QString *qobject_to_qstring(const QObject *obj); + +#endif /* QSTRING_H */ diff --git a/slirp/simh/qemu/qapi/qmp/types.h b/slirp/simh/qemu/qapi/qmp/types.h new file mode 100644 index 00000000..7782ec5a --- /dev/null +++ b/slirp/simh/qemu/qapi/qmp/types.h @@ -0,0 +1,25 @@ +/* + * Include all QEMU objects. + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#ifndef QEMU_OBJECTS_H +#define QEMU_OBJECTS_H + +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qint.h" +#include "qapi/qmp/qfloat.h" +#include "qapi/qmp/qbool.h" +#include "qapi/qmp/qstring.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qmp/qlist.h" +#include "qapi/qmp/qjson.h" + +#endif /* QEMU_OBJECTS_H */ diff --git a/slirp/simh/qemu/qapi/string-input-visitor.h b/slirp/simh/qemu/qapi/string-input-visitor.h new file mode 100644 index 00000000..089243c0 --- /dev/null +++ b/slirp/simh/qemu/qapi/string-input-visitor.h @@ -0,0 +1,25 @@ +/* + * String parsing Visitor + * + * Copyright Red Hat, Inc. 2012 + * + * Author: Paolo Bonzini + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
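
qobject.h above spells out the strong/weak reference rules, and qlist.h/qstring.h provide containers they apply to. A sketch of those rules in practice, assuming the slirp/simh qobject sources are linked; the adapter names in the strings are only sample data.

  #include <stdio.h>
  #include "qapi/qmp/qlist.h"
  #include "qapi/qmp/qstring.h"

  static void print_entry(QObject *obj, void *opaque)
  {
      QString *s = qobject_to_qstring(obj);

      (void)opaque;
      if (s)
          printf("%s\n", qstring_get_str(s));
  }

  int main(void)
  {
      QList *list = qlist_new();                      /* we hold one strong reference */

      qlist_append(list, qstring_from_str("DEQNA"));  /* the list owns the new strings */
      qlist_append(list, qstring_from_str("DELQA"));

      qlist_iter(list, print_entry, NULL);

      QObject *popped = qlist_pop(list);              /* returned reference is strong */
      qobject_decref(popped);

      QDECREF(list);                                  /* frees the list and what's left in it */
      return 0;
  }
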
+ * + */ + +#ifndef STRING_INPUT_VISITOR_H +#define STRING_INPUT_VISITOR_H + +#include "qapi/visitor.h" + +typedef struct StringInputVisitor StringInputVisitor; + +StringInputVisitor *string_input_visitor_new(const char *str); +void string_input_visitor_cleanup(StringInputVisitor *v); + +Visitor *string_input_get_visitor(StringInputVisitor *v); + +#endif diff --git a/slirp/simh/qemu/qapi/string-output-visitor.h b/slirp/simh/qemu/qapi/string-output-visitor.h new file mode 100644 index 00000000..d99717f6 --- /dev/null +++ b/slirp/simh/qemu/qapi/string-output-visitor.h @@ -0,0 +1,26 @@ +/* + * String printing Visitor + * + * Copyright Red Hat, Inc. 2012 + * + * Author: Paolo Bonzini + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef STRING_OUTPUT_VISITOR_H +#define STRING_OUTPUT_VISITOR_H + +#include "qapi/visitor.h" + +typedef struct StringOutputVisitor StringOutputVisitor; + +StringOutputVisitor *string_output_visitor_new(bool human); +void string_output_visitor_cleanup(StringOutputVisitor *v); + +char *string_output_get_string(StringOutputVisitor *v); +Visitor *string_output_get_visitor(StringOutputVisitor *v); + +#endif diff --git a/slirp/simh/qemu/qapi/util.h b/slirp/simh/qemu/qapi/util.h new file mode 100644 index 00000000..7ad26c0a --- /dev/null +++ b/slirp/simh/qemu/qapi/util.h @@ -0,0 +1,17 @@ +/* + * QAPI util functions + * + * Copyright Fujitsu, Inc. 2014 + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QAPI_UTIL_H +#define QAPI_UTIL_H + +int qapi_enum_parse(const char * const lookup[], const char *buf, + int max, int def, Error **errp); + +#endif diff --git a/slirp/simh/qemu/qapi/visitor-impl.h b/slirp/simh/qemu/qapi/visitor-impl.h new file mode 100644 index 00000000..8c0ba572 --- /dev/null +++ b/slirp/simh/qemu/qapi/visitor-impl.h @@ -0,0 +1,69 @@ +/* + * Core Definitions for QAPI Visitor implementations + * + * Copyright (C) 2012 Red Hat, Inc. + * + * Author: Paolo Bonizni + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ +#ifndef QAPI_VISITOR_IMPL_H +#define QAPI_VISITOR_IMPL_H + +#include "qapi/error.h" +#include "qapi/visitor.h" + +struct Visitor +{ + /* Must be set */ + void (*start_struct)(Visitor *v, void **obj, const char *kind, + const char *name, size_t size, Error **errp); + void (*end_struct)(Visitor *v, Error **errp); + + void (*start_implicit_struct)(Visitor *v, void **obj, size_t size, + Error **errp); + void (*end_implicit_struct)(Visitor *v, Error **errp); + + void (*start_list)(Visitor *v, const char *name, Error **errp); + GenericList *(*next_list)(Visitor *v, GenericList **list, Error **errp); + void (*end_list)(Visitor *v, Error **errp); + + void (*type_enum)(Visitor *v, int *obj, const char * const strings[], + const char *kind, const char *name, Error **errp); + void (*get_next_type)(Visitor *v, int *kind, const int *qobjects, + const char *name, Error **errp); + + void (*type_int)(Visitor *v, int64_t *obj, const char *name, Error **errp); + void (*type_bool)(Visitor *v, bool *obj, const char *name, Error **errp); + void (*type_str)(Visitor *v, char **obj, const char *name, Error **errp); + void (*type_number)(Visitor *v, double *obj, const char *name, + Error **errp); + void (*type_any)(Visitor *v, QObject **obj, const char *name, + Error **errp); + + /* May be NULL */ + void (*optional)(Visitor *v, bool *present, const char *name, + Error **errp); + + void (*type_uint8)(Visitor *v, uint8_t *obj, const char *name, Error **errp); + void (*type_uint16)(Visitor *v, uint16_t *obj, const char *name, Error **errp); + void (*type_uint32)(Visitor *v, uint32_t *obj, const char *name, Error **errp); + void (*type_uint64)(Visitor *v, uint64_t *obj, const char *name, Error **errp); + void (*type_int8)(Visitor *v, int8_t *obj, const char *name, Error **errp); + void (*type_int16)(Visitor *v, int16_t *obj, const char *name, Error **errp); + void (*type_int32)(Visitor *v, int32_t *obj, const char *name, Error **errp); + void (*type_int64)(Visitor *v, int64_t *obj, const char *name, Error **errp); + /* visit_type_size() falls back to (*type_uint64)() if type_size is unset */ + void (*type_size)(Visitor *v, uint64_t *obj, const char *name, Error **errp); + bool (*start_union)(Visitor *v, bool data_present, Error **errp); + void (*end_union)(Visitor *v, bool data_present, Error **errp); +}; + +void input_type_enum(Visitor *v, int *obj, const char * const strings[], + const char *kind, const char *name, Error **errp); +void output_type_enum(Visitor *v, int *obj, const char * const strings[], + const char *kind, const char *name, Error **errp); + +#endif diff --git a/slirp/simh/qemu/qapi/visitor.h b/slirp/simh/qemu/qapi/visitor.h new file mode 100644 index 00000000..cfc19a61 --- /dev/null +++ b/slirp/simh/qemu/qapi/visitor.h @@ -0,0 +1,65 @@ +/* + * Core Definitions for QAPI Visitor Classes + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ +#ifndef QAPI_VISITOR_CORE_H +#define QAPI_VISITOR_CORE_H + +#include "qemu/typedefs.h" +#include "qapi/qmp/qobject.h" +#include "qapi/error.h" +#include + +typedef struct GenericList +{ + union { + void *value; + uint64_t padding; + }; + struct GenericList *next; +} GenericList; + +void visit_start_handle(Visitor *v, void **obj, const char *kind, + const char *name, Error **errp); +void visit_end_handle(Visitor *v, Error **errp); +void visit_start_struct(Visitor *v, void **obj, const char *kind, + const char *name, size_t size, Error **errp); +void visit_end_struct(Visitor *v, Error **errp); +void visit_start_implicit_struct(Visitor *v, void **obj, size_t size, + Error **errp); +void visit_end_implicit_struct(Visitor *v, Error **errp); +void visit_start_list(Visitor *v, const char *name, Error **errp); +GenericList *visit_next_list(Visitor *v, GenericList **list, Error **errp); +void visit_end_list(Visitor *v, Error **errp); +void visit_optional(Visitor *v, bool *present, const char *name, + Error **errp); +void visit_get_next_type(Visitor *v, int *obj, const int *qtypes, + const char *name, Error **errp); +void visit_type_enum(Visitor *v, int *obj, const char * const strings[], + const char *kind, const char *name, Error **errp); +void visit_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp); +void visit_type_uint8(Visitor *v, uint8_t *obj, const char *name, Error **errp); +void visit_type_uint16(Visitor *v, uint16_t *obj, const char *name, Error **errp); +void visit_type_uint32(Visitor *v, uint32_t *obj, const char *name, Error **errp); +void visit_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp); +void visit_type_int8(Visitor *v, int8_t *obj, const char *name, Error **errp); +void visit_type_int16(Visitor *v, int16_t *obj, const char *name, Error **errp); +void visit_type_int32(Visitor *v, int32_t *obj, const char *name, Error **errp); +void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error **errp); +void visit_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp); +void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp); +void visit_type_str(Visitor *v, char **obj, const char *name, Error **errp); +void visit_type_number(Visitor *v, double *obj, const char *name, Error **errp); +void visit_type_any(Visitor *v, QObject **obj, const char *name, Error **errp); +bool visit_start_union(Visitor *v, bool data_present, Error **errp); +void visit_end_union(Visitor *v, bool data_present, Error **errp); + +#endif diff --git a/slirp/simh/qemu/qemu-common.h b/slirp/simh/qemu/qemu-common.h new file mode 100644 index 00000000..22b7bb85 --- /dev/null +++ b/slirp/simh/qemu/qemu-common.h @@ -0,0 +1,482 @@ + +/* Common header file that is included by all of QEMU. + * + * This file is supposed to be included only by .c files. No header file should + * depend on qemu-common.h, as this would easily lead to circular header + * dependencies. + * + * If a header file uses a definition from qemu-common.h, that definition + * must be moved to a separate header file, and the header that uses it + * must include that header. 
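
visitor.h and visitor-impl.h above define the visit_type_* interface, and qmp-output-visitor.h supplies one concrete visitor that builds QObjects. The sketch below serializes a single integer through that interface, assuming the slirp/simh visitor sources are linked; the field name "throttle" is only an example.

  #include <stdio.h>
  #include "qapi/qmp-output-visitor.h"
  #include "qapi/visitor.h"
  #include "qapi/qmp/qjson.h"
  #include "qapi/qmp/qstring.h"

  int main(void)
  {
      Error *err = NULL;
      QmpOutputVisitor *qov = qmp_output_visitor_new();
      Visitor *v = qmp_output_get_visitor(qov);
      int64_t throttle = 10;

      /* each visit_type_* call dispatches through the vtable in visitor-impl.h */
      visit_type_int(v, &throttle, "throttle", &err);

      if (!err) {
          QObject *out = qmp_output_get_qobject(qov);
          QString *json = qobject_to_json(out);

          printf("%s\n", qstring_get_str(json));
          QDECREF(json);
          qobject_decref(out);
      }
      error_free(err);                 /* error_free(NULL) is a no-op */
      qmp_output_visitor_cleanup(qov);
      return 0;
  }
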
+ */ +#ifndef QEMU_COMMON_H +#define QEMU_COMMON_H + +#include "qemu/osdep.h" +#include "qemu/typedefs.h" +#include "qemu/fprintf-fn.h" + +#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__) +#define WORDS_ALIGNED +#endif + +#define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR) + +#include "glib.h" +#include "config-host.h" +#include "qemu/option.h" +#include "qemu/host-utils.h" + +/* HOST_LONG_BITS is the size of a native pointer in bits. */ +#if UINTPTR_MAX == UINT32_MAX +# define HOST_LONG_BITS 32 +#elif UINTPTR_MAX == UINT64_MAX +# define HOST_LONG_BITS 64 +#else +# error Unknown pointer size +#endif + +#define KiB 1024 +#define MiB (KiB * KiB) + +/* Trace unassigned memory or i/o accesses. */ +extern bool trace_unassigned; + +void cpu_ticks_init(void); + +/* icount */ +void configure_icount(QemuOpts *opts, Error **errp); +extern int use_icount; +extern int icount_align_option; +/* drift information for info jit command */ +extern int64_t max_delay; +extern int64_t max_advance; +void dump_drift_info(FILE *f, fprintf_function cpu_fprintf); + +#include "qemu/bswap.h" + +/* FIXME: Remove NEED_CPU_H. */ +#ifdef NEED_CPU_H +#include "cpu.h" +#endif /* !defined(NEED_CPU_H) */ + +/* main function, renamed */ +#if defined(CONFIG_COCOA) +int qemu_main(int argc, char **argv, char **envp); +#endif + +void qemu_get_timedate(struct tm *tm, int offset); +int qemu_timedate_diff(struct tm *tm); + +/** + * is_help_option: + * @s: string to test + * + * Check whether @s is one of the standard strings which indicate + * that the user is asking for a list of the valid values for a + * command option like -cpu or -M. The current accepted strings + * are 'help' and '?'. '?' is deprecated (it is a shell wildcard + * which makes it annoying to use in a reliable way) but provided + * for backwards compatibility. + * + * Returns: true if @s is a request for a list. + */ +static inline bool is_help_option(const char *s) +{ + return !strcmp(s, "?") || !strcmp(s, "help"); +} + +/* util/cutils.c */ +/** + * pstrcpy: + * @buf: buffer to copy string into + * @buf_size: size of @buf in bytes + * @str: string to copy + * + * Copy @str into @buf, including the trailing NUL, but do not + * write more than @buf_size bytes. The resulting buffer is + * always NUL terminated (even if the source string was too long). + * If @buf_size is zero or negative then no bytes are copied. + * + * This function is similar to strncpy(), but avoids two of that + * function's problems: + * * if @str fits in the buffer, pstrcpy() does not zero-fill the + * remaining space at the end of @buf + * * if @str is too long, pstrcpy() will copy the first @buf_size-1 + * bytes and then add a NUL + */ +void pstrcpy(char *buf, int buf_size, const char *str); +/** + * strpadcpy: + * @buf: buffer to copy string into + * @buf_size: size of @buf in bytes + * @str: string to copy + * @pad: character to pad the remainder of @buf with + * + * Copy @str into @buf (but *not* its trailing NUL!), and then pad the + * rest of the buffer with the @pad character. If @str is too large + * for the buffer then it is truncated, so that @buf contains the + * first @buf_size characters of @str, with no terminator. 
+ */ +void strpadcpy(char *buf, int buf_size, const char *str, char pad); +/** + * pstrcat: + * @buf: buffer containing existing string + * @buf_size: size of @buf in bytes + * @s: string to concatenate to @buf + * + * Append a copy of @s to the string already in @buf, but do not + * allow the buffer to overflow. If the existing contents of @buf + * plus @str would total more than @buf_size bytes, then write + * as much of @str as will fit followed by a NUL terminator. + * + * @buf must already contain a NUL-terminated string, or the + * behaviour is undefined. + * + * Returns: @buf. + */ +char *pstrcat(char *buf, int buf_size, const char *s); +/** + * strstart: + * @str: string to test + * @val: prefix string to look for + * @ptr: NULL, or pointer to be written to indicate start of + * the remainder of the string + * + * Test whether @str starts with the prefix @val. + * If it does (including the degenerate case where @str and @val + * are equal) then return true. If @ptr is not NULL then a + * pointer to the first character following the prefix is written + * to it. If @val is not a prefix of @str then return false (and + * @ptr is not written to). + * + * Returns: true if @str starts with prefix @val, false otherwise. + */ +int strstart(const char *str, const char *val, const char **ptr); +/** + * stristart: + * @str: string to test + * @val: prefix string to look for + * @ptr: NULL, or pointer to be written to indicate start of + * the remainder of the string + * + * Test whether @str starts with the case-insensitive prefix @val. + * This function behaves identically to strstart(), except that the + * comparison is made after calling qemu_toupper() on each pair of + * characters. + * + * Returns: true if @str starts with case-insensitive prefix @val, + * false otherwise. + */ +int stristart(const char *str, const char *val, const char **ptr); +/** + * qemu_strnlen: + * @s: string + * @max_len: maximum number of bytes in @s to scan + * + * Return the length of the string @s, like strlen(), but do not + * examine more than @max_len bytes of the memory pointed to by @s. + * If no NUL terminator is found within @max_len bytes, then return + * @max_len instead. + * + * This function has the same behaviour as the POSIX strnlen() + * function. + * + * Returns: length of @s in bytes, or @max_len, whichever is smaller. + */ +int qemu_strnlen(const char *s, int max_len); +/** + * qemu_strsep: + * @input: pointer to string to parse + * @delim: string containing delimiter characters to search for + * + * Locate the first occurrence of any character in @delim within + * the string referenced by @input, and replace it with a NUL. + * The location of the next character after the delimiter character + * is stored into @input. + * If the end of the string was reached without finding a delimiter + * character, then NULL is stored into @input. + * If @input points to a NULL pointer on entry, return NULL. + * The return value is always the original value of *@input (and + * so now points to a NUL-terminated string corresponding to the + * part of the input up to the first delimiter). + * + * This function has the same behaviour as the BSD strsep() function. + * + * Returns: the pointer originally in @input. 
+ */ +char *qemu_strsep(char **input, const char *delim); +time_t mktimegm(struct tm *tm); +int qemu_fdatasync(int fd); +int fcntl_setfl(int fd, int flag); +int qemu_parse_fd(const char *param); +int qemu_strtol(const char *nptr, const char **endptr, int base, + long *result); +int qemu_strtoul(const char *nptr, const char **endptr, int base, + unsigned long *result); +int qemu_strtoll(const char *nptr, const char **endptr, int base, + int64_t *result); +int qemu_strtoull(const char *nptr, const char **endptr, int base, + uint64_t *result); + +int parse_uint(const char *s, unsigned long long *value, char **endptr, + int base); +int parse_uint_full(const char *s, unsigned long long *value, int base); + +/* + * strtosz() suffixes used to specify the default treatment of an + * argument passed to strtosz() without an explicit suffix. + * These should be defined using upper case characters in the range + * A-Z, as strtosz() will use qemu_toupper() on the given argument + * prior to comparison. + */ +#define STRTOSZ_DEFSUFFIX_EB 'E' +#define STRTOSZ_DEFSUFFIX_PB 'P' +#define STRTOSZ_DEFSUFFIX_TB 'T' +#define STRTOSZ_DEFSUFFIX_GB 'G' +#define STRTOSZ_DEFSUFFIX_MB 'M' +#define STRTOSZ_DEFSUFFIX_KB 'K' +#define STRTOSZ_DEFSUFFIX_B 'B' +int64_t strtosz(const char *nptr, char **end); +int64_t strtosz_suffix(const char *nptr, char **end, const char default_suffix); +int64_t strtosz_suffix_unit(const char *nptr, char **end, + const char default_suffix, int64_t unit); +#define K_BYTE (1ULL << 10) +#define M_BYTE (1ULL << 20) +#define G_BYTE (1ULL << 30) +#define T_BYTE (1ULL << 40) +#define P_BYTE (1ULL << 50) +#define E_BYTE (1ULL << 60) + +/* used to print char* safely */ +#define STR_OR_NULL(str) ((str) ? (str) : "null") + +/* id.c */ +bool id_wellformed(const char *id); + +/* path.c */ +void init_paths(const char *prefix); +const char *path(const char *pathname); + +#define qemu_isalnum(c) isalnum((unsigned char)(c)) +#define qemu_isalpha(c) isalpha((unsigned char)(c)) +#define qemu_iscntrl(c) iscntrl((unsigned char)(c)) +#define qemu_isdigit(c) isdigit((unsigned char)(c)) +#define qemu_isgraph(c) isgraph((unsigned char)(c)) +#define qemu_islower(c) islower((unsigned char)(c)) +#define qemu_isprint(c) isprint((unsigned char)(c)) +#define qemu_ispunct(c) ispunct((unsigned char)(c)) +#define qemu_isspace(c) isspace((unsigned char)(c)) +#define qemu_isupper(c) isupper((unsigned char)(c)) +#define qemu_isxdigit(c) isxdigit((unsigned char)(c)) +#define qemu_tolower(c) tolower((unsigned char)(c)) +#define qemu_toupper(c) toupper((unsigned char)(c)) +#define qemu_isascii(c) isascii((unsigned char)(c)) +#define qemu_toascii(c) toascii((unsigned char)(c)) + +void *qemu_oom_check(void *ptr); + +ssize_t qemu_write_full(int fd, const void *buf, size_t count) + QEMU_WARN_UNUSED_RESULT; + +#ifndef _WIN32 +int qemu_pipe(int pipefd[2]); +/* like openpty() but also makes it raw; return master fd */ +int qemu_openpty_raw(int *aslave, char *pty_name); +#endif + +/* Error handling. */ + +void QEMU_NORETURN hw_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2); + +struct ParallelIOArg { + void *buffer; + int count; +}; + +typedef int (*DMA_transfer_handler) (void *opaque, int nchan, int pos, int size); + +typedef uint64_t pcibus_t; + +typedef struct PCIHostDeviceAddress { + unsigned int domain; + unsigned int bus; + unsigned int slot; + unsigned int function; +} PCIHostDeviceAddress; + +void tcg_exec_init(uintptr_t tb_size); +bool tcg_enabled(void); + +void cpu_exec_init_all(void); + +/* CPU save/load. 
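
qemu-common.h above documents the bounded string helpers (pstrcpy, pstrcat, strstart) imported with the rest of the SLiRP support. A small sketch, assuming the matching cutils implementation in slirp/simh is linked; the buffer contents are arbitrary.

  #include <stdio.h>
  #include "qemu-common.h"

  int main(void)
  {
      char buf[16];
      const char *rest;

      pstrcpy(buf, sizeof(buf), "attach xq ");   /* result is always NUL terminated */
      pstrcat(buf, sizeof(buf), "nat:");         /* appends without overflowing buf */

      if (strstart(buf, "attach ", &rest))
          printf("device spec: %s\n", rest);     /* prints "xq nat:" */
      return 0;
  }
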
*/ +#ifdef CPU_SAVE_VERSION +void cpu_save(QEMUFile *f, void *opaque); +int cpu_load(QEMUFile *f, void *opaque, int version_id); +#endif + +/* Unblock cpu */ +void qemu_cpu_kick_self(void); + +/* work queue */ +struct qemu_work_item { + struct qemu_work_item *next; + void (*func)(void *data); + void *data; + int done; + bool free; +}; + + +/** + * Sends a (part of) iovec down a socket, yielding when the socket is full, or + * Receives data into a (part of) iovec from a socket, + * yielding when there is no data in the socket. + * The same interface as qemu_sendv_recvv(), with added yielding. + * XXX should mark these as coroutine_fn + */ +ssize_t qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt, + size_t offset, size_t bytes, bool do_send); +#define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \ + qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false) +#define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \ + qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true) + +/** + * The same as above, but with just a single buffer + */ +ssize_t qemu_co_send_recv(int sockfd, void *buf, size_t bytes, bool do_send); +#define qemu_co_recv(sockfd, buf, bytes) \ + qemu_co_send_recv(sockfd, buf, bytes, false) +#define qemu_co_send(sockfd, buf, bytes) \ + qemu_co_send_recv(sockfd, buf, bytes, true) + +typedef struct QEMUIOVector { + struct iovec *iov; + int niov; + int nalloc; + size_t size; +} QEMUIOVector; + +void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint); +void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov); +void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len); +void qemu_iovec_concat(QEMUIOVector *dst, + QEMUIOVector *src, size_t soffset, size_t sbytes); +size_t qemu_iovec_concat_iov(QEMUIOVector *dst, + struct iovec *src_iov, unsigned int src_cnt, + size_t soffset, size_t sbytes); +bool qemu_iovec_is_zero(QEMUIOVector *qiov); +void qemu_iovec_destroy(QEMUIOVector *qiov); +void qemu_iovec_reset(QEMUIOVector *qiov); +size_t qemu_iovec_to_buf(QEMUIOVector *qiov, size_t offset, + void *buf, size_t bytes); +size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset, + const void *buf, size_t bytes); +size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset, + int fillc, size_t bytes); +ssize_t qemu_iovec_compare(QEMUIOVector *a, QEMUIOVector *b); +void qemu_iovec_clone(QEMUIOVector *dest, const QEMUIOVector *src, void *buf); +void qemu_iovec_discard_back(QEMUIOVector *qiov, size_t bytes); + +bool buffer_is_zero(const void *buf, size_t len); + +void qemu_progress_init(int enabled, float min_skip); +void qemu_progress_end(void); +void qemu_progress_print(float delta, int max); +const char *qemu_get_vm_name(void); + +#define QEMU_FILE_TYPE_BIOS 0 +#define QEMU_FILE_TYPE_KEYMAP 1 +char *qemu_find_file(int type, const char *name); + +/* OS specific functions */ +void os_setup_early_signal_handling(void); +char *os_find_datadir(void); +void os_parse_cmd_args(int index, const char *optarg); + +/* Convert a byte between binary and BCD. 
*/ +static inline uint8_t to_bcd(uint8_t val) +{ + return ((val / 10) << 4) | (val % 10); +} + +static inline uint8_t from_bcd(uint8_t val) +{ + return ((val >> 4) * 10) + (val & 0x0f); +} + +/* Round number down to multiple */ +#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m)) + +/* Round number up to multiple */ +#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m)) + +#include "qemu/module.h" + +/* + * Implementation of ULEB128 (http://en.wikipedia.org/wiki/LEB128) + * Input is limited to 14-bit numbers + */ + +int uleb128_encode_small(uint8_t *out, uint32_t n); +int uleb128_decode_small(const uint8_t *in, uint32_t *n); + +/* unicode.c */ +int mod_utf8_codepoint(const char *s, size_t n, char **end); + +/* + * Hexdump a buffer to a file. An optional string prefix is added to every line + */ + +void qemu_hexdump(const char *buf, FILE *fp, const char *prefix, size_t size); + +/* vector definitions */ +#ifdef __ALTIVEC__ +#include +/* The altivec.h header says we're allowed to undef these for + * C++ compatibility. Here we don't care about C++, but we + * undef them anyway to avoid namespace pollution. + */ +#undef vector +#undef pixel +#undef bool +#define VECTYPE __vector unsigned char +#define SPLAT(p) vec_splat(vec_ld(0, p), 0) +#define ALL_EQ(v1, v2) vec_all_eq(v1, v2) +#define VEC_OR(v1, v2) ((v1) | (v2)) +/* altivec.h may redefine the bool macro as vector type. + * Reset it to POSIX semantics. */ +#define bool _Bool +#elif defined __SSE2__ +#include +#define VECTYPE __m128i +#define SPLAT(p) _mm_set1_epi8(*(p)) +#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF) +#define VEC_OR(v1, v2) (_mm_or_si128(v1, v2)) +#else +#define VECTYPE unsigned long +#define SPLAT(p) (*(p) * (~0UL / 255)) +#define ALL_EQ(v1, v2) ((v1) == (v2)) +#define VEC_OR(v1, v2) ((v1) | (v2)) +#endif + +#define BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR 8 +static inline bool +can_use_buffer_find_nonzero_offset(const void *buf, size_t len) +{ + return (len % (BUFFER_FIND_NONZERO_OFFSET_UNROLL_FACTOR + * sizeof(VECTYPE)) == 0 + && ((uintptr_t) buf) % sizeof(VECTYPE) == 0); +} +size_t buffer_find_nonzero_offset(const void *buf, size_t len); + +/* + * helper to parse debug environment variables + */ +int parse_debug_env(const char *name, int max, int initial); + +const char *qemu_ether_ntoa(const MACAddr *mac); + +#endif diff --git a/slirp/simh/qemu/queue.h b/slirp/simh/qemu/queue.h new file mode 100644 index 00000000..a8d3cb8e --- /dev/null +++ b/slirp/simh/qemu/queue.h @@ -0,0 +1,445 @@ +/* $NetBSD: queue.h,v 1.52 2009/04/20 09:56:08 mschuett Exp $ */ + +/* + * QEMU version: Copy from netbsd, removed debug code, removed some of + * the implementations. Left in singly-linked lists, lists, simple + * queues, and tail queues. + */ + +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + */ + +#ifndef QEMU_SYS_QUEUE_H_ +#define QEMU_SYS_QUEUE_H_ + +/* + * This file defines four types of data structures: singly-linked lists, + * lists, simple queues, and tail queues. + * + * A singly-linked list is headed by a single forward pointer. The + * elements are singly linked for minimum space and pointer manipulation + * overhead at the expense of O(n) removal for arbitrary elements. New + * elements can be added to the list after an existing element or at the + * head of the list. Elements being removed from the head of the list + * should use the explicit macro for this purpose for optimum + * efficiency. A singly-linked list may only be traversed in the forward + * direction. Singly-linked lists are ideal for applications with large + * datasets and few or no removals or for implementing a LIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. + * + * A simple queue is headed by a pair of pointers, one the head of the + * list and the other to the tail of the list. The elements are singly + * linked to save space, so elements can only be removed from the + * head of the list. New elements can be added to the list after + * an existing element, at the head of the list, or at the end of the + * list. A simple queue may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may be traversed in either direction. + * + * For details on the use of these macros, see the queue(3) manual page. + */ + +#include "qemu/atomic.h" /* for smp_wmb() */ + +/* + * List definitions. 
+ */ +#define QLIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ +} + +#define QLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define QLIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} + +/* + * List functions. + */ +#define QLIST_INIT(head) do { \ + (head)->lh_first = NULL; \ +} while (/*CONSTCOND*/0) + +#define QLIST_SWAP(dstlist, srclist, field) do { \ + void *tmplist; \ + tmplist = (srclist)->lh_first; \ + (srclist)->lh_first = (dstlist)->lh_first; \ + if ((srclist)->lh_first != NULL) { \ + (srclist)->lh_first->field.le_prev = &(srclist)->lh_first; \ + } \ + (dstlist)->lh_first = tmplist; \ + if ((dstlist)->lh_first != NULL) { \ + (dstlist)->lh_first->field.le_prev = &(dstlist)->lh_first; \ + } \ +} while (/*CONSTCOND*/0) + +#define QLIST_FIX_HEAD_PTR(head, field) do { \ + if ((head)->lh_first != NULL) { \ + (head)->lh_first->field.le_prev = &(head)->lh_first; \ + } \ +} while (/*CONSTCOND*/0) + +#define QLIST_INSERT_AFTER(listelm, elm, field) do { \ + if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ + (listelm)->field.le_next->field.le_prev = \ + &(elm)->field.le_next; \ + (listelm)->field.le_next = (elm); \ + (elm)->field.le_prev = &(listelm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define QLIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define QLIST_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.le_next = (head)->lh_first) != NULL) \ + (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ + (head)->lh_first = (elm); \ + (elm)->field.le_prev = &(head)->lh_first; \ +} while (/*CONSTCOND*/0) + +#define QLIST_REMOVE(elm, field) do { \ + if ((elm)->field.le_next != NULL) \ + (elm)->field.le_next->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = (elm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define QLIST_FOREACH(var, head, field) \ + for ((var) = ((head)->lh_first); \ + (var); \ + (var) = ((var)->field.le_next)) + +#define QLIST_FOREACH_SAFE(var, head, field, next_var) \ + for ((var) = ((head)->lh_first); \ + (var) && ((next_var) = ((var)->field.le_next), 1); \ + (var) = (next_var)) + +/* + * List access methods. + */ +#define QLIST_EMPTY(head) ((head)->lh_first == NULL) +#define QLIST_FIRST(head) ((head)->lh_first) +#define QLIST_NEXT(elm, field) ((elm)->field.le_next) + + +/* + * Singly-linked List definitions. + */ +#define QSLIST_HEAD(name, type) \ +struct name { \ + struct type *slh_first; /* first element */ \ +} + +#define QSLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define QSLIST_ENTRY(type) \ +struct { \ + struct type *sle_next; /* next element */ \ +} + +/* + * Singly-linked List functions. 
+ */ +#define QSLIST_INIT(head) do { \ + (head)->slh_first = NULL; \ +} while (/*CONSTCOND*/0) + +#define QSLIST_INSERT_AFTER(slistelm, elm, field) do { \ + (elm)->field.sle_next = (slistelm)->field.sle_next; \ + (slistelm)->field.sle_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.sle_next = (head)->slh_first; \ + (head)->slh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_INSERT_HEAD_ATOMIC(head, elm, field) do { \ + typeof(elm) save_sle_next; \ + do { \ + save_sle_next = (elm)->field.sle_next = (head)->slh_first; \ + } while (atomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) != \ + save_sle_next); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_MOVE_ATOMIC(dest, src) do { \ + (dest)->slh_first = atomic_xchg(&(src)->slh_first, NULL); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_REMOVE_HEAD(head, field) do { \ + (head)->slh_first = (head)->slh_first->field.sle_next; \ +} while (/*CONSTCOND*/0) + +#define QSLIST_REMOVE_AFTER(slistelm, field) do { \ + (slistelm)->field.sle_next = \ + QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_FOREACH(var, head, field) \ + for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) + +#define QSLIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = QSLIST_FIRST((head)); \ + (var) && ((tvar) = QSLIST_NEXT((var), field), 1); \ + (var) = (tvar)) + +/* + * Singly-linked List access methods. + */ +#define QSLIST_EMPTY(head) ((head)->slh_first == NULL) +#define QSLIST_FIRST(head) ((head)->slh_first) +#define QSLIST_NEXT(elm, field) ((elm)->field.sle_next) + + +/* + * Simple queue definitions. + */ +#define QSIMPLEQ_HEAD(name, type) \ +struct name { \ + struct type *sqh_first; /* first element */ \ + struct type **sqh_last; /* addr of last next element */ \ +} + +#define QSIMPLEQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).sqh_first } + +#define QSIMPLEQ_ENTRY(type) \ +struct { \ + struct type *sqe_next; /* next element */ \ +} + +/* + * Simple queue functions. 
+ */ +#define QSIMPLEQ_INIT(head) do { \ + (head)->sqh_first = NULL; \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (head)->sqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.sqe_next = NULL; \ + *(head)->sqh_last = (elm); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (listelm)->field.sqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_REMOVE_HEAD(head, field) do { \ + if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL)\ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_SPLIT_AFTER(head, elm, field, removed) do { \ + QSIMPLEQ_INIT(removed); \ + if (((removed)->sqh_first = (head)->sqh_first) != NULL) { \ + if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) { \ + (head)->sqh_last = &(head)->sqh_first; \ + } \ + (removed)->sqh_last = &(elm)->field.sqe_next; \ + (elm)->field.sqe_next = NULL; \ + } \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_REMOVE(head, elm, type, field) do { \ + if ((head)->sqh_first == (elm)) { \ + QSIMPLEQ_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->sqh_first; \ + while (curelm->field.sqe_next != (elm)) \ + curelm = curelm->field.sqe_next; \ + if ((curelm->field.sqe_next = \ + curelm->field.sqe_next->field.sqe_next) == NULL) \ + (head)->sqh_last = &(curelm)->field.sqe_next; \ + } \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_FOREACH(var, head, field) \ + for ((var) = ((head)->sqh_first); \ + (var); \ + (var) = ((var)->field.sqe_next)) + +#define QSIMPLEQ_FOREACH_SAFE(var, head, field, next) \ + for ((var) = ((head)->sqh_first); \ + (var) && ((next = ((var)->field.sqe_next)), 1); \ + (var) = (next)) + +#define QSIMPLEQ_CONCAT(head1, head2) do { \ + if (!QSIMPLEQ_EMPTY((head2))) { \ + *(head1)->sqh_last = (head2)->sqh_first; \ + (head1)->sqh_last = (head2)->sqh_last; \ + QSIMPLEQ_INIT((head2)); \ + } \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_LAST(head, type, field) \ + (QSIMPLEQ_EMPTY((head)) ? \ + NULL : \ + ((struct type *)(void *) \ + ((char *)((head)->sqh_last) - offsetof(struct type, field)))) + +/* + * Simple queue access methods. + */ +#define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) +#define QSIMPLEQ_FIRST(head) ((head)->sqh_first) +#define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) + + +/* + * Tail queue definitions. + */ +#define Q_TAILQ_HEAD(name, type, qual) \ +struct name { \ + qual type *tqh_first; /* first element */ \ + qual type *qual *tqh_last; /* addr of last next element */ \ +} +#define QTAILQ_HEAD(name, type) Q_TAILQ_HEAD(name, struct type,) + +#define QTAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } + +#define Q_TAILQ_ENTRY(type, qual) \ +struct { \ + qual type *tqe_next; /* next element */ \ + qual type *qual *tqe_prev; /* address of previous next element */\ +} +#define QTAILQ_ENTRY(type) Q_TAILQ_ENTRY(struct type,) + +/* + * Tail queue functions. 
+ */ +#define QTAILQ_INIT(head) do { \ + (head)->tqh_first = NULL; \ + (head)->tqh_last = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ + (head)->tqh_first->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_first = (elm); \ + (elm)->field.tqe_prev = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.tqe_next = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ + (elm)->field.tqe_next->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (listelm)->field.tqe_next = (elm); \ + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_REMOVE(head, elm, field) do { \ + if (((elm)->field.tqe_next) != NULL) \ + (elm)->field.tqe_next->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_FOREACH(var, head, field) \ + for ((var) = ((head)->tqh_first); \ + (var); \ + (var) = ((var)->field.tqe_next)) + +#define QTAILQ_FOREACH_SAFE(var, head, field, next_var) \ + for ((var) = ((head)->tqh_first); \ + (var) && ((next_var) = ((var)->field.tqe_next), 1); \ + (var) = (next_var)) + +#define QTAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ + (var); \ + (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) + +/* + * Tail queue access methods. + */ +#define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL) +#define QTAILQ_FIRST(head) ((head)->tqh_first) +#define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next) + +#define QTAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) +#define QTAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) + +#endif /* !QEMU_SYS_QUEUE_H_ */ diff --git a/slirp/simh/qemu/rfifolock.h b/slirp/simh/qemu/rfifolock.h new file mode 100644 index 00000000..b23ab538 --- /dev/null +++ b/slirp/simh/qemu/rfifolock.h @@ -0,0 +1,54 @@ +/* + * Recursive FIFO lock + * + * Copyright Red Hat, Inc. 2013 + * + * Authors: + * Stefan Hajnoczi + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_RFIFOLOCK_H +#define QEMU_RFIFOLOCK_H + +#include "qemu/thread.h" + +/* Recursive FIFO lock + * + * This lock provides more features than a plain mutex: + * + * 1. Fairness - enforces FIFO order. + * 2. Nesting - can be taken recursively. + * 3. Contention callback - optional, called when thread must wait. + * + * The recursive FIFO lock is heavyweight so prefer other synchronization + * primitives if you do not need its features. 
+ */ +typedef struct { + QemuMutex lock; /* protects all fields */ + + /* FIFO order */ + unsigned int head; /* active ticket number */ + unsigned int tail; /* waiting ticket number */ + QemuCond cond; /* used to wait for our ticket number */ + + /* Nesting */ + QemuThread owner_thread; /* thread that currently has ownership */ + unsigned int nesting; /* amount of nesting levels */ + + /* Contention callback */ + void (*cb)(void *); /* called when thread must wait, with ->lock + * held so it may not recursively lock/unlock + */ + void *cb_opaque; +} RFifoLock; + +void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque); +void rfifolock_destroy(RFifoLock *r); +void rfifolock_lock(RFifoLock *r); +void rfifolock_unlock(RFifoLock *r); + +#endif /* QEMU_RFIFOLOCK_H */ diff --git a/slirp/simh/qemu/sockets.h b/slirp/simh/qemu/sockets.h new file mode 100644 index 00000000..a35d166f --- /dev/null +++ b/slirp/simh/qemu/sockets.h @@ -0,0 +1,94 @@ +/* headers to use the BSD sockets */ +#ifndef QEMU_SOCKET_H +#define QEMU_SOCKET_H + +#ifdef _WIN32 +#include +#include +#include + +#define socket_error() WSAGetLastError() + +int inet_aton(const char *cp, struct in_addr *ia); + +#else + +#include +#include +#include +#include +#include +#include +#include + +#define socket_error() errno + +#endif /* !_WIN32 */ + +#include "qemu/option.h" +#include "qapi/error.h" +#include "qapi-types.h" + +extern QemuOptsList socket_optslist; + +/* misc helpers */ +int qemu_socket(int domain, int type, int protocol); +int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen); +int socket_set_cork(int fd, int v); +int socket_set_nodelay(int fd); +void qemu_set_block(int fd); +void qemu_set_nonblock(int fd); +int socket_set_fast_reuse(int fd); +int send_all(int fd, const void *buf, int len1); +int recv_all(int fd, void *buf, int len1, bool single_read); + +#ifdef _WIN32 +/* MinGW needs type casts for the 'buf' and 'optval' arguments. 
*/ +#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \ + sendto(sockfd, (const void *)buf, len, flags, destaddr, addrlen) + +/* Windows has different names for the same constants with the same values */ +#define SHUT_RD 0 +#define SHUT_WR 1 +#define SHUT_RDWR 2 +#endif + +/* callback function for nonblocking connect + * valid fd on success, negative error code on failure + */ +typedef void NonBlockingConnectHandler(int fd, Error *errp, void *opaque); + +InetSocketAddress *inet_parse(const char *str, Error **errp); +int inet_listen_opts(QemuOpts *opts, int port_offset, Error **errp); +int inet_listen(const char *str, char *ostr, int olen, + int socktype, int port_offset, Error **errp); +int inet_connect_opts(QemuOpts *opts, Error **errp, + NonBlockingConnectHandler *callback, void *opaque); +int inet_connect(const char *str, Error **errp); +int inet_nonblocking_connect(const char *str, + NonBlockingConnectHandler *callback, + void *opaque, Error **errp); + +int inet_dgram_opts(QemuOpts *opts, Error **errp); +NetworkAddressFamily inet_netfamily(int family); + +int unix_listen_opts(QemuOpts *opts, Error **errp); +int unix_listen(const char *path, char *ostr, int olen, Error **errp); +int unix_connect_opts(QemuOpts *opts, Error **errp, + NonBlockingConnectHandler *callback, void *opaque); +int unix_connect(const char *path, Error **errp); +int unix_nonblocking_connect(const char *str, + NonBlockingConnectHandler *callback, + void *opaque, Error **errp); + +SocketAddress *socket_parse(const char *str, Error **errp); +int socket_connect(SocketAddress *addr, Error **errp, + NonBlockingConnectHandler *callback, void *opaque); +int socket_listen(SocketAddress *addr, Error **errp); +int socket_dgram(SocketAddress *remote, SocketAddress *local, Error **errp); + +/* Old, ipv4 only bits. Don't use for new code. 
*/ +int parse_host_port(struct sockaddr_in *saddr, const char *str); +int socket_init(void); + +#endif /* QEMU_SOCKET_H */ diff --git a/slirp/simh/qemu/sysemu/char.h b/slirp/simh/qemu/sysemu/char.h new file mode 100644 index 00000000..2a397309 --- /dev/null +++ b/slirp/simh/qemu/sysemu/char.h @@ -0,0 +1,371 @@ +#ifndef QEMU_CHAR_H +#define QEMU_CHAR_H + +#include "qemu-common.h" +#include "qemu/queue.h" +#include "qemu/option.h" +#include "qemu/config-file.h" +#include "block/aio.h" +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qstring.h" +#include "qemu/main-loop.h" + +/* character device */ + +#define CHR_EVENT_BREAK 0 /* serial break char */ +#define CHR_EVENT_FOCUS 1 /* focus to this terminal (modal input needed) */ +#define CHR_EVENT_OPENED 2 /* new connection established */ +#define CHR_EVENT_MUX_IN 3 /* mux-focus was set to this terminal */ +#define CHR_EVENT_MUX_OUT 4 /* mux-focus will move on */ +#define CHR_EVENT_CLOSED 5 /* connection closed */ + + +#define CHR_IOCTL_SERIAL_SET_PARAMS 1 +typedef struct { + int speed; + int parity; + int data_bits; + int stop_bits; +} QEMUSerialSetParams; + +#define CHR_IOCTL_SERIAL_SET_BREAK 2 + +#define CHR_IOCTL_PP_READ_DATA 3 +#define CHR_IOCTL_PP_WRITE_DATA 4 +#define CHR_IOCTL_PP_READ_CONTROL 5 +#define CHR_IOCTL_PP_WRITE_CONTROL 6 +#define CHR_IOCTL_PP_READ_STATUS 7 +#define CHR_IOCTL_PP_EPP_READ_ADDR 8 +#define CHR_IOCTL_PP_EPP_READ 9 +#define CHR_IOCTL_PP_EPP_WRITE_ADDR 10 +#define CHR_IOCTL_PP_EPP_WRITE 11 +#define CHR_IOCTL_PP_DATA_DIR 12 + +#define CHR_IOCTL_SERIAL_SET_TIOCM 13 +#define CHR_IOCTL_SERIAL_GET_TIOCM 14 + +#define CHR_TIOCM_CTS 0x020 +#define CHR_TIOCM_CAR 0x040 +#define CHR_TIOCM_DSR 0x100 +#define CHR_TIOCM_RI 0x080 +#define CHR_TIOCM_DTR 0x002 +#define CHR_TIOCM_RTS 0x004 + +typedef void IOEventHandler(void *opaque, int event); + +struct CharDriverState { + QemuMutex chr_write_lock; + void (*init)(struct CharDriverState *s); + int (*chr_write)(struct CharDriverState *s, const uint8_t *buf, int len); + int (*chr_sync_read)(struct CharDriverState *s, + const uint8_t *buf, int len); + GSource *(*chr_add_watch)(struct CharDriverState *s, GIOCondition cond); + void (*chr_update_read_handler)(struct CharDriverState *s); + int (*chr_ioctl)(struct CharDriverState *s, int cmd, void *arg); + int (*get_msgfds)(struct CharDriverState *s, int* fds, int num); + int (*set_msgfds)(struct CharDriverState *s, int *fds, int num); + int (*chr_add_client)(struct CharDriverState *chr, int fd); + IOEventHandler *chr_event; + IOCanReadHandler *chr_can_read; + IOReadHandler *chr_read; + void *handler_opaque; + void (*chr_close)(struct CharDriverState *chr); + void (*chr_accept_input)(struct CharDriverState *chr); + void (*chr_set_echo)(struct CharDriverState *chr, bool echo); + void (*chr_set_fe_open)(struct CharDriverState *chr, int fe_open); + void (*chr_fe_event)(struct CharDriverState *chr, int event); + void *opaque; + char *label; + char *filename; + int be_open; + int fe_open; + int explicit_fe_open; + int explicit_be_open; + int avail_connections; + int is_mux; + guint fd_in_tag; + QemuOpts *opts; + QTAILQ_ENTRY(CharDriverState) next; +}; + +/** + * @qemu_chr_alloc: + * + * Allocate and initialize a new CharDriverState. + * + * Returns: a newly allocated CharDriverState. + */ +CharDriverState *qemu_chr_alloc(void); + +/** + * @qemu_chr_new_from_opts: + * + * Create a new character backend from a QemuOpts list. + * + * @opts see qemu-config.c for a list of valid options + * @init not sure.. 
+ * + * Returns: a new character backend + */ +CharDriverState *qemu_chr_new_from_opts(QemuOpts *opts, + void (*init)(struct CharDriverState *s), + Error **errp); + +/** + * @qemu_chr_new: + * + * Create a new character backend from a URI. + * + * @label the name of the backend + * @filename the URI + * @init not sure.. + * + * Returns: a new character backend + */ +CharDriverState *qemu_chr_new(const char *label, const char *filename, + void (*init)(struct CharDriverState *s)); + +/** + * @qemu_chr_delete: + * + * Destroy a character backend. + */ +void qemu_chr_delete(CharDriverState *chr); + +/** + * @qemu_chr_fe_set_echo: + * + * Ask the backend to override its normal echo setting. This only really + * applies to the stdio backend and is used by the QMP server such that you + * can see what you type if you try to type QMP commands. + * + * @echo true to enable echo, false to disable echo + */ +void qemu_chr_fe_set_echo(struct CharDriverState *chr, bool echo); + +/** + * @qemu_chr_fe_set_open: + * + * Set character frontend open status. This is an indication that the + * front end is ready (or not) to begin doing I/O. + */ +void qemu_chr_fe_set_open(struct CharDriverState *chr, int fe_open); + +/** + * @qemu_chr_fe_event: + * + * Send an event from the front end to the back end. + * + * @event the event to send + */ +void qemu_chr_fe_event(CharDriverState *s, int event); + +/** + * @qemu_chr_fe_printf: + * + * Write to a character backend using a printf style interface. + * This function is thread-safe. + * + * @fmt see #printf + */ +void qemu_chr_fe_printf(CharDriverState *s, const char *fmt, ...) + GCC_FMT_ATTR(2, 3); + +//int qemu_chr_fe_add_watch(CharDriverState *s, GIOCondition cond, +// GIOFunc func, void *user_data); + +/** + * @qemu_chr_fe_write: + * + * Write data to a character backend from the front end. This function + * will send data from the front end to the back end. This function + * is thread-safe. + * + * @buf the data + * @len the number of bytes to send + * + * Returns: the number of bytes consumed + */ +int qemu_chr_fe_write(CharDriverState *s, const uint8_t *buf, int len); + +/** + * @qemu_chr_fe_write_all: + * + * Write data to a character backend from the front end. This function will + * send data from the front end to the back end. Unlike @qemu_chr_fe_write, + * this function will block if the back end cannot consume all of the data + * attempted to be written. This function is thread-safe. + * + * @buf the data + * @len the number of bytes to send + * + * Returns: the number of bytes consumed + */ +int qemu_chr_fe_write_all(CharDriverState *s, const uint8_t *buf, int len); + +/** + * @qemu_chr_fe_read_all: + * + * Read data to a buffer from the back end. + * + * @buf the data buffer + * @len the number of bytes to read + * + * Returns: the number of bytes read + */ +int qemu_chr_fe_read_all(CharDriverState *s, uint8_t *buf, int len); + +/** + * @qemu_chr_fe_ioctl: + * + * Issue a device specific ioctl to a backend. This function is thread-safe. + * + * @cmd see CHR_IOCTL_* + * @arg the data associated with @cmd + * + * Returns: if @cmd is not supported by the backend, -ENOTSUP, otherwise the + * return value depends on the semantics of @cmd + */ +int qemu_chr_fe_ioctl(CharDriverState *s, int cmd, void *arg); + +/** + * @qemu_chr_fe_get_msgfd: + * + * For backends capable of fd passing, return the latest file descriptor passed + * by a client. + * + * Returns: -1 if fd passing isn't supported or there is no pending file + * descriptor. 
If a file descriptor is returned, subsequent calls to + * this function will return -1 until a client sends a new file + * descriptor. + */ +int qemu_chr_fe_get_msgfd(CharDriverState *s); + +/** + * @qemu_chr_fe_get_msgfds: + * + * For backends capable of fd passing, return the number of file received + * descriptors and fills the fds array up to num elements + * + * Returns: -1 if fd passing isn't supported or there are no pending file + * descriptors. If file descriptors are returned, subsequent calls to + * this function will return -1 until a client sends a new set of file + * descriptors. + */ +int qemu_chr_fe_get_msgfds(CharDriverState *s, int *fds, int num); + +/** + * @qemu_chr_fe_set_msgfds: + * + * For backends capable of fd passing, set an array of fds to be passed with + * the next send operation. + * A subsequent call to this function before calling a write function will + * result in overwriting the fd array with the new value without being send. + * Upon writing the message the fd array is freed. + * + * Returns: -1 if fd passing isn't supported. + */ +int qemu_chr_fe_set_msgfds(CharDriverState *s, int *fds, int num); + +/** + * @qemu_chr_fe_claim: + * + * Claim a backend before using it, should be called before calling + * qemu_chr_add_handlers(). + * + * Returns: -1 if the backend is already in use by another frontend, 0 on + * success. + */ +int qemu_chr_fe_claim(CharDriverState *s); + +/** + * @qemu_chr_fe_claim_no_fail: + * + * Like qemu_chr_fe_claim, but will exit qemu with an error when the + * backend is already in use. + */ +void qemu_chr_fe_claim_no_fail(CharDriverState *s); + +/** + * @qemu_chr_fe_claim: + * + * Release a backend for use by another frontend. + * + * Returns: -1 if the backend is already in use by another frontend, 0 on + * success. + */ +void qemu_chr_fe_release(CharDriverState *s); + +/** + * @qemu_chr_be_can_write: + * + * Determine how much data the front end can currently accept. This function + * returns the number of bytes the front end can accept. If it returns 0, the + * front end cannot receive data at the moment. The function must be polled + * to determine when data can be received. + * + * Returns: the number of bytes the front end can receive via @qemu_chr_be_write + */ +int qemu_chr_be_can_write(CharDriverState *s); + +/** + * @qemu_chr_be_write: + * + * Write data from the back end to the front end. Before issuing this call, + * the caller should call @qemu_chr_be_can_write to determine how much data + * the front end can currently accept. + * + * @buf a buffer to receive data from the front end + * @len the number of bytes to receive from the front end + */ +void qemu_chr_be_write(CharDriverState *s, uint8_t *buf, int len); + + +/** + * @qemu_chr_be_event: + * + * Send an event from the back end to the front end. 
+ * + * @event the event to send + */ +void qemu_chr_be_event(CharDriverState *s, int event); + +void qemu_chr_add_handlers(CharDriverState *s, + IOCanReadHandler *fd_can_read, + IOReadHandler *fd_read, + IOEventHandler *fd_event, + void *opaque); + +void qemu_chr_be_generic_open(CharDriverState *s); +void qemu_chr_accept_input(CharDriverState *s); +int qemu_chr_add_client(CharDriverState *s, int fd); +CharDriverState *qemu_chr_find(const char *name); +bool chr_is_ringbuf(const CharDriverState *chr); + +QemuOpts *qemu_chr_parse_compat(const char *label, const char *filename); + +void register_char_driver(const char *name, ChardevBackendKind kind, + void (*parse)(QemuOpts *opts, ChardevBackend *backend, Error **errp)); + +/* add an eventfd to the qemu devices that are polled */ +CharDriverState *qemu_chr_open_eventfd(int eventfd); + +extern int term_escape_char; + +CharDriverState *qemu_char_get_next_serial(void); + +/* msmouse */ +CharDriverState *qemu_chr_open_msmouse(void); + +/* testdev.c */ +CharDriverState *chr_testdev_init(void); + +/* baum.c */ +CharDriverState *chr_baum_init(void); + +/* console.c */ +typedef CharDriverState *(VcHandler)(ChardevVC *vc); + +void register_vc_handler(VcHandler *handler); +CharDriverState *vc_init(ChardevVC *vc); +#endif diff --git a/slirp/simh/qemu/sysemu/os-win32.h b/slirp/simh/qemu/sysemu/os-win32.h new file mode 100644 index 00000000..6e3a60ae --- /dev/null +++ b/slirp/simh/qemu/sysemu/os-win32.h @@ -0,0 +1,140 @@ +/* + * win32 specific declarations + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2010 Jes Sorensen + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef QEMU_OS_WIN32_H +#define QEMU_OS_WIN32_H + +#include +#include + +#if defined(_WIN64) +/* MinGW-w64 has a 32 bit off_t, but we want 64 bit off_t. */ +# define off_t off64_t + +/* MinGW-w64 stdio.h defines SYS_OPEN. Allow a redefinition in arm-semi.c. */ +# undef SYS_OPEN +#endif + +/* Workaround for older versions of MinGW. 
*/ +#ifndef ECONNREFUSED +# define ECONNREFUSED WSAECONNREFUSED +#endif +#ifndef EINPROGRESS +# define EINPROGRESS WSAEINPROGRESS +#endif +#ifndef EHOSTUNREACH +# define EHOSTUNREACH WSAEHOSTUNREACH +#endif +#ifndef EINTR +# define EINTR WSAEINTR +#endif +#ifndef EINPROGRESS +# define EINPROGRESS WSAEINPROGRESS +#endif +#ifndef ENETUNREACH +# define ENETUNREACH WSAENETUNREACH +#endif +#ifndef ENOTCONN +# define ENOTCONN WSAENOTCONN +#endif +#ifndef EWOULDBLOCK +# define EWOULDBLOCK WSAEWOULDBLOCK +#endif + +#if defined(_WIN64) +/* On w64, setjmp is implemented by _setjmp which needs a second parameter. + * If this parameter is NULL, longjump does no stack unwinding. + * That is what we need for QEMU. Passing the value of register rsp (default) + * lets longjmp try a stack unwinding which will crash with generated code. */ +# undef setjmp +# define setjmp(env) _setjmp(env, NULL) +#endif +/* QEMU uses sigsetjmp()/siglongjmp() as the portable way to specify + * "longjmp and don't touch the signal masks". Since we know that the + * savemask parameter will always be zero we can safely define these + * in terms of setjmp/longjmp on Win32. + */ +#define sigjmp_buf jmp_buf +#define sigsetjmp(env, savemask) setjmp(env) +#define siglongjmp(env, val) longjmp(env, val) + +/* Missing POSIX functions. Don't use MinGW-w64 macros. */ +#ifndef CONFIG_LOCALTIME_R +#undef gmtime_r +struct tm *gmtime_r(const time_t *timep, struct tm *result); +#undef localtime_r +struct tm *localtime_r(const time_t *timep, struct tm *result); +#endif /* CONFIG_LOCALTIME_R */ + + +static inline void os_setup_signal_handling(void) {} +static inline void os_daemonize(void) {} +static inline void os_setup_post(void) {} +void os_set_line_buffering(void); +static inline void os_set_proc_name(const char *dummy) {} + +size_t getpagesize(void); + +#if !defined(EPROTONOSUPPORT) +# define EPROTONOSUPPORT EINVAL +#endif + +int setenv(const char *name, const char *value, int overwrite); + +typedef struct { + long tv_sec; + long tv_usec; +} qemu_timeval; +int qemu_gettimeofday(qemu_timeval *tp); + +static inline bool is_daemonized(void) +{ + return false; +} + +static inline int os_mlock(void) +{ + return -ENOSYS; +} + +#define fsync _commit + +#if !defined(lseek) +# define lseek _lseeki64 +#endif + +int qemu_ftruncate64(int, int64_t); + +#if !defined(ftruncate) +# define ftruncate qemu_ftruncate64 +#endif + +static inline char *realpath(const char *path, char *resolved_path) +{ + _fullpath(resolved_path, path, _MAX_PATH); + return resolved_path; +} + +#endif diff --git a/slirp/simh/qemu/thread-posix.h b/slirp/simh/qemu/thread-posix.h new file mode 100644 index 00000000..eb5c7a1d --- /dev/null +++ b/slirp/simh/qemu/thread-posix.h @@ -0,0 +1,36 @@ +#ifndef __QEMU_THREAD_POSIX_H +#define __QEMU_THREAD_POSIX_H 1 +#include "pthread.h" +#include + +struct QemuMutex { + pthread_mutex_t lock; +}; + +struct QemuCond { + pthread_cond_t cond; +}; + +struct QemuSemaphore { +#if defined(__APPLE__) || defined(__NetBSD__) + pthread_mutex_t lock; + pthread_cond_t cond; + unsigned int count; +#else + sem_t sem; +#endif +}; + +struct QemuEvent { +#ifndef __linux__ + pthread_mutex_t lock; + pthread_cond_t cond; +#endif + unsigned value; +}; + +struct QemuThread { + pthread_t thread; +}; + +#endif diff --git a/slirp/simh/qemu/thread.h b/slirp/simh/qemu/thread.h new file mode 100644 index 00000000..826c9e31 --- /dev/null +++ b/slirp/simh/qemu/thread.h @@ -0,0 +1,65 @@ +#ifndef __QEMU_THREAD_H +#define __QEMU_THREAD_H 1 + +#include +#include + +typedef struct 
QemuMutex QemuMutex; +typedef struct QemuCond QemuCond; +typedef struct QemuSemaphore QemuSemaphore; +typedef struct QemuEvent QemuEvent; +typedef struct QemuThread QemuThread; + +#if defined(_WIN32) && !defined(_MSC_VER) +#include "qemu/thread-win32.h" +#else +#include "qemu/thread-posix.h" +#endif + +#define QEMU_THREAD_JOINABLE 0 +#define QEMU_THREAD_DETACHED 1 + +void qemu_mutex_init(QemuMutex *mutex); +void qemu_mutex_destroy(QemuMutex *mutex); +void qemu_mutex_lock(QemuMutex *mutex); +int qemu_mutex_trylock(QemuMutex *mutex); +void qemu_mutex_unlock(QemuMutex *mutex); + +void qemu_cond_init(QemuCond *cond); +void qemu_cond_destroy(QemuCond *cond); + +/* + * IMPORTANT: The implementation does not guarantee that pthread_cond_signal + * and pthread_cond_broadcast can be called except while the same mutex is + * held as in the corresponding pthread_cond_wait calls! + */ +void qemu_cond_signal(QemuCond *cond); +void qemu_cond_broadcast(QemuCond *cond); +void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex); + +void qemu_sem_init(QemuSemaphore *sem, int init); +void qemu_sem_post(QemuSemaphore *sem); +void qemu_sem_wait(QemuSemaphore *sem); +int qemu_sem_timedwait(QemuSemaphore *sem, int ms); +void qemu_sem_destroy(QemuSemaphore *sem); + +void qemu_event_init(QemuEvent *ev, bool init); +void qemu_event_set(QemuEvent *ev); +void qemu_event_reset(QemuEvent *ev); +void qemu_event_wait(QemuEvent *ev); +void qemu_event_destroy(QemuEvent *ev); + +void qemu_thread_create(QemuThread *thread, const char *name, + void *(*start_routine)(void *), + void *arg, int mode); +void *qemu_thread_join(QemuThread *thread); +void qemu_thread_get_self(QemuThread *thread); +bool qemu_thread_is_self(QemuThread *thread); +void qemu_thread_exit(void *retval); +void qemu_thread_naming(bool enable); + +struct Notifier; +void qemu_thread_atexit_add(struct Notifier *notifier); +void qemu_thread_atexit_remove(struct Notifier *notifier); + +#endif diff --git a/slirp/simh/qemu/timer.h b/slirp/simh/qemu/timer.h new file mode 100644 index 00000000..5c106a32 --- /dev/null +++ b/slirp/simh/qemu/timer.h @@ -0,0 +1,1023 @@ +#ifndef QEMU_TIMER_H +#define QEMU_TIMER_H + +#include "qemu/typedefs.h" +#include "qemu-common.h" +#include "qemu/notify.h" +#include "qemu/host-utils.h" + +#define NANOSECONDS_PER_SECOND 1000000000LL + +/* timers */ + +#define SCALE_MS 1000000 +#define SCALE_US 1000 +#define SCALE_NS 1 + +/** + * QEMUClockType: + * + * The following clock types are available: + * + * @QEMU_CLOCK_REALTIME: Real time clock + * + * The real time clock should be used only for stuff which does not + * change the virtual machine state, as it is run even if the virtual + * machine is stopped. The real time clock has a frequency of 1000 + * Hz. + * + * @QEMU_CLOCK_VIRTUAL: virtual clock + * + * The virtual clock is only run during the emulation. It is stopped + * when the virtual machine is stopped. Virtual timers use a high + * precision clock, usually cpu cycles (use ticks_per_sec). + * + * @QEMU_CLOCK_HOST: host clock + * + * The host clock should be use for device models that emulate accurate + * real time sources. It will continue to run when the virtual machine + * is suspended, and it will reflect system time changes the host may + * undergo (e.g. due to NTP). The host clock has the same precision as + * the virtual clock. + * + * @QEMU_CLOCK_VIRTUAL_RT: realtime clock used for icount warp + * + * Outside icount mode, this clock is the same as @QEMU_CLOCK_VIRTUAL. 
+ * In icount mode, this clock counts nanoseconds while the virtual + * machine is running. It is used to increase @QEMU_CLOCK_VIRTUAL + * while the CPUs are sleeping and thus not executing instructions. + */ + +typedef enum { + QEMU_CLOCK_REALTIME = 0, + QEMU_CLOCK_VIRTUAL = 1, + QEMU_CLOCK_HOST = 2, + QEMU_CLOCK_VIRTUAL_RT = 3, + QEMU_CLOCK_MAX +} QEMUClockType; + +typedef struct QEMUTimerList QEMUTimerList; + +struct QEMUTimerListGroup { + QEMUTimerList *tl[QEMU_CLOCK_MAX]; +}; + +typedef void QEMUTimerCB(void *opaque); +typedef void QEMUTimerListNotifyCB(void *opaque); + +struct QEMUTimer { + int64_t expire_time; /* in nanoseconds */ + QEMUTimerList *timer_list; + QEMUTimerCB *cb; + void *opaque; + QEMUTimer *next; + int scale; +}; + +extern QEMUTimerListGroup main_loop_tlg; + +/* + * QEMUClockType + */ + +/* + * qemu_clock_get_ns; + * @type: the clock type + * + * Get the nanosecond value of a clock with + * type @type + * + * Returns: the clock value in nanoseconds + */ +int64_t qemu_clock_get_ns(QEMUClockType type); + +/** + * qemu_clock_get_ms; + * @type: the clock type + * + * Get the millisecond value of a clock with + * type @type + * + * Returns: the clock value in milliseconds + */ +static inline int64_t qemu_clock_get_ms(QEMUClockType type) +{ + return qemu_clock_get_ns(type) / SCALE_MS; +} + +/** + * qemu_clock_get_us; + * @type: the clock type + * + * Get the microsecond value of a clock with + * type @type + * + * Returns: the clock value in microseconds + */ +static inline int64_t qemu_clock_get_us(QEMUClockType type) +{ + return qemu_clock_get_ns(type) / SCALE_US; +} + +/** + * qemu_clock_has_timers: + * @type: the clock type + * + * Determines whether a clock's default timer list + * has timers attached + * + * Note that this function should not be used when other threads also access + * the timer list. The return value may be outdated by the time it is acted + * upon. + * + * Returns: true if the clock's default timer list + * has timers attached + */ +bool qemu_clock_has_timers(QEMUClockType type); + +/** + * qemu_clock_expired: + * @type: the clock type + * + * Determines whether a clock's default timer list + * has an expired clock. + * + * Returns: true if the clock's default timer list has + * an expired timer + */ +bool qemu_clock_expired(QEMUClockType type); + +/** + * qemu_clock_use_for_deadline: + * @type: the clock type + * + * Determine whether a clock should be used for deadline + * calculations. Some clocks, for instance vm_clock with + * use_icount set, do not count in nanoseconds. Such clocks + * are not used for deadline calculations, and are presumed + * to interrupt any poll using qemu_notify/aio_notify + * etc. + * + * Returns: true if the clock runs in nanoseconds and + * should be used for a deadline. + */ +bool qemu_clock_use_for_deadline(QEMUClockType type); + +/** + * qemu_clock_deadline_ns_all: + * @type: the clock type + * + * Calculate the deadline across all timer lists associated + * with a clock (as opposed to just the default one) + * in nanoseconds, or -1 if no timer is set to expire. + * + * Returns: time until expiry in nanoseconds or -1 + */ +int64_t qemu_clock_deadline_ns_all(QEMUClockType type); + +/** + * qemu_clock_get_main_loop_timerlist: + * @type: the clock type + * + * Return the default timer list assocatiated with a clock. 
+ * + * Returns: the default timer list + */ +QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type); + +/** + * qemu_clock_nofify: + * @type: the clock type + * + * Call the notifier callback connected with the default timer + * list linked to the clock, or qemu_notify() if none. + */ +void qemu_clock_notify(QEMUClockType type); + +/** + * qemu_clock_enable: + * @type: the clock type + * @enabled: true to enable, false to disable + * + * Enable or disable a clock + * Disabling the clock will wait for related timerlists to stop + * executing qemu_run_timers. Thus, this functions should not + * be used from the callback of a timer that is based on @clock. + * Doing so would cause a deadlock. + * + * Caller should hold BQL. + */ +void qemu_clock_enable(QEMUClockType type, bool enabled); + +/** + * qemu_clock_warp: + * @type: the clock type + * + * Warp a clock to a new value + */ +void qemu_clock_warp(QEMUClockType type); + +/** + * qemu_clock_register_reset_notifier: + * @type: the clock type + * @notifier: the notifier function + * + * Register a notifier function to call when the clock + * concerned is reset. + */ +void qemu_clock_register_reset_notifier(QEMUClockType type, + Notifier *notifier); + +/** + * qemu_clock_unregister_reset_notifier: + * @type: the clock type + * @notifier: the notifier function + * + * Unregister a notifier function to call when the clock + * concerned is reset. + */ +void qemu_clock_unregister_reset_notifier(QEMUClockType type, + Notifier *notifier); + +/** + * qemu_clock_run_timers: + * @type: clock on which to operate + * + * Run all the timers associated with the default timer list + * of a clock. + * + * Returns: true if any timer ran. + */ +bool qemu_clock_run_timers(QEMUClockType type); + +/** + * qemu_clock_run_all_timers: + * + * Run all the timers associated with the default timer list + * of every clock. + * + * Returns: true if any timer ran. + */ +bool qemu_clock_run_all_timers(void); + +/* + * QEMUTimerList + */ + +/** + * timerlist_new: + * @type: the clock type to associate with the timerlist + * @cb: the callback to call on notification + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timerlist associated with the clock of + * type @type. + * + * Returns: a pointer to the QEMUTimerList created + */ +QEMUTimerList *timerlist_new(QEMUClockType type, + QEMUTimerListNotifyCB *cb, void *opaque); + +/** + * timerlist_free: + * @timer_list: the timer list to free + * + * Frees a timer_list. It must have no active timers. + */ +void timerlist_free(QEMUTimerList *timer_list); + +/** + * timerlist_has_timers: + * @timer_list: the timer list to operate on + * + * Determine whether a timer list has active timers + * + * Note that this function should not be used when other threads also access + * the timer list. The return value may be outdated by the time it is acted + * upon. + * + * Returns: true if the timer list has timers. + */ +bool timerlist_has_timers(QEMUTimerList *timer_list); + +/** + * timerlist_expired: + * @timer_list: the timer list to operate on + * + * Determine whether a timer list has any timers which + * are expired. + * + * Returns: true if the timer list has timers which + * have expired. + */ +bool timerlist_expired(QEMUTimerList *timer_list); + +/** + * timerlist_deadline_ns: + * @timer_list: the timer list to operate on + * + * Determine the deadline for a timer_list, i.e. + * the number of nanoseconds until the first timer + * expires. Return -1 if there are no timers. 
+ * + * Returns: the number of nanoseconds until the earliest + * timer expires -1 if none + */ +int64_t timerlist_deadline_ns(QEMUTimerList *timer_list); + +/** + * timerlist_get_clock: + * @timer_list: the timer list to operate on + * + * Determine the clock type associated with a timer list. + * + * Returns: the clock type associated with the + * timer list. + */ +QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list); + +/** + * timerlist_run_timers: + * @timer_list: the timer list to use + * + * Call all expired timers associated with the timer list. + * + * Returns: true if any timer expired + */ +bool timerlist_run_timers(QEMUTimerList *timer_list); + +/** + * timerlist_notify: + * @timer_list: the timer list to use + * + * call the notifier callback associated with the timer list. + */ +void timerlist_notify(QEMUTimerList *timer_list); + +/* + * QEMUTimerListGroup + */ + +/** + * timerlistgroup_init: + * @tlg: the timer list group + * @cb: the callback to call when a notify is required + * @opaque: the opaque pointer to be passed to the callback. + * + * Initialise a timer list group. This must already be + * allocated in memory and zeroed. The notifier callback is + * called whenever a clock in the timer list group is + * reenabled or whenever a timer associated with any timer + * list is modified. If @cb is specified as null, qemu_notify() + * is used instead. + */ +void timerlistgroup_init(QEMUTimerListGroup *tlg, + QEMUTimerListNotifyCB *cb, void *opaque); + +/** + * timerlistgroup_deinit: + * @tlg: the timer list group + * + * Deinitialise a timer list group. This must already be + * initialised. Note the memory is not freed. + */ +void timerlistgroup_deinit(QEMUTimerListGroup *tlg); + +/** + * timerlistgroup_run_timers: + * @tlg: the timer list group + * + * Run the timers associated with a timer list group. + * This will run timers on multiple clocks. + * + * Returns: true if any timer callback ran + */ +bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg); + +/** + * timerlistgroup_deadline_ns: + * @tlg: the timer list group + * + * Determine the deadline of the soonest timer to + * expire associated with any timer list linked to + * the timer list group. Only clocks suitable for + * deadline calculation are included. + * + * Returns: the deadline in nanoseconds or -1 if no + * timers are to expire. + */ +int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg); + +/* + * QEMUTimer + */ + +/** + * timer_init_tl: + * @ts: the timer to be initialised + * @timer_list: the timer list to attach the timer to + * @scale: the scale value for the timer + * @cb: the callback to be called when the timer expires + * @opaque: the opaque pointer to be passed to the callback + * + * Initialise a new timer and associate it with @timer_list. + * The caller is responsible for allocating the memory. + * + * You need not call an explicit deinit call. Simply make + * sure it is not on a list with timer_del. + */ +void timer_init_tl(QEMUTimer *ts, + QEMUTimerList *timer_list, int scale, + QEMUTimerCB *cb, void *opaque); + +/** + * timer_init: + * @type: the clock to associate with the timer + * @scale: the scale value for the timer + * @cb: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Initialize a timer with the given scale on the default timer list + * associated with the clock. + * + * You need not call an explicit deinit call. Simply make + * sure it is not on a list with timer_del. 
+ */ +static inline void timer_init(QEMUTimer *ts, QEMUClockType type, int scale, + QEMUTimerCB *cb, void *opaque) +{ + timer_init_tl(ts, main_loop_tlg.tl[type], scale, cb, opaque); +} + +/** + * timer_init_ns: + * @type: the clock to associate with the timer + * @cb: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Initialize a timer with nanosecond scale on the default timer list + * associated with the clock. + * + * You need not call an explicit deinit call. Simply make + * sure it is not on a list with timer_del. + */ +static inline void timer_init_ns(QEMUTimer *ts, QEMUClockType type, + QEMUTimerCB *cb, void *opaque) +{ + timer_init(ts, type, SCALE_NS, cb, opaque); +} + +/** + * timer_init_us: + * @type: the clock to associate with the timer + * @cb: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Initialize a timer with microsecond scale on the default timer list + * associated with the clock. + * + * You need not call an explicit deinit call. Simply make + * sure it is not on a list with timer_del. + */ +static inline void timer_init_us(QEMUTimer *ts, QEMUClockType type, + QEMUTimerCB *cb, void *opaque) +{ + timer_init(ts, type, SCALE_US, cb, opaque); +} + +/** + * timer_init_ms: + * @type: the clock to associate with the timer + * @cb: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Initialize a timer with millisecond scale on the default timer list + * associated with the clock. + * + * You need not call an explicit deinit call. Simply make + * sure it is not on a list with timer_del. + */ +static inline void timer_init_ms(QEMUTimer *ts, QEMUClockType type, + QEMUTimerCB *cb, void *opaque) +{ + timer_init(ts, type, SCALE_MS, cb, opaque); +} + +/** + * timer_new_tl: + * @timer_list: the timer list to attach the timer to + * @scale: the scale value for the timer + * @cb: the callback to be called when the timer expires + * @opaque: the opaque pointer to be passed to the callback + * + * Creeate a new timer and associate it with @timer_list. + * The memory is allocated by the function. + * + * This is not the preferred interface unless you know you + * are going to call timer_free. Use timer_init instead. + * + * Returns: a pointer to the timer + */ +static inline QEMUTimer *timer_new_tl(QEMUTimerList *timer_list, + int scale, + QEMUTimerCB *cb, + void *opaque) +{ + QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer)); + timer_init_tl(ts, timer_list, scale, cb, opaque); + return ts; +} + +/** + * timer_new: + * @type: the clock type to use + * @scale: the scale value for the timer + * @cb: the callback to be called when the timer expires + * @opaque: the opaque pointer to be passed to the callback + * + * Creeate a new timer and associate it with the default + * timer list for the clock type @type. + * + * Returns: a pointer to the timer + */ +static inline QEMUTimer *timer_new(QEMUClockType type, int scale, + QEMUTimerCB *cb, void *opaque) +{ + return timer_new_tl(main_loop_tlg.tl[type], scale, cb, opaque); +} + +/** + * timer_new_ns: + * @clock: the clock to associate with the timer + * @callback: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timer with nanosecond scale on the default timer list + * associated with the clock. 
+ * + * Returns: a pointer to the newly created timer + */ +static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb, + void *opaque) +{ + return timer_new(type, SCALE_NS, cb, opaque); +} + +/** + * timer_new_us: + * @clock: the clock to associate with the timer + * @callback: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timer with microsecond scale on the default timer list + * associated with the clock. + * + * Returns: a pointer to the newly created timer + */ +static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb, + void *opaque) +{ + return timer_new(type, SCALE_US, cb, opaque); +} + +/** + * timer_new_ms: + * @clock: the clock to associate with the timer + * @callback: the callback to call when the timer expires + * @opaque: the opaque pointer to pass to the callback + * + * Create a new timer with millisecond scale on the default timer list + * associated with the clock. + * + * Returns: a pointer to the newly created timer + */ +static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb, + void *opaque) +{ + return timer_new(type, SCALE_MS, cb, opaque); +} + +/** + * timer_deinit: + * @ts: the timer to be de-initialised + * + * Deassociate the timer from any timerlist. You should + * call timer_del before. After this call, any further + * timer_del call cannot cause dangling pointer accesses + * even if the previously used timerlist is freed. + */ +void timer_deinit(QEMUTimer *ts); + +/** + * timer_free: + * @ts: the timer + * + * Free a timer (it must not be on the active list) + */ +void timer_free(QEMUTimer *ts); + +/** + * timer_del: + * @ts: the timer + * + * Delete a timer from the active list. + * + * This function is thread-safe but the timer and its timer list must not be + * freed while this function is running. + */ +void timer_del(QEMUTimer *ts); + +/** + * timer_mod_ns: + * @ts: the timer + * @expire_time: the expiry time in nanoseconds + * + * Modify a timer to expire at @expire_time + * + * This function is thread-safe but the timer and its timer list must not be + * freed while this function is running. + */ +void timer_mod_ns(QEMUTimer *ts, int64_t expire_time); + +/** + * timer_mod_anticipate_ns: + * @ts: the timer + * @expire_time: the expiry time in nanoseconds + * + * Modify a timer to expire at @expire_time or the current time, + * whichever comes earlier. + * + * This function is thread-safe but the timer and its timer list must not be + * freed while this function is running. + */ +void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time); + +/** + * timer_mod: + * @ts: the timer + * @expire_time: the expire time in the units associated with the timer + * + * Modify a timer to expiry at @expire_time, taking into + * account the scale associated with the timer. + * + * This function is thread-safe but the timer and its timer list must not be + * freed while this function is running. + */ +void timer_mod(QEMUTimer *ts, int64_t expire_timer); + +/** + * timer_mod_anticipate: + * @ts: the timer + * @expire_time: the expiry time in nanoseconds + * + * Modify a timer to expire at @expire_time or the current time, whichever + * comes earlier, taking into account the scale associated with the timer. + * + * This function is thread-safe but the timer and its timer list must not be + * freed while this function is running. 
+ */ +void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time); + +/** + * timer_pending: + * @ts: the timer + * + * Determines whether a timer is pending (i.e. is on the + * active list of timers, whether or not it has not yet expired). + * + * Returns: true if the timer is pending + */ +bool timer_pending(QEMUTimer *ts); + +/** + * timer_expired: + * @ts: the timer + * + * Determines whether a timer has expired. + * + * Returns: true if the timer has expired + */ +bool timer_expired(QEMUTimer *timer_head, int64_t current_time); + +/** + * timer_expire_time_ns: + * @ts: the timer + * + * Determine the expiry time of a timer + * + * Returns: the expiry time in nanoseconds + */ +uint64_t timer_expire_time_ns(QEMUTimer *ts); + +/** + * timer_get: + * @f: the file + * @ts: the timer + * + * Read a timer @ts from a file @f + */ +void timer_get(QEMUFile *f, QEMUTimer *ts); + +/** + * timer_put: + * @f: the file + * @ts: the timer + */ +void timer_put(QEMUFile *f, QEMUTimer *ts); + +/* + * General utility functions + */ + +/** + * qemu_timeout_ns_to_ms: + * @ns: nanosecond timeout value + * + * Convert a nanosecond timeout value (or -1) to + * a millisecond value (or -1), always rounding up. + * + * Returns: millisecond timeout value + */ +int qemu_timeout_ns_to_ms(int64_t ns); + +/** + * qemu_poll_ns: + * @fds: Array of file descriptors + * @nfds: number of file descriptors + * @timeout: timeout in nanoseconds + * + * Perform a poll like g_poll but with a timeout in nanoseconds. + * See g_poll documentation for further details. + * + * Returns: number of fds ready + */ +int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout); + +/** + * qemu_soonest_timeout: + * @timeout1: first timeout in nanoseconds (or -1 for infinite) + * @timeout2: second timeout in nanoseconds (or -1 for infinite) + * + * Calculates the soonest of two timeout values. -1 means infinite, which + * is later than any other value. + * + * Returns: soonest timeout value in nanoseconds (or -1 for infinite) + */ +static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2) +{ + /* we can abuse the fact that -1 (which means infinite) is a maximal + * value when cast to unsigned. As this is disgusting, it's kept in + * one inline function. + */ + return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2; +} + +/** + * initclocks: + * + * Initialise the clock & timer infrastructure + */ +void init_clocks(void); + +int64_t cpu_get_ticks(void); +/* Caller must hold BQL */ +void cpu_enable_ticks(void); +/* Caller must hold BQL */ +void cpu_disable_ticks(void); + +static inline int64_t get_ticks_per_sec(void) +{ + return 1000000000LL; +} + +static inline int64_t get_max_clock_jump(void) +{ + /* This should be small enough to prevent excessive interrupts from being + * generated by the RTC on clock jumps, but large enough to avoid frequent + * unnecessary resets in idle VMs. + */ + return 60 * get_ticks_per_sec(); +} + +/* + * Low level clock functions + */ + +/* real time host monotonic timer */ +static inline int64_t get_clock_realtime(void) +{ +return qemu_clock_get_ns(QEMU_CLOCK_REALTIME); +} + +/* Warning: don't insert tracepoints into these functions, they are + also used by simpletrace backend and tracepoints would cause + an infinite recursion! 
*/ +#ifdef _WIN32 +#if 0 +extern int64_t clock_freq; + +static inline int64_t get_clock(void) +{ + LARGE_INTEGER ti; + QueryPerformanceCounter(&ti); + return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq); +} +#endif +#else + +extern int use_rt_clock; + +static inline int64_t get_clock(void) +{ +#ifdef CLOCK_MONOTONIC + if (use_rt_clock) { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return ts.tv_sec * 1000000000LL + ts.tv_nsec; + } else +#endif + { + /* XXX: using gettimeofday leads to problems if the date + changes, so it should be avoided. */ + return get_clock_realtime(); + } +} +#endif + +/* icount */ +int64_t cpu_get_icount_raw(void); +int64_t cpu_get_icount(void); +int64_t cpu_get_clock(void); +int64_t cpu_icount_to_ns(int64_t icount); + +/*******************************************/ +/* host CPU ticks (if available) */ + +#if defined(_ARCH_PPC) + +static inline int64_t cpu_get_real_ticks(void) +{ + int64_t retval; +#ifdef _ARCH_PPC64 + /* This reads timebase in one 64bit go and includes Cell workaround from: + http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html + */ + __asm__ __volatile__ ("mftb %0\n\t" + "cmpwi %0,0\n\t" + "beq- $-8" + : "=r" (retval)); +#else + /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */ + unsigned long junk; + __asm__ __volatile__ ("mfspr %1,269\n\t" /* mftbu */ + "mfspr %L0,268\n\t" /* mftb */ + "mfspr %0,269\n\t" /* mftbu */ + "cmpw %0,%1\n\t" + "bne $-16" + : "=r" (retval), "=r" (junk)); +#endif + return retval; +} + +#elif defined(__i386__) + +static inline int64_t cpu_get_real_ticks(void) +{ + int64_t val; + extern int asm_cpu_get_real_ticks(); + asm_cpu_get_real_ticks(); +// asm volatile ("rdtsc" : "=A" (val)); + return val; +} + +#elif defined(__x86_64__) + +static inline int64_t cpu_get_real_ticks(void) +{ + uint32_t low,high; + int64_t val; + extern int asm_cpu_get_real_ticks(); + asm_cpu_get_real_ticks(); +// asm volatile("rdtsc" : "=a" (low), "=d" (high)); + val = high; + val <<= 32; + val |= low; + return val; +} + +#elif defined(__hppa__) + +static inline int64_t cpu_get_real_ticks(void) +{ + int val; + asm volatile ("mfctl %%cr16, %0" : "=r"(val)); + return val; +} + +#elif defined(__ia64) + +static inline int64_t cpu_get_real_ticks(void) +{ + int64_t val; + asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory"); + return val; +} + +#elif defined(__s390__) + +static inline int64_t cpu_get_real_ticks(void) +{ + int64_t val; + asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc"); + return val; +} + +#elif defined(__sparc__) + +static inline int64_t cpu_get_real_ticks (void) +{ +#if defined(_LP64) + uint64_t rval; + asm volatile("rd %%tick,%0" : "=r"(rval)); + return rval; +#else + /* We need an %o or %g register for this. For recent enough gcc + there is an "h" constraint for that. Don't bother with that. */ + union { + uint64_t i64; + struct { + uint32_t high; + uint32_t low; + } i32; + } rval; + asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1" + : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1"); + return rval.i64; +#endif +} + +#elif defined(__mips__) && \ + ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__)) +/* + * binutils wants to use rdhwr only on mips32r2 + * but as linux kernel emulate it, it's fine + * to use it. 
+ * + */ +#define MIPS_RDHWR(rd, value) { \ + __asm__ __volatile__ (".set push\n\t" \ + ".set mips32r2\n\t" \ + "rdhwr %0, "rd"\n\t" \ + ".set pop" \ + : "=r" (value)); \ + } + +static inline int64_t cpu_get_real_ticks(void) +{ + /* On kernels >= 2.6.25 rdhwr , $2 and $3 are emulated */ + uint32_t count; + static uint32_t cyc_per_count = 0; + + if (!cyc_per_count) { + MIPS_RDHWR("$3", cyc_per_count); + } + + MIPS_RDHWR("$2", count); + return (int64_t)(count * cyc_per_count); +} + +#elif defined(__alpha__) + +static inline int64_t cpu_get_real_ticks(void) +{ + uint64_t cc; + uint32_t cur, ofs; + + asm volatile("rpcc %0" : "=r"(cc)); + cur = cc; + ofs = cc >> 32; + return cur - ofs; +} + +#else +/* The host CPU doesn't have an easily accessible cycle counter. + Just return a monotonically increasing value. This will be + totally wrong, but hopefully better than nothing. */ +static inline int64_t cpu_get_real_ticks (void) +{ + static int64_t ticks = 0; + return ticks++; +} +#endif + +#ifdef CONFIG_PROFILER +static inline int64_t profile_getclock(void) +{ + return get_clock(); +} + +extern int64_t tcg_time; +extern int64_t dev_time; +#endif + +#endif diff --git a/slirp/simh/qemu/typedefs.h b/slirp/simh/qemu/typedefs.h new file mode 100644 index 00000000..3a835ffb --- /dev/null +++ b/slirp/simh/qemu/typedefs.h @@ -0,0 +1,87 @@ +#ifndef QEMU_TYPEDEFS_H +#define QEMU_TYPEDEFS_H + +/* A load of opaque types so that device init declarations don't have to + pull in all the real definitions. */ +struct Monitor; + +/* Please keep this list in alphabetical order */ +typedef struct AdapterInfo AdapterInfo; +typedef struct AddressSpace AddressSpace; +typedef struct AioContext AioContext; +typedef struct AudioState AudioState; +typedef struct BlockBackend BlockBackend; +typedef struct BlockDriverState BlockDriverState; +typedef struct BusClass BusClass; +typedef struct BusState BusState; +typedef struct CharDriverState CharDriverState; +typedef struct CompatProperty CompatProperty; +typedef struct DeviceState DeviceState; +typedef struct DeviceListener DeviceListener; +typedef struct DisplayChangeListener DisplayChangeListener; +typedef struct DisplayState DisplayState; +typedef struct DisplaySurface DisplaySurface; +typedef struct DriveInfo DriveInfo; +typedef struct EventNotifier EventNotifier; +typedef struct FWCfgIoState FWCfgIoState; +typedef struct FWCfgMemState FWCfgMemState; +typedef struct FWCfgState FWCfgState; +typedef struct HCIInfo HCIInfo; +typedef struct I2CBus I2CBus; +typedef struct I2SCodec I2SCodec; +typedef struct ISABus ISABus; +typedef struct ISADevice ISADevice; +typedef struct LoadStateEntry LoadStateEntry; +typedef struct MACAddr MACAddr; +typedef struct MachineClass MachineClass; +typedef struct MachineState MachineState; +typedef struct MemoryListener MemoryListener; +typedef struct MemoryMappingList MemoryMappingList; +typedef struct MemoryRegion MemoryRegion; +typedef struct MemoryRegionSection MemoryRegionSection; +typedef struct MigrationIncomingState MigrationIncomingState; +typedef struct MigrationParams MigrationParams; +typedef struct Monitor Monitor; +typedef struct MouseTransformInfo MouseTransformInfo; +typedef struct MSIMessage MSIMessage; +typedef struct NetClientState NetClientState; +typedef struct NICInfo NICInfo; +typedef struct PcGuestInfo PcGuestInfo; +typedef struct PCIBridge PCIBridge; +typedef struct PCIBus PCIBus; +typedef struct PCIDevice PCIDevice; +typedef struct PCIEAERErr PCIEAERErr; +typedef struct PCIEAERLog PCIEAERLog; +typedef struct PCIEAERMsg 
PCIEAERMsg; +typedef struct PCIEPort PCIEPort; +typedef struct PCIESlot PCIESlot; +typedef struct PCIExpressDevice PCIExpressDevice; +typedef struct PCIExpressHost PCIExpressHost; +typedef struct PCIHostState PCIHostState; +typedef struct PCMachineState PCMachineState; +typedef struct PCMachineClass PCMachineClass; +typedef struct PCMCIACardState PCMCIACardState; +typedef struct PixelFormat PixelFormat; +typedef struct PropertyInfo PropertyInfo; +typedef struct Property Property; +typedef struct QEMUBH QEMUBH; +typedef struct QemuConsole QemuConsole; +typedef struct QEMUFile QEMUFile; +typedef struct QemuOpt QemuOpt; +typedef struct QemuOpts QemuOpts; +typedef struct QemuOptsList QemuOptsList; +typedef struct QEMUSGList QEMUSGList; +typedef struct QEMUSizedBuffer QEMUSizedBuffer; +typedef struct QEMUTimerListGroup QEMUTimerListGroup; +typedef struct QEMUTimer QEMUTimer; +typedef struct Range Range; +typedef struct SerialState SerialState; +typedef struct SHPCDevice SHPCDevice; +typedef struct SMBusDevice SMBusDevice; +typedef struct SSIBus SSIBus; +typedef struct uWireSlave uWireSlave; +typedef struct VirtIODevice VirtIODevice; +typedef struct Visitor Visitor; +typedef struct MonitorDef MonitorDef; + +#endif /* QEMU_TYPEDEFS_H */ diff --git a/slirp/simh/qemu/win32/inttypes.h b/slirp/simh/qemu/win32/inttypes.h new file mode 100644 index 00000000..583f4379 --- /dev/null +++ b/slirp/simh/qemu/win32/inttypes.h @@ -0,0 +1,11 @@ +#ifndef INTTYPES_H +#define INTTYPES_H + +#include + +#ifdef _WIN64 +typedef __int64 ssize_t; +#else +typedef __int32 ssize_t; +#endif +#endif diff --git a/slirp/simh/qemu/win32/stdbool.h b/slirp/simh/qemu/win32/stdbool.h new file mode 100644 index 00000000..f4f25308 --- /dev/null +++ b/slirp/simh/qemu/win32/stdbool.h @@ -0,0 +1,8 @@ +#ifndef STDBOOL_H +#define STDBOOL_H + +#define true 1 +#define false 0 +#define __bool_true_false_are_defined 1 + +#endif diff --git a/slirp/simh/qemu/win32/stdint.h b/slirp/simh/qemu/win32/stdint.h new file mode 100644 index 00000000..ec384da2 --- /dev/null +++ b/slirp/simh/qemu/win32/stdint.h @@ -0,0 +1,14 @@ +#ifndef STDINT_H +#define STDINT_H + +typedef char int8_t; +typedef short int16_t; +typedef int int32_t; +typedef long long int64_t; +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef unsigned int u_int32_t; +typedef unsigned long long uint64_t; + +#endif diff --git a/slirp/simh/qemu/win32/sys/time.h b/slirp/simh/qemu/win32/sys/time.h new file mode 100644 index 00000000..b2956a56 --- /dev/null +++ b/slirp/simh/qemu/win32/sys/time.h @@ -0,0 +1,6 @@ +#ifndef SYS_TIME_H +#define SYS_TIME_H + +#include + +#endif diff --git a/slirp/simh/qemu/win32/unistd.h b/slirp/simh/qemu/win32/unistd.h new file mode 100644 index 00000000..c97c9e01 --- /dev/null +++ b/slirp/simh/qemu/win32/unistd.h @@ -0,0 +1,6 @@ +#ifndef UNISTD_H +#define UNISTD_H + +#include + +#endif diff --git a/slirp/simh/sim_slirp.c b/slirp/simh/sim_slirp.c new file mode 100644 index 00000000..7823dd05 --- /dev/null +++ b/slirp/simh/sim_slirp.c @@ -0,0 +1,554 @@ +/* sim_slirp.c: + ------------------------------------------------------------------------------ + Copyright (c) 2015, Mark Pizzolato + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the 
Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + Except as contained in this notice, the name of the author shall not be + used in advertising or otherwise to promote the sale, use or other dealings + in this Software without prior written authorization from the author. + + ------------------------------------------------------------------------------ + + This module provides the interface needed between sim_ether and SLiRP to + provide NAT network functionality. + +*/ + +/* Actual slirp API interface support, some code taken from slirpvde.c */ + +#define DEFAULT_IP_ADDR "10.0.2.2" + +#include "glib.h" +#include "qemu/timer.h" +#include "libslirp.h" +#include "sim_defs.h" +#include "sim_slirp.h" +#include "sim_sock.h" +#include "libslirp.h" + +#define IS_TCP 0 +#define IS_UDP 1 +static const char *tcpudp[] = { + "TCP", + "UDP" + }; + +struct redir_tcp_udp { + struct in_addr inaddr; + int is_udp; + int port; + int lport; + struct redir_tcp_udp *next; + }; + +static int +_parse_redirect_port (struct redir_tcp_udp **head, char *buff, int is_udp) +{ +u_int32_t inaddr = 0; +int port = 0; +int lport = 0; +char *ipaddrstr = NULL; +char *portstr = NULL; +struct redir_tcp_udp *new; + +if (((ipaddrstr = strchr(buff, ':')) == NULL) || (*(ipaddrstr+1) == 0)) { + sim_printf ("redir %s syntax error\n", tcpudp[is_udp]); + return -1; + } +*ipaddrstr++ = 0; + +if (((portstr = strchr (ipaddrstr, ':')) == NULL) || (*(portstr+1) == 0)) { + sim_printf ("redir %s syntax error\n", tcpudp[is_udp]); + return -1; + } +*portstr++ = 0; + +sscanf (buff, "%d", &lport); +sscanf (portstr, "%d", &port); +if (ipaddrstr) + inaddr = inet_addr (ipaddrstr); + +if (!inaddr) { + sim_printf ("%s redirection error: an IP address must be specified\n", tcpudp[is_udp]); + return -1; + } + +if ((new = g_malloc (sizeof(struct redir_tcp_udp))) == NULL) + return -1; +else { + inet_aton (ipaddrstr, &new->inaddr); + new->is_udp = is_udp; + new->port = port; + new->lport = lport; + new->next = *head; + *head = new; + return 0; + } +} + +static int _do_redirects (Slirp *slirp, struct redir_tcp_udp *head) +{ +struct in_addr host_addr; +int ret = 0; + +host_addr.s_addr = htonl(INADDR_ANY); +if (head) { + ret = _do_redirects (slirp, head->next); + if (slirp_add_hostfwd (slirp, head->is_udp, host_addr, head->lport, head->inaddr, head->port) < 0) { + sim_printf("Can't establish redirector for: redir %s =%d:%s:%d\n", + tcpudp[head->is_udp], head->lport, inet_ntoa(head->inaddr), head->port); + ++ret; + } + } +return ret; +} + + +struct sim_slirp { + Slirp *slirp; + char *args; + struct in_addr vnetwork; + struct in_addr vnetmask; + int maskbits; + struct in_addr vgateway; + int dhcpmgmt; + struct in_addr vdhcp_start; + struct in_addr vnameserver; + char *tftp_path; + struct redir_tcp_udp *rtcp; + GArray *gpollfds; + SOCKET db_chime; /* write packet doorbell */ + struct sockaddr_in db_addr; /* doorbell 
address */ + struct write_request { + struct write_request *next; + char msg[1518]; + size_t len; + } *write_requests; + struct write_request *write_buffers; + pthread_mutex_t write_buffer_lock; + void *opaque; /* opaque value passed during packet delivery */ + packet_callback callback; /* slirp arriving packet delivery callback */ + }; + +SLIRP *sim_slirp_open (const char *args, void *opaque, packet_callback callback) +{ +SLIRP *slirp = (SLIRP *)g_malloc0(sizeof(*slirp)); +char *targs = g_strdup (args); +char *tptr = targs; +char *cptr; +char tbuf[CBUFSIZE], gbuf[CBUFSIZE]; +int err; + +slirp->args = (char *)g_malloc0(1 + strlen(args)); +strcpy (slirp->args, args); +slirp->opaque = opaque; +slirp->callback = callback; +slirp->maskbits = 24; +slirp->dhcpmgmt = 1; +inet_aton(DEFAULT_IP_ADDR,&slirp->vgateway); + +err = 0; +while (*tptr && !err) { + tptr = get_glyph_nc (tptr, tbuf, ','); + if (!tbuf[0]) + break; + cptr = tbuf; + cptr = get_glyph (cptr, gbuf, '='); + if (0 == MATCH_CMD (gbuf, "DHCP")) { + slirp->dhcpmgmt = 1; + if (cptr && *cptr) + inet_aton (cptr, &slirp->vdhcp_start); + continue; + } + if (0 == MATCH_CMD (gbuf, "TFTP")) { + if (cptr && *cptr) + slirp->tftp_path = g_strdup (cptr); + else { + sim_printf ("Missing TFTP Path\n"); + err = 1; + } + continue; + } + if ((0 == MATCH_CMD (gbuf, "NAMESERVER")) || + (0 == MATCH_CMD (gbuf, "DNS"))) { + if (cptr && *cptr) + inet_aton (cptr, &slirp->vnameserver); + else { + sim_printf ("Missing nameserver\n"); + err = 1; + } + continue; + } + if (0 == MATCH_CMD (gbuf, "GATEWAY")) { + if (cptr && *cptr) { + char *slash = strchr (cptr, '/'); + if (slash) { + slirp->maskbits = atoi (slash+1); + *slash = '\0'; + } + inet_aton (cptr, &slirp->vgateway); + } + else { + sim_printf ("Missing host\n"); + err = 1; + } + continue; + } + if (0 == MATCH_CMD (gbuf, "NETWORK")) { + if (cptr && *cptr) { + char *slash = strchr (cptr, '/'); + if (slash) { + slirp->maskbits = atoi (slash+1); + *slash = '\0'; + } + inet_aton (cptr, &slirp->vnetwork); + } + else { + sim_printf ("Missing network\n"); + err = 1; + } + continue; + } + if (0 == MATCH_CMD (gbuf, "NODHCP")) { + slirp->dhcpmgmt = 0; + continue; + } + if (0 == MATCH_CMD (gbuf, "UDP")) { + if (cptr && *cptr) + err = _parse_redirect_port (&slirp->rtcp, cptr, IS_UDP); + else { + sim_printf ("Missing UDP port mapping\n"); + err = 1; + } + continue; + } + if (0 == MATCH_CMD (gbuf, "TCP")) { + if (cptr && *cptr) + err = _parse_redirect_port (&slirp->rtcp, cptr, IS_TCP); + else { + sim_printf ("Missing TCP port mapping\n"); + err = 1; + } + continue; + } + sim_printf ("Unexpected NAT argument: %s\n", gbuf); + err = 1; + } +if (err) { + sim_slirp_close (slirp); + g_free (targs); + return NULL; + } + +slirp->vnetmask.s_addr = htonl(~((1 << (32-slirp->maskbits)) - 1)); +slirp->vnetwork.s_addr = slirp->vgateway.s_addr & slirp->vnetmask.s_addr; +if ((slirp->vgateway.s_addr & ~slirp->vnetmask.s_addr) == 0) + slirp->vgateway.s_addr = htonl(ntohl(slirp->vnetwork.s_addr) | 2); +if ((slirp->vdhcp_start.s_addr == 0) && slirp->dhcpmgmt) + slirp->vdhcp_start.s_addr = htonl(ntohl(slirp->vnetwork.s_addr) | 15); +if (slirp->vnameserver.s_addr == 0) + slirp->vnameserver.s_addr = htonl(ntohl(slirp->vnetwork.s_addr) | 3); +slirp->slirp = slirp_init (0, slirp->vnetwork, slirp->vnetmask, slirp->vgateway, + NULL, slirp->tftp_path, NULL, slirp->vdhcp_start, + slirp->vnameserver, NULL, (void *)slirp); + +if (_do_redirects (slirp->slirp, slirp->rtcp)) { + sim_slirp_close (slirp); + slirp = NULL; + } +else { + char db_host[32]; + 
GPollFD pfd; + int ret; + int64_t rnd_val = qemu_clock_get_ns (0) / 1000000; + + pthread_mutex_init (&slirp->write_buffer_lock, NULL); + slirp->gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD)); + /* setup transmit packet wakeup doorbell */ + slirp->db_chime = socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP); + ret = socket_set_fast_reuse(slirp->db_chime); + memset (&slirp->db_addr, 0, sizeof (slirp->db_addr)); + slirp->db_addr.sin_family = AF_INET; + sprintf (db_host, "127.%d.%d.%d", (int)((rnd_val>>16) & 0xFF), (int)((rnd_val>>8) & 0xFF), (int)(rnd_val & 0xFF)); + slirp->db_addr.sin_port = (rnd_val >> 24) & 0xFFFF; + inet_aton (db_host, &slirp->db_addr.sin_addr); + ret = bind(slirp->db_chime, (struct sockaddr *)&slirp->db_addr, sizeof(slirp->db_addr)); + qemu_set_nonblock(slirp->db_chime); + memset (&pfd, 0, sizeof (pfd)); + pfd.fd = slirp->db_chime; + pfd.events = G_IO_IN; + g_array_append_val(slirp->gpollfds, pfd); + + sim_slirp_show(slirp, stdout); + if (sim_log && (sim_log != stdout)) + sim_slirp_show(slirp, sim_log); + if (sim_deb && (sim_deb != stdout) && (sim_deb != sim_log)) + sim_slirp_show(slirp, sim_deb); + } +g_free (targs); +return slirp; +} + +void sim_slirp_close (SLIRP *slirp) +{ +struct redir_tcp_udp *rtmp; + +if (slirp) { + g_free (slirp->args); + while ((rtmp = slirp->rtcp)) { + slirp_remove_hostfwd(slirp->slirp, rtmp->is_udp, rtmp->inaddr, rtmp->lport); + slirp->rtcp = rtmp->next; + g_free (rtmp); + } + g_array_free(slirp->gpollfds, true); + closesocket (slirp->db_chime); + if (1) { + struct write_request *buffer; + + while (NULL != (buffer = slirp->write_buffers)) { + slirp->write_buffers = buffer->next; + free(buffer); + } + while (NULL != (buffer = slirp->write_requests)) { + slirp->write_requests = buffer->next; + free(buffer); + } + } + pthread_mutex_destroy (&slirp->write_buffer_lock); + if (slirp->slirp) + slirp_cleanup(slirp->slirp); + } +g_free (slirp); +} + +t_stat sim_slirp_attach_help(FILE *st, DEVICE *dptr, UNIT *uptr, int32 flag, const char *cptr) +{ +fprintf (st, "%s", +"NAT options:\n" +" DHCP{=dhcp_start_address} Enables DHCP server and specifies\n" +" guest LAN DHCP start IP address\n" +" TFTP=tftp-base-path Enables TFTP server and specifies\n" +" base file path\n" +" NAMESERVER=nameserver_ipaddres specifies DHCP nameserver IP address\n" +" DNS=nameserver_ipaddres specifies DHCP nameserver IP address\n" +" GATEWAY=host_ipaddress{/masklen} specifies LAN gateway IP address\n" +" NETWORK=network_ipaddress{/masklen} specifies LAN network address\n" +" UDP=port:address:internal-port maps host UDP port to guest port\n" +" TCP=port:address:internal-port maps host TCP port to guest port\n" +" NODHCP disables DHCP server\n" +"Default NAT Options: GATEWAY=10.0.2.2, masklen=24(netmask is 255.255.255.0)\n" +" DHCP=10.0.2.15, NAMESERVER=10.0.2.3\n" +" Nameserver defaults to proxy traffic to host system's active nameserver\n" +); +return SCPE_OK; +} + +int sim_slirp_send (SLIRP *slirp, const char *msg, size_t len, int flags) +{ +struct write_request *request; +int wake_needed = 0; + +/* Get a buffer */ +pthread_mutex_lock (&slirp->write_buffer_lock); +if (NULL != (request = slirp->write_buffers)) + slirp->write_buffers = request->next; +pthread_mutex_unlock (&slirp->write_buffer_lock); +if (NULL == request) + request = (struct write_request *)g_malloc(sizeof(*request)); + +/* Copy buffer contents */ +request->len = len; +memcpy(request->msg, msg, len); + +/* Insert buffer at the end of the write list (to make sure that */ +/* packets make it to the wire in the order 
they were presented here) */ +pthread_mutex_lock (&slirp->write_buffer_lock); +request->next = NULL; +if (slirp->write_requests) { + struct write_request *last_request = slirp->write_requests; + + while (last_request->next) { + last_request = last_request->next; + } + last_request->next = request; + } +else { + slirp->write_requests = request; + wake_needed = 1; + } +pthread_mutex_unlock (&slirp->write_buffer_lock); + +if (wake_needed) + sendto (slirp->db_chime, msg, 0, 0, (struct sockaddr *)&slirp->db_addr, sizeof(slirp->db_addr)); +return len; +} + +void slirp_output (void *opaque, const uint8_t *pkt, int pkt_len) +{ +SLIRP *slirp = (SLIRP *)opaque; + +slirp->callback (slirp->opaque, pkt, pkt_len); +} + +void sim_slirp_show (SLIRP *slirp, FILE *st) +{ +struct redir_tcp_udp *rtmp; + +if ((slirp == NULL) || (slirp->slirp == NULL)) + return; +fprintf (st, "NAT args: %s\n", slirp->args); +fprintf (st, "NAT network setup:\n"); +fprintf (st, " gateway =%s/%d\n", inet_ntoa(slirp->vgateway), slirp->maskbits); +fprintf (st, " DNS =%s\n", inet_ntoa(slirp->vnameserver)); +if (slirp->vdhcp_start.s_addr != 0) + fprintf (st, " dhcp_start =%s\n", inet_ntoa(slirp->vdhcp_start)); +if (slirp->tftp_path) + fprintf (st, " tftp prefix =%s\n", slirp->tftp_path); +rtmp = slirp->rtcp; +while (rtmp) { + fprintf (st, " redir %3s =%d:%s:%d\n", tcpudp[rtmp->is_udp], rtmp->lport, inet_ntoa(rtmp->inaddr), rtmp->port); + rtmp = rtmp->next; + } +slirp_connection_info (slirp->slirp, (Monitor *)st); +} + +#if !defined(MAX) +#define MAX(a,b) (((a)>(b)) ? (a) : (b)) +#endif + +static int pollfds_fill (GArray *pollfds, fd_set *rfds, fd_set *wfds, + fd_set *xfds) +{ +int nfds = -1; +guint i; + +for (i = 0; i < pollfds->len; i++) { + GPollFD *pfd = &g_array_index(pollfds, GPollFD, i); + int fd = pfd->fd; + int events = pfd->events; + if (events & G_IO_IN) { + FD_SET(fd, rfds); + nfds = MAX(nfds, fd); + } + if (events & G_IO_OUT) { + FD_SET(fd, wfds); + nfds = MAX(nfds, fd); + } + if (events & G_IO_PRI) { + FD_SET(fd, xfds); + nfds = MAX(nfds, fd); + } + } +return nfds; +} + +static void pollfds_poll (GArray *pollfds, int nfds, fd_set *rfds, + fd_set *wfds, fd_set *xfds) +{ +guint i; + +for (i = 0; i < pollfds->len; i++) { + GPollFD *pfd = &g_array_index(pollfds, GPollFD, i); + int fd = pfd->fd; + int revents = 0; + + if (FD_ISSET(fd, rfds)) { + revents |= G_IO_IN; + } + if (FD_ISSET(fd, wfds)) { + revents |= G_IO_OUT; + } + if (FD_ISSET(fd, xfds)) { + revents |= G_IO_PRI; + } + pfd->revents = revents & pfd->events; + } +} + +int sim_slirp_select (SLIRP *slirp, int ms_timeout) +{ +int select_ret = 0; +uint32_t slirp_timeout = ms_timeout; +struct timeval timeout; +fd_set rfds, wfds, xfds; +fd_set save_rfds, save_wfds, save_xfds; +int nfds; + +/* Populate the GPollFDs from slirp */ +g_array_set_size (slirp->gpollfds, 1); /* Leave the doorbell chime alone */ +slirp_pollfds_fill(slirp->gpollfds, &slirp_timeout); +timeout.tv_sec = slirp_timeout / 1000; +timeout.tv_usec = (slirp_timeout % 1000) * 1000; + +FD_ZERO(&rfds); +FD_ZERO(&wfds); +FD_ZERO(&xfds); +/* Extract the GPollFDs interest */ +nfds = pollfds_fill (slirp->gpollfds, &rfds, &wfds, &xfds); +save_rfds = rfds; +save_wfds = wfds; +save_xfds = xfds; +select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &timeout); +if (select_ret) { + int i; + /* Update the GPollFDs results */ + pollfds_poll (slirp->gpollfds, nfds, &rfds, &wfds, &xfds); + if (FD_ISSET (slirp->db_chime, &rfds)) { + char buf[32]; + /* consume the doorbell wakeup ring */ + recv (slirp->db_chime, buf, sizeof (buf), 
0); + } + fprintf (stderr, "Select returned %d\r\n", select_ret); + for (i=0; iwrite_buffer_lock); +while (NULL != (request = slirp->write_requests)) { + /* Pull buffer off request list */ + slirp->write_requests = request->next; + pthread_mutex_unlock (&slirp->write_buffer_lock); + + slirp_input (slirp->slirp, (const uint8_t *)request->msg, (int)request->len); + + pthread_mutex_lock (&slirp->write_buffer_lock); + /* Put buffer on free buffer list */ + request->next = slirp->write_buffers; + slirp->write_buffers = request; + } +pthread_mutex_unlock (&slirp->write_buffer_lock); + +slirp_pollfds_poll(slirp->gpollfds, 0); + +} + diff --git a/slirp/simh/sim_slirp.h b/slirp/simh/sim_slirp.h new file mode 100644 index 00000000..7d16e395 --- /dev/null +++ b/slirp/simh/sim_slirp.h @@ -0,0 +1,21 @@ +#ifndef SIM_SLIRP_H +#define SIM_SLIRP_H + +#if defined(HAVE_SLIRP_NETWORK) + +#include "sim_defs.h" +typedef struct sim_slirp SLIRP; + +typedef void (*packet_callback)(void *opaque, const unsigned char *buf, int len); + +SLIRP *sim_slirp_open (const char *args, void *opaque, packet_callback callback); +void sim_slirp_close (SLIRP *slirp); +int sim_slirp_send (SLIRP *slirp, const char *msg, size_t len, int flags); +int sim_slirp_select (SLIRP *slirp, int ms_timeout); +void sim_slirp_dispatch (SLIRP *slirp); +t_stat sim_slirp_attach_help(FILE *st, DEVICE *dptr, UNIT *uptr, int32 flag, const char *cptr); +void sim_slirp_show (SLIRP *slirp, FILE *st); + +#endif /* HAVE_SLIRP_NETWORK */ + +#endif
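
Note on using the sim_slirp API (illustrative only): the functions declared in
slirp/simh/sim_slirp.h above are meant to be driven by sim_ether's packet
reader/writer machinery. The sketch below shows the general call pattern as a
stand-alone harness; the names nat_rx_packet and poll_loop, the GATEWAY
argument string, the 250ms poll interval and the fixed iteration count are all
assumptions made for illustration and are not part of this patch.

    #if defined(HAVE_SLIRP_NETWORK)
    #include "sim_slirp.h"

    /* Hypothetical receive callback: SLiRP hands packets addressed to the
       simulated NIC here (signature matches the packet_callback typedef). */
    static void nat_rx_packet (void *opaque, const unsigned char *buf, int len)
    {
    /* queue buf/len on the simulated Ethernet device's receive list */
    }

    static void poll_loop (void)
    {
    SLIRP *nat = sim_slirp_open ("GATEWAY=10.0.2.2/24", NULL, nat_rx_packet);
    char frame[60] = {0};                       /* dummy Ethernet frame */
    int i;

    if (nat == NULL)                            /* bad args or init failure */
        return;
    sim_slirp_send (nat, frame, sizeof (frame), 0);
    for (i = 0; i < 100; i++) {                 /* bounded loop for the sketch */
        if (sim_slirp_select (nat, 250) > 0)    /* wait up to 250ms for work */
            sim_slirp_dispatch (nat);           /* deliver pending tx/rx data */
        }
    sim_slirp_close (nat);
    }
    #endif /* HAVE_SLIRP_NETWORK */

In the simulator itself this loop would live on the Ethernet reader thread and
run while the device is attached; the sketch only shows the open/send/select/
dispatch/close ordering implied by the declarations above.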