target
int64
0
1
idx
int64
0
27.3k
func
stringlengths
23
97k
0
984
static void parse_error(JSONParserContext *ctxt, QObject *token, const char *msg, ...) { fprintf(stderr, "parse error: %s\n", msg); }
0
985
void gtod_save(QEMUFile *f, void *opaque) { uint64_t tod_low; uint8_t tod_high; int r; r = s390_get_clock(&tod_high, &tod_low); if (r) { fprintf(stderr, "WARNING: Unable to get guest clock for migration. " "Error code %d. Guest clock will not be migrated " "which could cause the guest to hang.\n", r); qemu_put_byte(f, S390_TOD_CLOCK_VALUE_MISSING); return; } qemu_put_byte(f, S390_TOD_CLOCK_VALUE_PRESENT); qemu_put_byte(f, tod_high); qemu_put_be64(f, tod_low); }
0
986
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg) { const uint8_t type_code[] = { [GDB_BREAKPOINT_HW] = 0x0, [GDB_WATCHPOINT_WRITE] = 0x1, [GDB_WATCHPOINT_ACCESS] = 0x3 }; const uint8_t len_code[] = { [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2 }; int n; if (kvm_sw_breakpoints_active(env)) dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; if (nb_hw_breakpoint > 0) { dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; dbg->arch.debugreg[7] = 0x0600; for (n = 0; n < nb_hw_breakpoint; n++) { dbg->arch.debugreg[n] = hw_breakpoint[n].addr; dbg->arch.debugreg[7] |= (2 << (n * 2)) | (type_code[hw_breakpoint[n].type] << (16 + n*4)) | ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4)); } } /* Legal xcr0 for loading */ env->xcr0 = 1; }
0
987
SocketAddress *socket_local_address(int fd, Error **errp) { struct sockaddr_storage ss; socklen_t sslen = sizeof(ss); if (getsockname(fd, (struct sockaddr *)&ss, &sslen) < 0) { error_setg_errno(errp, errno, "%s", "Unable to query local socket address"); return NULL; } return socket_sockaddr_to_address(&ss, sslen, errp); }
0
988
static void eeprom_generate(eeprom24c0x_t *eeprom, ram_addr_t ram_size) { enum { SDR = 0x4, DDR2 = 0x8 } type; uint8_t *spd = eeprom->contents; uint8_t nbanks = 0; uint16_t density = 0; int i; /* work in terms of MB */ ram_size >>= 20; while ((ram_size >= 4) && (nbanks <= 2)) { int sz_log2 = MIN(31 - clz32(ram_size), 14); nbanks++; density |= 1 << (sz_log2 - 2); ram_size -= 1 << sz_log2; } /* split to 2 banks if possible */ if ((nbanks == 1) && (density > 1)) { nbanks++; density >>= 1; } if (density & 0xff00) { density = (density & 0xe0) | ((density >> 8) & 0x1f); type = DDR2; } else if (!(density & 0x1f)) { type = DDR2; } else { type = SDR; } if (ram_size) { fprintf(stderr, "Warning: SPD cannot represent final %dMB" " of SDRAM\n", (int)ram_size); } /* fill in SPD memory information */ spd[2] = type; spd[5] = nbanks; spd[31] = density; /* checksum */ spd[63] = 0; for (i = 0; i < 63; i++) { spd[63] += spd[i]; } }
0
989
static int virtio_blk_init_pci(PCIDevice *pci_dev) { VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev); VirtIODevice *vdev; if (proxy->class_code != PCI_CLASS_STORAGE_SCSI && proxy->class_code != PCI_CLASS_STORAGE_OTHER) proxy->class_code = PCI_CLASS_STORAGE_SCSI; if (!proxy->block.dinfo) { error_report("virtio-blk-pci: drive property not set"); return -1; } vdev = virtio_blk_init(&pci_dev->qdev, &proxy->block); vdev->nvectors = proxy->nvectors; virtio_init_pci(proxy, vdev, PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_DEVICE_ID_VIRTIO_BLOCK, proxy->class_code, 0x00); /* make the actual value visible */ proxy->nvectors = vdev->nvectors; return 0; }
0
990
void s390_io_interrupt(S390CPU *cpu, uint16_t subchannel_id, uint16_t subchannel_nr, uint32_t io_int_parm, uint32_t io_int_word) { if (kvm_enabled()) { kvm_s390_io_interrupt(cpu, subchannel_id, subchannel_nr, io_int_parm, io_int_word); } else { cpu_inject_io(cpu, subchannel_id, subchannel_nr, io_int_parm, io_int_word); } }
0
991
static void pci_bridge_region_del(PCIBridge *br, PCIBridgeWindows *w) { PCIDevice *pd = PCI_DEVICE(br); PCIBus *parent = pd->bus; memory_region_del_subregion(parent->address_space_io, &w->alias_io); memory_region_del_subregion(parent->address_space_mem, &w->alias_mem); memory_region_del_subregion(parent->address_space_mem, &w->alias_pref_mem); pci_unregister_vga(pd); }
0
992
void bt_l2cap_psm_register(struct bt_l2cap_device_s *dev, int psm, int min_mtu, int (*new_channel)(struct bt_l2cap_device_s *dev, struct bt_l2cap_conn_params_s *params)) { struct bt_l2cap_psm_s *new_psm = l2cap_psm(dev, psm); if (new_psm) { fprintf(stderr, "%s: PSM %04x already registered for device `%s'.\n", __FUNCTION__, psm, dev->device.lmp_name); exit(-1); } new_psm = g_malloc0(sizeof(*new_psm)); new_psm->psm = psm; new_psm->min_mtu = min_mtu; new_psm->new_channel = new_channel; new_psm->next = dev->first_psm; dev->first_psm = new_psm; }
0
993
static int check_pow_970 (CPUPPCState *env) { if (env->spr[SPR_HID0] & 0x00600000) return 1; return 0; }
0
994
void do_info_usernet(Monitor *mon) { SlirpState *s; TAILQ_FOREACH(s, &slirp_stacks, entry) { monitor_printf(mon, "VLAN %d (%s):\n", s->vc->vlan->id, s->vc->name); slirp_connection_info(s->slirp, mon); } }
0
995
static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size) { const uint16_t *end; #if COMPILE_TEMPLATE_MMX const uint16_t *mm_end; #endif uint8_t *d = dst; const uint16_t *s = (const uint16_t*)src; end = s + src_size/2; #if COMPILE_TEMPLATE_MMX __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory"); mm_end = end - 3; while (s < mm_end) { __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq %1, %%mm1 \n\t" "movq %1, %%mm2 \n\t" "pand %2, %%mm0 \n\t" "pand %3, %%mm1 \n\t" "pand %4, %%mm2 \n\t" "psllq $3, %%mm0 \n\t" "psrlq $3, %%mm1 \n\t" "psrlq $8, %%mm2 \n\t" PACK_RGB32 :"=m"(*d) :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r) :"memory"); d += 16; s += 4; } __asm__ volatile(SFENCE:::"memory"); __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { register uint16_t bgr; bgr = *s++; #if HAVE_BIGENDIAN *d++ = 255; *d++ = (bgr&0xF800)>>8; *d++ = (bgr&0x7E0)>>3; *d++ = (bgr&0x1F)<<3; #else *d++ = (bgr&0x1F)<<3; *d++ = (bgr&0x7E0)>>3; *d++ = (bgr&0xF800)>>8; *d++ = 255; #endif } }
0
996
int ff_xvmc_field_start(MpegEncContext*s, AVCodecContext *avctx) { struct xvmc_pixfmt_render *last, *next, *render = (struct xvmc_pixfmt_render*)s->current_picture.data[2]; const int mb_block_count = 4 + (1 << s->chroma_format); assert(avctx); if (!render || render->magic_id != AV_XVMC_RENDER_MAGIC || !render->data_blocks || !render->mv_blocks){ av_log(avctx, AV_LOG_ERROR, "Render token doesn't look as expected.\n"); return -1; // make sure that this is a render packet } render->picture_structure = s->picture_structure; render->flags = s->first_field ? 0 : XVMC_SECOND_FIELD; if (render->filled_mv_blocks_num) { av_log(avctx, AV_LOG_ERROR, "Rendering surface contains %i unprocessed blocks.\n", render->filled_mv_blocks_num); return -1; } if (render->total_number_of_mv_blocks < 1 || render->total_number_of_data_blocks < mb_block_count) { av_log(avctx, AV_LOG_ERROR, "Rendering surface doesn't provide enough block structures to work with.\n"); return -1; } if (render->total_number_of_mv_blocks < 1 || render->total_number_of_data_blocks < mb_block_count) { av_log(avctx, AV_LOG_ERROR, "Rendering surface doesn't provide enough block structures to work with.\n"); return -1; } render->p_future_surface = NULL; render->p_past_surface = NULL; switch(s->pict_type) { case FF_I_TYPE: return 0; // no prediction from other frames case FF_B_TYPE: next = (struct xvmc_pixfmt_render*)s->next_picture.data[2]; if (!next) return -1; if (next->magic_id != AV_XVMC_RENDER_MAGIC) return -1; render->p_future_surface = next->p_surface; // no return here, going to set forward prediction case FF_P_TYPE: last = (struct xvmc_pixfmt_render*)s->last_picture.data[2]; if (!last) last = render; // predict second field from the first if (last->magic_id != AV_XVMC_RENDER_MAGIC) return -1; render->p_past_surface = last->p_surface; return 0; } return -1; }
0
997
int64_t url_fseek(ByteIOContext *s, int64_t offset, int whence) { int64_t offset1; int64_t pos; int force = whence & AVSEEK_FORCE; whence &= ~AVSEEK_FORCE; if(!s) return AVERROR(EINVAL); pos = s->pos - (s->write_flag ? 0 : (s->buf_end - s->buffer)); if (whence != SEEK_CUR && whence != SEEK_SET) return AVERROR(EINVAL); if (whence == SEEK_CUR) { offset1 = pos + (s->buf_ptr - s->buffer); if (offset == 0) return offset1; offset += offset1; } offset1 = offset - pos; if (!s->must_flush && offset1 >= 0 && offset1 <= (s->buf_end - s->buffer)) { /* can do the seek inside the buffer */ s->buf_ptr = s->buffer + offset1; } else if(s->is_streamed && !s->write_flag && offset1 >= 0 && (whence != SEEK_END || force)) { while(s->pos < offset && !s->eof_reached) fill_buffer(s); if (s->eof_reached) return AVERROR_EOF; s->buf_ptr = s->buf_end + offset - s->pos; } else { int64_t res = AVERROR(EPIPE); #if CONFIG_MUXERS || CONFIG_NETWORK if (s->write_flag) { flush_buffer(s); s->must_flush = 1; } #endif /* CONFIG_MUXERS || CONFIG_NETWORK */ if (!s->seek || (res = s->seek(s->opaque, offset, SEEK_SET)) < 0) return res; if (!s->write_flag) s->buf_end = s->buffer; s->buf_ptr = s->buffer; s->pos = offset; } s->eof_reached = 0; return offset; }
0
998
int ff_listen_connect(int fd, const struct sockaddr *addr, socklen_t addrlen, int timeout, URLContext *h) { struct pollfd p = {fd, POLLOUT, 0}; int ret; socklen_t optlen; ff_socket_nonblock(fd, 1); while ((ret = connect(fd, addr, addrlen))) { ret = ff_neterrno(); switch (ret) { case AVERROR(EINTR): if (ff_check_interrupt(&h->interrupt_callback)) return AVERROR_EXIT; continue; case AVERROR(EINPROGRESS): case AVERROR(EAGAIN): while (timeout--) { if (ff_check_interrupt(&h->interrupt_callback)) return AVERROR_EXIT; ret = poll(&p, 1, 100); if (ret > 0) break; } if (ret <= 0) return AVERROR(ETIMEDOUT); optlen = sizeof(ret); if (getsockopt (fd, SOL_SOCKET, SO_ERROR, &ret, &optlen)) ret = AVUNERROR(ff_neterrno()); if (ret != 0) { char errbuf[100]; ret = AVERROR(ret); av_strerror(ret, errbuf, sizeof(errbuf)); av_log(h, AV_LOG_ERROR, "Connection to %s failed: %s\n", h->filename, errbuf); } default: return ret; } } return ret; }
1
999
static int get_uint32_equal(QEMUFile *f, void *pv, size_t size) { uint32_t *v = pv; uint32_t v2; qemu_get_be32s(f, &v2); if (*v == v2) { return 0; } return -EINVAL; }
1
1,000
int socket_dgram(SocketAddress *remote, SocketAddress *local, Error **errp) { QemuOpts *opts; int fd; opts = qemu_opts_create_nofail(&socket_optslist); switch (remote->kind) { case SOCKET_ADDRESS_KIND_INET: qemu_opt_set(opts, "host", remote->inet->host); qemu_opt_set(opts, "port", remote->inet->port); if (local) { qemu_opt_set(opts, "localaddr", local->inet->host); qemu_opt_set(opts, "localport", local->inet->port); } fd = inet_dgram_opts(opts, errp); break; default: error_setg(errp, "socket type unsupported for datagram"); return -1; } qemu_opts_del(opts); return fd; }
1
1,001
static void cpu_common_initfn(Object *obj) { CPUState *cpu = CPU(obj); CPUClass *cc = CPU_GET_CLASS(obj); cpu->cpu_index = UNASSIGNED_CPU_INDEX; cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs; /* *-user doesn't have configurable SMP topology */ /* the default value is changed by qemu_init_vcpu() for softmmu */ cpu->nr_cores = 1; cpu->nr_threads = 1; qemu_mutex_init(&cpu->work_mutex); QTAILQ_INIT(&cpu->breakpoints); QTAILQ_INIT(&cpu->watchpoints); cpu->trace_dstate = bitmap_new(trace_get_vcpu_event_count()); cpu_exec_initfn(cpu); }
1
1,002
static void read_sbr_single_channel_element(AACContext *ac, SpectralBandReplication *sbr, GetBitContext *gb) { if (get_bits1(gb)) // bs_data_extra skip_bits(gb, 4); // bs_reserved read_sbr_grid(ac, sbr, gb, &sbr->data[0]); read_sbr_dtdf(sbr, gb, &sbr->data[0]); read_sbr_invf(sbr, gb, &sbr->data[0]); read_sbr_envelope(sbr, gb, &sbr->data[0], 0); read_sbr_noise(sbr, gb, &sbr->data[0], 0); if ((sbr->data[0].bs_add_harmonic_flag = get_bits1(gb))) get_bits1_vector(gb, sbr->data[0].bs_add_harmonic, sbr->n[1]); }
1
1,004
static int GLZWDecode(GifState * s, uint8_t * buf, int len) { int l, c, code, oc, fc; uint8_t *sp; if (s->end_code < 0) return 0; l = len; sp = s->sp; oc = s->oc; fc = s->fc; while (sp > s->stack) { *buf++ = *(--sp); if ((--l) == 0) goto the_end; } for (;;) { c = GetCode(s); if (c == s->end_code) { s->end_code = -1; break; } else if (c == s->clear_code) { s->cursize = s->codesize + 1; s->curmask = mask[s->cursize]; s->slot = s->newcodes; s->top_slot = 1 << s->cursize; while ((c = GetCode(s)) == s->clear_code); if (c == s->end_code) { s->end_code = -1; break; } /* test error */ if (c >= s->slot) c = 0; fc = oc = c; *buf++ = c; if ((--l) == 0) break; } else { code = c; if (code >= s->slot) { *sp++ = fc; code = oc; } while (code >= s->newcodes) { *sp++ = s->suffix[code]; code = s->prefix[code]; } *sp++ = code; if (s->slot < s->top_slot) { s->suffix[s->slot] = fc = code; s->prefix[s->slot++] = oc; oc = c; } if (s->slot >= s->top_slot) { if (s->cursize < MAXBITS) { s->top_slot <<= 1; s->curmask = mask[++s->cursize]; } } while (sp > s->stack) { *buf++ = *(--sp); if ((--l) == 0) goto the_end; } } } the_end: s->sp = sp; s->oc = oc; s->fc = fc; return len - l; }
1
1,005
bool throttle_is_valid(ThrottleConfig *cfg, Error **errp) { int i; bool bps_flag, ops_flag; bool bps_max_flag, ops_max_flag; bps_flag = cfg->buckets[THROTTLE_BPS_TOTAL].avg && (cfg->buckets[THROTTLE_BPS_READ].avg || cfg->buckets[THROTTLE_BPS_WRITE].avg); ops_flag = cfg->buckets[THROTTLE_OPS_TOTAL].avg && (cfg->buckets[THROTTLE_OPS_READ].avg || cfg->buckets[THROTTLE_OPS_WRITE].avg); bps_max_flag = cfg->buckets[THROTTLE_BPS_TOTAL].max && (cfg->buckets[THROTTLE_BPS_READ].max || cfg->buckets[THROTTLE_BPS_WRITE].max); ops_max_flag = cfg->buckets[THROTTLE_OPS_TOTAL].max && (cfg->buckets[THROTTLE_OPS_READ].max || cfg->buckets[THROTTLE_OPS_WRITE].max); if (bps_flag || ops_flag || bps_max_flag || ops_max_flag) { error_setg(errp, "bps/iops/max total values and read/write values" " cannot be used at the same time"); if (cfg->op_size && !cfg->buckets[THROTTLE_OPS_TOTAL].avg && !cfg->buckets[THROTTLE_OPS_READ].avg && !cfg->buckets[THROTTLE_OPS_WRITE].avg) { error_setg(errp, "iops size requires an iops value to be set"); for (i = 0; i < BUCKETS_COUNT; i++) { LeakyBucket *bkt = &cfg->buckets[i]; if (bkt->avg > THROTTLE_VALUE_MAX || bkt->max > THROTTLE_VALUE_MAX) { error_setg(errp, "bps/iops/max values must be within [0, %lld]", THROTTLE_VALUE_MAX); if (!bkt->burst_length) { error_setg(errp, "the burst length cannot be 0"); if (bkt->burst_length > 1 && !bkt->max) { error_setg(errp, "burst length set without burst rate"); if (bkt->max && !bkt->avg) { error_setg(errp, "bps_max/iops_max require corresponding" " bps/iops values"); if (bkt->max && bkt->max < bkt->avg) { error_setg(errp, "bps_max/iops_max cannot be lower than bps/iops"); return true;
1
1,006
static void put_ebml_uint(ByteIOContext *pb, unsigned int elementid, uint64_t val) { int i, bytes = 1; while (val >> bytes*8 && bytes < 8) bytes++; put_ebml_id(pb, elementid); put_ebml_num(pb, bytes, 0); for (i = bytes - 1; i >= 0; i--) put_byte(pb, val >> i*8); }
1
1,007
PCIBus *pci_grackle_init(uint32_t base, qemu_irq *pic) { DeviceState *dev; SysBusDevice *s; GrackleState *d; dev = qdev_create(NULL, "grackle"); qdev_init(dev); s = sysbus_from_qdev(dev); d = FROM_SYSBUS(GrackleState, s); d->host_state.bus = pci_register_bus(&d->busdev.qdev, "pci", pci_grackle_set_irq, pci_grackle_map_irq, pic, 0, 4); pci_create_simple(d->host_state.bus, 0, "grackle"); sysbus_mmio_map(s, 0, base); sysbus_mmio_map(s, 1, base + 0x00200000); return d->host_state.bus; }
1
1,008
static void xan_unpack(unsigned char *dest, const unsigned char *src, int dest_len) { unsigned char opcode; int size; unsigned char *dest_end = dest + dest_len; while (dest < dest_end) { opcode = *src++; if (opcode < 0xe0) { int size2, back; if ( (opcode & 0x80) == 0 ) { size = opcode & 3; back = ((opcode & 0x60) << 3) + *src++ + 1; size2 = ((opcode & 0x1c) >> 2) + 3; } else if ( (opcode & 0x40) == 0 ) { size = *src >> 6; back = (bytestream_get_be16(&src) & 0x3fff) + 1; size2 = (opcode & 0x3f) + 4; } else { size = opcode & 3; back = ((opcode & 0x10) << 12) + bytestream_get_be16(&src) + 1; size2 = ((opcode & 0x0c) << 6) + *src++ + 5; if (size + size2 > dest_end - dest) return; } memcpy(dest, src, size); dest += size; src += size; av_memcpy_backptr(dest, back, size2); dest += size2; } else { int finish = opcode >= 0xfc; size = finish ? opcode & 3 : ((opcode & 0x1f) << 2) + 4; memcpy(dest, src, size); dest += size; src += size; if (finish) return; } } }
0
1,009
static int adx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { int buf_size = avpkt->size; ADXContext *c = avctx->priv_data; int16_t *samples; const uint8_t *buf = avpkt->data; int num_blocks, ch, ret; if (c->eof) { *got_frame_ptr = 0; return buf_size; } if (!c->header_parsed && buf_size >= 2 && AV_RB16(buf) == 0x8000) { int header_size; if ((ret = avpriv_adx_decode_header(avctx, buf, buf_size, &header_size, c->coeff)) < 0) { av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n"); return AVERROR_INVALIDDATA; } c->channels = avctx->channels; c->header_parsed = 1; if (buf_size < header_size) return AVERROR_INVALIDDATA; buf += header_size; buf_size -= header_size; } if (!c->header_parsed) return AVERROR_INVALIDDATA; /* calculate number of blocks in the packet */ num_blocks = buf_size / (BLOCK_SIZE * c->channels); /* if the packet is not an even multiple of BLOCK_SIZE, check for an EOF packet */ if (!num_blocks || buf_size % (BLOCK_SIZE * avctx->channels)) { if (buf_size >= 4 && (AV_RB16(buf) & 0x8000)) { c->eof = 1; *got_frame_ptr = 0; return avpkt->size; } return AVERROR_INVALIDDATA; } /* get output buffer */ c->frame.nb_samples = num_blocks * BLOCK_SAMPLES; if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } samples = (int16_t *)c->frame.data[0]; while (num_blocks--) { for (ch = 0; ch < c->channels; ch++) { if (adx_decode(c, samples + ch, buf, ch)) { c->eof = 1; buf = avpkt->data + avpkt->size; break; } buf_size -= BLOCK_SIZE; buf += BLOCK_SIZE; } samples += BLOCK_SAMPLES * c->channels; } *got_frame_ptr = 1; *(AVFrame *)data = c->frame; return buf - avpkt->data; }
1
1,010
static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) { int i; assert(src1==src2); for(i=0; i<width; i++) { int d0= ((uint32_t*)src1)[i]; int dl= (d0&0x03E07C1F); int dh= ((d0>>5)&0x03E0F81F); int dh2= (dh>>11) + (dh<<21); int d= dh2 + dl; int b= d&0x7F; int r= (d>>10)&0x7F; int g= d>>21; dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1-3)) + 128; dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1-3)) + 128; } }
1
1,011
static int matroska_parse_cluster_incremental(MatroskaDemuxContext *matroska) { EbmlList *blocks_list; MatroskaBlock *blocks; int i, res; res = ebml_parse(matroska, matroska_cluster_incremental_parsing, &matroska->current_cluster); if (res == 1) { /* New Cluster */ if (matroska->current_cluster_pos) ebml_level_end(matroska); ebml_free(matroska_cluster, &matroska->current_cluster); memset(&matroska->current_cluster, 0, sizeof(MatroskaCluster)); matroska->current_cluster_num_blocks = 0; matroska->current_cluster_pos = avio_tell(matroska->ctx->pb); matroska->prev_pkt = NULL; /* sizeof the ID which was already read */ if (matroska->current_id) matroska->current_cluster_pos -= 4; res = ebml_parse(matroska, matroska_clusters_incremental, &matroska->current_cluster); /* Try parsing the block again. */ if (res == 1) res = ebml_parse(matroska, matroska_cluster_incremental_parsing, &matroska->current_cluster); } if (!res && matroska->current_cluster_num_blocks < matroska->current_cluster.blocks.nb_elem) { blocks_list = &matroska->current_cluster.blocks; blocks = blocks_list->elem; matroska->current_cluster_num_blocks = blocks_list->nb_elem; i = blocks_list->nb_elem - 1; if (blocks[i].bin.size > 0 && blocks[i].bin.data) { int is_keyframe = blocks[i].non_simple ? !blocks[i].reference : -1; uint8_t* additional = blocks[i].additional.size > 0 ? blocks[i].additional.data : NULL; if (!blocks[i].non_simple) blocks[i].duration = 0; res = matroska_parse_block(matroska, blocks[i].bin.data, blocks[i].bin.size, blocks[i].bin.pos, matroska->current_cluster.timecode, blocks[i].duration, is_keyframe, additional, blocks[i].additional_id, blocks[i].additional.size, matroska->current_cluster_pos); } } if (res < 0) matroska->done = 1; return res; }
1
1,012
static int cache_read(URLContext *h, unsigned char *buf, int size) { Context *c= h->priv_data; CacheEntry *entry, *next[2] = {NULL, NULL}; int r; entry = av_tree_find(c->root, &c->logical_pos, cmp, (void**)next); if (!entry) entry = next[0]; if (entry) { int64_t in_block_pos = c->logical_pos - entry->logical_pos; av_assert0(entry->logical_pos <= c->logical_pos); if (in_block_pos < entry->size) { int64_t physical_target = entry->physical_pos + in_block_pos; if (c->cache_pos != physical_target) { r = lseek(c->fd, physical_target, SEEK_SET); } else r = c->cache_pos; if (r >= 0) { c->cache_pos = r; r = read(c->fd, buf, FFMIN(size, entry->size - in_block_pos)); } if (r > 0) { c->cache_pos += r; c->logical_pos += r; c->cache_hit ++; return r; } } } // Cache miss or some kind of fault with the cache if (c->logical_pos != c->inner_pos) { r = ffurl_seek(c->inner, c->logical_pos, SEEK_SET); if (r<0) { av_log(h, AV_LOG_ERROR, "Failed to perform internal seek\n"); return r; } c->inner_pos = r; } r = ffurl_read(c->inner, buf, size); if (r == 0 && size>0) { c->is_true_eof = 1; av_assert0(c->end >= c->logical_pos); } if (r<=0) return r; c->inner_pos += r; c->cache_miss ++; add_entry(h, buf, r); c->logical_pos += r; c->end = FFMAX(c->end, c->logical_pos); return r; }
0
1,013
const AVOption *av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags) { AVClass *c = *(AVClass**)obj; const AVOption *o = NULL; if (c->opt_find && search_flags & AV_OPT_SEARCH_CHILDREN && (o = c->opt_find(obj, name, unit, opt_flags, search_flags))) return o; while (o = av_next_option(obj, o)) { if (!strcmp(o->name, name) && (!unit || (o->unit && !strcmp(o->unit, unit))) && (o->flags & opt_flags) == opt_flags) return o; } return NULL; }
0
1,014
static int vcr1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { VCR1Context *const a = avctx->priv_data; AVFrame *const p = data; const uint8_t *bytestream = avpkt->data; const uint8_t *bytestream_end = bytestream + avpkt->size; int i, x, y, ret; if(avpkt->size < 16 + avctx->height + avctx->width*avctx->height*5/8){ av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n"); return AVERROR(EINVAL); } if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->pict_type = AV_PICTURE_TYPE_I; p->key_frame = 1; for (i = 0; i < 16; i++) { a->delta[i] = *bytestream++; bytestream++; } for (y = 0; y < avctx->height; y++) { int offset; uint8_t *luma = &p->data[0][y * p->linesize[0]]; if ((y & 3) == 0) { uint8_t *cb = &p->data[1][(y >> 2) * p->linesize[1]]; uint8_t *cr = &p->data[2][(y >> 2) * p->linesize[2]]; av_assert0 (bytestream_end - bytestream >= 4 + avctx->width); for (i = 0; i < 4; i++) a->offset[i] = *bytestream++; offset = a->offset[0] - a->delta[bytestream[2] & 0xF]; for (x = 0; x < avctx->width; x += 4) { luma[0] = offset += a->delta[bytestream[2] & 0xF]; luma[1] = offset += a->delta[bytestream[2] >> 4]; luma[2] = offset += a->delta[bytestream[0] & 0xF]; luma[3] = offset += a->delta[bytestream[0] >> 4]; luma += 4; *cb++ = bytestream[3]; *cr++ = bytestream[1]; bytestream += 4; } } else { av_assert0 (bytestream_end - bytestream >= avctx->width / 2); offset = a->offset[y & 3] - a->delta[bytestream[2] & 0xF]; for (x = 0; x < avctx->width; x += 8) { luma[0] = offset += a->delta[bytestream[2] & 0xF]; luma[1] = offset += a->delta[bytestream[2] >> 4]; luma[2] = offset += a->delta[bytestream[3] & 0xF]; luma[3] = offset += a->delta[bytestream[3] >> 4]; luma[4] = offset += a->delta[bytestream[0] & 0xF]; luma[5] = offset += a->delta[bytestream[0] >> 4]; luma[6] = offset += a->delta[bytestream[1] & 0xF]; luma[7] = offset += a->delta[bytestream[1] >> 4]; luma += 8; bytestream += 4; } } } *got_frame = 1; return bytestream - avpkt->data; }
0
1,015
static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice, unsigned position, unsigned size) { const H264Context *h = avctx->priv_data; struct dxva_context *ctx = avctx->hwaccel_context; unsigned list; memset(slice, 0, sizeof(*slice)); slice->BSNALunitDataLocation = position; slice->SliceBytesInBuffer = size; slice->wBadSliceChopping = 0; slice->first_mb_in_slice = (h->mb_y >> FIELD_OR_MBAFF_PICTURE(h)) * h->mb_width + h->mb_x; slice->NumMbsForSlice = 0; /* XXX it is set once we have all slices */ slice->BitOffsetToSliceData = get_bits_count(&h->gb); slice->slice_type = ff_h264_get_slice_type(h); if (h->slice_type_fixed) slice->slice_type += 5; slice->luma_log2_weight_denom = h->luma_log2_weight_denom; slice->chroma_log2_weight_denom = h->chroma_log2_weight_denom; if (h->list_count > 0) slice->num_ref_idx_l0_active_minus1 = h->ref_count[0] - 1; if (h->list_count > 1) slice->num_ref_idx_l1_active_minus1 = h->ref_count[1] - 1; slice->slice_alpha_c0_offset_div2 = h->slice_alpha_c0_offset / 2; slice->slice_beta_offset_div2 = h->slice_beta_offset / 2; slice->Reserved8Bits = 0; for (list = 0; list < 2; list++) { unsigned i; for (i = 0; i < FF_ARRAY_ELEMS(slice->RefPicList[list]); i++) { if (list < h->list_count && i < h->ref_count[list]) { const Picture *r = &h->ref_list[list][i]; unsigned plane; fill_picture_entry(&slice->RefPicList[list][i], ff_dxva2_get_surface_index(ctx, r), r->reference == PICT_BOTTOM_FIELD); for (plane = 0; plane < 3; plane++) { int w, o; if (plane == 0 && h->luma_weight_flag[list]) { w = h->luma_weight[i][list][0]; o = h->luma_weight[i][list][1]; } else if (plane >= 1 && h->chroma_weight_flag[list]) { w = h->chroma_weight[i][list][plane-1][0]; o = h->chroma_weight[i][list][plane-1][1]; } else { w = 1 << (plane == 0 ? h->luma_log2_weight_denom : h->chroma_log2_weight_denom); o = 0; } slice->Weights[list][i][plane][0] = w; slice->Weights[list][i][plane][1] = o; } } else { unsigned plane; slice->RefPicList[list][i].bPicEntry = 0xff; for (plane = 0; plane < 3; plane++) { slice->Weights[list][i][plane][0] = 0; slice->Weights[list][i][plane][1] = 0; } } } } slice->slice_qs_delta = 0; /* XXX not implemented by FFmpeg */ slice->slice_qp_delta = h->qscale - h->pps.init_qp; slice->redundant_pic_cnt = h->redundant_pic_count; if (h->slice_type == AV_PICTURE_TYPE_B) slice->direct_spatial_mv_pred_flag = h->direct_spatial_mv_pred; slice->cabac_init_idc = h->pps.cabac ? h->cabac_init_idc : 0; if (h->deblocking_filter < 2) slice->disable_deblocking_filter_idc = 1 - h->deblocking_filter; else slice->disable_deblocking_filter_idc = h->deblocking_filter; slice->slice_id = h->current_slice - 1; }
0
1,016
static char* mpjpeg_get_boundary(AVIOContext* pb) { uint8_t *mime_type = NULL; const char *start; const char *end; uint8_t *res = NULL; int len; /* get MIME type, and skip to the first parameter */ av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type); start = mime_type; while (start != NULL && *start != '\0') { start = strchr(start, ';'); if (start) start = start+1; while (av_isspace(*start)) start++; if (!av_stristart(start, "boundary=", &start)) { end = strchr(start, ';'); if (end) len = end - start - 1; else len = strlen(start); res = av_strndup(start, len); break; } } av_freep(&mime_type); return res; }
0
1,017
static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset) { AVPacket pkt1, *pkt = &pkt1; AVStream *st; int read_size, i, ret; int64_t end_time; int64_t filesize, offset, duration; int retry = 0; /* flush packet queue */ flush_packet_queue(ic); for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE && st->codec->codec_type != AVMEDIA_TYPE_UNKNOWN) av_log(st->codec, AV_LOG_WARNING, "start time for stream %d is not set in estimate_timings_from_pts\n", i); if (st->parser) { av_parser_close(st->parser); st->parser = NULL; } } /* estimate the end time (duration) */ /* XXX: may need to support wrapping */ filesize = ic->pb ? avio_size(ic->pb) : 0; end_time = AV_NOPTS_VALUE; do { offset = filesize - (DURATION_MAX_READ_SIZE << retry); if (offset < 0) offset = 0; avio_seek(ic->pb, offset, SEEK_SET); read_size = 0; for (;;) { if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0))) break; do { ret = ff_read_packet(ic, pkt); } while (ret == AVERROR(EAGAIN)); if (ret != 0) break; read_size += pkt->size; st = ic->streams[pkt->stream_index]; if (pkt->pts != AV_NOPTS_VALUE && (st->start_time != AV_NOPTS_VALUE || st->first_dts != AV_NOPTS_VALUE)) { duration = end_time = pkt->pts + pkt->duration; if (st->start_time != AV_NOPTS_VALUE) duration -= st->start_time; else duration -= st->first_dts; if (duration > 0) { if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<= 0 || (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num)) st->duration = duration; st->info->last_duration = duration; } } av_free_packet(pkt); } } while (end_time == AV_NOPTS_VALUE && filesize > (DURATION_MAX_READ_SIZE << retry) && ++retry <= DURATION_MAX_RETRY); /* warn about audio/video streams which duration could not be estimated */ for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->duration == AV_NOPTS_VALUE) { switch (st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_AUDIO: if (st->start_time != AV_NOPTS_VALUE || st->first_dts != AV_NOPTS_VALUE) { av_log(ic, AV_LOG_DEBUG, "stream %d : no PTS found at end of file, duration not set\n", i); } else av_log(ic, AV_LOG_DEBUG, "stream %d : no TS found at start of file, duration not set\n", i); } } } fill_all_stream_timings(ic); avio_seek(ic->pb, old_offset, SEEK_SET); for (i = 0; i < ic->nb_streams; i++) { int j; st = ic->streams[i]; st->cur_dts = st->first_dts; st->last_IP_pts = AV_NOPTS_VALUE; st->last_dts_for_order_check = AV_NOPTS_VALUE; for (j = 0; j < MAX_REORDER_DELAY + 1; j++) st->pts_buffer[j] = AV_NOPTS_VALUE; } }
1
1,019
int init_put_byte(ByteIOContext *s, unsigned char *buffer, int buffer_size, int write_flag, void *opaque, int (*read_packet)(void *opaque, uint8_t *buf, int buf_size), int (*write_packet)(void *opaque, uint8_t *buf, int buf_size), int64_t (*seek)(void *opaque, int64_t offset, int whence)) { s->buffer = buffer; s->buffer_size = buffer_size; s->buf_ptr = buffer; url_resetbuf(s, write_flag ? URL_WRONLY : URL_RDONLY); s->opaque = opaque; s->write_packet = write_packet; s->read_packet = read_packet; s->seek = seek; s->pos = 0; s->must_flush = 0; s->eof_reached = 0; s->error = 0; s->is_streamed = 0; s->max_packet_size = 0; s->update_checksum= NULL; if(!read_packet && !write_flag){ s->pos = buffer_size; s->buf_end = s->buffer + buffer_size; } s->read_pause = NULL; s->read_seek = NULL; return 0; }
1
1,020
static void vc1_decode_skip_blocks(VC1Context *v) { MpegEncContext *s = &v->s; if (!v->s.last_picture.f.data[0]) return; ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END); s->first_slice_line = 1; for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) { s->mb_x = 0; init_block_index(v); ff_update_block_index(s); memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16); memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8); memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8); ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16); s->first_slice_line = 0; } s->pict_type = AV_PICTURE_TYPE_P; }
1
1,022
static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel, const float *coefs, const FFPsyWindowInfo *wi) { AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data; AacPsyChannel *pch = &pctx->ch[channel]; int start = 0; int i, w, g; float desired_bits, desired_pe, delta_pe, reduction, spread_en[128] = {0}; float a = 0.0f, active_lines = 0.0f, norm_fac = 0.0f; float pe = pctx->chan_bitrate > 32000 ? 0.0f : FFMAX(50.0f, 100.0f - pctx->chan_bitrate * 100.0f / 32000.0f); const int num_bands = ctx->num_bands[wi->num_windows == 8]; const uint8_t *band_sizes = ctx->bands[wi->num_windows == 8]; AacPsyCoeffs *coeffs = pctx->psy_coef[wi->num_windows == 8]; const float avoid_hole_thr = wi->num_windows == 8 ? PSY_3GPP_AH_THR_SHORT : PSY_3GPP_AH_THR_LONG; //calculate energies, initial thresholds and related values - 5.4.2 "Threshold Calculation" for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; float form_factor = 0.0f; band->energy = 0.0f; for (i = 0; i < band_sizes[g]; i++) { band->energy += coefs[start+i] * coefs[start+i]; form_factor += sqrtf(fabs(coefs[start+i])); } band->thr = band->energy * 0.001258925f; band->nz_lines = form_factor / powf(band->energy / band_sizes[g], 0.25f); start += band_sizes[g]; } } //modify thresholds and energies - spread, threshold in quiet, pre-echo control for (w = 0; w < wi->num_windows*16; w += 16) { AacPsyBand *bands = &pch->band[w]; //5.4.2.3 "Spreading" & 5.4.3 "Spreaded Energy Calculation" spread_en[0] = bands[0].energy; for (g = 1; g < num_bands; g++) { bands[g].thr = FFMAX(bands[g].thr, bands[g-1].thr * coeffs[g].spread_hi[0]); spread_en[w+g] = FFMAX(bands[g].energy, spread_en[w+g-1] * coeffs[g].spread_hi[1]); } for (g = num_bands - 2; g >= 0; g--) { bands[g].thr = FFMAX(bands[g].thr, bands[g+1].thr * coeffs[g].spread_low[0]); spread_en[w+g] = FFMAX(spread_en[w+g], spread_en[w+g+1] * coeffs[g].spread_low[1]); } //5.4.2.4 "Threshold in quiet" for (g = 0; g < num_bands; g++) { AacPsyBand *band = &bands[g]; band->thr_quiet = band->thr = FFMAX(band->thr, coeffs[g].ath); //5.4.2.5 "Pre-echo control" if (!(wi->window_type[0] == LONG_STOP_SEQUENCE || (wi->window_type[1] == LONG_START_SEQUENCE && !w))) band->thr = FFMAX(PSY_3GPP_RPEMIN*band->thr, FFMIN(band->thr, PSY_3GPP_RPELEV*pch->prev_band[w+g].thr_quiet)); /* 5.6.1.3.1 "Prepatory steps of the perceptual entropy calculation" */ pe += calc_pe_3gpp(band); a += band->pe_const; active_lines += band->active_lines; /* 5.6.1.3.3 "Selection of the bands for avoidance of holes" */ if (spread_en[w+g] * avoid_hole_thr > band->energy || coeffs[g].min_snr > 1.0f) band->avoid_holes = PSY_3GPP_AH_NONE; else band->avoid_holes = PSY_3GPP_AH_INACTIVE; } } /* 5.6.1.3.2 "Calculation of the desired perceptual entropy" */ ctx->ch[channel].entropy = pe; desired_bits = calc_bit_demand(pctx, pe, ctx->bitres.bits, ctx->bitres.size, wi->num_windows == 8); desired_pe = PSY_3GPP_BITS_TO_PE(desired_bits); /* NOTE: PE correction is kept simple. During initial testing it had very * little effect on the final bitrate. Probably a good idea to come * back and do more testing later. */ if (ctx->bitres.bits > 0) desired_pe *= av_clipf(pctx->pe.previous / PSY_3GPP_BITS_TO_PE(ctx->bitres.bits), 0.85f, 1.15f); pctx->pe.previous = PSY_3GPP_BITS_TO_PE(desired_bits); if (desired_pe < pe) { /* 5.6.1.3.4 "First Estimation of the reduction value" */ for (w = 0; w < wi->num_windows*16; w += 16) { reduction = calc_reduction_3gpp(a, desired_pe, pe, active_lines); pe = 0.0f; a = 0.0f; active_lines = 0.0f; for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction); /* recalculate PE */ pe += calc_pe_3gpp(band); a += band->pe_const; active_lines += band->active_lines; } } /* 5.6.1.3.5 "Second Estimation of the reduction value" */ for (i = 0; i < 2; i++) { float pe_no_ah = 0.0f, desired_pe_no_ah; active_lines = a = 0.0f; for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; if (band->avoid_holes != PSY_3GPP_AH_ACTIVE) { pe_no_ah += band->pe; a += band->pe_const; active_lines += band->active_lines; } } } desired_pe_no_ah = FFMAX(desired_pe - (pe - pe_no_ah), 0.0f); if (active_lines > 0.0f) reduction += calc_reduction_3gpp(a, desired_pe_no_ah, pe_no_ah, active_lines); pe = 0.0f; for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; if (active_lines > 0.0f) band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction); pe += calc_pe_3gpp(band); band->norm_fac = band->active_lines / band->thr; norm_fac += band->norm_fac; } } delta_pe = desired_pe - pe; if (fabs(delta_pe) > 0.05f * desired_pe) break; } if (pe < 1.15f * desired_pe) { /* 6.6.1.3.6 "Final threshold modification by linearization" */ norm_fac = 1.0f / norm_fac; for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; if (band->active_lines > 0.5f) { float delta_sfb_pe = band->norm_fac * norm_fac * delta_pe; float thr = band->thr; thr *= powf(2.0f, delta_sfb_pe / band->active_lines); if (thr > coeffs[g].min_snr * band->energy && band->avoid_holes == PSY_3GPP_AH_INACTIVE) thr = FFMAX(band->thr, coeffs[g].min_snr * band->energy); band->thr = thr; } } } } else { /* 5.6.1.3.7 "Further perceptual entropy reduction" */ g = num_bands; while (pe > desired_pe && g--) { for (w = 0; w < wi->num_windows*16; w+= 16) { AacPsyBand *band = &pch->band[w+g]; if (band->avoid_holes != PSY_3GPP_AH_NONE && coeffs[g].min_snr < PSY_SNR_1DB) { coeffs[g].min_snr = PSY_SNR_1DB; band->thr = band->energy * PSY_SNR_1DB; pe += band->active_lines * 1.5f - band->pe; } } } /* TODO: allow more holes (unused without mid/side) */ } } for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; FFPsyBand *psy_band = &ctx->ch[channel].psy_bands[w+g]; psy_band->threshold = band->thr; psy_band->energy = band->energy; } } memcpy(pch->prev_band, pch->band, sizeof(pch->band)); }
1
1,023
decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs, int is_tx32x32, int is8bitsperpixel, int bpp, unsigned (*cnt)[6][3], unsigned (*eob)[6][2], uint8_t (*p)[6][11], int nnz, const int16_t *scan, const int16_t (*nb)[2], const int16_t *band_counts, const int16_t *qmul) { int i = 0, band = 0, band_left = band_counts[band]; uint8_t *tp = p[0][nnz]; uint8_t cache[1024]; do { int val, rc; val = vp56_rac_get_prob_branchy(c, tp[0]); // eob eob[band][nnz][val]++; if (!val) break; skip_eob: if (!vp56_rac_get_prob_branchy(c, tp[1])) { // zero cnt[band][nnz][0]++; if (!--band_left) band_left = band_counts[++band]; cache[scan[i]] = 0; nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1; tp = p[band][nnz]; if (++i == n_coeffs) break; //invalid input; blocks should end with EOB goto skip_eob; } rc = scan[i]; if (!vp56_rac_get_prob_branchy(c, tp[2])) { // one cnt[band][nnz][1]++; val = 1; cache[rc] = 1; } else { // fill in p[3-10] (model fill) - only once per frame for each pos if (!tp[3]) memcpy(&tp[3], ff_vp9_model_pareto8[tp[2]], 8); cnt[band][nnz][2]++; if (!vp56_rac_get_prob_branchy(c, tp[3])) { // 2, 3, 4 if (!vp56_rac_get_prob_branchy(c, tp[4])) { cache[rc] = val = 2; } else { val = 3 + vp56_rac_get_prob(c, tp[5]); cache[rc] = 3; } } else if (!vp56_rac_get_prob_branchy(c, tp[6])) { // cat1/2 cache[rc] = 4; if (!vp56_rac_get_prob_branchy(c, tp[7])) { val = vp56_rac_get_prob(c, 159) + 5; } else { val = (vp56_rac_get_prob(c, 165) << 1) + 7; val += vp56_rac_get_prob(c, 145); } } else { // cat 3-6 cache[rc] = 5; if (!vp56_rac_get_prob_branchy(c, tp[8])) { if (!vp56_rac_get_prob_branchy(c, tp[9])) { val = 11 + (vp56_rac_get_prob(c, 173) << 2); val += (vp56_rac_get_prob(c, 148) << 1); val += vp56_rac_get_prob(c, 140); } else { val = 19 + (vp56_rac_get_prob(c, 176) << 3); val += (vp56_rac_get_prob(c, 155) << 2); val += (vp56_rac_get_prob(c, 140) << 1); val += vp56_rac_get_prob(c, 135); } } else if (!vp56_rac_get_prob_branchy(c, tp[10])) { val = (vp56_rac_get_prob(c, 180) << 4) + 35; val += (vp56_rac_get_prob(c, 157) << 3); val += (vp56_rac_get_prob(c, 141) << 2); val += (vp56_rac_get_prob(c, 134) << 1); val += vp56_rac_get_prob(c, 130); } else { val = 67; if (!is8bitsperpixel) { if (bpp == 12) { val += vp56_rac_get_prob(c, 255) << 17; val += vp56_rac_get_prob(c, 255) << 16; } val += (vp56_rac_get_prob(c, 255) << 15); val += (vp56_rac_get_prob(c, 255) << 14); } val += (vp56_rac_get_prob(c, 254) << 13); val += (vp56_rac_get_prob(c, 254) << 12); val += (vp56_rac_get_prob(c, 254) << 11); val += (vp56_rac_get_prob(c, 252) << 10); val += (vp56_rac_get_prob(c, 249) << 9); val += (vp56_rac_get_prob(c, 243) << 8); val += (vp56_rac_get_prob(c, 230) << 7); val += (vp56_rac_get_prob(c, 196) << 6); val += (vp56_rac_get_prob(c, 177) << 5); val += (vp56_rac_get_prob(c, 153) << 4); val += (vp56_rac_get_prob(c, 140) << 3); val += (vp56_rac_get_prob(c, 133) << 2); val += (vp56_rac_get_prob(c, 130) << 1); val += vp56_rac_get_prob(c, 129); } } } #define STORE_COEF(c, i, v) do { \ if (is8bitsperpixel) { \ c[i] = v; \ } else { \ AV_WN32A(&c[i * 2], v); \ } \ } while (0) if (!--band_left) band_left = band_counts[++band]; if (is_tx32x32) STORE_COEF(coef, rc, ((vp8_rac_get(c) ? -val : val) * qmul[!!i]) / 2); else STORE_COEF(coef, rc, (vp8_rac_get(c) ? -val : val) * qmul[!!i]); nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1; tp = p[band][nnz]; } while (++i < n_coeffs); return i; }
1
1,024
static void set_pci_devfn(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; uint32_t *ptr = qdev_get_prop_ptr(dev, prop); unsigned int slot, fn, n; Error *local_err = NULL; char *str = (char *)""; if (dev->state != DEV_STATE_CREATED) { error_set(errp, QERR_PERMISSION_DENIED); return; } visit_type_str(v, &str, name, &local_err); if (local_err) { error_free(local_err); return set_int32(obj, v, opaque, name, errp); } if (sscanf(str, "%x.%x%n", &slot, &fn, &n) != 2) { fn = 0; if (sscanf(str, "%x%n", &slot, &n) != 1) { goto invalid; } } if (str[n] != '\0' || fn > 7 || slot > 31) { goto invalid; } *ptr = slot << 3 | fn; return; invalid: error_set_from_qdev_prop_error(errp, EINVAL, dev, prop, str); }
1
1,025
static void error_callback_bh(void *opaque) { Coroutine *co = opaque; qemu_coroutine_enter(co); }
1
1,026
void *colo_process_incoming_thread(void *opaque) { MigrationIncomingState *mis = opaque; QEMUFile *fb = NULL; QIOChannelBuffer *bioc = NULL; /* Cache incoming device state */ uint64_t total_size; uint64_t value; Error *local_err = NULL; migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_COLO); failover_init_state(); mis->to_src_file = qemu_file_get_return_path(mis->from_src_file); if (!mis->to_src_file) { error_report("COLO incoming thread: Open QEMUFile to_src_file failed"); goto out; } /* * Note: the communication between Primary side and Secondary side * should be sequential, we set the fd to unblocked in migration incoming * coroutine, and here we are in the COLO incoming thread, so it is ok to * set the fd back to blocked. */ qemu_file_set_blocking(mis->from_src_file, true); bioc = qio_channel_buffer_new(COLO_BUFFER_BASE_SIZE); fb = qemu_fopen_channel_input(QIO_CHANNEL(bioc)); object_unref(OBJECT(bioc)); colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY, &local_err); if (local_err) { goto out; } while (mis->state == MIGRATION_STATUS_COLO) { int request; colo_wait_handle_message(mis->from_src_file, &request, &local_err); if (local_err) { goto out; } assert(request); if (failover_get_state() != FAILOVER_STATUS_NONE) { error_report("failover request"); goto out; } /* FIXME: This is unnecessary for periodic checkpoint mode */ colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_REPLY, &local_err); if (local_err) { goto out; } colo_receive_check_message(mis->from_src_file, COLO_MESSAGE_VMSTATE_SEND, &local_err); if (local_err) { goto out; } value = colo_receive_message_value(mis->from_src_file, COLO_MESSAGE_VMSTATE_SIZE, &local_err); if (local_err) { goto out; } /* * Read VM device state data into channel buffer, * It's better to re-use the memory allocated. * Here we need to handle the channel buffer directly. */ if (value > bioc->capacity) { bioc->capacity = value; bioc->data = g_realloc(bioc->data, bioc->capacity); } total_size = qemu_get_buffer(mis->from_src_file, bioc->data, value); if (total_size != value) { error_report("Got %" PRIu64 " VMState data, less than expected" " %" PRIu64, total_size, value); goto out; } bioc->usage = total_size; qio_channel_io_seek(QIO_CHANNEL(bioc), 0, 0, NULL); colo_send_message(mis->to_src_file, COLO_MESSAGE_VMSTATE_RECEIVED, &local_err); if (local_err) { goto out; } qemu_mutex_lock_iothread(); qemu_system_reset(VMRESET_SILENT); if (qemu_loadvm_state(fb) < 0) { error_report("COLO: loadvm failed"); qemu_mutex_unlock_iothread(); goto out; } qemu_mutex_unlock_iothread(); colo_send_message(mis->to_src_file, COLO_MESSAGE_VMSTATE_LOADED, &local_err); if (local_err) { goto out; } } out: /* Throw the unreported error message after exited from loop */ if (local_err) { error_report_err(local_err); } if (fb) { qemu_fclose(fb); } if (mis->to_src_file) { qemu_fclose(mis->to_src_file); } migration_incoming_exit_colo(); return NULL; }
1
1,027
static int get_uint8(QEMUFile *f, void *pv, size_t size) { uint8_t *v = pv; qemu_get_8s(f, v); return 0; }
1
1,028
static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr, unsigned size) { VirtIOPCIProxy *proxy = opaque; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev); uint64_t val = 0; if (addr < config) { return virtio_ioport_read(proxy, addr); } addr -= config; switch (size) { case 1: val = virtio_config_readb(vdev, addr); break; case 2: val = virtio_config_readw(vdev, addr); if (virtio_is_big_endian()) { val = bswap16(val); } break; case 4: val = virtio_config_readl(vdev, addr); if (virtio_is_big_endian()) { val = bswap32(val); } break; } return val; }
1
1,029
int net_init_socket(QemuOpts *opts, const char *name, VLANState *vlan) { if (qemu_opt_get(opts, "fd")) { int fd; if (qemu_opt_get(opts, "listen") || qemu_opt_get(opts, "connect") || qemu_opt_get(opts, "mcast") || qemu_opt_get(opts, "localaddr")) { error_report("listen=, connect=, mcast= and localaddr= is invalid with fd="); return -1; } fd = net_handle_fd_param(cur_mon, qemu_opt_get(opts, "fd")); if (fd == -1) { return -1; } if (!net_socket_fd_init(vlan, "socket", name, fd, 1)) { return -1; } } else if (qemu_opt_get(opts, "listen")) { const char *listen; if (qemu_opt_get(opts, "fd") || qemu_opt_get(opts, "connect") || qemu_opt_get(opts, "mcast") || qemu_opt_get(opts, "localaddr")) { error_report("fd=, connect=, mcast= and localaddr= is invalid with listen="); return -1; } listen = qemu_opt_get(opts, "listen"); if (net_socket_listen_init(vlan, "socket", name, listen) == -1) { return -1; } } else if (qemu_opt_get(opts, "connect")) { const char *connect; if (qemu_opt_get(opts, "fd") || qemu_opt_get(opts, "listen") || qemu_opt_get(opts, "mcast") || qemu_opt_get(opts, "localaddr")) { error_report("fd=, listen=, mcast= and localaddr= is invalid with connect="); return -1; } connect = qemu_opt_get(opts, "connect"); if (net_socket_connect_init(vlan, "socket", name, connect) == -1) { return -1; } } else if (qemu_opt_get(opts, "mcast")) { const char *mcast, *localaddr; if (qemu_opt_get(opts, "fd") || qemu_opt_get(opts, "connect") || qemu_opt_get(opts, "listen")) { error_report("fd=, connect= and listen= is invalid with mcast="); return -1; } mcast = qemu_opt_get(opts, "mcast"); localaddr = qemu_opt_get(opts, "localaddr"); if (net_socket_mcast_init(vlan, "socket", name, mcast, localaddr) == -1) { return -1; } } else if (qemu_opt_get(opts, "udp")) { const char *udp, *localaddr; if (qemu_opt_get(opts, "fd") || qemu_opt_get(opts, "connect") || qemu_opt_get(opts, "listen") || qemu_opt_get(opts, "mcast")) { error_report("fd=, connect=, listen=" " and mcast= is invalid with udp="); return -1; } udp = qemu_opt_get(opts, "udp"); localaddr = qemu_opt_get(opts, "localaddr"); if (localaddr == NULL) { error_report("localaddr= is mandatory with udp="); return -1; } if (net_socket_udp_init(vlan, "udp", name, udp, localaddr) == -1) { return -1; } } else { error_report("-socket requires fd=, listen=," " connect=, mcast= or udp="); return -1; } return 0; }
0
1,030
static int dvvideo_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { uint8_t *buf = avpkt->data; int buf_size = avpkt->size; DVVideoContext *s = avctx->priv_data; const uint8_t* vsc_pack; int apt, is16_9, ret; const DVprofile *sys; sys = avpriv_dv_frame_profile2(avctx, s->sys, buf, buf_size); if (!sys || buf_size < sys->frame_size) { av_log(avctx, AV_LOG_ERROR, "could not find dv frame profile\n"); return -1; /* NOTE: we only accept several full frames */ } if (sys != s->sys) { ret = ff_dv_init_dynamic_tables(s, sys); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Error initializing the work tables.\n"); return ret; } s->sys = sys; } s->frame = data; s->frame->key_frame = 1; s->frame->pict_type = AV_PICTURE_TYPE_I; avctx->pix_fmt = s->sys->pix_fmt; avctx->time_base = s->sys->time_base; ret = ff_set_dimensions(avctx, s->sys->width, s->sys->height); if (ret < 0) return ret; /* Determine the codec's sample_aspect ratio from the packet */ vsc_pack = buf + 80*5 + 48 + 5; if ( *vsc_pack == dv_video_control ) { apt = buf[4] & 0x07; is16_9 = (vsc_pack && ((vsc_pack[2] & 0x07) == 0x02 || (!apt && (vsc_pack[2] & 0x07) == 0x07))); ff_set_sar(avctx, s->sys->sar[is16_9]); } if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0) return ret; s->frame->interlaced_frame = 1; s->frame->top_field_first = 0; /* Determine the codec's field order from the packet */ if ( *vsc_pack == dv_video_control ) { s->frame->top_field_first = !(vsc_pack[3] & 0x40); } s->buf = buf; avctx->execute(avctx, dv_decode_video_segment, s->work_chunks, NULL, dv_work_pool_size(s->sys), sizeof(DVwork_chunk)); emms_c(); /* return image */ *got_frame = 1; return s->sys->frame_size; }
0
1,031
static int theora_decode_header(AVCodecContext *avctx, GetBitContext gb) { Vp3DecodeContext *s = avctx->priv_data; s->theora = get_bits_long(&gb, 24); av_log(avctx, AV_LOG_INFO, "Theora bitstream version %X\n", s->theora); /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */ /* but previous versions have the image flipped relative to vp3 */ if (s->theora < 0x030200) { s->flipped_image = 1; av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n"); } s->width = get_bits(&gb, 16) << 4; s->height = get_bits(&gb, 16) << 4; if(avcodec_check_dimensions(avctx, s->width, s->height)){ av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height); s->width= s->height= 0; return -1; } if (s->theora >= 0x030400) { skip_bits(&gb, 32); /* total number of superblocks in a frame */ // fixme, the next field is 36bits long skip_bits(&gb, 32); /* total number of blocks in a frame */ skip_bits(&gb, 4); /* total number of blocks in a frame */ skip_bits(&gb, 32); /* total number of macroblocks in a frame */ skip_bits(&gb, 24); /* frame width */ skip_bits(&gb, 24); /* frame height */ } else { skip_bits(&gb, 24); /* frame width */ skip_bits(&gb, 24); /* frame height */ } skip_bits(&gb, 8); /* offset x */ skip_bits(&gb, 8); /* offset y */ skip_bits(&gb, 32); /* fps numerator */ skip_bits(&gb, 32); /* fps denumerator */ skip_bits(&gb, 24); /* aspect numerator */ skip_bits(&gb, 24); /* aspect denumerator */ if (s->theora < 0x030200) skip_bits(&gb, 5); /* keyframe frequency force */ skip_bits(&gb, 8); /* colorspace */ if (s->theora >= 0x030400) skip_bits(&gb, 2); /* pixel format: 420,res,422,444 */ skip_bits(&gb, 24); /* bitrate */ skip_bits(&gb, 6); /* quality hint */ if (s->theora >= 0x030200) { skip_bits(&gb, 5); /* keyframe frequency force */ if (s->theora < 0x030400) skip_bits(&gb, 5); /* spare bits */ } // align_get_bits(&gb); avctx->width = s->width; avctx->height = s->height; return 0; }
1
1,032
static av_cold int init_buffers(SANMVideoContext *ctx) { av_fast_padded_malloc(&ctx->frm0, &ctx->frm0_size, ctx->buf_size); av_fast_padded_malloc(&ctx->frm1, &ctx->frm1_size, ctx->buf_size); av_fast_padded_malloc(&ctx->frm2, &ctx->frm2_size, ctx->buf_size); if (!ctx->version) av_fast_padded_malloc(&ctx->stored_frame, &ctx->stored_frame_size, ctx->buf_size); if (!ctx->frm0 || !ctx->frm1 || !ctx->frm2 || (!ctx->stored_frame && !ctx->version)) { destroy_buffers(ctx); return AVERROR(ENOMEM); } return 0; }
1
1,034
int unix_listen_opts(QemuOpts *opts) { struct sockaddr_un un; const char *path = qemu_opt_get(opts, "path"); int sock, fd; sock = socket(PF_UNIX, SOCK_STREAM, 0); if (sock < 0) { perror("socket(unix)"); return -1; } memset(&un, 0, sizeof(un)); un.sun_family = AF_UNIX; if (path && strlen(path)) { snprintf(un.sun_path, sizeof(un.sun_path), "%s", path); } else { char *tmpdir = getenv("TMPDIR"); snprintf(un.sun_path, sizeof(un.sun_path), "%s/qemu-socket-XXXXXX", tmpdir ? tmpdir : "/tmp"); /* * This dummy fd usage silences the mktemp() unsecure warning. * Using mkstemp() doesn't make things more secure here * though. bind() complains about existing files, so we have * to unlink first and thus re-open the race window. The * worst case possible is bind() failing, i.e. a DoS attack. */ fd = mkstemp(un.sun_path); close(fd); qemu_opt_set(opts, "path", un.sun_path); } unlink(un.sun_path); if (bind(sock, (struct sockaddr*) &un, sizeof(un)) < 0) { fprintf(stderr, "bind(unix:%s): %s\n", un.sun_path, strerror(errno)); goto err; } if (listen(sock, 1) < 0) { fprintf(stderr, "listen(unix:%s): %s\n", un.sun_path, strerror(errno)); goto err; } if (sockets_debug) fprintf(stderr, "bind(unix:%s): OK\n", un.sun_path); return sock; err: closesocket(sock); return -1; }
1
1,036
static void bus_add_child(BusState *bus, DeviceState *child) { char name[32]; BusChild *kid = g_malloc0(sizeof(*kid)); if (qdev_hotplug) { assert(bus->allow_hotplug); } kid->index = bus->max_index++; kid->child = child; object_ref(OBJECT(kid->child)); QTAILQ_INSERT_HEAD(&bus->children, kid, sibling); /* This transfers ownership of kid->child to the property. */ snprintf(name, sizeof(name), "child[%d]", kid->index); object_property_add_link(OBJECT(bus), name, object_get_typename(OBJECT(child)), (Object **)&kid->child, NULL); }
1
1,037
build_ssdt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu, AcpiPmInfo *pm, AcpiMiscInfo *misc, PcPciInfo *pci, PcGuestInfo *guest_info) { MachineState *machine = MACHINE(qdev_get_machine()); uint32_t nr_mem = machine->ram_slots; unsigned acpi_cpus = guest_info->apic_id_limit; Aml *ssdt, *sb_scope, *scope, *pkg, *dev, *method, *crs, *field, *ifctx; PCIBus *bus = NULL; GPtrArray *io_ranges = g_ptr_array_new_with_free_func(crs_range_free); GPtrArray *mem_ranges = g_ptr_array_new_with_free_func(crs_range_free); CrsRangeEntry *entry; int root_bus_limit = 0xFF; int i; ssdt = init_aml_allocator(); /* The current AML generator can cover the APIC ID range [0..255], * inclusive, for VCPU hotplug. */ QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256); g_assert(acpi_cpus <= ACPI_CPU_HOTPLUG_ID_LIMIT); /* Reserve space for header */ acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader)); /* Extra PCI root buses are implemented only for i440fx */ bus = find_i440fx(); if (bus) { QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); /* look only for expander root buses */ if (!pci_bus_is_root(bus)) { continue; } if (bus_num < root_bus_limit) { root_bus_limit = bus_num - 1; } scope = aml_scope("\\_SB"); dev = aml_device("PC%.02X", bus_num); aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); if (numa_node != NUMA_NODE_UNASSIGNED) { aml_append(dev, aml_name_decl("_PXM", aml_int(numa_node))); } aml_append(dev, build_prt()); crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), io_ranges, mem_ranges); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(ssdt, scope); } } scope = aml_scope("\\_SB.PCI0"); /* build PCI0._CRS */ crs = aml_resource_template(); aml_append(crs, aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, 0x0000, 0x0, root_bus_limit, 0x0000, root_bus_limit + 1)); aml_append(crs, aml_io(AML_DECODE16, 0x0CF8, 0x0CF8, 0x01, 0x08)); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, 0x0000, 0x0CF7, 0x0000, 0x0CF8)); crs_replace_with_free_ranges(io_ranges, 0x0D00, 0xFFFF); for (i = 0; i < io_ranges->len; i++) { entry = g_ptr_array_index(io_ranges, i); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, entry->base, entry->limit, 0x0000, entry->limit - entry->base + 1)); } aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, 0x000A0000, 0x000BFFFF, 0, 0x00020000)); crs_replace_with_free_ranges(mem_ranges, pci->w32.begin, pci->w32.end - 1); for (i = 0; i < mem_ranges->len; i++) { entry = g_ptr_array_index(mem_ranges, i); aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0, entry->base, entry->limit, 0, entry->limit - entry->base + 1)); } if (pci->w64.begin) { aml_append(crs, aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, pci->w64.begin, pci->w64.end - 1, 0, pci->w64.end - pci->w64.begin)); } aml_append(scope, aml_name_decl("_CRS", crs)); /* reserve GPE0 block resources */ dev = aml_device("GPE0"); aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); aml_append(dev, aml_name_decl("_UID", aml_string("GPE0 resources"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->gpe0_blk, pm->gpe0_blk, 1, pm->gpe0_blk_len) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); g_ptr_array_free(io_ranges, true); g_ptr_array_free(mem_ranges, true); /* reserve PCIHP resources */ if (pm->pcihp_io_len) { dev = aml_device("PHPR"); aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); aml_append(dev, aml_name_decl("_UID", aml_string("PCI Hotplug resources"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->pcihp_io_base, pm->pcihp_io_base, 1, pm->pcihp_io_len) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); } aml_append(ssdt, scope); /* create S3_ / S4_ / S5_ packages if necessary */ scope = aml_scope("\\"); if (!pm->s3_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(1)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(1)); /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S3", pkg)); } if (!pm->s4_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(pm->s4_val)); /* PM1a_CNT.SLP_TYP */ /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ aml_append(pkg, aml_int(pm->s4_val)); aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S4", pkg)); } pkg = aml_package(4); aml_append(pkg, aml_int(0)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(0)); /* PM1b_CNT.SLP_TYP not impl. */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S5", pkg)); aml_append(ssdt, scope); if (misc->applesmc_io_base) { scope = aml_scope("\\_SB.PCI0.ISA"); dev = aml_device("SMC"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("APP0001"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->applesmc_io_base, misc->applesmc_io_base, 0x01, APPLESMC_MAX_DATA_LENGTH) ); aml_append(crs, aml_irq_no_flags(6)); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(ssdt, scope); } if (misc->pvpanic_port) { scope = aml_scope("\\_SB.PCI0.ISA"); dev = aml_device("PEVT"); aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0001"))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->pvpanic_port, misc->pvpanic_port, 1, 1) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO, misc->pvpanic_port, 1)); field = aml_field("PEOR", AML_BYTE_ACC, AML_PRESERVE); aml_append(field, aml_named_field("PEPT", 8)); aml_append(dev, field); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); method = aml_method("RDPT", 0); aml_append(method, aml_store(aml_name("PEPT"), aml_local(0))); aml_append(method, aml_return(aml_local(0))); aml_append(dev, method); method = aml_method("WRPT", 1); aml_append(method, aml_store(aml_arg(0), aml_name("PEPT"))); aml_append(dev, method); aml_append(scope, dev); aml_append(ssdt, scope); } sb_scope = aml_scope("\\_SB"); { /* create PCI0.PRES device and its _CRS to reserve CPU hotplug MMIO */ dev = aml_device("PCI0." stringify(CPU_HOTPLUG_RESOURCE_DEVICE)); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A06"))); aml_append(dev, aml_name_decl("_UID", aml_string("CPU Hotplug resources")) ); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->cpu_hp_io_base, pm->cpu_hp_io_base, 1, pm->cpu_hp_io_len) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(sb_scope, dev); /* declare CPU hotplug MMIO region and PRS field to access it */ aml_append(sb_scope, aml_operation_region( "PRST", AML_SYSTEM_IO, pm->cpu_hp_io_base, pm->cpu_hp_io_len)); field = aml_field("PRST", AML_BYTE_ACC, AML_PRESERVE); aml_append(field, aml_named_field("PRS", 256)); aml_append(sb_scope, field); /* build Processor object for each processor */ for (i = 0; i < acpi_cpus; i++) { dev = aml_processor(i, 0, 0, "CP%.02X", i); method = aml_method("_MAT", 0); aml_append(method, aml_return(aml_call1("CPMA", aml_int(i)))); aml_append(dev, method); method = aml_method("_STA", 0); aml_append(method, aml_return(aml_call1("CPST", aml_int(i)))); aml_append(dev, method); method = aml_method("_EJ0", 1); aml_append(method, aml_return(aml_call2("CPEJ", aml_int(i), aml_arg(0))) ); aml_append(dev, method); aml_append(sb_scope, dev); } /* build this code: * Method(NTFY, 2) {If (LEqual(Arg0, 0x00)) {Notify(CP00, Arg1)} ...} */ /* Arg0 = Processor ID = APIC ID */ method = aml_method("NTFY", 2); for (i = 0; i < acpi_cpus; i++) { ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i))); aml_append(ifctx, aml_notify(aml_name("CP%.02X", i), aml_arg(1)) ); aml_append(method, ifctx); } aml_append(sb_scope, method); /* build "Name(CPON, Package() { One, One, ..., Zero, Zero, ... })" * * Note: The ability to create variable-sized packages was first * introduced in ACPI 2.0. ACPI 1.0 only allowed fixed-size packages * ith up to 255 elements. Windows guests up to win2k8 fail when * VarPackageOp is used. */ pkg = acpi_cpus <= 255 ? aml_package(acpi_cpus) : aml_varpackage(acpi_cpus); for (i = 0; i < acpi_cpus; i++) { uint8_t b = test_bit(i, cpu->found_cpus) ? 0x01 : 0x00; aml_append(pkg, aml_int(b)); } aml_append(sb_scope, aml_name_decl("CPON", pkg)); /* build memory devices */ assert(nr_mem <= ACPI_MAX_RAM_SLOTS); scope = aml_scope("\\_SB.PCI0." stringify(MEMORY_HOTPLUG_DEVICE)); aml_append(scope, aml_name_decl(stringify(MEMORY_SLOTS_NUMBER), aml_int(nr_mem)) ); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->mem_hp_io_base, pm->mem_hp_io_base, 0, pm->mem_hp_io_len) ); aml_append(scope, aml_name_decl("_CRS", crs)); aml_append(scope, aml_operation_region( stringify(MEMORY_HOTPLUG_IO_REGION), AML_SYSTEM_IO, pm->mem_hp_io_base, pm->mem_hp_io_len) ); field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), AML_DWORD_ACC, AML_PRESERVE); aml_append(field, /* read only */ aml_named_field(stringify(MEMORY_SLOT_ADDR_LOW), 32)); aml_append(field, /* read only */ aml_named_field(stringify(MEMORY_SLOT_ADDR_HIGH), 32)); aml_append(field, /* read only */ aml_named_field(stringify(MEMORY_SLOT_SIZE_LOW), 32)); aml_append(field, /* read only */ aml_named_field(stringify(MEMORY_SLOT_SIZE_HIGH), 32)); aml_append(field, /* read only */ aml_named_field(stringify(MEMORY_SLOT_PROXIMITY), 32)); aml_append(scope, field); field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), AML_BYTE_ACC, AML_WRITE_AS_ZEROS); aml_append(field, aml_reserved_field(160 /* bits, Offset(20) */)); aml_append(field, /* 1 if enabled, read only */ aml_named_field(stringify(MEMORY_SLOT_ENABLED), 1)); aml_append(field, /*(read) 1 if has a insert event. (write) 1 to clear event */ aml_named_field(stringify(MEMORY_SLOT_INSERT_EVENT), 1)); aml_append(field, /* (read) 1 if has a remove event. (write) 1 to clear event */ aml_named_field(stringify(MEMORY_SLOT_REMOVE_EVENT), 1)); aml_append(field, /* initiates device eject, write only */ aml_named_field(stringify(MEMORY_SLOT_EJECT), 1)); aml_append(scope, field); field = aml_field(stringify(MEMORY_HOTPLUG_IO_REGION), AML_DWORD_ACC, AML_PRESERVE); aml_append(field, /* DIMM selector, write only */ aml_named_field(stringify(MEMORY_SLOT_SLECTOR), 32)); aml_append(field, /* _OST event code, write only */ aml_named_field(stringify(MEMORY_SLOT_OST_EVENT), 32)); aml_append(field, /* _OST status code, write only */ aml_named_field(stringify(MEMORY_SLOT_OST_STATUS), 32)); aml_append(scope, field); aml_append(sb_scope, scope); for (i = 0; i < nr_mem; i++) { #define BASEPATH "\\_SB.PCI0." stringify(MEMORY_HOTPLUG_DEVICE) "." const char *s; dev = aml_device("MP%02X", i); aml_append(dev, aml_name_decl("_UID", aml_string("0x%02X", i))); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C80"))); method = aml_method("_CRS", 0); s = BASEPATH stringify(MEMORY_SLOT_CRS_METHOD); aml_append(method, aml_return(aml_call1(s, aml_name("_UID")))); aml_append(dev, method); method = aml_method("_STA", 0); s = BASEPATH stringify(MEMORY_SLOT_STATUS_METHOD); aml_append(method, aml_return(aml_call1(s, aml_name("_UID")))); aml_append(dev, method); method = aml_method("_PXM", 0); s = BASEPATH stringify(MEMORY_SLOT_PROXIMITY_METHOD); aml_append(method, aml_return(aml_call1(s, aml_name("_UID")))); aml_append(dev, method); method = aml_method("_OST", 3); s = BASEPATH stringify(MEMORY_SLOT_OST_METHOD); aml_append(method, aml_return(aml_call4( s, aml_name("_UID"), aml_arg(0), aml_arg(1), aml_arg(2) ))); aml_append(dev, method); method = aml_method("_EJ0", 1); s = BASEPATH stringify(MEMORY_SLOT_EJECT_METHOD); aml_append(method, aml_return(aml_call2( s, aml_name("_UID"), aml_arg(0)))); aml_append(dev, method); aml_append(sb_scope, dev); } /* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) { * If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ... } */ method = aml_method(stringify(MEMORY_SLOT_NOTIFY_METHOD), 2); for (i = 0; i < nr_mem; i++) { ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i))); aml_append(ifctx, aml_notify(aml_name("MP%.02X", i), aml_arg(1)) ); aml_append(method, ifctx); } aml_append(sb_scope, method); { Object *pci_host; PCIBus *bus = NULL; pci_host = acpi_get_i386_pci_host(); if (pci_host) { bus = PCI_HOST_BRIDGE(pci_host)->bus; } if (bus) { Aml *scope = aml_scope("PCI0"); /* Scan all PCI buses. Generate tables to support hotplug. */ build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en); if (misc->tpm_version != TPM_VERSION_UNSPEC) { dev = aml_device("ISA.TPM"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C31"))); aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); crs = aml_resource_template(); aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE, TPM_TIS_ADDR_SIZE, AML_READ_WRITE)); aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); } aml_append(sb_scope, scope); } } aml_append(ssdt, sb_scope); } /* copy AML table into ACPI tables blob and patch header there */ g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len); build_header(linker, table_data, (void *)(table_data->data + table_data->len - ssdt->buf->len), "SSDT", ssdt->buf->len, 1); free_aml_allocator(); }
1
1,039
static void release_delayed_buffers(PerThreadContext *p) { FrameThreadContext *fctx = p->parent; while (p->num_released_buffers > 0) { AVFrame *f = &p->released_buffers[--p->num_released_buffers]; pthread_mutex_lock(&fctx->buffer_mutex); free_progress(f); f->thread_opaque = NULL; f->owner->release_buffer(f->owner, f); pthread_mutex_unlock(&fctx->buffer_mutex); } }
1
1,041
double ff_lpc_calc_ref_coefs_f(LPCContext *s, const float *samples, int len, int order, double *ref) { int i; double signal = 0.0f, avg_err = 0.0f; double autoc[MAX_LPC_ORDER+1] = {0}, error[MAX_LPC_ORDER+1] = {0}; const double a = 0.5f, b = 1.0f - a; /* Apply windowing */ for (i = 0; i < len; i++) { double weight = a - b*cos((2*M_PI*i)/(len - 1)); s->windowed_samples[i] = weight*samples[i]; } s->lpc_compute_autocorr(s->windowed_samples, len, order, autoc); signal = autoc[0]; compute_ref_coefs(autoc, order, ref, error); for (i = 0; i < order; i++) avg_err = (avg_err + error[i])/2.0f; return signal/avg_err; }
1
1,043
int av_write_trailer(AVFormatContext *s) { int ret, i; for (;; ) { AVPacket pkt; ret = interleave_packet(s, &pkt, NULL, 1); if (ret < 0) //FIXME cleanup needed for ret<0 ? goto fail; if (!ret) break; ret = s->oformat->write_packet(s, &pkt); if (ret >= 0) s->streams[pkt.stream_index]->nb_frames++; av_free_packet(&pkt); if (ret < 0) goto fail; } if (s->oformat->write_trailer) ret = s->oformat->write_trailer(s); if (!(s->oformat->flags & AVFMT_NOFILE)) avio_flush(s->pb); fail: for (i = 0; i < s->nb_streams; i++) { av_freep(&s->streams[i]->priv_data); av_freep(&s->streams[i]->index_entries); } if (s->oformat->priv_class) av_opt_free(s->priv_data); av_freep(&s->priv_data); return ret; }
1
1,044
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags) { AVDictionary *m = *pm; AVDictionaryEntry *tag = av_dict_get(m, key, NULL, flags); char *oldval = NULL; if (!m) m = *pm = av_mallocz(sizeof(*m)); if (tag) { if (flags & AV_DICT_DONT_OVERWRITE) return 0; if (flags & AV_DICT_APPEND) oldval = tag->value; else av_free(tag->value); av_free(tag->key); *tag = m->elems[--m->count]; } else { AVDictionaryEntry *tmp = av_realloc(m->elems, (m->count + 1) * sizeof(*m->elems)); if (tmp) m->elems = tmp; else return AVERROR(ENOMEM); } if (value) { if (flags & AV_DICT_DONT_STRDUP_KEY) m->elems[m->count].key = key; else m->elems[m->count].key = av_strdup(key); if (flags & AV_DICT_DONT_STRDUP_VAL) { m->elems[m->count].value = value; } else if (oldval && flags & AV_DICT_APPEND) { int len = strlen(oldval) + strlen(value) + 1; if (!(oldval = av_realloc(oldval, len))) return AVERROR(ENOMEM); av_strlcat(oldval, value, len); m->elems[m->count].value = oldval; } else m->elems[m->count].value = av_strdup(value); m->count++; } if (!m->count) { av_free(m->elems); av_freep(pm); } return 0; }
1
1,045
static void s390_virtio_rng_instance_init(Object *obj) { VirtIORNGS390 *dev = VIRTIO_RNG_S390(obj); object_initialize(&dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_RNG); object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL); object_property_add_link(obj, "rng", TYPE_RNG_BACKEND, (Object **)&dev->vdev.conf.rng, OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL); }
1
1,047
static struct omap_sti_s *omap_sti_init(struct omap_target_agent_s *ta, MemoryRegion *sysmem, hwaddr channel_base, qemu_irq irq, omap_clk clk, CharDriverState *chr) { struct omap_sti_s *s = (struct omap_sti_s *) g_malloc0(sizeof(struct omap_sti_s)); s->irq = irq; omap_sti_reset(s); s->chr = chr ?: qemu_chr_new("null", "null", NULL); memory_region_init_io(&s->iomem, NULL, &omap_sti_ops, s, "omap.sti", omap_l4_region_size(ta, 0)); omap_l4_attach(ta, 0, &s->iomem); memory_region_init_io(&s->iomem_fifo, NULL, &omap_sti_fifo_ops, s, "omap.sti.fifo", 0x10000); memory_region_add_subregion(sysmem, channel_base, &s->iomem_fifo); return s; }
1
1,048
void cpu_interrupt(CPUArchState *env, int mask) { CPUState *cpu = ENV_GET_CPU(env); env->interrupt_request |= mask; cpu_unlink_tb(cpu); }
1
1,049
static inline int read_huff_channels(MLPDecodeContext *m, GetBitContext *gbp, unsigned int substr, unsigned int pos) { SubStream *s = &m->substream[substr]; unsigned int mat, channel; for (mat = 0; mat < s->num_primitive_matrices; mat++) if (s->lsb_bypass[mat]) m->bypassed_lsbs[pos + s->blockpos][mat] = get_bits1(gbp); for (channel = s->min_channel; channel <= s->max_channel; channel++) { ChannelParams *cp = &s->channel_params[channel]; int codebook = cp->codebook; int quant_step_size = s->quant_step_size[channel]; int lsb_bits = cp->huff_lsbs - quant_step_size; int result = 0; if (codebook > 0) result = get_vlc2(gbp, huff_vlc[codebook-1].table, VLC_BITS, (9 + VLC_BITS - 1) / VLC_BITS); if (result < 0) return AVERROR_INVALIDDATA; if (lsb_bits > 0) result = (result << lsb_bits) + get_bits(gbp, lsb_bits); result += cp->sign_huff_offset; result <<= quant_step_size; m->sample_buffer[pos + s->blockpos][channel] = result; } return 0; }
1
1,050
static void superh_cpu_initfn(Object *obj) { CPUState *cs = CPU(obj); SuperHCPU *cpu = SUPERH_CPU(obj); CPUSH4State *env = &cpu->env; cs->env_ptr = env; cpu_exec_init(cs, &error_abort); env->movcal_backup_tail = &(env->movcal_backup); if (tcg_enabled()) { sh4_translate_init(); } }
1
1,051
void ff_estimate_b_frame_motion(MpegEncContext * s, int mb_x, int mb_y) { MotionEstContext * const c= &s->me; const int penalty_factor= c->mb_penalty_factor; int fmin, bmin, dmin, fbmin, bimin, fimin; int type=0; const int xy = mb_y*s->mb_stride + mb_x; init_ref(c, s->new_picture.f.data, s->last_picture.f.data, s->next_picture.f.data, 16 * mb_x, 16 * mb_y, 2); get_limits(s, 16*mb_x, 16*mb_y); c->skip=0; if (s->codec_id == AV_CODEC_ID_MPEG4 && s->next_picture.mbskip_table[xy]) { int score= direct_search(s, mb_x, mb_y); //FIXME just check 0,0 score= ((unsigned)(score*score + 128*256))>>16; c->mc_mb_var_sum_temp += score; s->current_picture.mc_mb_var[mb_y*s->mb_stride + mb_x] = score; //FIXME use SSE s->mb_type[mb_y*s->mb_stride + mb_x]= CANDIDATE_MB_TYPE_DIRECT0; return; } if (s->codec_id == AV_CODEC_ID_MPEG4) dmin= direct_search(s, mb_x, mb_y); else dmin= INT_MAX; //FIXME penalty stuff for non mpeg4 c->skip=0; fmin = estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, 0, s->f_code) + 3 * penalty_factor; c->skip=0; bmin = estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, 2, s->b_code) + 2 * penalty_factor; av_dlog(s, " %d %d ", s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1]); c->skip=0; fbmin= bidir_refine(s, mb_x, mb_y) + penalty_factor; av_dlog(s, "%d %d %d %d\n", dmin, fmin, bmin, fbmin); if(s->flags & CODEC_FLAG_INTERLACED_ME){ //FIXME mb type penalty c->skip=0; c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV; fimin= interlaced_search(s, 0, s->b_field_mv_table[0], s->b_field_select_table[0], s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1], 0); c->current_mv_penalty= c->mv_penalty[s->b_code] + MAX_MV; bimin= interlaced_search(s, 2, s->b_field_mv_table[1], s->b_field_select_table[1], s->b_back_mv_table[xy][0], s->b_back_mv_table[xy][1], 0); }else fimin= bimin= INT_MAX; { int score= fmin; type = CANDIDATE_MB_TYPE_FORWARD; if (dmin <= score){ score = dmin; type = CANDIDATE_MB_TYPE_DIRECT; } if(bmin<score){ score=bmin; type= CANDIDATE_MB_TYPE_BACKWARD; } if(fbmin<score){ score=fbmin; type= CANDIDATE_MB_TYPE_BIDIR; } if(fimin<score){ score=fimin; type= CANDIDATE_MB_TYPE_FORWARD_I; } if(bimin<score){ score=bimin; type= CANDIDATE_MB_TYPE_BACKWARD_I; } score= ((unsigned)(score*score + 128*256))>>16; c->mc_mb_var_sum_temp += score; s->current_picture.mc_mb_var[mb_y*s->mb_stride + mb_x] = score; //FIXME use SSE } if(c->avctx->mb_decision > FF_MB_DECISION_SIMPLE){ type= CANDIDATE_MB_TYPE_FORWARD | CANDIDATE_MB_TYPE_BACKWARD | CANDIDATE_MB_TYPE_BIDIR | CANDIDATE_MB_TYPE_DIRECT; if(fimin < INT_MAX) type |= CANDIDATE_MB_TYPE_FORWARD_I; if(bimin < INT_MAX) type |= CANDIDATE_MB_TYPE_BACKWARD_I; if(fimin < INT_MAX && bimin < INT_MAX){ type |= CANDIDATE_MB_TYPE_BIDIR_I; } //FIXME something smarter if(dmin>256*256*16) type&= ~CANDIDATE_MB_TYPE_DIRECT; //do not try direct mode if it is invalid for this MB if(s->codec_id == AV_CODEC_ID_MPEG4 && type&CANDIDATE_MB_TYPE_DIRECT && s->flags&CODEC_FLAG_MV0 && *(uint32_t*)s->b_direct_mv_table[xy]) type |= CANDIDATE_MB_TYPE_DIRECT0; } s->mb_type[mb_y*s->mb_stride + mb_x]= type; }
1
1,052
xmit_seg(E1000State *s) { uint16_t len, *sp; unsigned int frames = s->tx.tso_frames, css, sofar, n; struct e1000_tx *tp = &s->tx; if (tp->tse && tp->cptse) { css = tp->ipcss; DBGOUT(TXSUM, "frames %d size %d ipcss %d\n", frames, tp->size, css); if (tp->ip) { // IPv4 cpu_to_be16wu((uint16_t *)(tp->data+css+2), tp->size - css); cpu_to_be16wu((uint16_t *)(tp->data+css+4), be16_to_cpup((uint16_t *)(tp->data+css+4))+frames); } else // IPv6 cpu_to_be16wu((uint16_t *)(tp->data+css+4), tp->size - css); css = tp->tucss; len = tp->size - css; DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len); if (tp->tcp) { sofar = frames * tp->mss; cpu_to_be32wu((uint32_t *)(tp->data+css+4), // seq be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar); if (tp->paylen - sofar > tp->mss) tp->data[css + 13] &= ~9; // PSH, FIN } else // UDP cpu_to_be16wu((uint16_t *)(tp->data+css+4), len); if (tp->sum_needed & E1000_TXD_POPTS_TXSM) { // add pseudo-header length before checksum calculation sp = (uint16_t *)(tp->data + tp->tucso); cpu_to_be16wu(sp, be16_to_cpup(sp) + len); } tp->tso_frames++; } if (tp->sum_needed & E1000_TXD_POPTS_TXSM) putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse); if (tp->sum_needed & E1000_TXD_POPTS_IXSM) putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse); if (tp->vlan_needed) { memmove(tp->vlan, tp->data, 4); memmove(tp->data, tp->data + 4, 8); memcpy(tp->data + 8, tp->vlan_header, 4); qemu_send_packet(&s->nic->nc, tp->vlan, tp->size + 4); } else qemu_send_packet(&s->nic->nc, tp->data, tp->size); s->mac_reg[TPT]++; s->mac_reg[GPTC]++; n = s->mac_reg[TOTL]; if ((s->mac_reg[TOTL] += s->tx.size) < n) s->mac_reg[TOTH]++; }
1
1,053
static void omap_timer_clk_setup(struct omap_mpu_timer_s *timer) { omap_clk_adduser(timer->clk, qemu_allocate_irqs(omap_timer_clk_update, timer, 1)[0]); timer->rate = omap_clk_getrate(timer->clk); }
1
1,054
static int drive_init(struct drive_opt *arg, int snapshot, QEMUMachine *machine) { char buf[128]; char file[1024]; char devname[128]; char serial[21]; const char *mediastr = ""; BlockInterfaceType type; enum { MEDIA_DISK, MEDIA_CDROM } media; int bus_id, unit_id; int cyls, heads, secs, translation; BlockDriverState *bdrv; BlockDriver *drv = NULL; int max_devs; int index; int cache; int bdrv_flags, onerror; int drives_table_idx; char *str = arg->opt; static const char * const params[] = { "bus", "unit", "if", "index", "cyls", "heads", "secs", "trans", "media", "snapshot", "file", "cache", "format", "serial", "werror", NULL }; if (check_params(buf, sizeof(buf), params, str) < 0) { fprintf(stderr, "qemu: unknown parameter '%s' in '%s'\n", buf, str); return -1; } file[0] = 0; cyls = heads = secs = 0; bus_id = 0; unit_id = -1; translation = BIOS_ATA_TRANSLATION_AUTO; index = -1; cache = 3; if (machine->use_scsi) { type = IF_SCSI; max_devs = MAX_SCSI_DEVS; pstrcpy(devname, sizeof(devname), "scsi"); } else { type = IF_IDE; max_devs = MAX_IDE_DEVS; pstrcpy(devname, sizeof(devname), "ide"); } media = MEDIA_DISK; /* extract parameters */ if (get_param_value(buf, sizeof(buf), "bus", str)) { bus_id = strtol(buf, NULL, 0); if (bus_id < 0) { fprintf(stderr, "qemu: '%s' invalid bus id\n", str); return -1; } } if (get_param_value(buf, sizeof(buf), "unit", str)) { unit_id = strtol(buf, NULL, 0); if (unit_id < 0) { fprintf(stderr, "qemu: '%s' invalid unit id\n", str); return -1; } } if (get_param_value(buf, sizeof(buf), "if", str)) { pstrcpy(devname, sizeof(devname), buf); if (!strcmp(buf, "ide")) { type = IF_IDE; max_devs = MAX_IDE_DEVS; } else if (!strcmp(buf, "scsi")) { type = IF_SCSI; max_devs = MAX_SCSI_DEVS; } else if (!strcmp(buf, "floppy")) { type = IF_FLOPPY; max_devs = 0; } else if (!strcmp(buf, "pflash")) { type = IF_PFLASH; max_devs = 0; } else if (!strcmp(buf, "mtd")) { type = IF_MTD; max_devs = 0; } else if (!strcmp(buf, "sd")) { type = IF_SD; max_devs = 0; } else if (!strcmp(buf, "virtio")) { type = IF_VIRTIO; max_devs = 0; } else { fprintf(stderr, "qemu: '%s' unsupported bus type '%s'\n", str, buf); return -1; } } if (get_param_value(buf, sizeof(buf), "index", str)) { index = strtol(buf, NULL, 0); if (index < 0) { fprintf(stderr, "qemu: '%s' invalid index\n", str); return -1; } } if (get_param_value(buf, sizeof(buf), "cyls", str)) { cyls = strtol(buf, NULL, 0); } if (get_param_value(buf, sizeof(buf), "heads", str)) { heads = strtol(buf, NULL, 0); } if (get_param_value(buf, sizeof(buf), "secs", str)) { secs = strtol(buf, NULL, 0); } if (cyls || heads || secs) { if (cyls < 1 || cyls > 16383) { fprintf(stderr, "qemu: '%s' invalid physical cyls number\n", str); return -1; } if (heads < 1 || heads > 16) { fprintf(stderr, "qemu: '%s' invalid physical heads number\n", str); return -1; } if (secs < 1 || secs > 63) { fprintf(stderr, "qemu: '%s' invalid physical secs number\n", str); return -1; } } if (get_param_value(buf, sizeof(buf), "trans", str)) { if (!cyls) { fprintf(stderr, "qemu: '%s' trans must be used with cyls,heads and secs\n", str); return -1; } if (!strcmp(buf, "none")) translation = BIOS_ATA_TRANSLATION_NONE; else if (!strcmp(buf, "lba")) translation = BIOS_ATA_TRANSLATION_LBA; else if (!strcmp(buf, "auto")) translation = BIOS_ATA_TRANSLATION_AUTO; else { fprintf(stderr, "qemu: '%s' invalid translation type\n", str); return -1; } } if (get_param_value(buf, sizeof(buf), "media", str)) { if (!strcmp(buf, "disk")) { media = MEDIA_DISK; } else if (!strcmp(buf, "cdrom")) { if (cyls || secs || heads) { fprintf(stderr, "qemu: '%s' invalid physical CHS format\n", str); return -1; } media = MEDIA_CDROM; } else { fprintf(stderr, "qemu: '%s' invalid media\n", str); return -1; } } if (get_param_value(buf, sizeof(buf), "snapshot", str)) { if (!strcmp(buf, "on")) snapshot = 1; else if (!strcmp(buf, "off")) snapshot = 0; else { fprintf(stderr, "qemu: '%s' invalid snapshot option\n", str); return -1; } } if (get_param_value(buf, sizeof(buf), "cache", str)) { if (!strcmp(buf, "off") || !strcmp(buf, "none")) cache = 0; else if (!strcmp(buf, "writethrough")) cache = 1; else if (!strcmp(buf, "writeback")) cache = 2; else { fprintf(stderr, "qemu: invalid cache option\n"); return -1; } } if (get_param_value(buf, sizeof(buf), "format", str)) { if (strcmp(buf, "?") == 0) { fprintf(stderr, "qemu: Supported formats:"); bdrv_iterate_format(bdrv_format_print, NULL); fprintf(stderr, "\n"); return -1; } drv = bdrv_find_format(buf); if (!drv) { fprintf(stderr, "qemu: '%s' invalid format\n", buf); return -1; } } if (arg->file == NULL) get_param_value(file, sizeof(file), "file", str); else pstrcpy(file, sizeof(file), arg->file); if (!get_param_value(serial, sizeof(serial), "serial", str)) memset(serial, 0, sizeof(serial)); onerror = BLOCK_ERR_REPORT; if (get_param_value(buf, sizeof(serial), "werror", str)) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO) { fprintf(stderr, "werror is no supported by this format\n"); return -1; } if (!strcmp(buf, "ignore")) onerror = BLOCK_ERR_IGNORE; else if (!strcmp(buf, "enospc")) onerror = BLOCK_ERR_STOP_ENOSPC; else if (!strcmp(buf, "stop")) onerror = BLOCK_ERR_STOP_ANY; else if (!strcmp(buf, "report")) onerror = BLOCK_ERR_REPORT; else { fprintf(stderr, "qemu: '%s' invalid write error action\n", buf); return -1; } } /* compute bus and unit according index */ if (index != -1) { if (bus_id != 0 || unit_id != -1) { fprintf(stderr, "qemu: '%s' index cannot be used with bus and unit\n", str); return -1; } if (max_devs == 0) { unit_id = index; bus_id = 0; } else { unit_id = index % max_devs; bus_id = index / max_devs; } } /* if user doesn't specify a unit_id, * try to find the first free */ if (unit_id == -1) { unit_id = 0; while (drive_get_index(type, bus_id, unit_id) != -1) { unit_id++; if (max_devs && unit_id >= max_devs) { unit_id -= max_devs; bus_id++; } } } /* check unit id */ if (max_devs && unit_id >= max_devs) { fprintf(stderr, "qemu: '%s' unit %d too big (max is %d)\n", str, unit_id, max_devs - 1); return -1; } /* * ignore multiple definitions */ if (drive_get_index(type, bus_id, unit_id) != -1) return 0; /* init */ if (type == IF_IDE || type == IF_SCSI) mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd"; if (max_devs) snprintf(buf, sizeof(buf), "%s%i%s%i", devname, bus_id, mediastr, unit_id); else snprintf(buf, sizeof(buf), "%s%s%i", devname, mediastr, unit_id); bdrv = bdrv_new(buf); drives_table_idx = drive_get_free_idx(); drives_table[drives_table_idx].bdrv = bdrv; drives_table[drives_table_idx].type = type; drives_table[drives_table_idx].bus = bus_id; drives_table[drives_table_idx].unit = unit_id; drives_table[drives_table_idx].onerror = onerror; drives_table[drives_table_idx].drive_opt_idx = arg - drives_opt; strncpy(drives_table[nb_drives].serial, serial, sizeof(serial)); nb_drives++; switch(type) { case IF_IDE: case IF_SCSI: switch(media) { case MEDIA_DISK: if (cyls != 0) { bdrv_set_geometry_hint(bdrv, cyls, heads, secs); bdrv_set_translation_hint(bdrv, translation); } break; case MEDIA_CDROM: bdrv_set_type_hint(bdrv, BDRV_TYPE_CDROM); break; } break; case IF_SD: /* FIXME: This isn't really a floppy, but it's a reasonable approximation. */ case IF_FLOPPY: bdrv_set_type_hint(bdrv, BDRV_TYPE_FLOPPY); break; case IF_PFLASH: case IF_MTD: case IF_VIRTIO: break; } if (!file[0]) return 0; bdrv_flags = 0; if (snapshot) { bdrv_flags |= BDRV_O_SNAPSHOT; cache = 2; /* always use write-back with snapshot */ } if (cache == 0) /* no caching */ bdrv_flags |= BDRV_O_NOCACHE; else if (cache == 2) /* write-back */ bdrv_flags |= BDRV_O_CACHE_WB; else if (cache == 3) /* not specified */ bdrv_flags |= BDRV_O_CACHE_DEF; if (bdrv_open2(bdrv, file, bdrv_flags, drv) < 0 || qemu_key_check(bdrv, file)) { fprintf(stderr, "qemu: could not open disk image %s\n", file); return -1; } return 0; }
1
1,055
static void qvirtio_pci_set_status(QVirtioDevice *d, uint8_t status) { QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d; qpci_io_writeb(dev->pdev, dev->addr + VIRTIO_PCI_STATUS, status); }
1
1,057
static void test_acpi_piix4_tcg_cphp(void) { test_data data; memset(&data, 0, sizeof(data)); data.machine = MACHINE_PC; data.variant = ".cphp"; test_acpi_one("-smp 2,cores=3,sockets=2,maxcpus=6", &data); free_test_data(&data); }
1
1,058
void qmp_block_set_io_throttle(const char *device, int64_t bps, int64_t bps_rd, int64_t bps_wr, int64_t iops, int64_t iops_rd, int64_t iops_wr, bool has_bps_max, int64_t bps_max, bool has_bps_rd_max, int64_t bps_rd_max, bool has_bps_wr_max, int64_t bps_wr_max, bool has_iops_max, int64_t iops_max, bool has_iops_rd_max, int64_t iops_rd_max, bool has_iops_wr_max, int64_t iops_wr_max, bool has_iops_size, int64_t iops_size, Error **errp) { ThrottleConfig cfg; BlockDriverState *bs; bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } memset(&cfg, 0, sizeof(cfg)); cfg.buckets[THROTTLE_BPS_TOTAL].avg = bps; cfg.buckets[THROTTLE_BPS_READ].avg = bps_rd; cfg.buckets[THROTTLE_BPS_WRITE].avg = bps_wr; cfg.buckets[THROTTLE_OPS_TOTAL].avg = iops; cfg.buckets[THROTTLE_OPS_READ].avg = iops_rd; cfg.buckets[THROTTLE_OPS_WRITE].avg = iops_wr; if (has_bps_max) { cfg.buckets[THROTTLE_BPS_TOTAL].max = bps_max; } if (has_bps_rd_max) { cfg.buckets[THROTTLE_BPS_READ].max = bps_rd_max; } if (has_bps_wr_max) { cfg.buckets[THROTTLE_BPS_WRITE].max = bps_wr_max; } if (has_iops_max) { cfg.buckets[THROTTLE_OPS_TOTAL].max = iops_max; } if (has_iops_rd_max) { cfg.buckets[THROTTLE_OPS_READ].max = iops_rd_max; } if (has_iops_wr_max) { cfg.buckets[THROTTLE_OPS_WRITE].max = iops_wr_max; } if (has_iops_size) { cfg.op_size = iops_size; } if (!check_throttle_config(&cfg, errp)) { return; } aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); if (!bs->io_limits_enabled && throttle_enabled(&cfg)) { bdrv_io_limits_enable(bs); } else if (bs->io_limits_enabled && !throttle_enabled(&cfg)) { bdrv_io_limits_disable(bs); } if (bs->io_limits_enabled) { bdrv_set_io_limits(bs, &cfg); } aio_context_release(aio_context); }
0
1,061
static void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUM68KState *env) { struct target_sigframe *frame; abi_ulong frame_addr; abi_ulong retcode_addr; abi_ulong sc_addr; int err = 0; int i; frame_addr = get_sigframe(ka, env, sizeof *frame); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) goto give_sigsegv; __put_user(sig, &frame->sig); sc_addr = frame_addr + offsetof(struct target_sigframe, sc); __put_user(sc_addr, &frame->psc); err |= setup_sigcontext(&frame->sc, env, set->sig[0]); if (err) goto give_sigsegv; for(i = 1; i < TARGET_NSIG_WORDS; i++) { if (__put_user(set->sig[i], &frame->extramask[i - 1])) goto give_sigsegv; } /* Set up to return from userspace. */ retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); __put_user(retcode_addr, &frame->pretcode); /* moveq #,d0; trap #0 */ __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), (long *)(frame->retcode)); if (err) goto give_sigsegv; /* Set up to return from userspace */ env->aregs[7] = frame_addr; env->pc = ka->_sa_handler; unlock_user_struct(frame, frame_addr, 1); return; give_sigsegv: unlock_user_struct(frame, frame_addr, 1); force_sig(TARGET_SIGSEGV); }
0
1,062
do_socket_read(GIOChannel *source, GIOCondition condition, gpointer data) { int rv; int dwSendLength; int dwRecvLength; uint8_t pbRecvBuffer[APDUBufSize]; static uint8_t pbSendBuffer[APDUBufSize]; VReaderStatus reader_status; VReader *reader = NULL; static VSCMsgHeader mhHeader; VSCMsgError *error_msg; GError *err = NULL; static gchar *buf; static gsize br, to_read; static int state = STATE_HEADER; if (state == STATE_HEADER && to_read == 0) { buf = (gchar *)&mhHeader; to_read = sizeof(mhHeader); } if (to_read > 0) { g_io_channel_read_chars(source, (gchar *)buf, to_read, &br, &err); if (err != NULL) { g_error("error while reading: %s", err->message); } buf += br; to_read -= br; if (to_read != 0) { return TRUE; } } if (state == STATE_HEADER) { mhHeader.type = ntohl(mhHeader.type); mhHeader.reader_id = ntohl(mhHeader.reader_id); mhHeader.length = ntohl(mhHeader.length); if (verbose) { printf("Header: type=%d, reader_id=%u length=%d (0x%x)\n", mhHeader.type, mhHeader.reader_id, mhHeader.length, mhHeader.length); } switch (mhHeader.type) { case VSC_APDU: case VSC_Flush: case VSC_Error: case VSC_Init: buf = (gchar *)pbSendBuffer; to_read = mhHeader.length; state = STATE_MESSAGE; return TRUE; default: fprintf(stderr, "Unexpected message of type 0x%X\n", mhHeader.type); return FALSE; } } if (state == STATE_MESSAGE) { switch (mhHeader.type) { case VSC_APDU: if (verbose) { printf(" recv APDU: "); print_byte_array(pbSendBuffer, mhHeader.length); } /* Transmit received APDU */ dwSendLength = mhHeader.length; dwRecvLength = sizeof(pbRecvBuffer); reader = vreader_get_reader_by_id(mhHeader.reader_id); reader_status = vreader_xfr_bytes(reader, pbSendBuffer, dwSendLength, pbRecvBuffer, &dwRecvLength); if (reader_status == VREADER_OK) { mhHeader.length = dwRecvLength; if (verbose) { printf(" send response: "); print_byte_array(pbRecvBuffer, mhHeader.length); } send_msg(VSC_APDU, mhHeader.reader_id, pbRecvBuffer, dwRecvLength); } else { rv = reader_status; /* warning: not meaningful */ send_msg(VSC_Error, mhHeader.reader_id, &rv, sizeof(uint32_t)); } vreader_free(reader); reader = NULL; /* we've freed it, don't use it by accident again */ break; case VSC_Flush: /* TODO: actually flush */ send_msg(VSC_FlushComplete, mhHeader.reader_id, NULL, 0); break; case VSC_Error: error_msg = (VSCMsgError *) pbSendBuffer; if (error_msg->code == VSC_SUCCESS) { qemu_mutex_lock(&pending_reader_lock); if (pending_reader) { vreader_set_id(pending_reader, mhHeader.reader_id); vreader_free(pending_reader); pending_reader = NULL; qemu_cond_signal(&pending_reader_condition); } qemu_mutex_unlock(&pending_reader_lock); break; } printf("warning: qemu refused to add reader\n"); if (error_msg->code == VSC_CANNOT_ADD_MORE_READERS) { /* clear pending reader, qemu can't handle any more */ qemu_mutex_lock(&pending_reader_lock); if (pending_reader) { pending_reader = NULL; /* make sure the event loop doesn't hang */ qemu_cond_signal(&pending_reader_condition); } qemu_mutex_unlock(&pending_reader_lock); } break; case VSC_Init: if (on_host_init(&mhHeader, (VSCMsgInit *)pbSendBuffer) < 0) { return FALSE; } break; default: g_assert_not_reached(); return FALSE; } state = STATE_HEADER; } return TRUE; }
0
1,063
static size_t buffered_get_rate_limit(void *opaque) { QEMUFileBuffered *s = opaque; return s->xfer_limit; }
0
1,066
static void check_time(int wiggle) { struct tm start, date[4], end; struct tm *datep; time_t ts; /* * This check assumes a few things. First, we cannot guarantee that we get * a consistent reading from the wall clock because we may hit an edge of * the clock while reading. To work around this, we read four clock readings * such that at least two of them should match. We need to assume that one * reading is corrupt so we need four readings to ensure that we have at * least two consecutive identical readings * * It's also possible that we'll cross an edge reading the host clock so * simply check to make sure that the clock reading is within the period of * when we expect it to be. */ ts = time(NULL); gmtime_r(&ts, &start); cmos_get_date_time(&date[0]); cmos_get_date_time(&date[1]); cmos_get_date_time(&date[2]); cmos_get_date_time(&date[3]); ts = time(NULL); gmtime_r(&ts, &end); if (tm_cmp(&date[0], &date[1]) == 0) { datep = &date[0]; } else if (tm_cmp(&date[1], &date[2]) == 0) { datep = &date[1]; } else if (tm_cmp(&date[2], &date[3]) == 0) { datep = &date[2]; } else { g_assert_not_reached(); } if (!(tm_cmp(&start, datep) <= 0 && tm_cmp(datep, &end) <= 0)) { time_t t, s; start.tm_isdst = datep->tm_isdst; t = mktime(datep); s = mktime(&start); if (t < s) { g_test_message("RTC is %ld second(s) behind wall-clock\n", (s - t)); } else { g_test_message("RTC is %ld second(s) ahead of wall-clock\n", (t - s)); } g_assert_cmpint(ABS(t - s), <=, wiggle); } }
0
1,067
aio_ctx_finalize(GSource *source) { AioContext *ctx = (AioContext *) source; thread_pool_free(ctx->thread_pool); #ifdef CONFIG_LINUX_AIO if (ctx->linux_aio) { laio_detach_aio_context(ctx->linux_aio, ctx); laio_cleanup(ctx->linux_aio); ctx->linux_aio = NULL; } #endif qemu_lockcnt_lock(&ctx->list_lock); assert(!qemu_lockcnt_count(&ctx->list_lock)); while (ctx->first_bh) { QEMUBH *next = ctx->first_bh->next; /* qemu_bh_delete() must have been called on BHs in this AioContext */ assert(ctx->first_bh->deleted); g_free(ctx->first_bh); ctx->first_bh = next; } qemu_lockcnt_unlock(&ctx->list_lock); aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL); event_notifier_cleanup(&ctx->notifier); qemu_rec_mutex_destroy(&ctx->lock); qemu_lockcnt_destroy(&ctx->list_lock); timerlistgroup_deinit(&ctx->tlg); }
0
1,068
static void bdrv_stats_iter(QObject *data, void *opaque) { QDict *qdict; Monitor *mon = opaque; qdict = qobject_to_qdict(data); monitor_printf(mon, "%s:", qdict_get_str(qdict, "device")); qdict = qobject_to_qdict(qdict_get(qdict, "stats")); monitor_printf(mon, " rd_bytes=%" PRId64 " wr_bytes=%" PRId64 " rd_operations=%" PRId64 " wr_operations=%" PRId64 " flush_operations=%" PRId64 "\n", qdict_get_int(qdict, "rd_bytes"), qdict_get_int(qdict, "wr_bytes"), qdict_get_int(qdict, "rd_operations"), qdict_get_int(qdict, "wr_operations"), qdict_get_int(qdict, "flush_operations")); }
0
1,069
static int nbd_send_negotiate(NBDClient *client) { int csock = client->sock; char buf[8 + 8 + 8 + 128]; int rc; const int myflags = (NBD_FLAG_HAS_FLAGS | NBD_FLAG_SEND_TRIM | NBD_FLAG_SEND_FLUSH | NBD_FLAG_SEND_FUA); /* Negotiation header without options: [ 0 .. 7] passwd ("NBDMAGIC") [ 8 .. 15] magic (NBD_CLIENT_MAGIC) [16 .. 23] size [24 .. 25] server flags (0) [26 .. 27] export flags [28 .. 151] reserved (0) Negotiation header with options, part 1: [ 0 .. 7] passwd ("NBDMAGIC") [ 8 .. 15] magic (NBD_OPTS_MAGIC) [16 .. 17] server flags (0) part 2 (after options are sent): [18 .. 25] size [26 .. 27] export flags [28 .. 151] reserved (0) */ qemu_set_block(csock); rc = -EINVAL; TRACE("Beginning negotiation."); memset(buf, 0, sizeof(buf)); memcpy(buf, "NBDMAGIC", 8); if (client->exp) { assert ((client->exp->nbdflags & ~65535) == 0); cpu_to_be64w((uint64_t*)(buf + 8), NBD_CLIENT_MAGIC); cpu_to_be64w((uint64_t*)(buf + 16), client->exp->size); cpu_to_be16w((uint16_t*)(buf + 26), client->exp->nbdflags | myflags); } else { cpu_to_be64w((uint64_t*)(buf + 8), NBD_OPTS_MAGIC); cpu_to_be16w((uint16_t *)(buf + 16), NBD_FLAG_FIXED_NEWSTYLE); } if (client->exp) { if (write_sync(csock, buf, sizeof(buf)) != sizeof(buf)) { LOG("write failed"); goto fail; } } else { if (write_sync(csock, buf, 18) != 18) { LOG("write failed"); goto fail; } rc = nbd_receive_options(client); if (rc != 0) { LOG("option negotiation failed"); goto fail; } assert ((client->exp->nbdflags & ~65535) == 0); cpu_to_be64w((uint64_t*)(buf + 18), client->exp->size); cpu_to_be16w((uint16_t*)(buf + 26), client->exp->nbdflags | myflags); if (write_sync(csock, buf + 18, sizeof(buf) - 18) != sizeof(buf) - 18) { LOG("write failed"); goto fail; } } TRACE("Negotiation succeeded."); rc = 0; fail: qemu_set_nonblock(csock); return rc; }
0
1,070
void tcg_gen_mb(TCGBar mb_type) { if (parallel_cpus) { tcg_gen_op1(INDEX_op_mb, mb_type); } }
0
1,071
void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num) { VirtQueueElement *elem; size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0])); size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]); size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]); size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0])); size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]); size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]); assert(sz >= sizeof(VirtQueueElement)); elem = g_malloc(out_sg_end); elem->out_num = out_num; elem->in_num = in_num; elem->in_addr = (void *)elem + in_addr_ofs; elem->out_addr = (void *)elem + out_addr_ofs; elem->in_sg = (void *)elem + in_sg_ofs; elem->out_sg = (void *)elem + out_sg_ofs; return elem; }
0
1,072
static int usb_serial_initfn(USBDevice *dev) { USBSerialState *s = DO_UPCAST(USBSerialState, dev, dev); s->dev.speed = USB_SPEED_FULL; if (!s->cs) { error_report("Property chardev is required"); return -1; } qemu_chr_add_handlers(s->cs, usb_serial_can_read, usb_serial_read, usb_serial_event, s); usb_serial_handle_reset(dev); return 0; }
0
1,073
DeviceState *nand_init(BlockDriverState *bdrv, int manf_id, int chip_id) { DeviceState *dev; if (nand_flash_ids[chip_id].size == 0) { hw_error("%s: Unsupported NAND chip ID.\n", __FUNCTION__); } dev = DEVICE(object_new(TYPE_NAND)); qdev_prop_set_uint8(dev, "manufacturer_id", manf_id); qdev_prop_set_uint8(dev, "chip_id", chip_id); if (bdrv) { qdev_prop_set_drive_nofail(dev, "drive", bdrv); } qdev_init_nofail(dev); return dev; }
0
1,074
static void CALLBACK host_alarm_handler(UINT uTimerID, UINT uMsg, DWORD_PTR dwUser, DWORD_PTR dw1, DWORD_PTR dw2) #else static void host_alarm_handler(int host_signum) #endif { #if 0 #define DISP_FREQ 1000 { static int64_t delta_min = INT64_MAX; static int64_t delta_max, delta_cum, last_clock, delta, ti; static int count; ti = qemu_get_clock(vm_clock); if (last_clock != 0) { delta = ti - last_clock; if (delta < delta_min) delta_min = delta; if (delta > delta_max) delta_max = delta; delta_cum += delta; if (++count == DISP_FREQ) { printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n", muldiv64(delta_min, 1000000, ticks_per_sec), muldiv64(delta_max, 1000000, ticks_per_sec), muldiv64(delta_cum, 1000000 / DISP_FREQ, ticks_per_sec), (double)ticks_per_sec / ((double)delta_cum / DISP_FREQ)); count = 0; delta_min = INT64_MAX; delta_max = 0; delta_cum = 0; } } last_clock = ti; } #endif if (alarm_has_dynticks(alarm_timer) || (!use_icount && qemu_timer_expired(active_timers[QEMU_TIMER_VIRTUAL], qemu_get_clock(vm_clock))) || qemu_timer_expired(active_timers[QEMU_TIMER_REALTIME], qemu_get_clock(rt_clock))) { qemu_event_increment(); alarm_timer->flags |= ALARM_FLAG_EXPIRED; #ifndef CONFIG_IOTHREAD if (next_cpu) { /* stop the currently executing cpu because a timer occured */ cpu_exit(next_cpu); #ifdef CONFIG_KQEMU if (next_cpu->kqemu_enabled) { kqemu_cpu_interrupt(next_cpu); } #endif } #endif timer_alarm_pending = 1; qemu_notify_event(); } }
0
1,077
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, int access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, target_ulong *page_size_ptr, uint32_t *fsr) { ARMCPU *cpu = arm_env_get_cpu(env); CPUState *cs = CPU(cpu); /* Read an LPAE long-descriptor translation table. */ MMUFaultType fault_type = translation_fault; uint32_t level = 1; uint32_t epd = 0; int32_t t0sz, t1sz; uint32_t tg; uint64_t ttbr; int ttbr_select; hwaddr descaddr, descmask; uint32_t tableattrs; target_ulong page_size; uint32_t attrs; int32_t stride = 9; int32_t va_size = 32; int inputsize; int32_t tbi = 0; TCR *tcr = regime_tcr(env, mmu_idx); int ap, ns, xn, pxn; uint32_t el = regime_el(env, mmu_idx); bool ttbr1_valid = true; /* TODO: * This code does not handle the different format TCR for VTCR_EL2. * This code also does not support shareability levels. * Attribute and permission bit handling should also be checked when adding * support for those page table walks. */ if (arm_el_is_aa64(env, el)) { va_size = 64; if (el > 1) { if (mmu_idx != ARMMMUIdx_S2NS) { tbi = extract64(tcr->raw_tcr, 20, 1); } } else { if (extract64(address, 55, 1)) { tbi = extract64(tcr->raw_tcr, 38, 1); } else { tbi = extract64(tcr->raw_tcr, 37, 1); } } tbi *= 8; /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it * invalid. */ if (el > 1) { ttbr1_valid = false; } } else { /* There is no TTBR1 for EL2 */ if (el == 2) { ttbr1_valid = false; } } /* Determine whether this address is in the region controlled by * TTBR0 or TTBR1 (or if it is in neither region and should fault). * This is a Non-secure PL0/1 stage 1 translation, so controlled by * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32: */ if (va_size == 64) { /* AArch64 translation. */ t0sz = extract32(tcr->raw_tcr, 0, 6); t0sz = MIN(t0sz, 39); t0sz = MAX(t0sz, 16); } else if (mmu_idx != ARMMMUIdx_S2NS) { /* AArch32 stage 1 translation. */ t0sz = extract32(tcr->raw_tcr, 0, 3); } else { /* AArch32 stage 2 translation. */ bool sext = extract32(tcr->raw_tcr, 4, 1); bool sign = extract32(tcr->raw_tcr, 3, 1); t0sz = sextract32(tcr->raw_tcr, 0, 4); /* If the sign-extend bit is not the same as t0sz[3], the result * is unpredictable. Flag this as a guest error. */ if (sign != sext) { qemu_log_mask(LOG_GUEST_ERROR, "AArch32: VTCR.S / VTCR.T0SZ[3] missmatch\n"); } } t1sz = extract32(tcr->raw_tcr, 16, 6); if (va_size == 64) { t1sz = MIN(t1sz, 39); t1sz = MAX(t1sz, 16); } if (t0sz && !extract64(address, va_size - t0sz, t0sz - tbi)) { /* there is a ttbr0 region and we are in it (high bits all zero) */ ttbr_select = 0; } else if (ttbr1_valid && t1sz && !extract64(~address, va_size - t1sz, t1sz - tbi)) { /* there is a ttbr1 region and we are in it (high bits all one) */ ttbr_select = 1; } else if (!t0sz) { /* ttbr0 region is "everything not in the ttbr1 region" */ ttbr_select = 0; } else if (!t1sz && ttbr1_valid) { /* ttbr1 region is "everything not in the ttbr0 region" */ ttbr_select = 1; } else { /* in the gap between the two regions, this is a Translation fault */ fault_type = translation_fault; goto do_fault; } /* Note that QEMU ignores shareability and cacheability attributes, * so we don't need to do anything with the SH, ORGN, IRGN fields * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently * implement any ASID-like capability so we can ignore it (instead * we will always flush the TLB any time the ASID is changed). */ if (ttbr_select == 0) { ttbr = regime_ttbr(env, mmu_idx, 0); if (el < 2) { epd = extract32(tcr->raw_tcr, 7, 1); } inputsize = va_size - t0sz; tg = extract32(tcr->raw_tcr, 14, 2); if (tg == 1) { /* 64KB pages */ stride = 13; } if (tg == 2) { /* 16KB pages */ stride = 11; } } else { /* We should only be here if TTBR1 is valid */ assert(ttbr1_valid); ttbr = regime_ttbr(env, mmu_idx, 1); epd = extract32(tcr->raw_tcr, 23, 1); inputsize = va_size - t1sz; tg = extract32(tcr->raw_tcr, 30, 2); if (tg == 3) { /* 64KB pages */ stride = 13; } if (tg == 1) { /* 16KB pages */ stride = 11; } } /* Here we should have set up all the parameters for the translation: * va_size, inputsize, ttbr, epd, stride, tbi */ if (epd) { /* Translation table walk disabled => Translation fault on TLB miss * Note: This is always 0 on 64-bit EL2 and EL3. */ goto do_fault; } if (mmu_idx != ARMMMUIdx_S2NS) { /* The starting level depends on the virtual address size (which can * be up to 48 bits) and the translation granule size. It indicates * the number of strides (stride bits at a time) needed to * consume the bits of the input address. In the pseudocode this is: * level = 4 - RoundUp((inputsize - grainsize) / stride) * where their 'inputsize' is our 'inputsize', 'grainsize' is * our 'stride + 3' and 'stride' is our 'stride'. * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: * = 4 - (inputsize - stride - 3 + stride - 1) / stride * = 4 - (inputsize - 4) / stride; */ level = 4 - (inputsize - 4) / stride; } else { /* For stage 2 translations the starting level is specified by the * VTCR_EL2.SL0 field (whose interpretation depends on the page size) */ int startlevel = extract32(tcr->raw_tcr, 6, 2); bool ok; if (va_size == 32 || stride == 9) { /* AArch32 or 4KB pages */ level = 2 - startlevel; } else { /* 16KB or 64KB pages */ level = 3 - startlevel; } /* Check that the starting level is valid. */ ok = check_s2_startlevel(cpu, va_size == 64, level, inputsize, stride); if (!ok) { /* AArch64 reports these as level 0 faults. * AArch32 reports these as level 1 faults. */ level = va_size == 64 ? 0 : 1; fault_type = translation_fault; goto do_fault; } } /* Clear the vaddr bits which aren't part of the within-region address, * so that we don't have to special case things when calculating the * first descriptor address. */ if (va_size != inputsize) { address &= (1ULL << inputsize) - 1; } descmask = (1ULL << (stride + 3)) - 1; /* Now we can extract the actual base address from the TTBR */ descaddr = extract64(ttbr, 0, 48); descaddr &= ~((1ULL << (inputsize - (stride * (4 - level)))) - 1); /* Secure accesses start with the page table in secure memory and * can be downgraded to non-secure at any step. Non-secure accesses * remain non-secure. We implement this by just ORing in the NSTable/NS * bits at each step. */ tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); for (;;) { uint64_t descriptor; bool nstable; descaddr |= (address >> (stride * (4 - level))) & descmask; descaddr &= ~7ULL; nstable = extract32(tableattrs, 4, 1); descriptor = arm_ldq_ptw(cs, descaddr, !nstable); if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) { /* Invalid, or the Reserved level 3 encoding */ goto do_fault; } descaddr = descriptor & 0xfffffff000ULL; if ((descriptor & 2) && (level < 3)) { /* Table entry. The top five bits are attributes which may * propagate down through lower levels of the table (and * which are all arranged so that 0 means "no effect", so * we can gather them up by ORing in the bits at each level). */ tableattrs |= extract64(descriptor, 59, 5); level++; continue; } /* Block entry at level 1 or 2, or page entry at level 3. * These are basically the same thing, although the number * of bits we pull in from the vaddr varies. */ page_size = (1ULL << ((stride * (4 - level)) + 3)); descaddr |= (address & (page_size - 1)); /* Extract attributes from the descriptor and merge with table attrs */ attrs = extract64(descriptor, 2, 10) | (extract64(descriptor, 52, 12) << 10); attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */ /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 * means "force PL1 access only", which means forcing AP[1] to 0. */ if (extract32(tableattrs, 2, 1)) { attrs &= ~(1 << 4); } attrs |= nstable << 3; /* NS */ break; } /* Here descaddr is the final physical address, and attributes * are all in attrs. */ fault_type = access_fault; if ((attrs & (1 << 8)) == 0) { /* Access flag */ goto do_fault; } ap = extract32(attrs, 4, 2); ns = extract32(attrs, 3, 1); xn = extract32(attrs, 12, 1); pxn = extract32(attrs, 11, 1); *prot = get_S1prot(env, mmu_idx, va_size == 64, ap, ns, xn, pxn); fault_type = permission_fault; if (!(*prot & (1 << access_type))) { goto do_fault; } if (ns) { /* The NS bit will (as required by the architecture) have no effect if * the CPU doesn't support TZ or this is a non-secure translation * regime, because the attribute will already be non-secure. */ txattrs->secure = false; } *phys_ptr = descaddr; *page_size_ptr = page_size; return false; do_fault: /* Long-descriptor format IFSR/DFSR value */ *fsr = (1 << 9) | (fault_type << 2) | level; return true; }
0
1,078
static int openfile(char *name, int flags, int growable) { if (bs) { fprintf(stderr, "file open already, try 'help close'\n"); return 1; } bs = bdrv_new("hda"); if (!bs) return 1; if (growable) { flags |= BDRV_O_FILE; } if (bdrv_open(bs, name, flags) == -1) { fprintf(stderr, "%s: can't open device %s\n", progname, name); bs = NULL; return 1; } if (growable) { bs->growable = 1; } return 0; }
0
1,079
static void visit_nested_struct(Visitor *v, void **native, Error **errp) { visit_type_UserDefNested(v, (UserDefNested **)native, NULL, errp); }
0
1,080
static uint64_t bmdma_read(void *opaque, target_phys_addr_t addr, unsigned size) { BMDMAState *bm = opaque; uint32_t val; if (size != 1) { return ((uint64_t)1 << (size * 8)) - 1; } switch(addr & 3) { case 0: val = bm->cmd; break; case 2: val = bm->status; break; default: val = 0xff; break; } #ifdef DEBUG_IDE printf("bmdma: readb 0x%02x : 0x%02x\n", (uint8_t)addr, val); #endif return val; }
0
1,081
static void palmte_init(MachineState *machine) { const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; MemoryRegion *address_space_mem = get_system_memory(); struct omap_mpu_state_s *mpu; int flash_size = 0x00800000; int sdram_size = palmte_binfo.ram_size; static uint32_t cs0val = 0xffffffff; static uint32_t cs1val = 0x0000e1a0; static uint32_t cs2val = 0x0000e1a0; static uint32_t cs3val = 0xe1a0e1a0; int rom_size, rom_loaded = 0; MemoryRegion *flash = g_new(MemoryRegion, 1); MemoryRegion *cs = g_new(MemoryRegion, 4); mpu = omap310_mpu_init(address_space_mem, sdram_size, machine->cpu_type); /* External Flash (EMIFS) */ memory_region_init_ram(flash, NULL, "palmte.flash", flash_size, &error_fatal); memory_region_set_readonly(flash, true); memory_region_add_subregion(address_space_mem, OMAP_CS0_BASE, flash); memory_region_init_io(&cs[0], NULL, &static_ops, &cs0val, "palmte-cs0", OMAP_CS0_SIZE - flash_size); memory_region_add_subregion(address_space_mem, OMAP_CS0_BASE + flash_size, &cs[0]); memory_region_init_io(&cs[1], NULL, &static_ops, &cs1val, "palmte-cs1", OMAP_CS1_SIZE); memory_region_add_subregion(address_space_mem, OMAP_CS1_BASE, &cs[1]); memory_region_init_io(&cs[2], NULL, &static_ops, &cs2val, "palmte-cs2", OMAP_CS2_SIZE); memory_region_add_subregion(address_space_mem, OMAP_CS2_BASE, &cs[2]); memory_region_init_io(&cs[3], NULL, &static_ops, &cs3val, "palmte-cs3", OMAP_CS3_SIZE); memory_region_add_subregion(address_space_mem, OMAP_CS3_BASE, &cs[3]); palmte_microwire_setup(mpu); qemu_add_kbd_event_handler(palmte_button_event, mpu); palmte_gpio_setup(mpu); /* Setup initial (reset) machine state */ if (nb_option_roms) { rom_size = get_image_size(option_rom[0].name); if (rom_size > flash_size) { fprintf(stderr, "%s: ROM image too big (%x > %x)\n", __FUNCTION__, rom_size, flash_size); rom_size = 0; } if (rom_size > 0) { rom_size = load_image_targphys(option_rom[0].name, OMAP_CS0_BASE, flash_size); rom_loaded = 1; } if (rom_size < 0) { fprintf(stderr, "%s: error loading '%s'\n", __FUNCTION__, option_rom[0].name); } } if (!rom_loaded && !kernel_filename && !qtest_enabled()) { fprintf(stderr, "Kernel or ROM image must be specified\n"); exit(1); } /* Load the kernel. */ palmte_binfo.kernel_filename = kernel_filename; palmte_binfo.kernel_cmdline = kernel_cmdline; palmte_binfo.initrd_filename = initrd_filename; arm_load_kernel(mpu->cpu, &palmte_binfo); }
0
1,082
void configure_icount(const char *option) { vmstate_register(NULL, 0, &vmstate_timers, &timers_state); if (!option) return; if (strcmp(option, "auto") != 0) { icount_time_shift = strtol(option, NULL, 0); use_icount = 1; return; } use_icount = 2; /* 125MIPS seems a reasonable initial guess at the guest speed. It will be corrected fairly quickly anyway. */ icount_time_shift = 3; /* Have both realtime and virtual time triggers for speed adjustment. The realtime trigger catches emulated time passing too slowly, the virtual time trigger catches emulated time passing too fast. Realtime triggers occur even when idle, so use them less frequently than VM triggers. */ icount_rt_timer = qemu_new_timer(rt_clock, icount_adjust_rt, NULL); qemu_mod_timer(icount_rt_timer, qemu_get_clock(rt_clock) + 1000); icount_vm_timer = qemu_new_timer(vm_clock, icount_adjust_vm, NULL); qemu_mod_timer(icount_vm_timer, qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10); }
0
1,083
struct omap_mmc_s *omap_mmc_init(hwaddr base, MemoryRegion *sysmem, BlockDriverState *bd, qemu_irq irq, qemu_irq dma[], omap_clk clk) { struct omap_mmc_s *s = (struct omap_mmc_s *) g_malloc0(sizeof(struct omap_mmc_s)); s->irq = irq; s->dma = dma; s->clk = clk; s->lines = 1; /* TODO: needs to be settable per-board */ s->rev = 1; omap_mmc_reset(s); memory_region_init_io(&s->iomem, NULL, &omap_mmc_ops, s, "omap.mmc", 0x800); memory_region_add_subregion(sysmem, base, &s->iomem); /* Instantiate the storage */ s->card = sd_init(bd, false); if (s->card == NULL) { exit(1); } return s; }
0
1,084
static int v9fs_synth_utimensat(FsContext *fs_ctx, V9fsPath *path, const struct timespec *buf) { errno = EPERM; return 0; }
0
1,085
static int monitor_fprintf(FILE *stream, const char *fmt, ...) { va_list ap; va_start(ap, fmt); monitor_vprintf((Monitor *)stream, fmt, ap); va_end(ap); return 0; }
0
1,086
static void hls_transform_unit(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int trafo_depth, int blk_idx) { HEVCLocalContext *lc = &s->HEVClc; if (lc->cu.pred_mode == MODE_INTRA) { int trafo_size = 1 << log2_trafo_size; ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size); s->hpc.intra_pred(s, x0, y0, log2_trafo_size, 0); if (log2_trafo_size > 2) { trafo_size = trafo_size << (s->sps->hshift[1] - 1); ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size); s->hpc.intra_pred(s, x0, y0, log2_trafo_size - 1, 1); s->hpc.intra_pred(s, x0, y0, log2_trafo_size - 1, 2); } else if (blk_idx == 3) { trafo_size = trafo_size << s->sps->hshift[1]; ff_hevc_set_neighbour_available(s, xBase, yBase, trafo_size, trafo_size); s->hpc.intra_pred(s, xBase, yBase, log2_trafo_size, 1); s->hpc.intra_pred(s, xBase, yBase, log2_trafo_size, 2); } } if (lc->tt.cbf_luma || SAMPLE_CBF(lc->tt.cbf_cb[trafo_depth], x0, y0) || SAMPLE_CBF(lc->tt.cbf_cr[trafo_depth], x0, y0)) { int scan_idx = SCAN_DIAG; int scan_idx_c = SCAN_DIAG; if (s->pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) { lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s); if (lc->tu.cu_qp_delta != 0) if (ff_hevc_cu_qp_delta_sign_flag(s) == 1) lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta; lc->tu.is_cu_qp_delta_coded = 1; ff_hevc_set_qPy(s, x0, y0, cb_xBase, cb_yBase, log2_cb_size); } if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) { if (lc->tu.cur_intra_pred_mode >= 6 && lc->tu.cur_intra_pred_mode <= 14) { scan_idx = SCAN_VERT; } else if (lc->tu.cur_intra_pred_mode >= 22 && lc->tu.cur_intra_pred_mode <= 30) { scan_idx = SCAN_HORIZ; } if (lc->pu.intra_pred_mode_c >= 6 && lc->pu.intra_pred_mode_c <= 14) { scan_idx_c = SCAN_VERT; } else if (lc->pu.intra_pred_mode_c >= 22 && lc->pu.intra_pred_mode_c <= 30) { scan_idx_c = SCAN_HORIZ; } } if (lc->tt.cbf_luma) hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0); if (log2_trafo_size > 2) { if (SAMPLE_CBF(lc->tt.cbf_cb[trafo_depth], x0, y0)) hls_residual_coding(s, x0, y0, log2_trafo_size - 1, scan_idx_c, 1); if (SAMPLE_CBF(lc->tt.cbf_cr[trafo_depth], x0, y0)) hls_residual_coding(s, x0, y0, log2_trafo_size - 1, scan_idx_c, 2); } else if (blk_idx == 3) { if (SAMPLE_CBF(lc->tt.cbf_cb[trafo_depth], xBase, yBase)) hls_residual_coding(s, xBase, yBase, log2_trafo_size, scan_idx_c, 1); if (SAMPLE_CBF(lc->tt.cbf_cr[trafo_depth], xBase, yBase)) hls_residual_coding(s, xBase, yBase, log2_trafo_size, scan_idx_c, 2); } } }
0
1,087
static int tgv_decode_inter(TgvContext *s, AVFrame *frame, const uint8_t *buf, const uint8_t *buf_end) { int num_mvs; int num_blocks_raw; int num_blocks_packed; int vector_bits; int i,j,x,y; GetBitContext gb; int mvbits; const uint8_t *blocks_raw; if(buf_end - buf < 12) return AVERROR_INVALIDDATA; num_mvs = AV_RL16(&buf[0]); num_blocks_raw = AV_RL16(&buf[2]); num_blocks_packed = AV_RL16(&buf[4]); vector_bits = AV_RL16(&buf[6]); buf += 12; if (vector_bits > MIN_CACHE_BITS || !vector_bits) { av_log(s->avctx, AV_LOG_ERROR, "Invalid value for motion vector bits: %d\n", vector_bits); return AVERROR_INVALIDDATA; } /* allocate codebook buffers as necessary */ if (num_mvs > s->num_mvs) { s->mv_codebook = av_realloc(s->mv_codebook, num_mvs*2*sizeof(int)); s->num_mvs = num_mvs; } if (num_blocks_packed > s->num_blocks_packed) { s->block_codebook = av_realloc(s->block_codebook, num_blocks_packed*16); s->num_blocks_packed = num_blocks_packed; } /* read motion vectors */ mvbits = (num_mvs * 2 * 10 + 31) & ~31; if (buf_end - buf < (mvbits>>3) + 16*num_blocks_raw + 8*num_blocks_packed) return AVERROR_INVALIDDATA; init_get_bits(&gb, buf, mvbits); for (i = 0; i < num_mvs; i++) { s->mv_codebook[i][0] = get_sbits(&gb, 10); s->mv_codebook[i][1] = get_sbits(&gb, 10); } buf += mvbits >> 3; /* note ptr to uncompressed blocks */ blocks_raw = buf; buf += num_blocks_raw * 16; /* read compressed blocks */ init_get_bits(&gb, buf, (buf_end - buf) << 3); for (i = 0; i < num_blocks_packed; i++) { int tmp[4]; for (j = 0; j < 4; j++) tmp[j] = get_bits(&gb, 8); for (j = 0; j < 16; j++) s->block_codebook[i][15-j] = tmp[get_bits(&gb, 2)]; } if (get_bits_left(&gb) < vector_bits * (s->avctx->height / 4) * (s->avctx->width / 4)) return AVERROR_INVALIDDATA; /* read vectors and build frame */ for (y = 0; y < s->avctx->height / 4; y++) for (x = 0; x < s->avctx->width / 4; x++) { unsigned int vector = get_bits(&gb, vector_bits); const uint8_t *src; int src_stride; if (vector < num_mvs) { int mx = x * 4 + s->mv_codebook[vector][0]; int my = y * 4 + s->mv_codebook[vector][1]; if (mx < 0 || mx + 4 > s->avctx->width || my < 0 || my + 4 > s->avctx->height) { av_log(s->avctx, AV_LOG_ERROR, "MV %d %d out of picture\n", mx, my); continue; } src = s->last_frame.data[0] + mx + my * s->last_frame.linesize[0]; src_stride = s->last_frame.linesize[0]; } else { int offset = vector - num_mvs; if (offset < num_blocks_raw) src = blocks_raw + 16*offset; else if (offset - num_blocks_raw < num_blocks_packed) src = s->block_codebook[offset - num_blocks_raw]; else continue; src_stride = 4; } for (j = 0; j < 4; j++) for (i = 0; i < 4; i++) frame->data[0][(y * 4 + j) * frame->linesize[0] + (x * 4 + i)] = src[j * src_stride + i]; } return 0; }
1
1,092
void FUNCC(ff_h264_luma_dc_dequant_idct)(int16_t *_output, int16_t *_input, int qmul){ #define stride 16 int i; int temp[16]; static const uint8_t x_offset[4]={0, 2*stride, 8*stride, 10*stride}; dctcoef *input = (dctcoef*)_input; dctcoef *output = (dctcoef*)_output; for(i=0; i<4; i++){ const int z0= input[4*i+0] + input[4*i+1]; const int z1= input[4*i+0] - input[4*i+1]; const int z2= input[4*i+2] - input[4*i+3]; const int z3= input[4*i+2] + input[4*i+3]; temp[4*i+0]= z0+z3; temp[4*i+1]= z0-z3; temp[4*i+2]= z1-z2; temp[4*i+3]= z1+z2; } for(i=0; i<4; i++){ const int offset= x_offset[i]; const int z0= temp[4*0+i] + temp[4*2+i]; const int z1= temp[4*0+i] - temp[4*2+i]; const int z2= temp[4*1+i] - temp[4*3+i]; const int z3= temp[4*1+i] + temp[4*3+i]; output[stride* 0+offset]= ((((z0 + z3)*qmul + 128 ) >> 8)); output[stride* 1+offset]= ((((z1 + z2)*qmul + 128 ) >> 8)); output[stride* 4+offset]= ((((z1 - z2)*qmul + 128 ) >> 8)); output[stride* 5+offset]= ((((z0 - z3)*qmul + 128 ) >> 8)); } #undef stride }
1
1,093
static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index, uint64_t chunk) { uint64_t result = wr_id & RDMA_WRID_TYPE_MASK; result |= (index << RDMA_WRID_BLOCK_SHIFT); result |= (chunk << RDMA_WRID_CHUNK_SHIFT); return result; }
1
1,094
static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, Error **errp) { Error *local_err = NULL; BlockMeasureInfo *info; uint64_t required = 0; /* bytes that contribute to required size */ uint64_t virtual_size; /* disk size as seen by guest */ uint64_t refcount_bits; uint64_t l2_tables; size_t cluster_size; int version; char *optstr; PreallocMode prealloc; bool has_backing_file; /* Parse image creation options */ cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); if (local_err) { goto err; } version = qcow2_opt_get_version_del(opts, &local_err); if (local_err) { goto err; } refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); if (local_err) { goto err; } optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); prealloc = qapi_enum_parse(PreallocMode_lookup, optstr, PREALLOC_MODE__MAX, PREALLOC_MODE_OFF, &local_err); g_free(optstr); if (local_err) { goto err; } optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); has_backing_file = !!optstr; g_free(optstr); virtual_size = align_offset(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), cluster_size); /* Check that virtual disk size is valid */ l2_tables = DIV_ROUND_UP(virtual_size / cluster_size, cluster_size / sizeof(uint64_t)); if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) { error_setg(&local_err, "The image size is too large " "(try using a larger cluster size)"); goto err; } /* Account for input image */ if (in_bs) { int64_t ssize = bdrv_getlength(in_bs); if (ssize < 0) { error_setg_errno(&local_err, -ssize, "Unable to get image virtual_size"); goto err; } virtual_size = align_offset(ssize, cluster_size); if (has_backing_file) { /* We don't how much of the backing chain is shared by the input * image and the new image file. In the worst case the new image's * backing file has nothing in common with the input image. Be * conservative and assume all clusters need to be written. */ required = virtual_size; } else { int cluster_sectors = cluster_size / BDRV_SECTOR_SIZE; int64_t sector_num; int pnum = 0; for (sector_num = 0; sector_num < ssize / BDRV_SECTOR_SIZE; sector_num += pnum) { int nb_sectors = MAX(ssize / BDRV_SECTOR_SIZE - sector_num, INT_MAX); BlockDriverState *file; int64_t ret; ret = bdrv_get_block_status_above(in_bs, NULL, sector_num, nb_sectors, &pnum, &file); if (ret < 0) { error_setg_errno(&local_err, -ret, "Unable to get block status"); goto err; } if (ret & BDRV_BLOCK_ZERO) { /* Skip zero regions (safe with no backing file) */ } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { /* Extend pnum to end of cluster for next iteration */ pnum = ROUND_UP(sector_num + pnum, cluster_sectors) - sector_num; /* Count clusters we've seen */ required += (sector_num % cluster_sectors + pnum) * BDRV_SECTOR_SIZE; } } } } /* Take into account preallocation. Nothing special is needed for * PREALLOC_MODE_METADATA since metadata is always counted. */ if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { required = virtual_size; } info = g_new(BlockMeasureInfo, 1); info->fully_allocated = qcow2_calc_prealloc_size(virtual_size, cluster_size, ctz32(refcount_bits)); /* Remove data clusters that are not required. This overestimates the * required size because metadata needed for the fully allocated file is * still counted. */ info->required = info->fully_allocated - virtual_size + required; return info; err: error_propagate(errp, local_err); return NULL; }
0
1,095
static target_ulong disas_insn(DisasContext *s, CPUState *cpu) { CPUX86State *env = cpu->env_ptr; int b, prefixes; int shift; TCGMemOp ot, aflag, dflag; int modrm, reg, rm, mod, op, opreg, val; target_ulong next_eip, tval; int rex_w, rex_r; target_ulong pc_start = s->base.pc_next; s->pc_start = s->pc = pc_start; prefixes = 0; s->override = -1; rex_w = -1; rex_r = 0; #ifdef TARGET_X86_64 s->rex_x = 0; s->rex_b = 0; x86_64_hregs = 0; #endif s->rip_offset = 0; /* for relative ip address */ s->vex_l = 0; s->vex_v = 0; if (sigsetjmp(s->jmpbuf, 0) != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); return s->pc; } next_byte: b = x86_ldub_code(env, s); /* Collect prefixes. */ switch (b) { case 0xf3: prefixes |= PREFIX_REPZ; goto next_byte; case 0xf2: prefixes |= PREFIX_REPNZ; goto next_byte; case 0xf0: prefixes |= PREFIX_LOCK; goto next_byte; case 0x2e: s->override = R_CS; goto next_byte; case 0x36: s->override = R_SS; goto next_byte; case 0x3e: s->override = R_DS; goto next_byte; case 0x26: s->override = R_ES; goto next_byte; case 0x64: s->override = R_FS; goto next_byte; case 0x65: s->override = R_GS; goto next_byte; case 0x66: prefixes |= PREFIX_DATA; goto next_byte; case 0x67: prefixes |= PREFIX_ADR; goto next_byte; #ifdef TARGET_X86_64 case 0x40 ... 0x4f: if (CODE64(s)) { /* REX prefix */ rex_w = (b >> 3) & 1; rex_r = (b & 0x4) << 1; s->rex_x = (b & 0x2) << 2; REX_B(s) = (b & 0x1) << 3; x86_64_hregs = 1; /* select uniform byte register addressing */ goto next_byte; } break; #endif case 0xc5: /* 2-byte VEX */ case 0xc4: /* 3-byte VEX */ /* VEX prefixes cannot be used except in 32-bit mode. Otherwise the instruction is LES or LDS. */ if (s->code32 && !s->vm86) { static const int pp_prefix[4] = { 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ }; int vex3, vex2 = x86_ldub_code(env, s); if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) { /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b, otherwise the instruction is LES or LDS. */ break; } s->pc++; /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_LOCK | PREFIX_DATA)) { goto illegal_op; } #ifdef TARGET_X86_64 if (x86_64_hregs) { goto illegal_op; } #endif rex_r = (~vex2 >> 4) & 8; if (b == 0xc5) { vex3 = vex2; b = x86_ldub_code(env, s); } else { #ifdef TARGET_X86_64 s->rex_x = (~vex2 >> 3) & 8; s->rex_b = (~vex2 >> 2) & 8; #endif vex3 = x86_ldub_code(env, s); rex_w = (vex3 >> 7) & 1; switch (vex2 & 0x1f) { case 0x01: /* Implied 0f leading opcode bytes. */ b = x86_ldub_code(env, s) | 0x100; break; case 0x02: /* Implied 0f 38 leading opcode bytes. */ b = 0x138; break; case 0x03: /* Implied 0f 3a leading opcode bytes. */ b = 0x13a; break; default: /* Reserved for future use. */ goto unknown_op; } } s->vex_v = (~vex3 >> 3) & 0xf; s->vex_l = (vex3 >> 2) & 1; prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX; } break; } /* Post-process prefixes. */ if (CODE64(s)) { /* In 64-bit mode, the default data size is 32-bit. Select 64-bit data with rex_w, and 16-bit data with 0x66; rex_w takes precedence over 0x66 if both are present. */ dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32); /* In 64-bit mode, 0x67 selects 32-bit addressing. */ aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64); } else { /* In 16/32-bit mode, 0x66 selects the opposite data size. */ if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) { dflag = MO_32; } else { dflag = MO_16; } /* In 16/32-bit mode, 0x67 selects the opposite addressing. */ if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) { aflag = MO_32; } else { aflag = MO_16; } } s->prefix = prefixes; s->aflag = aflag; s->dflag = dflag; /* now check op code */ reswitch: switch(b) { case 0x0f: /**************************/ /* extended op code */ b = x86_ldub_code(env, s) | 0x100; goto reswitch; /**************************/ /* arith & logic */ case 0x00 ... 0x05: case 0x08 ... 0x0d: case 0x10 ... 0x15: case 0x18 ... 0x1d: case 0x20 ... 0x25: case 0x28 ... 0x2d: case 0x30 ... 0x35: case 0x38 ... 0x3d: { int op, f, val; op = (b >> 3) & 7; f = (b >> 1) & 3; ot = mo_b_d(b, dflag); switch(f) { case 0: /* OP Ev, Gv */ modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else if (op == OP_XORL && rm == reg) { xor_zero: /* xor reg, reg optimisation */ set_cc_op(s, CC_OP_CLR); tcg_gen_movi_tl(cpu_T0, 0); gen_op_mov_reg_v(ot, reg, cpu_T0); break; } else { opreg = rm; } gen_op_mov_v_reg(ot, cpu_T1, reg); gen_op(s, op, ot, opreg); break; case 1: /* OP Gv, Ev */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, cpu_T1, cpu_A0); } else if (op == OP_XORL && rm == reg) { goto xor_zero; } else { gen_op_mov_v_reg(ot, cpu_T1, rm); } gen_op(s, op, ot, reg); break; case 2: /* OP A, Iv */ val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T1, val); gen_op(s, op, ot, OR_EAX); break; } } break; case 0x82: if (CODE64(s)) goto illegal_op; case 0x80: /* GRP1 */ case 0x81: case 0x83: { int val; ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (b == 0x83) s->rip_offset = 1; else s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = rm; } switch(b) { default: case 0x80: case 0x81: case 0x82: val = insn_get(env, s, ot); break; case 0x83: val = (int8_t)insn_get(env, s, MO_8); break; } tcg_gen_movi_tl(cpu_T1, val); gen_op(s, op, ot, opreg); } break; /**************************/ /* inc, dec, and other misc arith */ case 0x40 ... 0x47: /* inc Gv */ ot = dflag; gen_inc(s, ot, OR_EAX + (b & 7), 1); break; case 0x48 ... 0x4f: /* dec Gv */ ot = dflag; gen_inc(s, ot, OR_EAX + (b & 7), -1); break; case 0xf6: /* GRP3 */ case 0xf7: ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (op == 0) { s->rip_offset = insn_const_size(ot); } gen_lea_modrm(env, s, modrm); /* For those below that handle locked memory, don't load here. */ if (!(s->prefix & PREFIX_LOCK) || op != 2) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } } else { gen_op_mov_v_reg(ot, cpu_T0, rm); } switch(op) { case 0: /* test */ val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T1, val); gen_op_testl_T0_T1_cc(); set_cc_op(s, CC_OP_LOGICB + ot); break; case 2: /* not */ if (s->prefix & PREFIX_LOCK) { if (mod == 3) { goto illegal_op; } tcg_gen_movi_tl(cpu_T0, ~0); tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0, s->mem_index, ot | MO_LE); } else { tcg_gen_not_tl(cpu_T0, cpu_T0); if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); } } break; case 3: /* neg */ if (s->prefix & PREFIX_LOCK) { TCGLabel *label1; TCGv a0, t0, t1, t2; if (mod == 3) { goto illegal_op; } a0 = tcg_temp_local_new(); t0 = tcg_temp_local_new(); label1 = gen_new_label(); tcg_gen_mov_tl(a0, cpu_A0); tcg_gen_mov_tl(t0, cpu_T0); gen_set_label(label1); t1 = tcg_temp_new(); t2 = tcg_temp_new(); tcg_gen_mov_tl(t2, t0); tcg_gen_neg_tl(t1, t0); tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1, s->mem_index, ot | MO_LE); tcg_temp_free(t1); tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1); tcg_temp_free(t2); tcg_temp_free(a0); tcg_gen_mov_tl(cpu_T0, t0); tcg_temp_free(t0); } else { tcg_gen_neg_tl(cpu_T0, cpu_T0); if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); } } gen_op_update_neg_cc(); set_cc_op(s, CC_OP_SUBB + ot); break; case 4: /* mul */ switch(ot) { case MO_8: gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX); tcg_gen_ext8u_tl(cpu_T0, cpu_T0); tcg_gen_ext8u_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00); set_cc_op(s, CC_OP_MULB); break; case MO_16: gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX); tcg_gen_ext16u_tl(cpu_T0, cpu_T0); tcg_gen_ext16u_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_shri_tl(cpu_T0, cpu_T0, 16); gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0); tcg_gen_mov_tl(cpu_cc_src, cpu_T0); set_cc_op(s, CC_OP_MULW); break; default: case MO_32: tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]); tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULL); break; #ifdef TARGET_X86_64 case MO_64: tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_T0, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULQ); break; #endif } break; case 5: /* imul */ switch(ot) { case MO_8: gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX); tcg_gen_ext8s_tl(cpu_T0, cpu_T0); tcg_gen_ext8s_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0); tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0); set_cc_op(s, CC_OP_MULB); break; case MO_16: gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX); tcg_gen_ext16s_tl(cpu_T0, cpu_T0); tcg_gen_ext16s_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0); tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0); tcg_gen_shri_tl(cpu_T0, cpu_T0, 16); gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0); set_cc_op(s, CC_OP_MULW); break; default: case MO_32: tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]); tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32); tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32); set_cc_op(s, CC_OP_MULL); break; #ifdef TARGET_X86_64 case MO_64: tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_T0, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63); tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULQ); break; #endif } break; case 6: /* div */ switch(ot) { case MO_8: gen_helper_divb_AL(cpu_env, cpu_T0); break; case MO_16: gen_helper_divw_AX(cpu_env, cpu_T0); break; default: case MO_32: gen_helper_divl_EAX(cpu_env, cpu_T0); break; #ifdef TARGET_X86_64 case MO_64: gen_helper_divq_EAX(cpu_env, cpu_T0); break; #endif } break; case 7: /* idiv */ switch(ot) { case MO_8: gen_helper_idivb_AL(cpu_env, cpu_T0); break; case MO_16: gen_helper_idivw_AX(cpu_env, cpu_T0); break; default: case MO_32: gen_helper_idivl_EAX(cpu_env, cpu_T0); break; #ifdef TARGET_X86_64 case MO_64: gen_helper_idivq_EAX(cpu_env, cpu_T0); break; #endif } break; default: goto unknown_op; } break; case 0xfe: /* GRP4 */ case 0xff: /* GRP5 */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (op >= 2 && b == 0xfe) { goto unknown_op; } if (CODE64(s)) { if (op == 2 || op == 4) { /* operand size for jumps is 64 bit */ ot = MO_64; } else if (op == 3 || op == 5) { ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16; } else if (op == 6) { /* default push size is 64 bit */ ot = mo_pushpop(s, dflag); } } if (mod != 3) { gen_lea_modrm(env, s, modrm); if (op >= 2 && op != 3 && op != 5) gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_v_reg(ot, cpu_T0, rm); } switch(op) { case 0: /* inc Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, 1); break; case 1: /* dec Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, -1); break; case 2: /* call Ev */ /* XXX: optimize if memory (no 'and' is necessary) */ if (dflag == MO_16) { tcg_gen_ext16u_tl(cpu_T0, cpu_T0); } next_eip = s->pc - s->cs_base; tcg_gen_movi_tl(cpu_T1, next_eip); gen_push_v(s, cpu_T1); gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 3: /* lcall Ev */ gen_op_ld_v(s, ot, cpu_T1, cpu_A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); do_lcall: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1, tcg_const_i32(dflag - 1), tcg_const_tl(s->pc - s->cs_base)); } else { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1, tcg_const_i32(dflag - 1), tcg_const_i32(s->pc - s->cs_base)); } tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip)); gen_jr(s, cpu_tmp4); break; case 4: /* jmp Ev */ if (dflag == MO_16) { tcg_gen_ext16u_tl(cpu_T0, cpu_T0); } gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 5: /* ljmp Ev */ gen_op_ld_v(s, ot, cpu_T1, cpu_A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); do_ljmp: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1, tcg_const_tl(s->pc - s->cs_base)); } else { gen_op_movl_seg_T0_vm(R_CS); gen_op_jmp_v(cpu_T1); } tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip)); gen_jr(s, cpu_tmp4); break; case 6: /* push Ev */ gen_push_v(s, cpu_T0); break; default: goto unknown_op; } break; case 0x84: /* test Ev, Gv */ case 0x85: ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_v_reg(ot, cpu_T1, reg); gen_op_testl_T0_T1_cc(); set_cc_op(s, CC_OP_LOGICB + ot); break; case 0xa8: /* test eAX, Iv */ case 0xa9: ot = mo_b_d(b, dflag); val = insn_get(env, s, ot); gen_op_mov_v_reg(ot, cpu_T0, OR_EAX); tcg_gen_movi_tl(cpu_T1, val); gen_op_testl_T0_T1_cc(); set_cc_op(s, CC_OP_LOGICB + ot); break; case 0x98: /* CWDE/CBW */ switch (dflag) { #ifdef TARGET_X86_64 case MO_64: gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX); tcg_gen_ext32s_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0); break; #endif case MO_32: gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX); tcg_gen_ext16s_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0); break; case MO_16: gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX); tcg_gen_ext8s_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); break; default: tcg_abort(); } break; case 0x99: /* CDQ/CWD */ switch (dflag) { #ifdef TARGET_X86_64 case MO_64: gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX); tcg_gen_sari_tl(cpu_T0, cpu_T0, 63); gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0); break; #endif case MO_32: gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX); tcg_gen_ext32s_tl(cpu_T0, cpu_T0); tcg_gen_sari_tl(cpu_T0, cpu_T0, 31); gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0); break; case MO_16: gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX); tcg_gen_ext16s_tl(cpu_T0, cpu_T0); tcg_gen_sari_tl(cpu_T0, cpu_T0, 15); gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0); break; default: tcg_abort(); } break; case 0x1af: /* imul Gv, Ev */ case 0x69: /* imul Gv, Ev, I */ case 0x6b: ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; if (b == 0x69) s->rip_offset = insn_const_size(ot); else if (b == 0x6b) s->rip_offset = 1; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); if (b == 0x69) { val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T1, val); } else if (b == 0x6b) { val = (int8_t)insn_get(env, s, MO_8); tcg_gen_movi_tl(cpu_T1, val); } else { gen_op_mov_v_reg(ot, cpu_T1, reg); } switch (ot) { #ifdef TARGET_X86_64 case MO_64: tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63); tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1); break; #endif case MO_32: tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32); break; default: tcg_gen_ext16s_tl(cpu_T0, cpu_T0); tcg_gen_ext16s_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0); tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0); gen_op_mov_reg_v(ot, reg, cpu_T0); break; } set_cc_op(s, CC_OP_MULB + ot); break; case 0x1c0: case 0x1c1: /* xadd Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; gen_op_mov_v_reg(ot, cpu_T0, reg); if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(ot, cpu_T1, rm); tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(ot, reg, cpu_T1); gen_op_mov_reg_v(ot, rm, cpu_T0); } else { gen_lea_modrm(env, s, modrm); if (s->prefix & PREFIX_LOCK) { tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0, s->mem_index, ot | MO_LE); tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); } else { gen_op_ld_v(s, ot, cpu_T1, cpu_A0); tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_st_v(s, ot, cpu_T0, cpu_A0); } gen_op_mov_reg_v(ot, reg, cpu_T1); } gen_op_update2_cc(); set_cc_op(s, CC_OP_ADDB + ot); break; case 0x1b0: case 0x1b1: /* cmpxchg Ev, Gv */ { TCGv oldv, newv, cmpv; ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; oldv = tcg_temp_new(); newv = tcg_temp_new(); cmpv = tcg_temp_new(); gen_op_mov_v_reg(ot, newv, reg); tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]); if (s->prefix & PREFIX_LOCK) { if (mod == 3) { goto illegal_op; } gen_lea_modrm(env, s, modrm); tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv, s->mem_index, ot | MO_LE); gen_op_mov_reg_v(ot, R_EAX, oldv); } else { if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(ot, oldv, rm); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, oldv, cpu_A0); rm = 0; /* avoid warning */ } gen_extu(ot, oldv); gen_extu(ot, cmpv); /* store value = (old == cmp ? new : old); */ tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); if (mod == 3) { gen_op_mov_reg_v(ot, R_EAX, oldv); gen_op_mov_reg_v(ot, rm, newv); } else { /* Perform an unconditional store cycle like physical cpu; must be before changing accumulator to ensure idempotency if the store faults and the instruction is restarted */ gen_op_st_v(s, ot, newv, cpu_A0); gen_op_mov_reg_v(ot, R_EAX, oldv); } } tcg_gen_mov_tl(cpu_cc_src, oldv); tcg_gen_mov_tl(cpu_cc_srcT, cmpv); tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv); set_cc_op(s, CC_OP_SUBB + ot); tcg_temp_free(oldv); tcg_temp_free(newv); tcg_temp_free(cmpv); } break; case 0x1c7: /* cmpxchg8b */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if ((mod == 3) || ((modrm & 0x38) != 0x8)) goto illegal_op; #ifdef TARGET_X86_64 if (dflag == MO_64) { if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) goto illegal_op; gen_lea_modrm(env, s, modrm); if ((s->prefix & PREFIX_LOCK) && parallel_cpus) { gen_helper_cmpxchg16b(cpu_env, cpu_A0); } else { gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0); } } else #endif { if (!(s->cpuid_features & CPUID_CX8)) goto illegal_op; gen_lea_modrm(env, s, modrm); if ((s->prefix & PREFIX_LOCK) && parallel_cpus) { gen_helper_cmpxchg8b(cpu_env, cpu_A0); } else { gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0); } } set_cc_op(s, CC_OP_EFLAGS); break; /**************************/ /* push/pop */ case 0x50 ... 0x57: /* push */ gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s)); gen_push_v(s, cpu_T0); break; case 0x58 ... 0x5f: /* pop */ ot = gen_pop_T0(s); /* NOTE: order is important for pop %sp */ gen_pop_update(s, ot); gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0); break; case 0x60: /* pusha */ if (CODE64(s)) goto illegal_op; gen_pusha(s); break; case 0x61: /* popa */ if (CODE64(s)) goto illegal_op; gen_popa(s); break; case 0x68: /* push Iv */ case 0x6a: ot = mo_pushpop(s, dflag); if (b == 0x68) val = insn_get(env, s, ot); else val = (int8_t)insn_get(env, s, MO_8); tcg_gen_movi_tl(cpu_T0, val); gen_push_v(s, cpu_T0); break; case 0x8f: /* pop Ev */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; ot = gen_pop_T0(s); if (mod == 3) { /* NOTE: order is important for pop %sp */ gen_pop_update(s, ot); rm = (modrm & 7) | REX_B(s); gen_op_mov_reg_v(ot, rm, cpu_T0); } else { /* NOTE: order is important too for MMU exceptions */ s->popl_esp_hack = 1 << ot; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); s->popl_esp_hack = 0; gen_pop_update(s, ot); } break; case 0xc8: /* enter */ { int level; val = x86_lduw_code(env, s); level = x86_ldub_code(env, s); gen_enter(s, val, level); } break; case 0xc9: /* leave */ gen_leave(s); break; case 0x06: /* push es */ case 0x0e: /* push cs */ case 0x16: /* push ss */ case 0x1e: /* push ds */ if (CODE64(s)) goto illegal_op; gen_op_movl_T0_seg(b >> 3); gen_push_v(s, cpu_T0); break; case 0x1a0: /* push fs */ case 0x1a8: /* push gs */ gen_op_movl_T0_seg((b >> 3) & 7); gen_push_v(s, cpu_T0); break; case 0x07: /* pop es */ case 0x17: /* pop ss */ case 0x1f: /* pop ds */ if (CODE64(s)) goto illegal_op; reg = b >> 3; ot = gen_pop_T0(s); gen_movl_seg_T0(s, reg); gen_pop_update(s, ot); /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */ if (s->base.is_jmp) { gen_jmp_im(s->pc - s->cs_base); if (reg == R_SS) { s->tf = 0; gen_eob_inhibit_irq(s, true); } else { gen_eob(s); } } break; case 0x1a1: /* pop fs */ case 0x1a9: /* pop gs */ ot = gen_pop_T0(s); gen_movl_seg_T0(s, (b >> 3) & 7); gen_pop_update(s, ot); if (s->base.is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /**************************/ /* mov */ case 0x88: case 0x89: /* mov Gv, Ev */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); break; case 0xc6: case 0xc7: /* mov Ev, Iv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if (mod != 3) { s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm); } val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T0, val); if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0); } break; case 0x8a: case 0x8b: /* mov Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_reg_v(ot, reg, cpu_T0); break; case 0x8e: /* mov seg, Gv */ modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; if (reg >= 6 || reg == R_CS) goto illegal_op; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_movl_seg_T0(s, reg); /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */ if (s->base.is_jmp) { gen_jmp_im(s->pc - s->cs_base); if (reg == R_SS) { s->tf = 0; gen_eob_inhibit_irq(s, true); } else { gen_eob(s); } } break; case 0x8c: /* mov Gv, seg */ modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (reg >= 6) goto illegal_op; gen_op_movl_T0_seg(reg); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 0x1b6: /* movzbS Gv, Eb */ case 0x1b7: /* movzwS Gv, Eb */ case 0x1be: /* movsbS Gv, Eb */ case 0x1bf: /* movswS Gv, Eb */ { TCGMemOp d_ot; TCGMemOp s_ot; /* d_ot is the size of destination */ d_ot = dflag; /* ot is the size of source */ ot = (b & 1) + MO_8; /* s_ot is the sign+size of source */ s_ot = b & 8 ? MO_SIGN | ot : ot; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { if (s_ot == MO_SB && byte_reg_is_xH(rm)) { tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8); } else { gen_op_mov_v_reg(ot, cpu_T0, rm); switch (s_ot) { case MO_UB: tcg_gen_ext8u_tl(cpu_T0, cpu_T0); break; case MO_SB: tcg_gen_ext8s_tl(cpu_T0, cpu_T0); break; case MO_UW: tcg_gen_ext16u_tl(cpu_T0, cpu_T0); break; default: case MO_SW: tcg_gen_ext16s_tl(cpu_T0, cpu_T0); break; } } gen_op_mov_reg_v(d_ot, reg, cpu_T0); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0); gen_op_mov_reg_v(d_ot, reg, cpu_T0); } } break; case 0x8d: /* lea */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; { AddressParts a = gen_lea_modrm_0(env, s, modrm); TCGv ea = gen_lea_modrm_1(a); gen_lea_v_seg(s, s->aflag, ea, -1, -1); gen_op_mov_reg_v(dflag, reg, cpu_A0); } break; case 0xa0: /* mov EAX, Ov */ case 0xa1: case 0xa2: /* mov Ov, EAX */ case 0xa3: { target_ulong offset_addr; ot = mo_b_d(b, dflag); switch (s->aflag) { #ifdef TARGET_X86_64 case MO_64: offset_addr = x86_ldq_code(env, s); break; #endif default: offset_addr = insn_get(env, s, s->aflag); break; } tcg_gen_movi_tl(cpu_A0, offset_addr); gen_add_A0_ds_seg(s); if ((b & 2) == 0) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); gen_op_mov_reg_v(ot, R_EAX, cpu_T0); } else { gen_op_mov_v_reg(ot, cpu_T0, R_EAX); gen_op_st_v(s, ot, cpu_T0, cpu_A0); } } break; case 0xd7: /* xlat */ tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]); tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0); gen_extu(s->aflag, cpu_A0); gen_add_A0_ds_seg(s); gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0); gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0); break; case 0xb0 ... 0xb7: /* mov R, Ib */ val = insn_get(env, s, MO_8); tcg_gen_movi_tl(cpu_T0, val); gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0); break; case 0xb8 ... 0xbf: /* mov R, Iv */ #ifdef TARGET_X86_64 if (dflag == MO_64) { uint64_t tmp; /* 64 bit case */ tmp = x86_ldq_code(env, s); reg = (b & 7) | REX_B(s); tcg_gen_movi_tl(cpu_T0, tmp); gen_op_mov_reg_v(MO_64, reg, cpu_T0); } else #endif { ot = dflag; val = insn_get(env, s, ot); reg = (b & 7) | REX_B(s); tcg_gen_movi_tl(cpu_T0, val); gen_op_mov_reg_v(ot, reg, cpu_T0); } break; case 0x91 ... 0x97: /* xchg R, EAX */ do_xchg_reg_eax: ot = dflag; reg = (b & 7) | REX_B(s); rm = R_EAX; goto do_xchg_reg; case 0x86: case 0x87: /* xchg Ev, Gv */ ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) { rm = (modrm & 7) | REX_B(s); do_xchg_reg: gen_op_mov_v_reg(ot, cpu_T0, reg); gen_op_mov_v_reg(ot, cpu_T1, rm); gen_op_mov_reg_v(ot, rm, cpu_T0); gen_op_mov_reg_v(ot, reg, cpu_T1); } else { gen_lea_modrm(env, s, modrm); gen_op_mov_v_reg(ot, cpu_T0, reg); /* for xchg, lock is implicit */ tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0, s->mem_index, ot | MO_LE); gen_op_mov_reg_v(ot, reg, cpu_T1); } break; case 0xc4: /* les Gv */ /* In CODE64 this is VEX3; see above. */ op = R_ES; goto do_lxx; case 0xc5: /* lds Gv */ /* In CODE64 this is VEX2; see above. */ op = R_DS; goto do_lxx; case 0x1b2: /* lss Gv */ op = R_SS; goto do_lxx; case 0x1b4: /* lfs Gv */ op = R_FS; goto do_lxx; case 0x1b5: /* lgs Gv */ op = R_GS; do_lxx: ot = dflag != MO_16 ? MO_32 : MO_16; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, cpu_T1, cpu_A0); gen_add_A0_im(s, 1 << ot); /* load the segment first to handle exceptions properly */ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); gen_movl_seg_T0(s, op); /* then put the data */ gen_op_mov_reg_v(ot, reg, cpu_T1); if (s->base.is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /************************/ /* shifts */ case 0xc0: case 0xc1: /* shift Ev,Ib */ shift = 2; grp2: { ot = mo_b_d(b, dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; if (mod != 3) { if (shift == 2) { s->rip_offset = 1; } gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = (modrm & 7) | REX_B(s); } /* simpler op */ if (shift == 0) { gen_shift(s, op, ot, opreg, OR_ECX); } else { if (shift == 2) { shift = x86_ldub_code(env, s); } gen_shifti(s, op, ot, opreg, shift); } } break; case 0xd0: case 0xd1: /* shift Ev,1 */ shift = 1; goto grp2; case 0xd2: case 0xd3: /* shift Ev,cl */ shift = 0; goto grp2; case 0x1a4: /* shld imm */ op = 0; shift = 1; goto do_shiftd; case 0x1a5: /* shld cl */ op = 0; shift = 0; goto do_shiftd; case 0x1ac: /* shrd imm */ op = 1; shift = 1; goto do_shiftd; case 0x1ad: /* shrd cl */ op = 1; shift = 0; do_shiftd: ot = dflag; modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3) { gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = rm; } gen_op_mov_v_reg(ot, cpu_T1, reg); if (shift) { TCGv imm = tcg_const_tl(x86_ldub_code(env, s)); gen_shiftd_rm_T1(s, ot, opreg, op, imm); tcg_temp_free(imm); } else { gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]); } break; /************************/ /* floats */ case 0xd8 ... 0xdf: if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { /* if CR0.EM or CR0.TS are set, generate an FPU exception */ /* XXX: what to do if illegal op ? */ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; rm = modrm & 7; op = ((b & 7) << 3) | ((modrm >> 3) & 7); if (mod != 3) { /* memory op */ gen_lea_modrm(env, s, modrm); switch(op) { case 0x00 ... 0x07: /* fxxxs */ case 0x10 ... 0x17: /* fixxxl */ case 0x20 ... 0x27: /* fxxxl */ case 0x30 ... 0x37: /* fixxx */ { int op1; op1 = op & 7; switch(op >> 4) { case 0: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32); break; case 1: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); break; case 2: tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64); break; case 3: default: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LESW); gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); break; } gen_helper_fp_arith_ST0_FT0(op1); if (op1 == 3) { /* fcomp needs pop */ gen_helper_fpop(cpu_env); } } break; case 0x08: /* flds */ case 0x0a: /* fsts */ case 0x0b: /* fstps */ case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */ case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */ case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */ switch(op & 7) { case 0: switch(op >> 4) { case 0: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32); break; case 1: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); break; case 2: tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64); break; case 3: default: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LESW); gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); break; } break; case 1: /* XXX: the corresponding CPUID bit must be tested ! */ switch(op >> 4) { case 1: gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); break; case 2: gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); break; case 3: default: gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; } gen_helper_fpop(cpu_env); break; default: switch(op >> 4) { case 0: gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); break; case 1: gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); break; case 2: gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); break; case 3: default: gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; } if ((op & 7) == 3) gen_helper_fpop(cpu_env); break; } break; case 0x0c: /* fldenv mem */ gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x0d: /* fldcw mem */ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); gen_helper_fldcw(cpu_env, cpu_tmp2_i32); break; case 0x0e: /* fnstenv mem */ gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x0f: /* fnstcw mem */ gen_helper_fnstcw(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; case 0x1d: /* fldt mem */ gen_helper_fldt_ST0(cpu_env, cpu_A0); break; case 0x1f: /* fstpt mem */ gen_helper_fstt_ST0(cpu_env, cpu_A0); gen_helper_fpop(cpu_env); break; case 0x2c: /* frstor mem */ gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x2e: /* fnsave mem */ gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x2f: /* fnstsw mem */ gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; case 0x3c: /* fbld */ gen_helper_fbld_ST0(cpu_env, cpu_A0); break; case 0x3e: /* fbstp */ gen_helper_fbst_ST0(cpu_env, cpu_A0); gen_helper_fpop(cpu_env); break; case 0x3d: /* fildll */ tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64); break; case 0x3f: /* fistpll */ gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fpop(cpu_env); break; default: goto unknown_op; } } else { /* register float ops */ opreg = rm; switch(op) { case 0x08: /* fld sti */ gen_helper_fpush(cpu_env); gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32((opreg + 1) & 7)); break; case 0x09: /* fxchg sti */ case 0x29: /* fxchg4 sti, undocumented op */ case 0x39: /* fxchg7 sti, undocumented op */ gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg)); break; case 0x0a: /* grp d9/2 */ switch(rm) { case 0: /* fnop */ /* check exceptions (FreeBSD FPU probe) */ gen_helper_fwait(cpu_env); break; default: goto unknown_op; } break; case 0x0c: /* grp d9/4 */ switch(rm) { case 0: /* fchs */ gen_helper_fchs_ST0(cpu_env); break; case 1: /* fabs */ gen_helper_fabs_ST0(cpu_env); break; case 4: /* ftst */ gen_helper_fldz_FT0(cpu_env); gen_helper_fcom_ST0_FT0(cpu_env); break; case 5: /* fxam */ gen_helper_fxam_ST0(cpu_env); break; default: goto unknown_op; } break; case 0x0d: /* grp d9/5 */ { switch(rm) { case 0: gen_helper_fpush(cpu_env); gen_helper_fld1_ST0(cpu_env); break; case 1: gen_helper_fpush(cpu_env); gen_helper_fldl2t_ST0(cpu_env); break; case 2: gen_helper_fpush(cpu_env); gen_helper_fldl2e_ST0(cpu_env); break; case 3: gen_helper_fpush(cpu_env); gen_helper_fldpi_ST0(cpu_env); break; case 4: gen_helper_fpush(cpu_env); gen_helper_fldlg2_ST0(cpu_env); break; case 5: gen_helper_fpush(cpu_env); gen_helper_fldln2_ST0(cpu_env); break; case 6: gen_helper_fpush(cpu_env); gen_helper_fldz_ST0(cpu_env); break; default: goto unknown_op; } } break; case 0x0e: /* grp d9/6 */ switch(rm) { case 0: /* f2xm1 */ gen_helper_f2xm1(cpu_env); break; case 1: /* fyl2x */ gen_helper_fyl2x(cpu_env); break; case 2: /* fptan */ gen_helper_fptan(cpu_env); break; case 3: /* fpatan */ gen_helper_fpatan(cpu_env); break; case 4: /* fxtract */ gen_helper_fxtract(cpu_env); break; case 5: /* fprem1 */ gen_helper_fprem1(cpu_env); break; case 6: /* fdecstp */ gen_helper_fdecstp(cpu_env); break; default: case 7: /* fincstp */ gen_helper_fincstp(cpu_env); break; } break; case 0x0f: /* grp d9/7 */ switch(rm) { case 0: /* fprem */ gen_helper_fprem(cpu_env); break; case 1: /* fyl2xp1 */ gen_helper_fyl2xp1(cpu_env); break; case 2: /* fsqrt */ gen_helper_fsqrt(cpu_env); break; case 3: /* fsincos */ gen_helper_fsincos(cpu_env); break; case 5: /* fscale */ gen_helper_fscale(cpu_env); break; case 4: /* frndint */ gen_helper_frndint(cpu_env); break; case 6: /* fsin */ gen_helper_fsin(cpu_env); break; default: case 7: /* fcos */ gen_helper_fcos(cpu_env); break; } break; case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */ case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */ case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */ { int op1; op1 = op & 7; if (op >= 0x20) { gen_helper_fp_arith_STN_ST0(op1, opreg); if (op >= 0x30) gen_helper_fpop(cpu_env); } else { gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fp_arith_ST0_FT0(op1); } } break; case 0x02: /* fcom */ case 0x22: /* fcom2, undocumented op */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcom_ST0_FT0(cpu_env); break; case 0x03: /* fcomp */ case 0x23: /* fcomp3, undocumented op */ case 0x32: /* fcomp5, undocumented op */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); break; case 0x15: /* da/5 */ switch(rm) { case 1: /* fucompp */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); gen_helper_fucom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); gen_helper_fpop(cpu_env); break; default: goto unknown_op; } break; case 0x1c: switch(rm) { case 0: /* feni (287 only, just do nop here) */ break; case 1: /* fdisi (287 only, just do nop here) */ break; case 2: /* fclex */ gen_helper_fclex(cpu_env); break; case 3: /* fninit */ gen_helper_fninit(cpu_env); break; case 4: /* fsetpm (287 only, just do nop here) */ break; default: goto unknown_op; } break; case 0x1d: /* fucomi */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucomi_ST0_FT0(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x1e: /* fcomi */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcomi_ST0_FT0(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x28: /* ffree sti */ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); break; case 0x2a: /* fst sti */ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); break; case 0x2b: /* fstp sti */ case 0x0b: /* fstp1 sti, undocumented op */ case 0x3a: /* fstp8 sti, undocumented op */ case 0x3b: /* fstp9 sti, undocumented op */ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); gen_helper_fpop(cpu_env); break; case 0x2c: /* fucom st(i) */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucom_ST0_FT0(cpu_env); break; case 0x2d: /* fucomp st(i) */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); break; case 0x33: /* de/3 */ switch(rm) { case 1: /* fcompp */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); gen_helper_fcom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); gen_helper_fpop(cpu_env); break; default: goto unknown_op; } break; case 0x38: /* ffreep sti, undocumented op */ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fpop(cpu_env); break; case 0x3c: /* df/4 */ switch(rm) { case 0: gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); break; default: goto unknown_op; } break; case 0x3d: /* fucomip */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucomi_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x3e: /* fcomip */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcomi_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x10 ... 0x13: /* fcmovxx */ case 0x18 ... 0x1b: { int op1; TCGLabel *l1; static const uint8_t fcmov_cc[8] = { (JCC_B << 1), (JCC_Z << 1), (JCC_BE << 1), (JCC_P << 1), }; if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); l1 = gen_new_label(); gen_jcc1_noeob(s, op1, l1); gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg)); gen_set_label(l1); } break; default: goto unknown_op; } } break; /************************/ /* string ops */ case 0xa4: /* movsS */ case 0xa5: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_movs(s, ot); } break; case 0xaa: /* stosS */ case 0xab: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_stos(s, ot); } break; case 0xac: /* lodsS */ case 0xad: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_lods(s, ot); } break; case 0xae: /* scasS */ case 0xaf: ot = mo_b_d(b, dflag); if (prefixes & PREFIX_REPNZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_scas(s, ot); } break; case 0xa6: /* cmpsS */ case 0xa7: ot = mo_b_d(b, dflag); if (prefixes & PREFIX_REPNZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_cmps(s, ot); } break; case 0x6c: /* insS */ case 0x6d: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_ins(s, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } } break; case 0x6e: /* outsS */ case 0x6f: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_outs(s, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_jmp(s, s->pc - s->cs_base); } } break; /************************/ /* port I/O */ case 0xe4: case 0xe5: ot = mo_b_d32(b, dflag); val = x86_ldub_code(env, s); tcg_gen_movi_tl(cpu_T0, val); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } tcg_gen_movi_i32(cpu_tmp2_i32, val); gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32); gen_op_mov_reg_v(ot, R_EAX, cpu_T1); gen_bpt_io(s, cpu_tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xe6: case 0xe7: ot = mo_b_d32(b, dflag); val = x86_ldub_code(env, s); tcg_gen_movi_tl(cpu_T0, val); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_v_reg(ot, cpu_T1, R_EAX); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } tcg_gen_movi_i32(cpu_tmp2_i32, val); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); gen_bpt_io(s, cpu_tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xec: case 0xed: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32); gen_op_mov_reg_v(ot, R_EAX, cpu_T1); gen_bpt_io(s, cpu_tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xee: case 0xef: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_v_reg(ot, cpu_T1, R_EAX); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); gen_bpt_io(s, cpu_tmp2_i32, ot); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; /************************/ /* control */ case 0xc2: /* ret im */ val = x86_ldsw_code(env, s); ot = gen_pop_T0(s); gen_stack_update(s, val + (1 << ot)); /* Note that gen_pop_T0 uses a zero-extending load. */ gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 0xc3: /* ret */ ot = gen_pop_T0(s); gen_pop_update(s, ot); /* Note that gen_pop_T0 uses a zero-extending load. */ gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 0xca: /* lret im */ val = x86_ldsw_code(env, s); do_lret: if (s->pe && !s->vm86) { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1), tcg_const_i32(val)); } else { gen_stack_A0(s); /* pop offset */ gen_op_ld_v(s, dflag, cpu_T0, cpu_A0); /* NOTE: keeping EIP updated is not a problem in case of exception */ gen_op_jmp_v(cpu_T0); /* pop selector */ gen_add_A0_im(s, 1 << dflag); gen_op_ld_v(s, dflag, cpu_T0, cpu_A0); gen_op_movl_seg_T0_vm(R_CS); /* add stack offset */ gen_stack_update(s, val + (2 << dflag)); } gen_eob(s); break; case 0xcb: /* lret */ val = 0; goto do_lret; case 0xcf: /* iret */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET); if (!s->pe) { /* real mode */ gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); set_cc_op(s, CC_OP_EFLAGS); } else if (s->vm86) { if (s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); set_cc_op(s, CC_OP_EFLAGS); } } else { gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1), tcg_const_i32(s->pc - s->cs_base)); set_cc_op(s, CC_OP_EFLAGS); } gen_eob(s); break; case 0xe8: /* call im */ { if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); } next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; } else if (!CODE64(s)) { tval &= 0xffffffff; } tcg_gen_movi_tl(cpu_T0, next_eip); gen_push_v(s, cpu_T0); gen_bnd_jmp(s); gen_jmp(s, tval); } break; case 0x9a: /* lcall im */ { unsigned int selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag; offset = insn_get(env, s, ot); selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(cpu_T0, selector); tcg_gen_movi_tl(cpu_T1, offset); } goto do_lcall; case 0xe9: /* jmp im */ if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); } tval += s->pc - s->cs_base; if (dflag == MO_16) { tval &= 0xffff; } else if (!CODE64(s)) { tval &= 0xffffffff; } gen_bnd_jmp(s); gen_jmp(s, tval); break; case 0xea: /* ljmp im */ { unsigned int selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag; offset = insn_get(env, s, ot); selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(cpu_T0, selector); tcg_gen_movi_tl(cpu_T1, offset); } goto do_ljmp; case 0xeb: /* jmp Jb */ tval = (int8_t)insn_get(env, s, MO_8); tval += s->pc - s->cs_base; if (dflag == MO_16) { tval &= 0xffff; } gen_jmp(s, tval); break; case 0x70 ... 0x7f: /* jcc Jb */ tval = (int8_t)insn_get(env, s, MO_8); goto do_jcc; case 0x180 ... 0x18f: /* jcc Jv */ if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); } do_jcc: next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; } gen_bnd_jmp(s); gen_jcc(s, b, tval, next_eip); break; case 0x190 ... 0x19f: /* setcc Gv */ modrm = x86_ldub_code(env, s); gen_setcc1(s, b, cpu_T0); gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1); break; case 0x140 ... 0x14f: /* cmov Gv, Ev */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; } ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_cmovcc1(env, s, ot, b, modrm, reg); break; /************************/ /* flags */ case 0x9c: /* pushf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_helper_read_eflags(cpu_T0, cpu_env); gen_push_v(s, cpu_T0); } break; case 0x9d: /* popf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { ot = gen_pop_T0(s); if (s->cpl == 0) { if (dflag != MO_16) { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff)); } } else { if (s->cpl <= s->iopl) { if (dflag != MO_16) { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff)); } } else { if (dflag != MO_16) { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff)); } } } gen_pop_update(s, ot); set_cc_op(s, CC_OP_EFLAGS); /* abort translation because TF/AC flag may change */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; case 0x9e: /* sahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; gen_op_mov_v_reg(MO_8, cpu_T0, R_AH); gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O); tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0); break; case 0x9f: /* lahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; gen_compute_eflags(s); /* Note: gen_compute_eflags() only gives the condition codes */ tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02); gen_op_mov_reg_v(MO_8, R_AH, cpu_T0); break; case 0xf5: /* cmc */ gen_compute_eflags(s); tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C); break; case 0xf8: /* clc */ gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C); break; case 0xf9: /* stc */ gen_compute_eflags(s); tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C); break; case 0xfc: /* cld */ tcg_gen_movi_i32(cpu_tmp2_i32, 1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); break; case 0xfd: /* std */ tcg_gen_movi_i32(cpu_tmp2_i32, -1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); break; /************************/ /* bit operations */ case 0x1ba: /* bt/bts/btr/btc Gv, im */ ot = dflag; modrm = x86_ldub_code(env, s); op = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { s->rip_offset = 1; gen_lea_modrm(env, s, modrm); if (!(s->prefix & PREFIX_LOCK)) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } } else { gen_op_mov_v_reg(ot, cpu_T0, rm); } /* load shift */ val = x86_ldub_code(env, s); tcg_gen_movi_tl(cpu_T1, val); if (op < 4) goto unknown_op; op -= 4; goto bt_op; case 0x1a3: /* bt Gv, Ev */ op = 0; goto do_btx; case 0x1ab: /* bts */ op = 1; goto do_btx; case 0x1b3: /* btr */ op = 2; goto do_btx; case 0x1bb: /* btc */ op = 3; do_btx: ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(MO_32, cpu_T1, reg); if (mod != 3) { AddressParts a = gen_lea_modrm_0(env, s, modrm); /* specific case: we need to add a displacement */ gen_exts(ot, cpu_T1); tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot); tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot); tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0); gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); if (!(s->prefix & PREFIX_LOCK)) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } } else { gen_op_mov_v_reg(ot, cpu_T0, rm); } bt_op: tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1); tcg_gen_movi_tl(cpu_tmp0, 1); tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1); if (s->prefix & PREFIX_LOCK) { switch (op) { case 0: /* bt */ /* Needs no atomic ops; we surpressed the normal memory load for LOCK above so do it now. */ gen_op_ld_v(s, ot, cpu_T0, cpu_A0); break; case 1: /* bts */ tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; case 2: /* btr */ tcg_gen_not_tl(cpu_tmp0, cpu_tmp0); tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; default: case 3: /* btc */ tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; } tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1); } else { tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1); switch (op) { case 0: /* bt */ /* Data already loaded; nothing to do. */ break; case 1: /* bts */ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0); break; case 2: /* btr */ tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0); break; default: case 3: /* btc */ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0); break; } if (op != 0) { if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); } } } /* Delay all CC updates until after the store above. Note that C is the result of the test, Z is unchanged, and the others are all undefined. */ switch (s->cc_op) { case CC_OP_MULB ... CC_OP_MULQ: case CC_OP_ADDB ... CC_OP_ADDQ: case CC_OP_ADCB ... CC_OP_ADCQ: case CC_OP_SUBB ... CC_OP_SUBQ: case CC_OP_SBBB ... CC_OP_SBBQ: case CC_OP_LOGICB ... CC_OP_LOGICQ: case CC_OP_INCB ... CC_OP_INCQ: case CC_OP_DECB ... CC_OP_DECQ: case CC_OP_SHLB ... CC_OP_SHLQ: case CC_OP_SARB ... CC_OP_SARQ: case CC_OP_BMILGB ... CC_OP_BMILGQ: /* Z was going to be computed from the non-zero status of CC_DST. We can get that same Z value (and the new C value) by leaving CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the same width. */ tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4); set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB); break; default: /* Otherwise, generate EFLAGS and replace the C bit. */ gen_compute_eflags(s); tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4, ctz32(CC_C), 1); break; } break; case 0x1bc: /* bsf / tzcnt */ case 0x1bd: /* bsr / lzcnt */ ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_extu(ot, cpu_T0); /* Note that lzcnt and tzcnt are in different extensions. */ if ((prefixes & PREFIX_REPZ) && (b & 1 ? s->cpuid_ext3_features & CPUID_EXT3_ABM : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) { int size = 8 << ot; /* For lzcnt/tzcnt, C bit is defined related to the input. */ tcg_gen_mov_tl(cpu_cc_src, cpu_T0); if (b & 1) { /* For lzcnt, reduce the target_ulong result by the number of zeros that we expect to find at the top. */ tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS); tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size); } else { /* For tzcnt, a zero input must return the operand size. */ tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size); } /* For lzcnt/tzcnt, Z bit is defined related to the result. */ gen_op_update1_cc(); set_cc_op(s, CC_OP_BMILGB + ot); } else { /* For bsr/bsf, only the Z bit is defined and it is related to the input and not the result. */ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); set_cc_op(s, CC_OP_LOGICB + ot); /* ??? The manual says that the output is undefined when the input is zero, but real hardware leaves it unchanged, and real programs appear to depend on that. Accomplish this by passing the output as the value to return upon zero. */ if (b & 1) { /* For bsr, return the bit index of the first 1 bit, not the count of leading zeros. */ tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1); tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1); tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1); } else { tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]); } } gen_op_mov_reg_v(ot, reg, cpu_T0); break; /************************/ /* bcd */ case 0x27: /* daa */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_daa(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x2f: /* das */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_das(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x37: /* aaa */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_aaa(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x3f: /* aas */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_aas(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0xd4: /* aam */ if (CODE64(s)) goto illegal_op; val = x86_ldub_code(env, s); if (val == 0) { gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base); } else { gen_helper_aam(cpu_env, tcg_const_i32(val)); set_cc_op(s, CC_OP_LOGICB); } break; case 0xd5: /* aad */ if (CODE64(s)) goto illegal_op; val = x86_ldub_code(env, s); gen_helper_aad(cpu_env, tcg_const_i32(val)); set_cc_op(s, CC_OP_LOGICB); break; /************************/ /* misc */ case 0x90: /* nop */ /* XXX: correct lock test for all insn */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } /* If REX_B is set, then this is xchg eax, r8d, not a nop. */ if (REX_B(s)) { goto do_xchg_reg_eax; } if (prefixes & PREFIX_REPZ) { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start)); s->base.is_jmp = DISAS_NORETURN; } break; case 0x9b: /* fwait */ if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == (HF_MP_MASK | HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); } else { gen_helper_fwait(cpu_env); } break; case 0xcc: /* int3 */ gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); break; case 0xcd: /* int N */ val = x86_ldub_code(env, s); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); } break; case 0xce: /* into */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start)); break; #ifdef WANT_ICEBP case 0xf1: /* icebp (undocumented, exits to external debugger) */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP); #if 1 gen_debug(s, pc_start - s->cs_base); #else /* start debug */ tb_flush(CPU(x86_env_get_cpu(env))); qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM); #endif break; #endif case 0xfa: /* cli */ if (!s->vm86) { if (s->cpl <= s->iopl) { gen_helper_cli(cpu_env); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } else { if (s->iopl == 3) { gen_helper_cli(cpu_env); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } break; case 0xfb: /* sti */ if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) { gen_helper_sti(cpu_env); /* interruptions are enabled only the first insn after sti */ gen_jmp_im(s->pc - s->cs_base); gen_eob_inhibit_irq(s, true); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } break; case 0x62: /* bound */ if (CODE64(s)) goto illegal_op; ot = dflag; modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_op_mov_v_reg(ot, cpu_T0, reg); gen_lea_modrm(env, s, modrm); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); if (ot == MO_16) { gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32); } else { gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32); } break; case 0x1c8 ... 0x1cf: /* bswap reg */ reg = (b & 7) | REX_B(s); #ifdef TARGET_X86_64 if (dflag == MO_64) { gen_op_mov_v_reg(MO_64, cpu_T0, reg); tcg_gen_bswap64_i64(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_64, reg, cpu_T0); } else #endif { gen_op_mov_v_reg(MO_32, cpu_T0, reg); tcg_gen_ext32u_tl(cpu_T0, cpu_T0); tcg_gen_bswap32_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_32, reg, cpu_T0); } break; case 0xd6: /* salc */ if (CODE64(s)) goto illegal_op; gen_compute_eflags_c(s, cpu_T0); tcg_gen_neg_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0); break; case 0xe0: /* loopnz */ case 0xe1: /* loopz */ case 0xe2: /* loop */ case 0xe3: /* jecxz */ { TCGLabel *l1, *l2, *l3; tval = (int8_t)insn_get(env, s, MO_8); next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; } l1 = gen_new_label(); l2 = gen_new_label(); l3 = gen_new_label(); b &= 3; switch(b) { case 0: /* loopnz */ case 1: /* loopz */ gen_op_add_reg_im(s->aflag, R_ECX, -1); gen_op_jz_ecx(s->aflag, l3); gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); break; case 2: /* loop */ gen_op_add_reg_im(s->aflag, R_ECX, -1); gen_op_jnz_ecx(s->aflag, l1); break; default: case 3: /* jcxz */ gen_op_jz_ecx(s->aflag, l1); break; } gen_set_label(l3); gen_jmp_im(next_eip); tcg_gen_br(l2); gen_set_label(l1); gen_jmp_im(tval); gen_set_label(l2); gen_eob(s); } break; case 0x130: /* wrmsr */ case 0x132: /* rdmsr */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); if (b & 2) { gen_helper_rdmsr(cpu_env); } else { gen_helper_wrmsr(cpu_env); } } break; case 0x131: /* rdtsc */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } gen_helper_rdtsc(cpu_env); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0x133: /* rdpmc */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_rdpmc(cpu_env); break; case 0x134: /* sysenter */ /* For Intel SYSENTER is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_sysenter(cpu_env); gen_eob(s); } break; case 0x135: /* sysexit */ /* For Intel SYSEXIT is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1)); gen_eob(s); } break; #ifdef TARGET_X86_64 case 0x105: /* syscall */ /* XXX: is it usable in real mode ? */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start)); /* TF handling for the syscall insn is different. The TF bit is checked after the syscall insn completes. This allows #DB to not be generated after one has entered CPL0 if TF is set in FMASK. */ gen_eob_worker(s, false, true); break; case 0x107: /* sysret */ if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1)); /* condition codes are modified only in long mode */ if (s->lma) { set_cc_op(s, CC_OP_EFLAGS); } /* TF handling for the sysret insn is different. The TF bit is checked after the sysret insn completes. This allows #DB to be generated "as if" the syscall insn in userspace has just completed. */ gen_eob_worker(s, false, true); } break; #endif case 0x1a2: /* cpuid */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_cpuid(cpu_env); break; case 0xf4: /* hlt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start)); s->base.is_jmp = DISAS_NORETURN; } break; case 0x100: modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* sldt */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, ldt.selector)); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 2: /* lldt */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_lldt(cpu_env, cpu_tmp2_i32); } break; case 1: /* str */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, tr.selector)); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 3: /* ltr */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_ltr(cpu_env, cpu_tmp2_i32); } break; case 4: /* verr */ case 5: /* verw */ if (!s->pe || s->vm86) goto illegal_op; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_update_cc_op(s); if (op == 4) { gen_helper_verr(cpu_env, cpu_T0); } else { gen_helper_verw(cpu_env, cpu_T0); } set_cc_op(s, CC_OP_EFLAGS); break; default: goto unknown_op; } break; case 0x101: modrm = x86_ldub_code(env, s); switch (modrm) { CASE_MODRM_MEM_OP(0): /* sgdt */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.limit)); gen_op_st_v(s, MO_16, cpu_T0, cpu_A0); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base)); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); break; case 0xc8: /* monitor */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]); gen_extu(s->aflag, cpu_A0); gen_add_A0_ds_seg(s); gen_helper_monitor(cpu_env, cpu_A0); break; case 0xc9: /* mwait */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start)); gen_eob(s); break; case 0xca: /* clac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || s->cpl != 0) { goto illegal_op; } gen_helper_clac(cpu_env); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; case 0xcb: /* stac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || s->cpl != 0) { goto illegal_op; } gen_helper_stac(cpu_env); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(1): /* sidt */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit)); gen_op_st_v(s, MO_16, cpu_T0, cpu_A0); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base)); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); break; case 0xd0: /* xgetbv */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32); tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64); break; case 0xd1: /* xsetbv */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64); /* End TB because translation flags may change. */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; case 0xd8: /* VMRUN */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1), tcg_const_i32(s->pc - pc_start)); tcg_gen_exit_tb(0); s->base.is_jmp = DISAS_NORETURN; break; case 0xd9: /* VMMCALL */ if (!(s->flags & HF_SVME_MASK)) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmmcall(cpu_env); break; case 0xda: /* VMLOAD */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1)); break; case 0xdb: /* VMSAVE */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1)); break; case 0xdc: /* STGI */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_stgi(cpu_env); break; case 0xdd: /* CLGI */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_clgi(cpu_env); break; case 0xde: /* SKINIT */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_skinit(cpu_env); break; case 0xdf: /* INVLPGA */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; } if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1)); break; CASE_MODRM_MEM_OP(2): /* lgdt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE); gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base)); tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit)); break; CASE_MODRM_MEM_OP(3): /* lidt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE); gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); } tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base)); tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit)); break; CASE_MODRM_OP(4): /* smsw */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0])); if (CODE64(s)) { mod = (modrm >> 6) & 3; ot = (mod != 3 ? MO_16 : s->dflag); } else { ot = MO_16; } gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 0xee: /* rdpkru */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32); tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64); break; case 0xef: /* wrpkru */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64); break; CASE_MODRM_OP(6): /* lmsw */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_helper_lmsw(cpu_env, cpu_T0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(7): /* invlpg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_lea_modrm(env, s, modrm); gen_helper_invlpg(cpu_env, cpu_A0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; case 0xf8: /* swapgs */ #ifdef TARGET_X86_64 if (CODE64(s)) { if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]); tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env, offsetof(CPUX86State, kernelgsbase)); tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, kernelgsbase)); } break; } #endif goto illegal_op; case 0xf9: /* rdtscp */ if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) { goto illegal_op; } gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } gen_helper_rdtscp(cpu_env); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; default: goto unknown_op; } break; case 0x108: /* invd */ case 0x109: /* wbinvd */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD); /* nothing to do */ } break; case 0x63: /* arpl or movslS (x86_64) */ #ifdef TARGET_X86_64 if (CODE64(s)) { int d_ot; /* d_ot is the size of destination */ d_ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { gen_op_mov_v_reg(MO_32, cpu_T0, rm); /* sign extend */ if (d_ot == MO_64) { tcg_gen_ext32s_tl(cpu_T0, cpu_T0); } gen_op_mov_reg_v(d_ot, reg, cpu_T0); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0); gen_op_mov_reg_v(d_ot, reg, cpu_T0); } } else #endif { TCGLabel *label1; TCGv t0, t1, t2, a0; if (!s->pe || s->vm86) goto illegal_op; t0 = tcg_temp_local_new(); t1 = tcg_temp_local_new(); t2 = tcg_temp_local_new(); ot = MO_16; modrm = x86_ldub_code(env, s); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = modrm & 7; if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, t0, cpu_A0); a0 = tcg_temp_local_new(); tcg_gen_mov_tl(a0, cpu_A0); } else { gen_op_mov_v_reg(ot, t0, rm); TCGV_UNUSED(a0); } gen_op_mov_v_reg(ot, t1, reg); tcg_gen_andi_tl(cpu_tmp0, t0, 3); tcg_gen_andi_tl(t1, t1, 3); tcg_gen_movi_tl(t2, 0); label1 = gen_new_label(); tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1); tcg_gen_andi_tl(t0, t0, ~3); tcg_gen_or_tl(t0, t0, t1); tcg_gen_movi_tl(t2, CC_Z); gen_set_label(label1); if (mod != 3) { gen_op_st_v(s, ot, t0, a0); tcg_temp_free(a0); } else { gen_op_mov_reg_v(ot, rm, t0); } gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2); tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(t2); } break; case 0x102: /* lar */ case 0x103: /* lsl */ { TCGLabel *label1; TCGv t0; if (!s->pe || s->vm86) goto illegal_op; ot = dflag != MO_16 ? MO_32 : MO_16; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); t0 = tcg_temp_local_new(); gen_update_cc_op(s); if (b == 0x102) { gen_helper_lar(t0, cpu_env, cpu_T0); } else { gen_helper_lsl(t0, cpu_env, cpu_T0); } tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z); label1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); gen_op_mov_reg_v(ot, reg, t0); gen_set_label(label1); set_cc_op(s, CC_OP_EFLAGS); tcg_temp_free(t0); } break; case 0x118: modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* prefetchnta */ case 1: /* prefetchnt0 */ case 2: /* prefetchnt0 */ case 3: /* prefetchnt0 */ if (mod == 3) goto illegal_op; gen_nop_modrm(env, s, modrm); /* nothing more to do */ break; default: /* nop (multi byte) */ gen_nop_modrm(env, s, modrm); break; } break; case 0x11a: modrm = x86_ldub_code(env, s); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; if (prefixes & PREFIX_REPZ) { /* bndcl */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]); } else if (prefixes & PREFIX_REPNZ) { /* bndcu */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } TCGv_i64 notu = tcg_temp_new_i64(); tcg_gen_not_i64(notu, cpu_bndu[reg]); gen_bndck(env, s, modrm, TCG_COND_GTU, notu); tcg_temp_free_i64(notu); } else if (prefixes & PREFIX_DATA) { /* bndmov -- from reg/mem */ if (reg >= 4 || s->aflag == MO_16) { goto illegal_op; } if (mod == 3) { int reg2 = (modrm & 7) | REX_B(s); if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if (s->flags & HF_MPX_IU_MASK) { tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]); tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]); } } else { gen_lea_modrm(env, s, modrm); if (CODE64(s)) { tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEQ); tcg_gen_addi_tl(cpu_A0, cpu_A0, 8); tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEQ); } else { tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEUL); tcg_gen_addi_tl(cpu_A0, cpu_A0, 4); tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEUL); } /* bnd registers are now in-use */ gen_set_hflag(s, HF_MPX_IU_MASK); } } else if (mod != 3) { /* bndldx */ AddressParts a = gen_lea_modrm_0(env, s, modrm); if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16 || a.base < -1) { goto illegal_op; } if (a.base >= 0) { tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp); } else { tcg_gen_movi_tl(cpu_A0, 0); } gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); if (a.index >= 0) { tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]); } else { tcg_gen_movi_tl(cpu_T0, 0); } if (CODE64(s)) { gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0); tcg_gen_ld_i64(cpu_bndu[reg], cpu_env, offsetof(CPUX86State, mmx_t0.MMX_Q(0))); } else { gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0); tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]); tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32); } gen_set_hflag(s, HF_MPX_IU_MASK); } } gen_nop_modrm(env, s, modrm); break; case 0x11b: modrm = x86_ldub_code(env, s); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3 && (prefixes & PREFIX_REPZ)) { /* bndmk */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } AddressParts a = gen_lea_modrm_0(env, s, modrm); if (a.base >= 0) { tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]); if (!CODE64(s)) { tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]); } } else if (a.base == -1) { /* no base register has lower bound of 0 */ tcg_gen_movi_i64(cpu_bndl[reg], 0); } else { /* rip-relative generates #ud */ goto illegal_op; } tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a)); if (!CODE64(s)) { tcg_gen_ext32u_tl(cpu_A0, cpu_A0); } tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0); /* bnd registers are now in-use */ gen_set_hflag(s, HF_MPX_IU_MASK); break; } else if (prefixes & PREFIX_REPNZ) { /* bndcn */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; } gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]); } else if (prefixes & PREFIX_DATA) { /* bndmov -- to reg/mem */ if (reg >= 4 || s->aflag == MO_16) { goto illegal_op; } if (mod == 3) { int reg2 = (modrm & 7) | REX_B(s); if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if (s->flags & HF_MPX_IU_MASK) { tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]); tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]); } } else { gen_lea_modrm(env, s, modrm); if (CODE64(s)) { tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEQ); tcg_gen_addi_tl(cpu_A0, cpu_A0, 8); tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEQ); } else { tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEUL); tcg_gen_addi_tl(cpu_A0, cpu_A0, 4); tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEUL); } } } else if (mod != 3) { /* bndstx */ AddressParts a = gen_lea_modrm_0(env, s, modrm); if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16 || a.base < -1) { goto illegal_op; } if (a.base >= 0) { tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp); } else { tcg_gen_movi_tl(cpu_A0, 0); } gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); if (a.index >= 0) { tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]); } else { tcg_gen_movi_tl(cpu_T0, 0); } if (CODE64(s)) { gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0, cpu_bndl[reg], cpu_bndu[reg]); } else { gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0, cpu_bndl[reg], cpu_bndu[reg]); } } } gen_nop_modrm(env, s, modrm); break; case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */ modrm = x86_ldub_code(env, s); gen_nop_modrm(env, s, modrm); break; case 0x120: /* mov reg, crN */ case 0x122: /* mov crN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = x86_ldub_code(env, s); /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). * AMD documentation (24594.pdf) and testing of * intel 386 and 486 processors all show that the mod bits * are assumed to be 1's, regardless of actual values. */ rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = MO_64; else ot = MO_32; if ((prefixes & PREFIX_LOCK) && (reg == 0) && (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) { reg = 8; } switch(reg) { case 0: case 2: case 3: case 4: case 8: gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); if (b & 2) { if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } gen_op_mov_v_reg(ot, cpu_T0, rm); gen_helper_write_crN(cpu_env, tcg_const_i32(reg), cpu_T0); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); } gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_start(); } gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg)); gen_op_mov_reg_v(ot, rm, cpu_T0); if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) { gen_io_end(); } } break; default: goto unknown_op; } } break; case 0x121: /* mov reg, drN */ case 0x123: /* mov drN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = x86_ldub_code(env, s); /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). * AMD documentation (24594.pdf) and testing of * intel 386 and 486 processors all show that the mod bits * are assumed to be 1's, regardless of actual values. */ rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = MO_64; else ot = MO_32; if (reg >= 8) { goto illegal_op; } if (b & 2) { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); gen_op_mov_v_reg(ot, cpu_T0, rm); tcg_gen_movi_i32(cpu_tmp2_i32, reg); gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); tcg_gen_movi_i32(cpu_tmp2_i32, reg); gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32); gen_op_mov_reg_v(ot, rm, cpu_T0); } } break; case 0x106: /* clts */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_helper_clts(cpu_env); /* abort block because static cpu state changed */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ case 0x1c3: /* MOVNTI reg, mem */ if (!(s->cpuid_features & CPUID_SSE2)) goto illegal_op; ot = mo_64_32(dflag); modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); break; case 0x1ae: modrm = x86_ldub_code(env, s); switch (modrm) { CASE_MODRM_MEM_OP(0): /* fxsave */ if (!(s->cpuid_features & CPUID_FXSR) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); gen_helper_fxsave(cpu_env, cpu_A0); break; CASE_MODRM_MEM_OP(1): /* fxrstor */ if (!(s->cpuid_features & CPUID_FXSR) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); gen_helper_fxrstor(cpu_env, cpu_A0); break; CASE_MODRM_MEM_OP(2): /* ldmxcsr */ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) { goto illegal_op; } if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32); break; CASE_MODRM_MEM_OP(3): /* stmxcsr */ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) { goto illegal_op; } if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr)); gen_op_st_v(s, MO_32, cpu_T0, cpu_A0); break; CASE_MODRM_MEM_OP(4): /* xsave */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (prefixes & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64); break; CASE_MODRM_MEM_OP(5): /* xrstor */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (prefixes & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64); /* XRSTOR is how MPX is enabled, which changes how we translate. Thus we need to end the TB. */ gen_update_cc_op(s); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } if (prefixes & PREFIX_DATA) { /* clwb */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) { goto illegal_op; } gen_nop_modrm(env, s, modrm); } else { /* xsaveopt */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0 || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; } gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64); } break; CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } if (prefixes & PREFIX_DATA) { /* clflushopt */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) { goto illegal_op; } } else { /* clflush */ if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) || !(s->cpuid_features & CPUID_CLFLUSH)) { goto illegal_op; } } gen_nop_modrm(env, s, modrm); break; case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */ case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */ case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */ case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */ if (CODE64(s) && (prefixes & PREFIX_REPZ) && !(prefixes & PREFIX_LOCK) && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) { TCGv base, treg, src, dst; /* Preserve hflags bits by testing CR4 at runtime. */ tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK); gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32); base = cpu_seg_base[modrm & 8 ? R_GS : R_FS]; treg = cpu_regs[(modrm & 7) | REX_B(s)]; if (modrm & 0x10) { /* wr*base */ dst = base, src = treg; } else { /* rd*base */ dst = treg, src = base; } if (s->dflag == MO_32) { tcg_gen_ext32u_tl(dst, src); } else { tcg_gen_mov_tl(dst, src); } break; } goto unknown_op; case 0xf8: /* sfence / pcommit */ if (prefixes & PREFIX_DATA) { /* pcommit */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } break; } /* fallthru */ case 0xf9 ... 0xff: /* sfence */ if (!(s->cpuid_features & CPUID_SSE) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); break; case 0xe8 ... 0xef: /* lfence */ if (!(s->cpuid_features & CPUID_SSE) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC); break; case 0xf0 ... 0xf7: /* mfence */ if (!(s->cpuid_features & CPUID_SSE2) || (prefixes & PREFIX_LOCK)) { goto illegal_op; } tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); break; default: goto unknown_op; } break; case 0x10d: /* 3DNow! prefetch(w) */ modrm = x86_ldub_code(env, s); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_nop_modrm(env, s, modrm); break; case 0x1aa: /* rsm */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM); if (!(s->flags & HF_SMM_MASK)) goto illegal_op; gen_update_cc_op(s); gen_jmp_im(s->pc - s->cs_base); gen_helper_rsm(cpu_env); gen_eob(s); break; case 0x1b8: /* SSE4.2 popcnt */ if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) != PREFIX_REPZ) goto illegal_op; if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) goto illegal_op; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | rex_r; if (s->prefix & PREFIX_DATA) { ot = MO_16; } else { ot = mo_64_32(dflag); } gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_extu(ot, cpu_T0); tcg_gen_mov_tl(cpu_cc_src, cpu_T0); tcg_gen_ctpop_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(ot, reg, cpu_T0); set_cc_op(s, CC_OP_POPCNT); break; case 0x10e ... 0x10f: /* 3DNow! instructions, ignore prefixes */ s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); case 0x110 ... 0x117: case 0x128 ... 0x12f: case 0x138 ... 0x13a: case 0x150 ... 0x179: case 0x17c ... 0x17f: case 0x1c2: case 0x1c4 ... 0x1c6: case 0x1d0 ... 0x1fe: gen_sse(env, s, b, pc_start, rex_r); break; default: goto unknown_op; } return s->pc; illegal_op: gen_illegal_opcode(s); return s->pc; unknown_op: gen_unknown_opcode(env, s); return s->pc; }
0
1,096
static int kvm_mce_in_exception(CPUState *env) { struct kvm_msr_entry msr_mcg_status = { .index = MSR_MCG_STATUS, }; int r; r = kvm_get_msr(env, &msr_mcg_status, 1); if (r == -1 || r == 0) { return -1; } return !!(msr_mcg_status.data & MCG_STATUS_MCIP); }
0
1,097
static void count_cpreg(gpointer key, gpointer opaque) { ARMCPU *cpu = opaque; uint64_t regidx; const ARMCPRegInfo *ri; regidx = *(uint32_t *)key; ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); if (!(ri->type & ARM_CP_NO_MIGRATE)) { cpu->cpreg_array_len++; } }
0
1,098
static inline uint32_t efsctsiz(uint32_t val) { CPU_FloatU u; u.l = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float32_is_nan(u.f))) return 0; return float32_to_int32_round_to_zero(u.f, &env->vec_status); }
0
1,099
static void scsi_unrealize(SCSIDevice *s, Error **errp) { scsi_device_purge_requests(s, SENSE_CODE(NO_SENSE)); blockdev_mark_auto_del(s->conf.blk); }
0
1,100
int slirp_is_inited(void) { return slirp_inited; }
0
1,102
int pit_get_gate(PITState *pit, int channel) { PITChannelState *s = &pit->channels[channel]; return s->gate; }
0
1,103
void pci_register_bar(PCIDevice *pci_dev, int region_num, uint8_t type, MemoryRegion *memory) { PCIIORegion *r; uint32_t addr; /* offset in pci config space */ uint64_t wmask; pcibus_t size = memory_region_size(memory); assert(region_num >= 0); assert(region_num < PCI_NUM_REGIONS); if (size & (size-1)) { fprintf(stderr, "ERROR: PCI region size must be pow2 " "type=0x%x, size=0x%"FMT_PCIBUS"\n", type, size); exit(1); } r = &pci_dev->io_regions[region_num]; r->addr = PCI_BAR_UNMAPPED; r->size = size; r->type = type; r->memory = memory; r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO ? pci_dev->bus->address_space_io : pci_dev->bus->address_space_mem; wmask = ~(size - 1); if (region_num == PCI_ROM_SLOT) { /* ROM enable bit is writable */ wmask |= PCI_ROM_ADDRESS_ENABLE; } addr = pci_bar(pci_dev, region_num); pci_set_long(pci_dev->config + addr, type); if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) && r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { pci_set_quad(pci_dev->wmask + addr, wmask); pci_set_quad(pci_dev->cmask + addr, ~0ULL); } else { pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff); pci_set_long(pci_dev->cmask + addr, 0xffffffff); } }
0
1,104
static int configure_accelerator(void) { const char *p = NULL; char buf[10]; int i, ret; bool accel_initalised = 0; bool init_failed = 0; QemuOptsList *list = qemu_find_opts("machine"); if (!QTAILQ_EMPTY(&list->head)) { p = qemu_opt_get(QTAILQ_FIRST(&list->head), "accel"); } if (p == NULL) { /* Use the default "accelerator", tcg */ p = "tcg"; } while (!accel_initalised && *p != '\0') { if (*p == ':') { p++; } p = get_opt_name(buf, sizeof (buf), p, ':'); for (i = 0; i < ARRAY_SIZE(accel_list); i++) { if (strcmp(accel_list[i].opt_name, buf) == 0) { ret = accel_list[i].init(); if (ret < 0) { init_failed = 1; if (!accel_list[i].available()) { printf("%s not supported for this target\n", accel_list[i].name); } else { fprintf(stderr, "failed to initialize %s: %s\n", accel_list[i].name, strerror(-ret)); } } else { accel_initalised = 1; *(accel_list[i].allowed) = 1; } break; } } if (i == ARRAY_SIZE(accel_list)) { fprintf(stderr, "\"%s\" accelerator does not exist.\n", buf); } } if (!accel_initalised) { fprintf(stderr, "No accelerator found!\n"); exit(1); } if (init_failed) { fprintf(stderr, "Back to %s accelerator.\n", accel_list[i].name); } return !accel_initalised; }