Columns: target (int64, range 0–1) | idx (int64, range 0–27.3k) | func (string, lengths 23–97k)
target: 1 | idx: 865

static int rtp_parse_one_packet(RTPDemuxContext *s, AVPacket *pkt,
                                uint8_t **bufptr, int len)
{
    uint8_t *buf = bufptr ? *bufptr : NULL;
    int flags = 0;
    uint32_t timestamp;
    int rv = 0;

    if (!buf) {
        /* If parsing of the previous packet actually returned 0 or an error,
         * there's nothing more to be parsed from that packet, but we may have
         * indicated that we can return the next enqueued packet. */
        if (s->prev_ret <= 0)
            return rtp_parse_queued_packet(s, pkt);
        /* return the next packets, if any */
        if (s->handler && s->handler->parse_packet) {
            /* timestamp should be overwritten by parse_packet, if not,
             * the packet is left with pts == AV_NOPTS_VALUE */
            timestamp = RTP_NOTS_VALUE;
            rv = s->handler->parse_packet(s->ic, s->dynamic_protocol_context,
                                          s->st, pkt, &timestamp, NULL, 0, 0,
                                          flags);
            finalize_packet(s, pkt, timestamp);
            return rv;
        }
    }

    if (len < 12)
        return -1;

    if ((buf[0] & 0xc0) != (RTP_VERSION << 6))
        return -1;
    if (RTP_PT_IS_RTCP(buf[1])) {
        return rtcp_parse_packet(s, buf, len);
    }

    if (s->st) {
        int64_t received = av_gettime_relative();
        uint32_t arrival_ts = av_rescale_q(received, AV_TIME_BASE_Q,
                                           s->st->time_base);
        timestamp = AV_RB32(buf + 4);
        // Calculate the jitter immediately, before queueing the packet
        // into the reordering queue.
        rtcp_update_jitter(&s->statistics, timestamp, arrival_ts);
    }

    if ((s->seq == 0 && !s->queue) || s->queue_size <= 1) {
        /* First packet, or no reordering */
        return rtp_parse_packet_internal(s, pkt, buf, len);
    } else {
        uint16_t seq = AV_RB16(buf + 2);
        int16_t diff = seq - s->seq;
        if (diff < 0) {
            /* Packet older than the previously emitted one, drop */
            av_log(s->st ? s->st->codec : NULL, AV_LOG_WARNING,
                   "RTP: dropping old packet received too late\n");
            return -1;
        } else if (diff <= 1) {
            /* Correct packet */
            rv = rtp_parse_packet_internal(s, pkt, buf, len);
            return rv;
        } else {
            /* Still missing some packet, enqueue this one. */
            enqueue_packet(s, buf, len);
            *bufptr = NULL;
            /* Return the first enqueued packet if the queue is full,
             * even if we're missing something */
            if (s->queue_len >= s->queue_size) {
                av_log(s->st ? s->st->codec : NULL, AV_LOG_WARNING,
                       "jitter buffer full\n");
                return rtp_parse_queued_packet(s, pkt);
            }
            return -1;
        }
    }
}

target: 1 | idx: 866

static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size)
{
    register const uint8_t *s = src;
    register uint8_t *d = dst;
    register const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
#ifdef HAVE_MMX
    __asm __volatile(PREFETCH" %0"::"m"(*s));
    __asm __volatile("movq %0, %%mm7"::"m"(mask15rg));
    __asm __volatile("movq %0, %%mm6"::"m"(mask15b));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm __volatile(
            PREFETCH" 32%1\n\t"
            "movq %1, %%mm0\n\t"
            "movq 8%1, %%mm2\n\t"
            "movq %%mm0, %%mm1\n\t"
            "movq %%mm2, %%mm3\n\t"
            "psrlq $1, %%mm0\n\t"
            "psrlq $1, %%mm2\n\t"
            "pand %%mm7, %%mm0\n\t"
            "pand %%mm7, %%mm2\n\t"
            "pand %%mm6, %%mm1\n\t"
            "pand %%mm6, %%mm3\n\t"
            "por %%mm1, %%mm0\n\t"
            "por %%mm3, %%mm2\n\t"
            MOVNTQ" %%mm0, %0\n\t"
            MOVNTQ" %%mm2, 8%0"
            :"=m"(*d)
            :"m"(*s)
        );
        d += 16;
        s += 16;
    }
    __asm __volatile(SFENCE:::"memory");
    __asm __volatile(EMMS:::"memory");
#endif
    mm_end = end - 3;
    while (s < mm_end) {
        register uint32_t x = *((uint32_t *)s);
        *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
        s += 4;
        d += 4;
    }
    if (s < end) {
        register uint16_t x = *((uint16_t *)s);
        *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
        s += 2;
        d += 2;
    }
}

target: 0 | idx: 868

static void xics_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = xics_realize;
    dc->props = xics_properties;
    dc->reset = xics_reset;
}

target: 0 | idx: 869

void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner,
                              MemHotplugState *state)
{
    MachineState *machine = MACHINE(qdev_get_machine());

    state->dev_count = machine->ram_slots;

    if (!state->dev_count) {
        return;
    }

    state->devs = g_malloc0(sizeof(*state->devs) * state->dev_count);
    memory_region_init_io(&state->io, owner, &acpi_memory_hotplug_ops, state,
                          "acpi-mem-hotplug", ACPI_MEMORY_HOTPLUG_IO_LEN);
    memory_region_add_subregion(as, ACPI_MEMORY_HOTPLUG_BASE, &state->io);
}

target: 0 | idx: 870

static void test_qemu_strtoul_correct(void)
{
    const char *str = "12345 foo";
    char f = 'X';
    const char *endptr = &f;
    unsigned long res = 999;
    int err;

    err = qemu_strtoul(str, &endptr, 0, &res);

    g_assert_cmpint(err, ==, 0);
    g_assert_cmpint(res, ==, 12345);
    g_assert(endptr == str + 5);
}

target: 0 | idx: 871

Aml *aml_shiftleft(Aml *arg1, Aml *count)
{
    Aml *var = aml_opcode(0x79 /* ShiftLeftOp */);
    aml_append(var, arg1);
    aml_append(var, count);
    build_append_byte(var->buf, 0x00); /* NullNameOp */
    return var;
}

target: 0 | idx: 872

static void nfs_process_write(void *arg)
{
    NFSClient *client = arg;

    aio_context_acquire(client->aio_context);
    nfs_service(client->context, POLLOUT);
    nfs_set_events(client);
    aio_context_release(client->aio_context);
}

target: 0 | idx: 873

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    if (unlikely(atomic_read(&cpu->exit_request))) {
        return;
    }

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu->icount_decr.u32);
    atomic_set(&cpu->icount_decr.u16.high, 0);
    if (insns_left < 0) {
        /* Something asked us to stop executing
         * chained TBs; just continue round the main
         * loop. Whatever requested the exit will also
         * have set something else (eg exit_request or
         * interrupt_request) which we will handle
         * next time around the loop. But we need to
         * ensure the zeroing of tcg_exit_req (see cpu_tb_exec)
         * comes before the next read of cpu->exit_request
         * or cpu->interrupt_request.
         */
        smp_mb();
        return;
    }

    /* Instruction counter expired. */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    if (cpu->icount_extra) {
        /* Refill decrementer and continue execution. */
        cpu->icount_extra += insns_left;
        insns_left = MIN(0xffff, cpu->icount_extra);
        cpu->icount_extra -= insns_left;
        cpu->icount_decr.u16.low = insns_left;
    } else {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

target: 0 | idx: 874

static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint8_t *value, uint8_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint8_t valid_emu_mask = 0;

    /* emulate byte register */
    valid_emu_mask = reg->emu_mask & valid_mask;
    *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);

    return 0;
}

target: 0 | idx: 875

static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
{
    uint16_t flags;
    uint8_t type;

    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;

    if (type != PCI_EXP_TYPE_ENDPOINT &&
        type != PCI_EXP_TYPE_LEG_END &&
        type != PCI_EXP_TYPE_RC_END) {
        error_report("vfio: Assignment of PCIe type 0x%x "
                     "devices is not currently supported", type);
        return -EINVAL;
    }

    if (!pci_bus_is_express(vdev->pdev.bus)) {
        /*
         * Use express capability as-is on PCI bus. It doesn't make much
         * sense to even expose, but some drivers (ex. tg3) depend on it
         * and guests don't seem to be particular about it. We'll need
         * to revist this or force express devices to express buses if we
         * ever expose an IOMMU to the guest.
         */
    } else if (pci_bus_is_root(vdev->pdev.bus)) {
        /*
         * On a Root Complex bus Endpoints become Root Complex Integrated
         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
         */
        if (type == PCI_EXP_TYPE_ENDPOINT) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_RC_END << 4,
                                   PCI_EXP_FLAGS_TYPE);

            /* Link Capabilities, Status, and Control goes away */
            if (size > PCI_EXP_LNKCTL) {
                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);

#ifndef PCI_EXP_LNKCAP2
#define PCI_EXP_LNKCAP2 44
#endif
#ifndef PCI_EXP_LNKSTA2
#define PCI_EXP_LNKSTA2 50
#endif
                /* Link 2 Capabilities, Status, and Control goes away */
                if (size > PCI_EXP_LNKCAP2) {
                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
                }
            }
        } else if (type == PCI_EXP_TYPE_LEG_END) {
            /*
             * Legacy endpoints don't belong on the root complex. Windows
             * seems to be happier with devices if we skip the capability.
             */
            return 0;
        }
    } else {
        /*
         * Convert Root Complex Integrated Endpoints to regular endpoints.
         * These devices don't support LNK/LNK2 capabilities, so make them up.
         */
        if (type == PCI_EXP_TYPE_RC_END) {
            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
                                   PCI_EXP_TYPE_ENDPOINT << 4,
                                   PCI_EXP_FLAGS_TYPE);
            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
        }

        /* Mark the Link Status bits as emulated to allow virtual negotiation */
        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
                               pci_get_word(vdev->pdev.config + pos +
                                            PCI_EXP_LNKSTA),
                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    }

    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
    if (pos >= 0) {
        vdev->pdev.exp.exp_cap = pos;
    }

    return pos;
}

target: 0 | idx: 876

static UserDefNested *nested_struct_create(void)
{
    UserDefNested *udnp = g_malloc0(sizeof(*udnp));
    udnp->string0 = strdup("test_string0");
    udnp->dict1.string1 = strdup("test_string1");
    udnp->dict1.dict2.userdef1 = g_malloc0(sizeof(UserDefOne));
    udnp->dict1.dict2.userdef1->base = g_new0(UserDefZero, 1);
    udnp->dict1.dict2.userdef1->base->integer = 42;
    udnp->dict1.dict2.userdef1->string = strdup("test_string");
    udnp->dict1.dict2.string2 = strdup("test_string2");
    udnp->dict1.has_dict3 = true;
    udnp->dict1.dict3.userdef2 = g_malloc0(sizeof(UserDefOne));
    udnp->dict1.dict3.userdef2->base = g_new0(UserDefZero, 1);
    udnp->dict1.dict3.userdef2->base->integer = 43;
    udnp->dict1.dict3.userdef2->string = strdup("test_string");
    udnp->dict1.dict3.string3 = strdup("test_string3");
    return udnp;
}

target: 0 | idx: 877

static int qcow2_create(const char *filename, QEMUOptionParameter *options)
{
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;
    uint64_t sectors = 0;
    int flags = 0;
    size_t cluster_size = DEFAULT_CLUSTER_SIZE;
    int prealloc = 0;

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            sectors = options->value.n / 512;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) {
            flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "metadata")) {
                prealloc = 1;
            } else {
                fprintf(stderr, "Invalid preallocation mode: '%s'\n",
                        options->value.s);
                return -EINVAL;
            }
        }
        options++;
    }

    if (backing_file && prealloc) {
        fprintf(stderr, "Backing file and preallocation cannot be used at "
                "the same time\n");
        return -EINVAL;
    }

    return qcow2_create2(filename, sectors, backing_file, backing_fmt, flags,
                         cluster_size, prealloc, options);
}

target: 0 | idx: 878

static int rpza_decode_init(AVCodecContext *avctx)
{
    RpzaContext *s = avctx->priv_data;

    s->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_RGB555;
    dsputil_init(&s->dsp, avctx);

    s->frame.data[0] = NULL;

    return 0;
}

target: 0 | idx: 879

static void qmp_output_type_str(Visitor *v, const char *name, char **obj,
                                Error **errp)
{
    QmpOutputVisitor *qov = to_qov(v);
    if (*obj) {
        qmp_output_add(qov, name, qstring_from_str(*obj));
    } else {
        qmp_output_add(qov, name, qstring_from_str(""));
    }
}

target: 0 | idx: 880

static void v9fs_open_post_lstat(V9fsState *s, V9fsOpenState *vs, int err)
{
    int flags;

    if (err) {
        err = -errno;
        goto out;
    }

    stat_to_qid(&vs->stbuf, &vs->qid);

    if (S_ISDIR(vs->stbuf.st_mode)) {
        vs->fidp->fs.dir = v9fs_do_opendir(s, &vs->fidp->path);
        v9fs_open_post_opendir(s, vs, err);
    } else {
        if (s->proto_version == V9FS_PROTO_2000L) {
            if (!valid_flags(vs->mode)) {
                err = -EINVAL;
                goto out;
            }
            flags = vs->mode;
        } else {
            flags = omode_to_uflags(vs->mode);
        }
        vs->fidp->fs.fd = v9fs_do_open(s, &vs->fidp->path, flags);
        v9fs_open_post_open(s, vs, err);
    }
    return;
out:
    complete_pdu(s, vs->pdu, err);
    qemu_free(vs);
}

target: 0 | idx: 881

static void spapr_cpu_core_unrealizefn(DeviceState *dev, Error **errp)
{
    sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
    sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(dev));
    size_t size = object_type_get_instance_size(scc->cpu_type);
    CPUCore *cc = CPU_CORE(dev);
    int i;

    for (i = 0; i < cc->nr_threads; i++) {
        void *obj = sc->threads + i * size;
        DeviceState *dev = DEVICE(obj);
        CPUState *cs = CPU(dev);
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        spapr_cpu_destroy(cpu);
        object_unparent(cpu->intc);
        cpu_remove_sync(cs);
        object_unparent(obj);
    }
    g_free(sc->threads);
}

target: 0 | idx: 884

static uint32_t taihu_cpld_readw (void *opaque, hwaddr addr)
{
    uint32_t ret;

    ret = taihu_cpld_readb(opaque, addr) << 8;
    ret |= taihu_cpld_readb(opaque, addr + 1);

    return ret;
}

target: 0 | idx: 885

int vfio_container_ioctl(AddressSpace *as, int32_t groupid,
                         int req, void *param)
{
    /* We allow only certain ioctls to the container */
    switch (req) {
    case VFIO_CHECK_EXTENSION:
    case VFIO_IOMMU_SPAPR_TCE_GET_INFO:
    case VFIO_EEH_PE_OP:
        break;
    default:
        /* Return an error on unknown requests */
        error_report("vfio: unsupported ioctl %X", req);
        return -1;
    }

    return vfio_container_do_ioctl(as, groupid, req, param);
}

target: 0 | idx: 887

static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
                                   uint8_t devfn, hwaddr addr, bool is_write,
                                   IOMMUTLBEntry *entry)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    VTDContextEntry ce;
    uint8_t bus_num = pci_bus_num(bus);
    VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
    uint64_t slpte;
    uint32_t level;
    uint16_t source_id = vtd_make_source_id(bus_num, devfn);
    int ret_fr;
    bool is_fpd_set = false;
    bool reads = true;
    bool writes = true;
    VTDIOTLBEntry *iotlb_entry;

    /* Check if the request is in interrupt address range */
    if (vtd_is_interrupt_addr(addr)) {
        if (is_write) {
            /* FIXME: since we don't know the length of the access here, we
             * treat Non-DWORD length write requests without PASID as
             * interrupt requests, too. Withoud interrupt remapping support,
             * we just use 1:1 mapping.
             */
            VTD_DPRINTF(MMU, "write request to interrupt address "
                        "gpa 0x%"PRIx64, addr);
            entry->iova = addr & VTD_PAGE_MASK_4K;
            entry->translated_addr = addr & VTD_PAGE_MASK_4K;
            entry->addr_mask = ~VTD_PAGE_MASK_4K;
            entry->perm = IOMMU_WO;
            return;
        } else {
            VTD_DPRINTF(GENERAL, "error: read request from interrupt address "
                        "gpa 0x%"PRIx64, addr);
            vtd_report_dmar_fault(s, source_id, addr, VTD_FR_READ, is_write);
            return;
        }
    }
    /* Try to fetch slpte form IOTLB */
    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
    if (iotlb_entry) {
        VTD_DPRINTF(CACHE, "hit iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
                    " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr,
                    iotlb_entry->slpte, iotlb_entry->domain_id);
        slpte = iotlb_entry->slpte;
        reads = iotlb_entry->read_flags;
        writes = iotlb_entry->write_flags;
        goto out;
    }
    /* Try to fetch context-entry from cache first */
    if (cc_entry->context_cache_gen == s->context_cache_gen) {
        VTD_DPRINTF(CACHE, "hit context-cache bus %d devfn %d "
                    "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 ")",
                    bus_num, devfn, cc_entry->context_entry.hi,
                    cc_entry->context_entry.lo, cc_entry->context_cache_gen);
        ce = cc_entry->context_entry;
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
    } else {
        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (ret_fr) {
            ret_fr = -ret_fr;
            if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
                VTD_DPRINTF(FLOG, "fault processing is disabled for DMA "
                            "requests through this context-entry "
                            "(with FPD Set)");
            } else {
                vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
            }
            return;
        }
        /* Update context-cache */
        VTD_DPRINTF(CACHE, "update context-cache bus %d devfn %d "
                    "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 "->%"PRIu32 ")",
                    bus_num, devfn, ce.hi, ce.lo,
                    cc_entry->context_cache_gen, s->context_cache_gen);
        cc_entry->context_entry = ce;
        cc_entry->context_cache_gen = s->context_cache_gen;
    }

    ret_fr = vtd_gpa_to_slpte(&ce, addr, is_write, &slpte, &level,
                              &reads, &writes);
    if (ret_fr) {
        ret_fr = -ret_fr;
        if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
            VTD_DPRINTF(FLOG, "fault processing is disabled for DMA requests "
                        "through this context-entry (with FPD Set)");
        } else {
            vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
        }
        return;
    }

    vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
                     reads, writes);
out:
    entry->iova = addr & VTD_PAGE_MASK_4K;
    entry->translated_addr = vtd_get_slpte_addr(slpte) & VTD_PAGE_MASK_4K;
    entry->addr_mask = ~VTD_PAGE_MASK_4K;
    entry->perm = (writes ? 2 : 0) + (reads ? 1 : 0);
}

target: 0 | idx: 888

void call_pal (CPUState *env, int palcode)
{
    target_ulong ret;

    if (logfile != NULL)
        fprintf(logfile, "%s: palcode %02x\n", __func__, palcode);
    switch (palcode) {
    case 0x83:
        /* CALLSYS */
        if (logfile != NULL)
            fprintf(logfile, "CALLSYS n " TARGET_FMT_ld "\n", env->ir[0]);
        ret = do_syscall(env, env->ir[IR_V0], env->ir[IR_A0], env->ir[IR_A1],
                         env->ir[IR_A2], env->ir[IR_A3], env->ir[IR_A4],
                         env->ir[IR_A5]);
        if (ret >= 0) {
            env->ir[IR_A3] = 0;
            env->ir[IR_V0] = ret;
        } else {
            env->ir[IR_A3] = 1;
            env->ir[IR_V0] = -ret;
        }
        break;
    case 0x9E:
        /* RDUNIQUE */
        env->ir[IR_V0] = env->unique;
        if (logfile != NULL)
            fprintf(logfile, "RDUNIQUE: " TARGET_FMT_lx "\n", env->unique);
        break;
    case 0x9F:
        /* WRUNIQUE */
        env->unique = env->ir[IR_A0];
        if (logfile != NULL)
            fprintf(logfile, "WRUNIQUE: " TARGET_FMT_lx "\n", env->unique);
        break;
    default:
        if (logfile != NULL)
            fprintf(logfile, "%s: unhandled palcode %02x\n", __func__, palcode);
        exit(1);
    }
}

target: 0 | idx: 889

static void RENAME(yuv2yuyv422_1)(SwsContext *c, const uint16_t *buf0,
                                  const uint16_t *ubuf0, const uint16_t *ubuf1,
                                  const uint16_t *vbuf0, const uint16_t *vbuf1,
                                  const uint16_t *abuf0, uint8_t *dest,
                                  int dstW, int uvalpha,
                                  enum PixelFormat dstFormat, int flags, int y)
{
    const uint16_t *buf1 = buf0; //FIXME needed for RGB1/BGR1

    if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2PACKED1(%%REGBP, %5)
            WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    } else {
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2PACKED1b(%%REGBP, %5)
            WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}

target: 0 | idx: 890

yuv2gray16_1_c_template(SwsContext *c, const uint16_t *buf0,
                        const uint16_t *ubuf0, const uint16_t *ubuf1,
                        const uint16_t *vbuf0, const uint16_t *vbuf1,
                        const uint16_t *abuf0, uint8_t *dest, int dstW,
                        int uvalpha, enum PixelFormat dstFormat,
                        int flags, int y, enum PixelFormat target)
{
    int i;

    for (i = 0; i < (dstW >> 1); i++) {
        const int i2 = 2 * i;
        int Y1 = buf0[i2  ] << 1;
        int Y2 = buf0[i2+1] << 1;

        output_pixel(&dest[2 * i2 + 0], Y1);
        output_pixel(&dest[2 * i2 + 2], Y2);
    }
}

target: 0 | idx: 891

uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

target: 0 | idx: 892

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

target: 0 | idx: 893

void async_context_push(void)
{
    struct AsyncContext *new = qemu_mallocz(sizeof(*new));
    new->parent = async_context;
    new->id = async_context->id + 1;
    async_context = new;
}

target: 0 | idx: 894

static int h264_extradata_to_annexb(AVCodecContext *avctx, const int padding)
{
    uint16_t unit_size;
    uint64_t total_size = 0;
    uint8_t *out = NULL, unit_nb, sps_done = 0,
            sps_seen = 0, pps_seen = 0;
    const uint8_t *extradata = avctx->extradata + 4;
    static const uint8_t nalu_header[4] = { 0, 0, 0, 1 };
    int length_size = (*extradata++ & 0x3) + 1; // retrieve length coded size

    /* retrieve sps and pps unit(s) */
    unit_nb = *extradata++ & 0x1f; /* number of sps unit(s) */
    if (!unit_nb) {
        goto pps;
    } else {
        sps_seen = 1;
    }

    while (unit_nb--) {
        void *tmp;

        unit_size = AV_RB16(extradata);
        total_size += unit_size + 4;
        if (total_size > INT_MAX - padding ||
            extradata + 2 + unit_size > avctx->extradata +
            avctx->extradata_size) {
            av_free(out);
            return AVERROR(EINVAL);
        }
        tmp = av_realloc(out, total_size + padding);
        if (!tmp) {
            av_free(out);
            return AVERROR(ENOMEM);
        }
        out = tmp;
        memcpy(out + total_size - unit_size - 4, nalu_header, 4);
        memcpy(out + total_size - unit_size, extradata + 2, unit_size);
        extradata += 2 + unit_size;
pps:
        if (!unit_nb && !sps_done++) {
            unit_nb = *extradata++; /* number of pps unit(s) */
            if (unit_nb)
                pps_seen = 1;
        }
    }

    if (out)
        memset(out + total_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    if (!sps_seen)
        av_log(avctx, AV_LOG_WARNING,
               "Warning: SPS NALU missing or invalid. "
               "The resulting stream may not play.\n");

    if (!pps_seen)
        av_log(avctx, AV_LOG_WARNING,
               "Warning: PPS NALU missing or invalid. "
               "The resulting stream may not play.\n");

    av_free(avctx->extradata);
    avctx->extradata      = out;
    avctx->extradata_size = total_size;

    return length_size;
}

target: 0 | idx: 895

static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 4 || !memory_region_is_ram(section->mr) || section->readonly) {
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

target: 0 | idx: 896

static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
                      const uint8_t *buf, int nb_sectors)
{
    BDRVVmdkState *s = bs->opaque;
    VmdkExtent *extent = NULL;
    int n;
    int64_t index_in_cluster;
    uint64_t cluster_offset;
    VmdkMetaData m_data;

    if (sector_num > bs->total_sectors) {
        fprintf(stderr,
                "(VMDK) Wrong offset: sector_num=0x%" PRIx64
                " total_sectors=0x%" PRIx64 "\n",
                sector_num, bs->total_sectors);
        return -1;
    }

    while (nb_sectors > 0) {
        extent = find_extent(s, sector_num, extent);
        if (!extent) {
            return -EIO;
        }
        cluster_offset = get_cluster_offset(bs, extent, &m_data,
                                            sector_num << 9, 1);
        if (!cluster_offset) {
            return -1;
        }
        index_in_cluster = sector_num % extent->cluster_sectors;
        n = extent->cluster_sectors - index_in_cluster;
        if (n > nb_sectors) {
            n = nb_sectors;
        }

        if (bdrv_pwrite(bs->file,
                        cluster_offset + index_in_cluster * 512,
                        buf, n * 512) != n * 512) {
            return -1;
        }
        if (m_data.valid) {
            /* update L2 tables */
            if (vmdk_L2update(extent, &m_data) == -1) {
                return -1;
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;

        // update CID on the first write every time the virtual disk is opened
        if (!s->cid_updated) {
            vmdk_write_cid(bs, time(NULL));
            s->cid_updated = true;
        }
    }
    return 0;
}

target: 0 | idx: 897

static inline void gen_evfsabs(DisasContext *ctx)
{
    if (unlikely(!ctx->spe_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_APU);
        return;
    }
#if defined(TARGET_PPC64)
    tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    ~0x8000000080000000LL);
#else
    tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    ~0x80000000);
    tcg_gen_andi_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
                    ~0x80000000);
#endif
}

target: 0 | idx: 898

void usb_ep_reset(USBDevice *dev)
{
    int ep;

    dev->ep_ctl.nr = 0;
    dev->ep_ctl.type = USB_ENDPOINT_XFER_CONTROL;
    dev->ep_ctl.ifnum = 0;
    dev->ep_ctl.dev = dev;
    dev->ep_ctl.pipeline = false;
    for (ep = 0; ep < USB_MAX_ENDPOINTS; ep++) {
        dev->ep_in[ep].nr = ep + 1;
        dev->ep_out[ep].nr = ep + 1;
        dev->ep_in[ep].pid = USB_TOKEN_IN;
        dev->ep_out[ep].pid = USB_TOKEN_OUT;
        dev->ep_in[ep].type = USB_ENDPOINT_XFER_INVALID;
        dev->ep_out[ep].type = USB_ENDPOINT_XFER_INVALID;
        dev->ep_in[ep].ifnum = 0;
        dev->ep_out[ep].ifnum = 0;
        dev->ep_in[ep].dev = dev;
        dev->ep_out[ep].dev = dev;
        dev->ep_in[ep].pipeline = false;
        dev->ep_out[ep].pipeline = false;
    }
}

target: 0 | idx: 900

int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode,
                          const char *name, V9fsSynthNode **result)
{
    int ret;
    V9fsSynthNode *node, *tmp;

    if (!v9fs_synth_fs) {
        return EAGAIN;
    }
    if (!name || (strlen(name) >= NAME_MAX)) {
        return EINVAL;
    }
    if (!parent) {
        parent = &v9fs_synth_root;
    }
    qemu_mutex_lock(&v9fs_synth_mutex);
    QLIST_FOREACH(tmp, &parent->child, sibling) {
        if (!strcmp(tmp->name, name)) {
            ret = EEXIST;
            goto err_out;
        }
    }
    /* Add the name */
    node = v9fs_add_dir_node(parent, mode, name, NULL, v9fs_synth_node_count++);
    v9fs_add_dir_node(node, parent->attr->mode, "..",
                      parent->attr, parent->attr->inode);
    v9fs_add_dir_node(node, node->attr->mode, ".",
                      node->attr, node->attr->inode);
    *result = node;
    ret = 0;
err_out:
    qemu_mutex_unlock(&v9fs_synth_mutex);
    return ret;
}

target: 0 | idx: 901

static void megasas_handle_frame(MegasasState *s, uint64_t frame_addr,
                                 uint32_t frame_count)
{
    uint8_t frame_status = MFI_STAT_INVALID_CMD;
    uint64_t frame_context;
    MegasasCmd *cmd;

    /*
     * Always read 64bit context, top bits will be
     * masked out if required in megasas_enqueue_frame()
     */
    frame_context = megasas_frame_get_context(s, frame_addr);

    cmd = megasas_enqueue_frame(s, frame_addr, frame_context, frame_count);
    if (!cmd) {
        /* reply queue full */
        trace_megasas_frame_busy(frame_addr);
        megasas_frame_set_scsi_status(s, frame_addr, BUSY);
        megasas_frame_set_cmd_status(s, frame_addr, MFI_STAT_SCSI_DONE_WITH_ERROR);
        megasas_complete_frame(s, frame_context);
        s->event_count++;
        return;
    }
    switch (cmd->frame->header.frame_cmd) {
    case MFI_CMD_INIT:
        frame_status = megasas_init_firmware(s, cmd);
        break;
    case MFI_CMD_DCMD:
        frame_status = megasas_handle_dcmd(s, cmd);
        break;
    case MFI_CMD_ABORT:
        frame_status = megasas_handle_abort(s, cmd);
        break;
    case MFI_CMD_PD_SCSI_IO:
        frame_status = megasas_handle_scsi(s, cmd, 0);
        break;
    case MFI_CMD_LD_SCSI_IO:
        frame_status = megasas_handle_scsi(s, cmd, 1);
        break;
    case MFI_CMD_LD_READ:
    case MFI_CMD_LD_WRITE:
        frame_status = megasas_handle_io(s, cmd);
        break;
    default:
        trace_megasas_unhandled_frame_cmd(cmd->index,
                                          cmd->frame->header.frame_cmd);
        s->event_count++;
        break;
    }
    if (frame_status != MFI_STAT_INVALID_STATUS) {
        if (cmd->frame) {
            cmd->frame->header.cmd_status = frame_status;
        } else {
            megasas_frame_set_cmd_status(s, frame_addr, frame_status);
        }
        megasas_unmap_frame(s, cmd);
        megasas_complete_frame(s, cmd->context);
    }
}

target: 0 | idx: 902

static void intel_hda_mmio_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    IntelHDAState *d = opaque;
    const IntelHDAReg *reg = intel_hda_reg_find(d, addr);

    intel_hda_reg_write(d, reg, val, 0xffff);
}

target: 0 | idx: 903

void pcnet_common_cleanup(PCNetState *d)
{
    d->nic = NULL;
}

target: 0 | idx: 904

static void lan9118_16bit_mode_write(void *opaque, target_phys_addr_t offset,
                                     uint64_t val, unsigned size)
{
    switch (size) {
    case 2:
        lan9118_writew(opaque, offset, (uint32_t)val);
        return;
    case 4:
        lan9118_writel(opaque, offset, val, size);
        return;
    }

    hw_error("lan9118_write: Bad size 0x%x\n", size);
}

target: 0 | idx: 905

av_cold int ff_h264_decode_init(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int ret;

    ret = h264_init_context(avctx, h);
    if (ret < 0)
        return ret;

    memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
    memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));

    /* set defaults */
    // s->decode_mb = ff_h263_decode_mb;
    if (!avctx->has_b_frames)
        h->low_delay = 1;

    ff_h264_decode_init_vlc();

    ff_init_cabac_states();

    if (avctx->codec_id == AV_CODEC_ID_H264) {
        if (avctx->ticks_per_frame == 1)
            h->avctx->framerate.num *= 2;
        avctx->ticks_per_frame = 2;
    }

    if (avctx->extradata_size > 0 && avctx->extradata) {
        ret = ff_h264_decode_extradata(h);
        if (ret < 0) {
            ff_h264_free_context(h);
            return ret;
        }
    }

    if (h->sps.bitstream_restriction_flag &&
        h->avctx->has_b_frames < h->sps.num_reorder_frames) {
        h->avctx->has_b_frames = h->sps.num_reorder_frames;
        h->low_delay = 0;
    }

    avctx->internal->allocate_progress = 1;

    if (h->enable_er) {
        av_log(avctx, AV_LOG_WARNING,
               "Error resilience is enabled. It is unsafe and unsupported and may crash. "
               "Use it at your own risk\n");
    }

    return 0;
}

target: 0 | idx: 907

static void stream_desc_load(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;
    int i;

    cpu_physical_memory_read(addr, (void *) d, sizeof *d);

    /* Convert from LE into host endianness. */
    d->buffer_address = le64_to_cpu(d->buffer_address);
    d->nxtdesc = le64_to_cpu(d->nxtdesc);
    d->control = le32_to_cpu(d->control);
    d->status = le32_to_cpu(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = le32_to_cpu(d->app[i]);
    }
}

target: 0 | idx: 908

static CharDriverState *qemu_chr_open_win_file(HANDLE fd_out)
{
    CharDriverState *chr;
    WinCharState *s;

    chr = g_malloc0(sizeof(CharDriverState));
    s = g_malloc0(sizeof(WinCharState));
    s->hcom = fd_out;
    chr->opaque = s;
    chr->chr_write = win_chr_write;
    return chr;
}

target: 0 | idx: 909

void gdb_exit(CPUState *env, int code)
{
    GDBState *s;
    char buf[4];

    s = &gdbserver_state;
    if (gdbserver_fd < 0 || s->fd < 0)
        return;

    snprintf(buf, sizeof(buf), "W%02x", code);
    put_packet(s, buf);
}

target: 0 | idx: 910

static int usb_uhci_piix4_initfn(PCIDevice *dev)
{
    UHCIState *s = DO_UPCAST(UHCIState, dev, dev);
    uint8_t *pci_conf = s->dev.config;

    pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
    pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_INTEL_82371AB_2);
    return usb_uhci_common_initfn(s);
}

target: 0 | idx: 911

static void pxa2xx_gpio_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque;
    int bank;
    if (offset >= 0x200)
        return;

    bank = pxa2xx_gpio_regs[offset].bank;
    switch (pxa2xx_gpio_regs[offset].reg) {
    case GPDR: /* GPIO Pin-Direction registers */
        s->dir[bank] = value;
        pxa2xx_gpio_handler_update(s);
        break;

    case GPSR: /* GPIO Pin-Output Set registers */
        s->olevel[bank] |= value;
        pxa2xx_gpio_handler_update(s);
        break;

    case GPCR: /* GPIO Pin-Output Clear registers */
        s->olevel[bank] &= ~value;
        pxa2xx_gpio_handler_update(s);
        break;

    case GRER: /* GPIO Rising-Edge Detect Enable registers */
        s->rising[bank] = value;
        break;

    case GFER: /* GPIO Falling-Edge Detect Enable registers */
        s->falling[bank] = value;
        break;

    case GAFR_L: /* GPIO Alternate Function registers */
        s->gafr[bank * 2] = value;
        break;

    case GAFR_U: /* GPIO Alternate Function registers */
        s->gafr[bank * 2 + 1] = value;
        break;

    case GEDR: /* GPIO Edge Detect Status registers */
        s->status[bank] &= ~value;
        pxa2xx_gpio_irq_update(s);
        break;

    default:
        hw_error("%s: Bad offset " REG_FMT "\n", __FUNCTION__, offset);
    }
}

target: 0 | idx: 912

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID)) {
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        }
        break;
    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask);
}

target: 0 | idx: 915

void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
    VirtQueue *vq;
    int r;

    if (s->started || s->disabled) {
        return;
    }

    if (s->starting) {
        return;
    }

    s->starting = true;

    vq = virtio_get_queue(s->vdev, 0);
    if (!vring_setup(&s->vring, s->vdev, 0)) {
        goto fail_vring;
    }

    /* Set up guest notifier (irq) */
    r = k->set_guest_notifiers(qbus->parent, 1, true);
    if (r != 0) {
        fprintf(stderr, "virtio-blk failed to set guest notifier (%d), "
                "ensure -enable-kvm is set\n", r);
        goto fail_guest_notifiers;
    }
    s->guest_notifier = virtio_queue_get_guest_notifier(vq);

    /* Set up virtqueue notify */
    r = k->set_host_notifier(qbus->parent, 0, true);
    if (r != 0) {
        fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
        goto fail_host_notifier;
    }
    s->host_notifier = *virtio_queue_get_host_notifier(vq);

    s->saved_complete_request = vblk->complete_request;
    vblk->complete_request = complete_request_vring;

    s->starting = false;
    s->started = true;
    trace_virtio_blk_data_plane_start(s);

    blk_set_aio_context(s->conf->conf.blk, s->ctx);

    /* Kick right away to begin processing requests already in vring */
    event_notifier_set(virtio_queue_get_host_notifier(vq));

    /* Get this show started by hooking up our callbacks */
    aio_context_acquire(s->ctx);
    aio_set_event_notifier(s->ctx, &s->host_notifier, true,
                           handle_notify);
    aio_context_release(s->ctx);
    return;

fail_host_notifier:
    k->set_guest_notifiers(qbus->parent, 1, false);
fail_guest_notifiers:
    vring_teardown(&s->vring, s->vdev, 0);
    s->disabled = true;
fail_vring:
    s->starting = false;
}

target: 0 | idx: 916

static void qdm2_decode_fft_packets (QDM2Context *q)
{
    int i, j, min, max, value, type, unknown_flag;
    GetBitContext gb;

    if (q->sub_packet_list_B[0].packet == NULL)
        return;

    /* reset minimum indices for FFT coefficients */
    q->fft_coefs_index = 0;
    for (i=0; i < 5; i++)
        q->fft_coefs_min_index[i] = -1;

    /* process subpackets ordered by type, largest type first */
    for (i = 0, max = 256; i < q->sub_packets_B; i++) {
        QDM2SubPacket *packet;

        /* find subpacket with largest type less than max */
        for (j = 0, min = 0, packet = NULL; j < q->sub_packets_B; j++) {
            value = q->sub_packet_list_B[j].packet->type;
            if (value > min && value < max) {
                min = value;
                packet = q->sub_packet_list_B[j].packet;
            }
        }

        max = min;

        /* check for errors (?) */
        if (i == 0 && (packet->type < 16 || packet->type >= 48 ||
                       fft_subpackets[packet->type - 16]))
            return;

        /* decode FFT tones */
        init_get_bits (&gb, packet->data, packet->size*8);

        if (packet->type >= 32 && packet->type < 48 &&
            !fft_subpackets[packet->type - 16])
            unknown_flag = 1;
        else
            unknown_flag = 0;

        type = packet->type;

        if ((type >= 17 && type < 24) || (type >= 33 && type < 40)) {
            int duration = q->sub_sampling + 5 - (type & 15);

            if (duration >= 0 && duration < 4)
                qdm2_fft_decode_tones(q, duration, &gb, unknown_flag);
        } else if (type == 31) {
            for (i=0; i < 4; i++)
                qdm2_fft_decode_tones(q, i, &gb, unknown_flag);
        } else if (type == 46) {
            for (i=0; i < 6; i++)
                q->fft_level_exp[i] = get_bits(&gb, 6);
            for (i=0; i < 4; i++)
                qdm2_fft_decode_tones(q, i, &gb, unknown_flag);
        }
    } // Loop on B packets

    /* calculate maximum indices for FFT coefficients */
    for (i = 0, j = -1; i < 5; i++)
        if (q->fft_coefs_min_index[i] >= 0) {
            if (j >= 0)
                q->fft_coefs_max_index[j] = q->fft_coefs_min_index[i];
            j = i;
        }
    if (j >= 0)
        q->fft_coefs_max_index[j] = q->fft_coefs_index;
}

target: 0 | idx: 917

void avcodec_get_channel_layout_string(char *buf, int buf_size,
                                       int nb_channels, int64_t channel_layout)
{
    int i;

    if (channel_layout==0)
        channel_layout = avcodec_guess_channel_layout(nb_channels,
                                                      CODEC_ID_NONE, NULL);

    for (i=0; channel_layout_map[i].name; i++)
        if (nb_channels    == channel_layout_map[i].nb_channels &&
            channel_layout == channel_layout_map[i].layout) {
            snprintf(buf, buf_size, channel_layout_map[i].name);
            return;
        }

    snprintf(buf, buf_size, "%d channels", nb_channels);
    if (channel_layout) {
        int i,ch;
        av_strlcat(buf, " (", buf_size);
        for(i=0,ch=0; i<64; i++) {
            if ((channel_layout & (1L<<i))) {
                const char *name = get_channel_name(i);
                if (name) {
                    if (ch>0) av_strlcat(buf, "|", buf_size);
                    av_strlcat(buf, name, buf_size);
                }
                ch++;
            }
        }
        av_strlcat(buf, ")", buf_size);
    }
}

target: 0 | idx: 918

int ff_h264_decode_ref_pic_list_reordering(H264Context *h, H264SliceContext *sl)
{
    int list, index, pic_structure;

    print_short_term(h);
    print_long_term(h);

    for (list = 0; list < sl->list_count; list++) {
        memcpy(sl->ref_list[list], h->default_ref_list[list],
               sl->ref_count[list] * sizeof(sl->ref_list[0][0]));

        if (get_bits1(&sl->gb)) {    // ref_pic_list_modification_flag_l[01]
            int pred = h->curr_pic_num;

            for (index = 0; ; index++) {
                unsigned int modification_of_pic_nums_idc =
                    get_ue_golomb_31(&sl->gb);
                unsigned int pic_id;
                int i;
                H264Picture *ref = NULL;

                if (modification_of_pic_nums_idc == 3)
                    break;

                if (index >= sl->ref_count[list]) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "reference count overflow\n");
                    return -1;
                }

                switch (modification_of_pic_nums_idc) {
                case 0:
                case 1: {
                    const unsigned int abs_diff_pic_num =
                        get_ue_golomb(&sl->gb) + 1;
                    int frame_num;

                    if (abs_diff_pic_num > h->max_pic_num) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "abs_diff_pic_num overflow\n");
                        return AVERROR_INVALIDDATA;
                    }

                    if (modification_of_pic_nums_idc == 0)
                        pred -= abs_diff_pic_num;
                    else
                        pred += abs_diff_pic_num;
                    pred &= h->max_pic_num - 1;

                    frame_num = pic_num_extract(h, pred, &pic_structure);

                    for (i = h->short_ref_count - 1; i >= 0; i--) {
                        ref = h->short_ref[i];
                        assert(ref->reference);
                        assert(!ref->long_ref);
                        if (ref->frame_num == frame_num &&
                            (ref->reference & pic_structure))
                            break;
                    }
                    if (i >= 0)
                        ref->pic_id = pred;
                    break;
                }
                case 2: {
                    int long_idx;
                    pic_id = get_ue_golomb(&sl->gb); // long_term_pic_idx

                    long_idx = pic_num_extract(h, pic_id, &pic_structure);

                    if (long_idx > 31) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "long_term_pic_idx overflow\n");
                        return AVERROR_INVALIDDATA;
                    }
                    ref = h->long_ref[long_idx];
                    assert(!(ref && !ref->reference));
                    if (ref && (ref->reference & pic_structure)) {
                        ref->pic_id = pic_id;
                        assert(ref->long_ref);
                        i = 0;
                    } else {
                        i = -1;
                    }
                    break;
                }
                default:
                    av_log(h->avctx, AV_LOG_ERROR,
                           "illegal modification_of_pic_nums_idc %u\n",
                           modification_of_pic_nums_idc);
                    return AVERROR_INVALIDDATA;
                }

                if (i < 0) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "reference picture missing during reorder\n");
                    memset(&sl->ref_list[list][index], 0,
                           sizeof(sl->ref_list[0][0])); // FIXME
                } else {
                    for (i = index; i + 1 < sl->ref_count[list]; i++) {
                        if (sl->ref_list[list][i].parent &&
                            ref->long_ref == sl->ref_list[list][i].parent->long_ref &&
                            ref->pic_id == sl->ref_list[list][i].pic_id)
                            break;
                    }
                    for (; i > index; i--) {
                        sl->ref_list[list][i] = sl->ref_list[list][i - 1];
                    }
                    ref_from_h264pic(&sl->ref_list[list][index], ref);
                    if (FIELD_PICTURE(h)) {
                        pic_as_field(&sl->ref_list[list][index], pic_structure);
                    }
                }
            }
        }
    }
    for (list = 0; list < sl->list_count; list++) {
        for (index = 0; index < sl->ref_count[list]; index++) {
            if (   !sl->ref_list[list][index].parent
                || (!FIELD_PICTURE(h) &&
                    (sl->ref_list[list][index].reference&3) != 3)) {
                int i;
                av_log(h->avctx, AV_LOG_ERROR,
                       "Missing reference picture, default is %d\n",
                       h->default_ref_list[list][0].poc);
                for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
                    h->last_pocs[i] = INT_MIN;
                if (h->default_ref_list[list][0].parent
                    && !(!FIELD_PICTURE(h) &&
                         (h->default_ref_list[list][0].reference&3) != 3))
                    sl->ref_list[list][index] = h->default_ref_list[list][0];
                else
                    return -1;
            }
            av_assert0(av_buffer_get_ref_count(sl->ref_list[list][index].parent->f->buf[0]) > 0);
        }
    }

    return 0;
}

target: 1 | idx: 919

void FUNCC(ff_h264_chroma422_dc_dequant_idct)(int16_t *_block, int qmul){
    const int stride= 16*2;
    const int xStride= 16;
    int i;
    int temp[8];
    static const uint8_t x_offset[2]={0, 16};
    dctcoef *block = (dctcoef*)_block;

    for(i=0; i<4; i++){
        temp[2*i+0] = block[stride*i + xStride*0] + block[stride*i + xStride*1];
        temp[2*i+1] = block[stride*i + xStride*0] - block[stride*i + xStride*1];
    }

    for(i=0; i<2; i++){
        const int offset= x_offset[i];
        const int z0= temp[2*0+i] + temp[2*2+i];
        const int z1= temp[2*0+i] - temp[2*2+i];
        const int z2= temp[2*1+i] - temp[2*3+i];
        const int z3= temp[2*1+i] + temp[2*3+i];

        block[stride*0+offset]= ((z0 + z3)*qmul + 128) >> 8;
        block[stride*1+offset]= ((z1 + z2)*qmul + 128) >> 8;
        block[stride*2+offset]= ((z1 - z2)*qmul + 128) >> 8;
        block[stride*3+offset]= ((z0 - z3)*qmul + 128) >> 8;
    }
}

target: 1 | idx: 920

static int curl_open(BlockDriverState *bs, const char *filename, int flags)
{
    BDRVCURLState *s = bs->opaque;
    CURLState *state = NULL;
    double d;

#define RA_OPTSTR ":readahead="
    char *file;
    char *ra;
    const char *ra_val;
    int parse_state = 0;

    static int inited = 0;

    file = strdup(filename);
    s->readahead_size = READ_AHEAD_SIZE;

    /* Parse a trailing ":readahead=#:" param, if present. */
    ra = file + strlen(file) - 1;
    while (ra >= file) {
        if (parse_state == 0) {
            if (*ra == ':')
                parse_state++;
            else
                break;
        } else if (parse_state == 1) {
            if (*ra > '9' || *ra < '0') {
                char *opt_start = ra - strlen(RA_OPTSTR) + 1;
                if (opt_start > file &&
                    strncmp(opt_start, RA_OPTSTR, strlen(RA_OPTSTR)) == 0) {
                    ra_val = ra + 1;
                    ra -= strlen(RA_OPTSTR) - 1;
                    *ra = '\0';
                    s->readahead_size = atoi(ra_val);
                    break;
                } else {
                    break;
                }
            }
        }
        ra--;
    }

    if ((s->readahead_size & 0x1ff) != 0) {
        fprintf(stderr, "HTTP_READAHEAD_SIZE %zd is not a multiple of 512\n",
                s->readahead_size);
        goto out_noclean;
    }

    if (!inited) {
        curl_global_init(CURL_GLOBAL_ALL);
        inited = 1;
    }

    DPRINTF("CURL: Opening %s\n", file);
    s->url = file;
    state = curl_init_state(s);
    if (!state)
        goto out_noclean;

    // Get file size

    curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1);
    curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, (void *)curl_size_cb);
    if (curl_easy_perform(state->curl))
        goto out;
    curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d);
    curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, (void *)curl_read_cb);
    curl_easy_setopt(state->curl, CURLOPT_NOBODY, 0);
    if (d)
        s->len = (size_t)d;
    else if (!s->len)
        goto out;
    DPRINTF("CURL: Size = %lld\n", (long long)s->len);

    curl_clean_state(state);
    curl_easy_cleanup(state->curl);
    state->curl = NULL;

    // Now we know the file exists and its size, so let's
    // initialize the multi interface!

    s->multi = curl_multi_init();
    curl_multi_setopt(s->multi, CURLMOPT_SOCKETDATA, s);
    curl_multi_setopt(s->multi, CURLMOPT_SOCKETFUNCTION, curl_sock_cb);
    curl_multi_do(s);

    return 0;

out:
    fprintf(stderr, "CURL: Error opening file: %s\n", state->errmsg);
    curl_easy_cleanup(state->curl);
    state->curl = NULL;
out_noclean:
    qemu_free(file);
    return -EINVAL;
}

target: 1 | idx: 922

static void cin_decode_rle(const unsigned char *src, int src_size,
                           unsigned char *dst, int dst_size)
{
    int len, code;
    unsigned char *dst_end = dst + dst_size;
    const unsigned char *src_end = src + src_size;

    while (src < src_end && dst < dst_end) {
        code = *src++;
        if (code & 0x80) {
            len = code - 0x7F;
            memset(dst, *src++, FFMIN(len, dst_end - dst));
        } else {
            len = code + 1;
            memcpy(dst, src, FFMIN(len, dst_end - dst));
            src += len;
        }
        dst += len;
    }
}

target: 1 | idx: 923

static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
                                                      int64_t sector_num,
                                                      int nb_sectors, int *pnum,
                                                      BlockDriverState **file)
{
    IscsiLun *iscsilun = bs->opaque;
    struct scsi_get_lba_status *lbas = NULL;
    struct scsi_lba_status_descriptor *lbasd = NULL;
    struct IscsiTask iTask;
    uint64_t lba;
    int64_t ret;

    if (!is_sector_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
        ret = -EINVAL;
        goto out;
    }

    /* default to all sectors allocated */
    ret = BDRV_BLOCK_DATA;
    ret |= (sector_num << BDRV_SECTOR_BITS) | BDRV_BLOCK_OFFSET_VALID;
    *pnum = nb_sectors;

    /* LUN does not support logical block provisioning */
    if (!iscsilun->lbpme) {
        goto out;
    }

    lba = sector_qemu2lun(sector_num, iscsilun);

    iscsi_co_init_iscsitask(iscsilun, &iTask);
    qemu_mutex_lock(&iscsilun->mutex);
retry:
    if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun,
                                  lba, 8 + 16, iscsi_co_generic_cb,
                                  &iTask) == NULL) {
        ret = -ENOMEM;
        goto out_unlock;
    }

    while (!iTask.complete) {
        iscsi_set_events(iscsilun);
        qemu_mutex_unlock(&iscsilun->mutex);
        qemu_coroutine_yield();
        qemu_mutex_lock(&iscsilun->mutex);
    }

    if (iTask.do_retry) {
        if (iTask.task != NULL) {
            scsi_free_scsi_task(iTask.task);
            iTask.task = NULL;
        }
        iTask.complete = 0;
        goto retry;
    }

    if (iTask.status != SCSI_STATUS_GOOD) {
        /* in case the get_lba_status_callout fails (i.e.
         * because the device is busy or the cmd is not
         * supported) we pretend all blocks are allocated
         * for backwards compatibility */
        error_report("iSCSI GET_LBA_STATUS failed at lba %" PRIu64 ": %s",
                     lba, iTask.err_str);
        goto out_unlock;
    }

    lbas = scsi_datain_unmarshall(iTask.task);
    if (lbas == NULL) {
        ret = -EIO;
        goto out_unlock;
    }

    lbasd = &lbas->descriptors[0];

    if (sector_qemu2lun(sector_num, iscsilun) != lbasd->lba) {
        ret = -EIO;
        goto out_unlock;
    }

    *pnum = sector_lun2qemu(lbasd->num_blocks, iscsilun);

    if (lbasd->provisioning == SCSI_PROVISIONING_TYPE_DEALLOCATED ||
        lbasd->provisioning == SCSI_PROVISIONING_TYPE_ANCHORED) {
        ret &= ~BDRV_BLOCK_DATA;
        if (iscsilun->lbprz) {
            ret |= BDRV_BLOCK_ZERO;
        }
    }

    if (ret & BDRV_BLOCK_ZERO) {
        iscsi_allocmap_set_unallocated(iscsilun, sector_num, *pnum);
    } else {
        iscsi_allocmap_set_allocated(iscsilun, sector_num, *pnum);
    }

    if (*pnum > nb_sectors) {
        *pnum = nb_sectors;
    }
out_unlock:
    qemu_mutex_unlock(&iscsilun->mutex);
    g_free(iTask.err_str);
out:
    if (iTask.task != NULL) {
        scsi_free_scsi_task(iTask.task);
    }
    if (ret > 0 && ret & BDRV_BLOCK_OFFSET_VALID) {
        *file = bs;
    }
    return ret;
}

target: 0 | idx: 925

static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame, AVPacket *avpkt)
{
    uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    DVVideoContext *s = avctx->priv_data;
    const uint8_t *vsc_pack;
    int apt, is16_9, ret;
    const AVDVProfile *sys;

    sys = av_dv_frame_profile(s->sys, buf, buf_size);
    if (!sys || buf_size < sys->frame_size) {
        av_log(avctx, AV_LOG_ERROR, "could not find dv frame profile\n");
        return -1; /* NOTE: we only accept several full frames */
    }

    if (sys != s->sys) {
        ret = ff_dv_init_dynamic_tables(s, sys);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error initializing the work tables.\n");
            return ret;
        }
        s->sys = sys;
    }

    s->frame            = data;
    s->frame->key_frame = 1;
    s->frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->pix_fmt      = s->sys->pix_fmt;
    avctx->framerate    = av_inv_q(s->sys->time_base);

    ret = ff_set_dimensions(avctx, s->sys->width, s->sys->height);
    if (ret < 0)
        return ret;

    /* Determine the codec's sample_aspect ratio from the packet */
    vsc_pack = buf + 80 * 5 + 48 + 5;
    if (*vsc_pack == dv_video_control) {
        apt    = buf[4] & 0x07;
        is16_9 = (vsc_pack && ((vsc_pack[2] & 0x07) == 0x02 ||
                               (!apt && (vsc_pack[2] & 0x07) == 0x07)));
        ff_set_sar(avctx, s->sys->sar[is16_9]);
    }

    if (ff_get_buffer(avctx, s->frame, 0) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    s->frame->interlaced_frame = 1;
    s->frame->top_field_first  = 0;

    s->buf = buf;
    avctx->execute(avctx, dv_decode_video_segment, s->work_chunks, NULL,
                   dv_work_pool_size(s->sys), sizeof(DVwork_chunk));

    emms_c();

    /* return image */
    *got_frame = 1;

    return s->sys->frame_size;
}

target: 1 | idx: 926

static int inc_refcounts(BlockDriverState *bs,
                         uint16_t *refcount_table,
                         int refcount_table_size,
                         int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int k;
    int errors = 0;

    if (size <= 0)
        return 0;

    start = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for (cluster_offset = start; cluster_offset <= last;
         cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k < 0 || k >= refcount_table_size) {
            fprintf(stderr, "ERROR: invalid cluster offset=0x%" PRIx64 "\n",
                    cluster_offset);
            errors++;
        } else {
            if (++refcount_table[k] == 0) {
                fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                        "\n", cluster_offset);
                errors++;
            }
        }
    }

    return errors;
}

target: 1 | idx: 927

static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}

target: 1 | idx: 928

void pcmcia_info(Monitor *mon, const QDict *qdict)
{
    struct pcmcia_socket_entry_s *iter;

    if (!pcmcia_sockets)
        monitor_printf(mon, "No PCMCIA sockets\n");

    for (iter = pcmcia_sockets; iter; iter = iter->next)
        monitor_printf(mon, "%s: %s\n", iter->socket->slot_string,
                       iter->socket->attached ? iter->socket->card_string :
                       "Empty");
}

target: 1 | idx: 929

static void ahci_reg_init(AHCIState *s)
{
    int i;

    s->control_regs.cap = (s->ports - 1) |
                          (AHCI_NUM_COMMAND_SLOTS << 8) |
                          (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
                          HOST_CAP_NCQ | HOST_CAP_AHCI;

    s->control_regs.impl = (1 << s->ports) - 1;

    s->control_regs.version = AHCI_VERSION_1_0;

    for (i = 0; i < s->ports; i++) {
        s->dev[i].port_state = STATE_RUN;
    }
}

target: 1 | idx: 930

static void virtio_crypto_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOCrypto *c = VIRTIO_CRYPTO(vdev);
    struct virtio_crypto_config crypto_cfg;

    /*
     * Virtio-crypto device conforms to VIRTIO 1.0 which is always LE,
     * so we can use LE accessors directly.
     */
    stl_le_p(&crypto_cfg.status, c->status);
    stl_le_p(&crypto_cfg.max_dataqueues, c->max_queues);
    stl_le_p(&crypto_cfg.crypto_services, c->conf.crypto_services);
    stl_le_p(&crypto_cfg.cipher_algo_l, c->conf.cipher_algo_l);
    stl_le_p(&crypto_cfg.cipher_algo_h, c->conf.cipher_algo_h);
    stl_le_p(&crypto_cfg.hash_algo, c->conf.hash_algo);
    stl_le_p(&crypto_cfg.mac_algo_l, c->conf.mac_algo_l);
    stl_le_p(&crypto_cfg.mac_algo_h, c->conf.mac_algo_h);
    stl_le_p(&crypto_cfg.aead_algo, c->conf.aead_algo);
    stl_le_p(&crypto_cfg.max_cipher_key_len, c->conf.max_cipher_key_len);
    stl_le_p(&crypto_cfg.max_auth_key_len, c->conf.max_auth_key_len);
    stq_le_p(&crypto_cfg.max_size, c->conf.max_size);

    memcpy(config, &crypto_cfg, c->config_size);
}

target: 1 | idx: 931

static int flac_probe(AVProbeData *p)
{
    uint8_t *bufptr = p->buf;
    uint8_t *end    = p->buf + p->buf_size;

    if(bufptr > end-4 || memcmp(bufptr, "fLaC", 4)) return 0;
    else                                            return AVPROBE_SCORE_MAX/2;
}

target: 1 | idx: 932

static int get_qcd(Jpeg2000DecoderContext *s, int n, Jpeg2000QuantStyle *q,
                   uint8_t *properties)
{
    Jpeg2000QuantStyle tmp;
    int compno, ret;

    if ((ret = get_qcx(s, n, &tmp)) < 0)
        return ret;
    for (compno = 0; compno < s->ncomponents; compno++)
        if (!(properties[compno] & HAD_QCC))
            memcpy(q + compno, &tmp, sizeof(tmp));
    return 0;
}

target: 1 | idx: 933

void slirp_init(int restricted, struct in_addr vnetwork,
                struct in_addr vnetmask, struct in_addr vhost,
                const char *vhostname, const char *tftp_path,
                const char *bootfile, struct in_addr vdhcp_start,
                struct in_addr vnameserver)
{
#ifdef _WIN32
    WSADATA Data;

    WSAStartup(MAKEWORD(2,0), &Data);
    atexit(slirp_cleanup);
#endif

    link_up = 1;
    slirp_restrict = restricted;

    if_init();
    ip_init();

    /* Initialise mbufs *after* setting the MTU */
    m_init();

    /* set default addresses */
    inet_aton("127.0.0.1", &loopback_addr);

    if (get_dns_addr(&dns_addr) < 0) {
        dns_addr = loopback_addr;
        fprintf (stderr, "Warning: No DNS servers found\n");
    }

    vnetwork_addr = vnetwork;
    vnetwork_mask = vnetmask;
    vhost_addr = vhost;
    if (vhostname) {
        pstrcpy(slirp_hostname, sizeof(slirp_hostname), vhostname);
    }
    qemu_free(tftp_prefix);
    tftp_prefix = NULL;
    if (tftp_path) {
        tftp_prefix = qemu_strdup(tftp_path);
    }
    qemu_free(bootp_filename);
    bootp_filename = NULL;
    if (bootfile) {
        bootp_filename = qemu_strdup(bootfile);
    }
    vdhcp_startaddr = vdhcp_start;
    vnameserver_addr = vnameserver;

    getouraddr();
    register_savevm("slirp", 0, 1, slirp_state_save, slirp_state_load, NULL);
}

target: 1 | idx: 934

void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

target: 1 | idx: 935

qemu_irq *openpic_init (PCIBus *bus, int *pmem_index, int nb_cpus,
                        qemu_irq **irqs, qemu_irq irq_out)
{
    openpic_t *opp;
    uint8_t *pci_conf;
    int i, m;

    /* XXX: for now, only one CPU is supported */
    if (nb_cpus != 1)
        return NULL;
    if (bus) {
        opp = (openpic_t *)pci_register_device(bus, "OpenPIC",
                                               sizeof(openpic_t), -1,
                                               NULL, NULL);
        if (opp == NULL)
            return NULL;
        pci_conf = opp->pci_dev.config;
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_IBM);
        pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_IBM_OPENPIC2);
        pci_config_set_class(pci_conf, PCI_CLASS_SYSTEM_OTHER); // FIXME?
        pci_conf[PCI_HEADER_TYPE] = PCI_HEADER_TYPE_NORMAL; // header_type
        pci_conf[0x3d] = 0x00; // no interrupt pin

        /* Register I/O spaces */
        pci_register_bar((PCIDevice *)opp, 0, 0x40000,
                         PCI_BASE_ADDRESS_SPACE_MEMORY, &openpic_map);
    } else {
        opp = qemu_mallocz(sizeof(openpic_t));
    }
    opp->mem_index = cpu_register_io_memory(openpic_read,
                                            openpic_write, opp);

    //  isu_base &= 0xFFFC0000;
    opp->nb_cpus = nb_cpus;
    opp->max_irq = OPENPIC_MAX_IRQ;
    opp->irq_ipi0 = OPENPIC_IRQ_IPI0;
    opp->irq_tim0 = OPENPIC_IRQ_TIM0;
    /* Set IRQ types */
    for (i = 0; i < OPENPIC_EXT_IRQ; i++) {
        opp->src[i].type = IRQ_EXTERNAL;
    }
    for (; i < OPENPIC_IRQ_TIM0; i++) {
        opp->src[i].type = IRQ_SPECIAL;
    }
#if MAX_IPI > 0
    m = OPENPIC_IRQ_IPI0;
#else
    m = OPENPIC_IRQ_DBL0;
#endif
    for (; i < m; i++) {
        opp->src[i].type = IRQ_TIMER;
    }
    for (; i < OPENPIC_MAX_IRQ; i++) {
        opp->src[i].type = IRQ_INTERNAL;
    }
    for (i = 0; i < nb_cpus; i++)
        opp->dst[i].irqs = irqs[i];
    opp->irq_out = irq_out;
    opp->need_swap = 1;

    register_savevm("openpic", 0, 2, openpic_save, openpic_load, opp);
    qemu_register_reset(openpic_reset, opp);

    opp->irq_raise = openpic_irq_raise;
    opp->reset = openpic_reset;

    if (pmem_index)
        *pmem_index = opp->mem_index;

    return qemu_allocate_irqs(openpic_set_irq, opp, opp->max_irq);
}

target: 0 | idx: 936

static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             uint32_t length)
{
    uint32_t sequence_number;

    if (length != 26)
        return AVERROR_INVALIDDATA;

    if (!(s->state & PNG_IHDR)) {
        av_log(avctx, AV_LOG_ERROR, "fctl before IHDR\n");
        return AVERROR_INVALIDDATA;
    }

    s->last_w = s->cur_w;
    s->last_h = s->cur_h;
    s->last_x_offset = s->x_offset;
    s->last_y_offset = s->y_offset;
    s->last_dispose_op = s->dispose_op;

    sequence_number = bytestream2_get_be32(&s->gb);
    s->cur_w        = bytestream2_get_be32(&s->gb);
    s->cur_h        = bytestream2_get_be32(&s->gb);
    s->x_offset     = bytestream2_get_be32(&s->gb);
    s->y_offset     = bytestream2_get_be32(&s->gb);
    bytestream2_skip(&s->gb, 4); /* delay_num (2), delay_den (2) */
    s->dispose_op   = bytestream2_get_byte(&s->gb);
    s->blend_op     = bytestream2_get_byte(&s->gb);
    bytestream2_skip(&s->gb, 4); /* crc */

    if (sequence_number == 0 &&
        (s->cur_w != s->width ||
         s->cur_h != s->height ||
         s->x_offset != 0 ||
         s->y_offset != 0) ||
        s->cur_w <= 0 || s->cur_h <= 0 ||
        s->x_offset < 0 || s->y_offset < 0 ||
        s->cur_w > s->width - s->x_offset ||
        s->cur_h > s->height - s->y_offset)
        return AVERROR_INVALIDDATA;

    if (sequence_number == 0 && s->dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
        // No previous frame to revert to for the first frame
        // Spec says to just treat it as a APNG_DISPOSE_OP_BACKGROUND
        s->dispose_op = APNG_DISPOSE_OP_BACKGROUND;
    }

    if (s->dispose_op == APNG_BLEND_OP_OVER && !s->has_trns && (
            avctx->pix_fmt == AV_PIX_FMT_RGB24 ||
            avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
            avctx->pix_fmt == AV_PIX_FMT_PAL8 ||
            avctx->pix_fmt == AV_PIX_FMT_GRAY8 ||
            avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
            avctx->pix_fmt == AV_PIX_FMT_MONOBLACK)) {
        // APNG_DISPOSE_OP_OVER is the same as APNG_DISPOSE_OP_SOURCE when there is no alpha channel
        s->dispose_op = APNG_BLEND_OP_SOURCE;
    }

    return 0;
}

1
938
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl) { int list, index, pic_structure; print_short_term(h); print_long_term(h); h264_initialise_ref_list(h, sl); for (list = 0; list < sl->list_count; list++) { int pred = sl->curr_pic_num; for (index = 0; index < sl->nb_ref_modifications[list]; index++) { unsigned int modification_of_pic_nums_idc = sl->ref_modifications[list][index].op; unsigned int val = sl->ref_modifications[list][index].val; unsigned int pic_id; int i; H264Picture *ref = NULL; switch (modification_of_pic_nums_idc) { case 0: case 1: { const unsigned int abs_diff_pic_num = val + 1; int frame_num; if (abs_diff_pic_num > sl->max_pic_num) { av_log(h->avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n"); return AVERROR_INVALIDDATA; } if (modification_of_pic_nums_idc == 0) pred -= abs_diff_pic_num; else pred += abs_diff_pic_num; pred &= sl->max_pic_num - 1; frame_num = pic_num_extract(h, pred, &pic_structure); for (i = h->short_ref_count - 1; i >= 0; i--) { ref = h->short_ref[i]; assert(ref->reference); assert(!ref->long_ref); if (ref->frame_num == frame_num && (ref->reference & pic_structure)) break; } if (i >= 0) ref->pic_id = pred; break; } case 2: { int long_idx; pic_id = val; // long_term_pic_idx long_idx = pic_num_extract(h, pic_id, &pic_structure); if (long_idx > 31U) { av_log(h->avctx, AV_LOG_ERROR, "long_term_pic_idx overflow\n"); return AVERROR_INVALIDDATA; } ref = h->long_ref[long_idx]; assert(!(ref && !ref->reference)); if (ref && (ref->reference & pic_structure)) { ref->pic_id = pic_id; assert(ref->long_ref); i = 0; } else { i = -1; } break; } default: av_assert1(0); } if (i < 0) { av_log(h->avctx, AV_LOG_ERROR, "reference picture missing during reorder\n"); memset(&sl->ref_list[list][index], 0, sizeof(sl->ref_list[0][0])); // FIXME } else { for (i = index; i + 1 < sl->ref_count[list]; i++) { if (sl->ref_list[list][i].parent && ref->long_ref == sl->ref_list[list][i].parent->long_ref && ref->pic_id == sl->ref_list[list][i].pic_id) break; } for (; i > index; i--) { sl->ref_list[list][i] = sl->ref_list[list][i - 1]; } ref_from_h264pic(&sl->ref_list[list][index], ref); if (FIELD_PICTURE(h)) { pic_as_field(&sl->ref_list[list][index], pic_structure); } } } } for (list = 0; list < sl->list_count; list++) { for (index = 0; index < sl->ref_count[list]; index++) { if ( !sl->ref_list[list][index].parent || (!FIELD_PICTURE(h) && (sl->ref_list[list][index].reference&3) != 3)) { int i; av_log(h->avctx, AV_LOG_ERROR, "Missing reference picture, default is %d\n", h->default_ref[list].poc); for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++) h->last_pocs[i] = INT_MIN; if (h->default_ref[list].parent && !(!FIELD_PICTURE(h) && (h->default_ref[list].reference&3) != 3)) sl->ref_list[list][index] = h->default_ref[list]; else return -1; } av_assert0(av_buffer_get_ref_count(sl->ref_list[list][index].parent->f->buf[0]) > 0); } } if (FRAME_MBAFF(h)) h264_fill_mbaff_ref_list(sl); return 0; }
1
939
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update) { target_ulong dr6; int reg, type; int hit_enabled = 0; dr6 = env->dr[6] & ~0xf; for (reg = 0; reg < DR7_MAX_BP; reg++) { type = hw_breakpoint_type(env->dr[7], reg); if ((type == 0 && env->dr[reg] == env->eip) || ((type & 1) && env->cpu_watchpoint[reg] && (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) { dr6 |= 1 << reg; if (hw_breakpoint_enabled(env->dr[7], reg)) { hit_enabled = 1; } } } if (hit_enabled || force_dr6_update) env->dr[6] = dr6; return hit_enabled; }
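check_hw_breakpoints relies on the architectural x86 DR7 layout. A tiny standalone sketch of how the per-breakpoint fields are conventionally extracted (these helpers are illustrative, not the QEMU ones):

#include <stdint.h>
#include <stdio.h>

/* x86 DR7: local/global enable bits sit at bits 2n and 2n+1; the 2-bit
 * R/W type is at 16 + 4n and the 2-bit length at 18 + 4n. Type 0 means
 * break on instruction execution. */
static int bp_type(uint32_t dr7, int n)    { return (dr7 >> (16 + n * 4)) & 3; }
static int bp_len(uint32_t dr7, int n)     { return (dr7 >> (18 + n * 4)) & 3; }
static int bp_enabled(uint32_t dr7, int n) { return ((dr7 >> (n * 2)) & 3) != 0; }

int main(void)
{
    uint32_t dr7 = (1u << 0) | (1u << 16); /* L0 set, BP0 type 1 = data write */
    printf("bp0: type=%d len=%d enabled=%d\n",
           bp_type(dr7, 0), bp_len(dr7, 0), bp_enabled(dr7, 0));
    return 0;
}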
1
940
void ff_dv_offset_reset(DVDemuxContext *c, int64_t frame_offset) { c->frames= frame_offset; if (c->ach) c->abytes= av_rescale_q(c->frames, c->sys->time_base, (AVRational){8, c->ast[0]->codec->bit_rate}); c->audio_pkt[0].size = c->audio_pkt[1].size = 0; c->audio_pkt[2].size = c->audio_pkt[3].size = 0; }
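The abytes computation above is a units conversion: a frame count expressed in the stream time base is rescaled into bytes of audio via av_rescale_q. A toy integer rescale showing the idea (simplified: the real av_rescale_q adds rounding control and 128-bit overflow handling):

#include <stdint.h>
#include <stdio.h>

/* value * (num/den) in integer arithmetic; multiply first to keep precision */
static int64_t rescale(int64_t value, int64_t num, int64_t den)
{
    return value * num / den;
}

int main(void)
{
    /* e.g. 100 frames at 25 fps with 115200 audio bytes per second */
    printf("%lld bytes\n", (long long)rescale(100, 115200, 25));
    return 0;
}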
1
942
static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb, AVPacket *pkt) { unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE]; int chunk_type; int chunk_size; unsigned char opcode_preamble[OPCODE_PREAMBLE_SIZE]; unsigned char opcode_type; unsigned char opcode_version; int opcode_size; unsigned char scratch[1024]; int i, j; int first_color, last_color; int audio_flags; unsigned char r, g, b; /* see if there are any pending packets */ chunk_type = load_ipmovie_packet(s, pb, pkt); if ((chunk_type == CHUNK_VIDEO) && (chunk_type != CHUNK_DONE)) return chunk_type; /* read the next chunk, wherever the file happens to be pointing */ if (url_feof(pb)) return CHUNK_EOF; if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) != CHUNK_PREAMBLE_SIZE) return CHUNK_BAD; chunk_size = AV_RL16(&chunk_preamble[0]); chunk_type = AV_RL16(&chunk_preamble[2]); debug_ipmovie("chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size); switch (chunk_type) { case CHUNK_INIT_AUDIO: debug_ipmovie("initialize audio\n"); break; case CHUNK_AUDIO_ONLY: debug_ipmovie("audio only\n"); break; case CHUNK_INIT_VIDEO: debug_ipmovie("initialize video\n"); break; case CHUNK_VIDEO: debug_ipmovie("video (and audio)\n"); break; case CHUNK_SHUTDOWN: debug_ipmovie("shutdown\n"); break; case CHUNK_END: debug_ipmovie("end\n"); break; default: debug_ipmovie("invalid chunk\n"); chunk_type = CHUNK_BAD; break; } while ((chunk_size > 0) && (chunk_type != CHUNK_BAD)) { /* read the next chunk, wherever the file happens to be pointing */ if (url_feof(pb)) { chunk_type = CHUNK_EOF; break; } if (get_buffer(pb, opcode_preamble, CHUNK_PREAMBLE_SIZE) != CHUNK_PREAMBLE_SIZE) { chunk_type = CHUNK_BAD; break; } opcode_size = AV_RL16(&opcode_preamble[0]); opcode_type = opcode_preamble[2]; opcode_version = opcode_preamble[3]; chunk_size -= OPCODE_PREAMBLE_SIZE; chunk_size -= opcode_size; if (chunk_size < 0) { debug_ipmovie("chunk_size countdown just went negative\n"); chunk_type = CHUNK_BAD; break; } debug_ipmovie(" opcode type %02X, version %d, 0x%04X bytes: ", opcode_type, opcode_version, opcode_size); switch (opcode_type) { case OPCODE_END_OF_STREAM: debug_ipmovie("end of stream\n"); url_fseek(pb, opcode_size, SEEK_CUR); break; case OPCODE_END_OF_CHUNK: debug_ipmovie("end of chunk\n"); url_fseek(pb, opcode_size, SEEK_CUR); break; case OPCODE_CREATE_TIMER: debug_ipmovie("create timer\n"); if ((opcode_version > 0) || (opcode_size > 6)) { debug_ipmovie("bad create_timer opcode\n"); chunk_type = CHUNK_BAD; break; } if (get_buffer(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } s->fps = 1000000.0 / (AV_RL32(&scratch[0]) * AV_RL16(&scratch[4])); s->frame_pts_inc = 90000 / s->fps; debug_ipmovie(" %.2f frames/second (timer div = %d, subdiv = %d)\n", s->fps, AV_RL32(&scratch[0]), AV_RL16(&scratch[4])); break; case OPCODE_INIT_AUDIO_BUFFERS: debug_ipmovie("initialize audio buffers\n"); if ((opcode_version > 1) || (opcode_size > 10)) { debug_ipmovie("bad init_audio_buffers opcode\n"); chunk_type = CHUNK_BAD; break; } if (get_buffer(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } s->audio_sample_rate = AV_RL16(&scratch[4]); audio_flags = AV_RL16(&scratch[2]); /* bit 0 of the flags: 0 = mono, 1 = stereo */ s->audio_channels = (audio_flags & 1) + 1; /* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */ s->audio_bits = (((audio_flags >> 1) & 1) + 1) * 8; /* bit 2 indicates compressed audio in version 1 opcode */ if ((opcode_version == 1) && (audio_flags & 0x4)) s->audio_type = CODEC_ID_INTERPLAY_DPCM; 
else if (s->audio_bits == 16) s->audio_type = CODEC_ID_PCM_S16LE; else s->audio_type = CODEC_ID_PCM_U8; debug_ipmovie("audio: %d bits, %d Hz, %s, %s format\n", s->audio_bits, s->audio_sample_rate, (s->audio_channels == 2) ? "stereo" : "mono", (s->audio_type == CODEC_ID_INTERPLAY_DPCM) ? "Interplay audio" : "PCM"); break; case OPCODE_START_STOP_AUDIO: debug_ipmovie("start/stop audio\n"); url_fseek(pb, opcode_size, SEEK_CUR); break; case OPCODE_INIT_VIDEO_BUFFERS: debug_ipmovie("initialize video buffers\n"); if ((opcode_version > 2) || (opcode_size > 8)) { debug_ipmovie("bad init_video_buffers opcode\n"); chunk_type = CHUNK_BAD; break; } if (get_buffer(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } s->video_width = AV_RL16(&scratch[0]) * 8; s->video_height = AV_RL16(&scratch[2]) * 8; debug_ipmovie("video resolution: %d x %d\n", s->video_width, s->video_height); break; case OPCODE_UNKNOWN_06: case OPCODE_UNKNOWN_0E: case OPCODE_UNKNOWN_10: case OPCODE_UNKNOWN_12: case OPCODE_UNKNOWN_13: case OPCODE_UNKNOWN_14: case OPCODE_UNKNOWN_15: debug_ipmovie("unknown (but documented) opcode %02X\n", opcode_type); url_fseek(pb, opcode_size, SEEK_CUR); break; case OPCODE_SEND_BUFFER: debug_ipmovie("send buffer\n"); url_fseek(pb, opcode_size, SEEK_CUR); break; case OPCODE_AUDIO_FRAME: debug_ipmovie("audio frame\n"); /* log position and move on for now */ s->audio_chunk_offset = url_ftell(pb); s->audio_chunk_size = opcode_size; url_fseek(pb, opcode_size, SEEK_CUR); break; case OPCODE_SILENCE_FRAME: debug_ipmovie("silence frame\n"); url_fseek(pb, opcode_size, SEEK_CUR); break; case OPCODE_INIT_VIDEO_MODE: debug_ipmovie("initialize video mode\n"); url_fseek(pb, opcode_size, SEEK_CUR); break; case OPCODE_CREATE_GRADIENT: debug_ipmovie("create gradient\n"); url_fseek(pb, opcode_size, SEEK_CUR); break; case OPCODE_SET_PALETTE: debug_ipmovie("set palette\n"); /* check for the logical maximum palette size * (3 * 256 + 4 bytes) */ if (opcode_size > 0x304) { debug_ipmovie("demux_ipmovie: set_palette opcode too large\n"); chunk_type = CHUNK_BAD; break; } if (get_buffer(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } /* load the palette into internal data structure */ first_color = AV_RL16(&scratch[0]); last_color = first_color + AV_RL16(&scratch[2]) - 1; /* sanity check (since they are 16 bit values) */ if ((first_color > 0xFF) || (last_color > 0xFF)) { debug_ipmovie("demux_ipmovie: set_palette indices out of range (%d -> %d)\n", first_color, last_color); chunk_type = CHUNK_BAD; break; } j = 4; /* offset of first palette data */ for (i = first_color; i <= last_color; i++) { /* the palette is stored as a 6-bit VGA palette, thus each * component is shifted up to a 8-bit range */ r = scratch[j++] * 4; g = scratch[j++] * 4; b = scratch[j++] * 4; s->palette_control.palette[i] = (r << 16) | (g << 8) | (b); } /* indicate a palette change */ s->palette_control.palette_changed = 1; break; case OPCODE_SET_PALETTE_COMPRESSED: debug_ipmovie("set palette compressed\n"); url_fseek(pb, opcode_size, SEEK_CUR); break; case OPCODE_SET_DECODING_MAP: debug_ipmovie("set decoding map\n"); /* log position and move on for now */ s->decode_map_chunk_offset = url_ftell(pb); s->decode_map_chunk_size = opcode_size; url_fseek(pb, opcode_size, SEEK_CUR); break; case OPCODE_VIDEO_DATA: debug_ipmovie("set video data\n"); /* log position and move on for now */ s->video_chunk_offset = url_ftell(pb); s->video_chunk_size = opcode_size; url_fseek(pb, opcode_size, SEEK_CUR); break; default: 
debug_ipmovie("*** unknown opcode type\n"); chunk_type = CHUNK_BAD; break; } } /* make a note of where the stream is sitting */ s->next_chunk_offset = url_ftell(pb); /* dispatch the first of any pending packets */ if ((chunk_type == CHUNK_VIDEO) || (chunk_type == CHUNK_AUDIO_ONLY)) chunk_type = load_ipmovie_packet(s, pb, pkt); return chunk_type; }
1
943
static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, ram_addr_t current_addr, RAMBlock *block, ram_addr_t offset, bool last_stage) { int encoded_len = 0, bytes_xbzrle; uint8_t *prev_cached_page; if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) { rs->xbzrle_cache_miss++; if (!last_stage) { if (cache_insert(XBZRLE.cache, current_addr, *current_data, rs->bitmap_sync_count) == -1) { return -1; } else { /* update *current_data when the page has been inserted into cache */ *current_data = get_cached_data(XBZRLE.cache, current_addr); } } return -1; } prev_cached_page = get_cached_data(XBZRLE.cache, current_addr); /* save current buffer into memory */ memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE); /* XBZRLE encoding (if there is no overflow) */ encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE, XBZRLE.encoded_buf, TARGET_PAGE_SIZE); if (encoded_len == 0) { trace_save_xbzrle_page_skipping(); return 0; } else if (encoded_len == -1) { trace_save_xbzrle_page_overflow(); rs->xbzrle_overflows++; /* update data in the cache */ if (!last_stage) { memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE); *current_data = prev_cached_page; } return -1; } /* we need to update the data in the cache, in order to get the same data */ if (!last_stage) { memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE); } /* Send XBZRLE based compressed page */ bytes_xbzrle = save_page_header(rs, block, offset | RAM_SAVE_FLAG_XBZRLE); qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE); qemu_put_be16(rs->f, encoded_len); qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len); bytes_xbzrle += encoded_len + 1 + 2; rs->xbzrle_pages++; rs->xbzrle_bytes += bytes_xbzrle; rs->bytes_transferred += bytes_xbzrle; return 1; }
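save_xbzrle_page only sends a delta when the page is already cached: the encoder effectively XORs the new page against the cached copy and run-length-encodes the unchanged (zero) runs. A toy delta encoder in the same spirit (this is not the real XBZRLE wire format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy delta encoder: emit (zero_run_len, data_len, data...) records.
 * Returns bytes written, or -1 if the output would not fit, mirroring
 * the "overflow -> resend the whole page" fallback above. */
static int toy_xbzrle(const uint8_t *old, const uint8_t *new,
                      int n, uint8_t *out, int outlen)
{
    int i = 0, o = 0;
    while (i < n) {
        int zrun = 0, drun = 0;
        while (i + zrun < n && old[i + zrun] == new[i + zrun] && zrun < 255)
            zrun++;
        i += zrun;
        while (i + drun < n && old[i + drun] != new[i + drun] && drun < 255)
            drun++;
        if (o + 2 + drun > outlen)
            return -1; /* encoded form larger than the budget */
        out[o++] = (uint8_t)zrun;
        out[o++] = (uint8_t)drun;
        memcpy(out + o, new + i, drun);
        o += drun;
        i += drun;
    }
    return o;
}

int main(void)
{
    uint8_t a[16] = {0}, b[16] = {0}, buf[64];
    b[5] = 0xAA; /* one changed byte */
    printf("encoded %d bytes\n", toy_xbzrle(a, b, 16, buf, sizeof(buf)));
    return 0;
}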
1
945
static void dump_qobject(fprintf_function func_fprintf, void *f, int comp_indent, QObject *obj) { switch (qobject_type(obj)) { case QTYPE_QINT: { QInt *value = qobject_to_qint(obj); func_fprintf(f, "%" PRId64, qint_get_int(value)); break; } case QTYPE_QSTRING: { QString *value = qobject_to_qstring(obj); func_fprintf(f, "%s", qstring_get_str(value)); break; } case QTYPE_QDICT: { QDict *value = qobject_to_qdict(obj); dump_qdict(func_fprintf, f, comp_indent, value); break; } case QTYPE_QLIST: { QList *value = qobject_to_qlist(obj); dump_qlist(func_fprintf, f, comp_indent, value); break; } case QTYPE_QFLOAT: { QFloat *value = qobject_to_qfloat(obj); func_fprintf(f, "%g", qfloat_get_double(value)); break; } case QTYPE_QBOOL: { QBool *value = qobject_to_qbool(obj); func_fprintf(f, "%s", qbool_get_int(value) ? "true" : "false"); break; } case QTYPE_QERROR: { QString *value = qerror_human((QError *)obj); func_fprintf(f, "%s", qstring_get_str(value)); break; } case QTYPE_NONE: break; case QTYPE_MAX: default: abort(); } }
1
947
int sws_getColorspaceDetails(SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation) { if (isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1; *inv_table = c->srcColorspaceTable; *table = c->dstColorspaceTable; *srcRange = c->srcRange; *dstRange = c->dstRange; *brightness= c->brightness; *contrast = c->contrast; *saturation= c->saturation; return 0; }
0
948
static int mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, UINT8 *buf, int buf_size) { MJpegDecodeContext *s = avctx->priv_data; UINT8 *buf_end, *buf_ptr; int i, start_code; AVPicture *picture = data; *data_size = 0; /* no supplementary picture */ if (buf_size == 0) return 0; buf_ptr = buf; buf_end = buf + buf_size; while (buf_ptr < buf_end) { /* find start next marker */ start_code = find_marker(&buf_ptr, buf_end); { /* EOF */ if (start_code < 0) { goto the_end; } else { dprintf("marker=%x avail_size_in_buf=%d\n", start_code, buf_end - buf_ptr); if ((buf_end - buf_ptr) > s->buffer_size) { av_free(s->buffer); s->buffer_size = buf_end-buf_ptr; s->buffer = av_malloc(s->buffer_size); dprintf("buffer too small, expanding to %d bytes\n", s->buffer_size); } /* unescape buffer of SOS */ if (start_code == SOS) { UINT8 *src = buf_ptr; UINT8 *dst = s->buffer; while (src<buf_end) { UINT8 x = *(src++); *(dst++) = x; if (x == 0xff) { while(*src == 0xff) src++; x = *(src++); if (x >= 0xd0 && x <= 0xd7) *(dst++) = x; else if (x) break; } } init_get_bits(&s->gb, s->buffer, dst - s->buffer); dprintf("escaping removed %d bytes\n", (buf_end - buf_ptr) - (dst - s->buffer)); } else init_get_bits(&s->gb, buf_ptr, buf_end - buf_ptr); s->start_code = start_code; /* process markers */ if (start_code >= 0xd0 && start_code <= 0xd7) { dprintf("restart marker: %d\n", start_code&0x0f); } else if (s->first_picture) { /* APP fields */ if (start_code >= 0xe0 && start_code <= 0xef) mjpeg_decode_app(s); /* Comment */ else if (start_code == COM) mjpeg_decode_com(s); } switch(start_code) { case SOI: s->restart_interval = 0; /* nothing to do on SOI */ break; case DQT: mjpeg_decode_dqt(s); break; case DHT: mjpeg_decode_dht(s); break; case SOF0: if (mjpeg_decode_sof0(s) < 0) return -1; break; case EOI: eoi_parser: { if (s->interlaced) { s->bottom_field ^= 1; /* if not bottom field, do not output image yet */ if (s->bottom_field) goto not_the_end; } for(i=0;i<3;i++) { picture->data[i] = s->current_picture[i]; picture->linesize[i] = (s->interlaced) ? s->linesize[i] >> 1 : s->linesize[i]; } *data_size = sizeof(AVPicture); avctx->height = s->height; if (s->interlaced) avctx->height *= 2; avctx->width = s->width; /* XXX: not complete test ! */ switch((s->h_count[0] << 4) | s->v_count[0]) { case 0x11: avctx->pix_fmt = PIX_FMT_YUV444P; break; case 0x21: avctx->pix_fmt = PIX_FMT_YUV422P; break; default: case 0x22: avctx->pix_fmt = PIX_FMT_YUV420P; break; } /* dummy quality */ /* XXX: infer it with matrix */ // avctx->quality = 3; goto the_end; } break; case SOS: mjpeg_decode_sos(s); /* buggy avid puts EOI every 10-20th frame */ /* if restart period is over process EOI */ if ((s->buggy_avid && !s->interlaced) || s->restart_interval) goto eoi_parser; break; case DRI: mjpeg_decode_dri(s); break; case SOF1: case SOF2: case SOF3: case SOF5: case SOF6: case SOF7: case SOF9: case SOF10: case SOF11: case SOF13: case SOF14: case SOF15: case JPG: printf("mjpeg: unsupported coding type (%x)\n", start_code); break; // default: // printf("mjpeg: unsupported marker (%x)\n", start_code); // break; } not_the_end: /* eof process start code */ buf_ptr += (get_bits_count(&s->gb)+7)/8; dprintf("marker parser used %d bytes (%d bits)\n", (get_bits_count(&s->gb)+7)/8, get_bits_count(&s->gb)); } } } the_end: dprintf("mjpeg decode frame unused %d bytes\n", buf_end - buf_ptr); // return buf_end - buf_ptr; return buf_ptr - buf; }
0
949
static int draw_text(AVFilterContext *ctx, AVFrame *frame, int width, int height) { DrawTextContext *s = ctx->priv; AVFilterLink *inlink = ctx->inputs[0]; uint32_t code = 0, prev_code = 0; int x = 0, y = 0, i = 0, ret; int max_text_line_w = 0, len; int box_w, box_h; char *text; uint8_t *p; int y_min = 32000, y_max = -32000; int x_min = 32000, x_max = -32000; FT_Vector delta; Glyph *glyph = NULL, *prev_glyph = NULL; Glyph dummy = { 0 }; time_t now = time(0); struct tm ltime; AVBPrint *bp = &s->expanded_text; FFDrawColor fontcolor; FFDrawColor shadowcolor; FFDrawColor bordercolor; FFDrawColor boxcolor; av_bprint_clear(bp); if(s->basetime != AV_NOPTS_VALUE) now= frame->pts*av_q2d(ctx->inputs[0]->time_base) + s->basetime/1000000; switch (s->exp_mode) { case EXP_NONE: av_bprintf(bp, "%s", s->text); break; case EXP_NORMAL: if ((ret = expand_text(ctx, s->text, &s->expanded_text)) < 0) return ret; break; case EXP_STRFTIME: localtime_r(&now, &ltime); av_bprint_strftime(bp, s->text, &ltime); break; } if (s->tc_opt_string) { char tcbuf[AV_TIMECODE_STR_SIZE]; av_timecode_make_string(&s->tc, tcbuf, inlink->frame_count); av_bprint_clear(bp); av_bprintf(bp, "%s%s", s->text, tcbuf); } if (!av_bprint_is_complete(bp)) return AVERROR(ENOMEM); text = s->expanded_text.str; if ((len = s->expanded_text.len) > s->nb_positions) { if (!(s->positions = av_realloc(s->positions, len*sizeof(*s->positions)))) return AVERROR(ENOMEM); s->nb_positions = len; } if (s->fontcolor_expr[0]) { /* If expression is set, evaluate and replace the static value */ av_bprint_clear(&s->expanded_fontcolor); if ((ret = expand_text(ctx, s->fontcolor_expr, &s->expanded_fontcolor)) < 0) return ret; if (!av_bprint_is_complete(&s->expanded_fontcolor)) return AVERROR(ENOMEM); av_log(s, AV_LOG_DEBUG, "Evaluated fontcolor is '%s'\n", s->expanded_fontcolor.str); ret = av_parse_color(s->fontcolor.rgba, s->expanded_fontcolor.str, -1, s); if (ret) return ret; ff_draw_color(&s->dc, &s->fontcolor, s->fontcolor.rgba); } x = 0; y = 0; /* load and cache glyphs */ for (i = 0, p = text; *p; i++) { GET_UTF8(code, *p++, continue;); /* get glyph */ dummy.code = code; glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL); if (!glyph) { load_glyph(ctx, &glyph, code); } y_min = FFMIN(glyph->bbox.yMin, y_min); y_max = FFMAX(glyph->bbox.yMax, y_max); x_min = FFMIN(glyph->bbox.xMin, x_min); x_max = FFMAX(glyph->bbox.xMax, x_max); } s->max_glyph_h = y_max - y_min; s->max_glyph_w = x_max - x_min; /* compute and save position for each glyph */ glyph = NULL; for (i = 0, p = text; *p; i++) { GET_UTF8(code, *p++, continue;); /* skip the \n in the sequence \r\n */ if (prev_code == '\r' && code == '\n') continue; prev_code = code; if (is_newline(code)) { max_text_line_w = FFMAX(max_text_line_w, x); y += s->max_glyph_h; x = 0; continue; } /* get glyph */ prev_glyph = glyph; dummy.code = code; glyph = av_tree_find(s->glyphs, &dummy, glyph_cmp, NULL); /* kerning */ if (s->use_kerning && prev_glyph && glyph->code) { FT_Get_Kerning(s->face, prev_glyph->code, glyph->code, ft_kerning_default, &delta); x += delta.x >> 6; } /* save position */ s->positions[i].x = x + glyph->bitmap_left; s->positions[i].y = y - glyph->bitmap_top + y_max; if (code == '\t') x = (x / s->tabsize + 1)*s->tabsize; else x += glyph->advance; } max_text_line_w = FFMAX(x, max_text_line_w); s->var_values[VAR_TW] = s->var_values[VAR_TEXT_W] = max_text_line_w; s->var_values[VAR_TH] = s->var_values[VAR_TEXT_H] = y + s->max_glyph_h; s->var_values[VAR_MAX_GLYPH_W] = s->max_glyph_w; s->var_values[VAR_MAX_GLYPH_H] 
= s->max_glyph_h; s->var_values[VAR_MAX_GLYPH_A] = s->var_values[VAR_ASCENT ] = y_max; s->var_values[VAR_MAX_GLYPH_D] = s->var_values[VAR_DESCENT] = y_min; s->var_values[VAR_LINE_H] = s->var_values[VAR_LH] = s->max_glyph_h; s->x = s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, &s->prng); s->y = s->var_values[VAR_Y] = av_expr_eval(s->y_pexpr, s->var_values, &s->prng); /* evaluate x a second time: the x expression may reference the freshly computed y */ s->x = s->var_values[VAR_X] = av_expr_eval(s->x_pexpr, s->var_values, &s->prng); update_alpha(s); update_color_with_alpha(s, &fontcolor , s->fontcolor ); update_color_with_alpha(s, &shadowcolor, s->shadowcolor); update_color_with_alpha(s, &bordercolor, s->bordercolor); update_color_with_alpha(s, &boxcolor , s->boxcolor ); box_w = FFMIN(width - 1 , max_text_line_w); box_h = FFMIN(height - 1, y + s->max_glyph_h); /* draw box */ if (s->draw_box) ff_blend_rectangle(&s->dc, &boxcolor, frame->data, frame->linesize, width, height, s->x - s->boxborderw, s->y - s->boxborderw, box_w + s->boxborderw * 2, box_h + s->boxborderw * 2); if (s->shadowx || s->shadowy) { if ((ret = draw_glyphs(s, frame, width, height, &shadowcolor, s->shadowx, s->shadowy, 0)) < 0) return ret; } if (s->borderw) { if ((ret = draw_glyphs(s, frame, width, height, &bordercolor, 0, 0, s->borderw)) < 0) return ret; } if ((ret = draw_glyphs(s, frame, width, height, &fontcolor, 0, 0, 0)) < 0) return ret; return 0; }
0
950
static int planarCopyWrapper(SwsContext *c, const uint8_t *src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t *dst[], int dstStride[]) { const AVPixFmtDescriptor *desc_src = av_pix_fmt_desc_get(c->srcFormat); const AVPixFmtDescriptor *desc_dst = av_pix_fmt_desc_get(c->dstFormat); int plane, i, j; for (plane = 0; plane < 4; plane++) { int length = (plane == 0 || plane == 3) ? c->srcW : -((-c->srcW ) >> c->chrDstHSubSample); int y = (plane == 0 || plane == 3) ? srcSliceY: -((-srcSliceY) >> c->chrDstVSubSample); int height = (plane == 0 || plane == 3) ? srcSliceH: -((-srcSliceH) >> c->chrDstVSubSample); const uint8_t *srcPtr = src[plane]; uint8_t *dstPtr = dst[plane] + dstStride[plane] * y; if (!dst[plane]) continue; // ignore palette for GRAY8 if (plane == 1 && !dst[2]) continue; if (!src[plane] || (plane == 1 && !src[2])) { int val = (plane == 3) ? 255 : 128; if (is16BPS(c->dstFormat)) length *= 2; if (is9_OR_10BPS(c->dstFormat)) { fill_plane9or10(dst[plane], dstStride[plane], length, height, y, val, desc_dst->comp[plane].depth_minus1 + 1, isBE(c->dstFormat)); } else fillPlane(dst[plane], dstStride[plane], length, height, y, val); } else { if (is9_OR_10BPS(c->srcFormat)) { const int src_depth = desc_src->comp[plane].depth_minus1 + 1; const int dst_depth = desc_dst->comp[plane].depth_minus1 + 1; const uint16_t *srcPtr2 = (const uint16_t *) srcPtr; if (is16BPS(c->dstFormat)) { uint16_t *dstPtr2 = (uint16_t *) dstPtr; #define COPY9_OR_10TO16(rfunc, wfunc) \ for (i = 0; i < height; i++) { \ for (j = 0; j < length; j++) { \ int srcpx = rfunc(&srcPtr2[j]); \ wfunc(&dstPtr2[j], (srcpx << (16 - src_depth)) | (srcpx >> (2 * src_depth - 16))); \ } \ dstPtr2 += dstStride[plane] / 2; \ srcPtr2 += srcStride[plane] / 2; \ } if (isBE(c->dstFormat)) { if (isBE(c->srcFormat)) { COPY9_OR_10TO16(AV_RB16, AV_WB16); } else { COPY9_OR_10TO16(AV_RL16, AV_WB16); } } else { if (isBE(c->srcFormat)) { COPY9_OR_10TO16(AV_RB16, AV_WL16); } else { COPY9_OR_10TO16(AV_RL16, AV_WL16); } } } else if (is9_OR_10BPS(c->dstFormat)) { uint16_t *dstPtr2 = (uint16_t *) dstPtr; #define COPY9_OR_10TO9_OR_10(loop) \ for (i = 0; i < height; i++) { \ for (j = 0; j < length; j++) { \ loop; \ } \ dstPtr2 += dstStride[plane] / 2; \ srcPtr2 += srcStride[plane] / 2; \ } #define COPY9_OR_10TO9_OR_10_2(rfunc, wfunc) \ if (dst_depth > src_depth) { \ COPY9_OR_10TO9_OR_10(int srcpx = rfunc(&srcPtr2[j]); \ wfunc(&dstPtr2[j], (srcpx << 1) | (srcpx >> 9))); \ } else if (dst_depth < src_depth) { \ DITHER_COPY(dstPtr2, dstStride[plane] / 2, wfunc, \ srcPtr2, srcStride[plane] / 2, rfunc, \ dither_8x8_1, 1, clip9); \ } else { \ COPY9_OR_10TO9_OR_10(wfunc(&dstPtr2[j], rfunc(&srcPtr2[j]))); \ } if (isBE(c->dstFormat)) { if (isBE(c->srcFormat)) { COPY9_OR_10TO9_OR_10_2(AV_RB16, AV_WB16); } else { COPY9_OR_10TO9_OR_10_2(AV_RL16, AV_WB16); } } else { if (isBE(c->srcFormat)) { COPY9_OR_10TO9_OR_10_2(AV_RB16, AV_WL16); } else { COPY9_OR_10TO9_OR_10_2(AV_RL16, AV_WL16); } } } else { #define W8(a, b) { *(a) = (b); } #define COPY9_OR_10TO8(rfunc) \ if (src_depth == 9) { \ DITHER_COPY(dstPtr, dstStride[plane], W8, \ srcPtr2, srcStride[plane] / 2, rfunc, \ dither_8x8_1, 1, av_clip_uint8); \ } else { \ DITHER_COPY(dstPtr, dstStride[plane], W8, \ srcPtr2, srcStride[plane] / 2, rfunc, \ dither_8x8_3, 2, av_clip_uint8); \ } if (isBE(c->srcFormat)) { COPY9_OR_10TO8(AV_RB16); } else { COPY9_OR_10TO8(AV_RL16); } } } else if (is9_OR_10BPS(c->dstFormat)) { const int dst_depth = desc_dst->comp[plane].depth_minus1 + 1; uint16_t *dstPtr2 = (uint16_t *) dstPtr; if 
(is16BPS(c->srcFormat)) { const uint16_t *srcPtr2 = (const uint16_t *) srcPtr; #define COPY16TO9_OR_10(rfunc, wfunc) \ if (dst_depth == 9) { \ DITHER_COPY(dstPtr2, dstStride[plane] / 2, wfunc, \ srcPtr2, srcStride[plane] / 2, rfunc, \ ff_dither_8x8_128, 7, clip9); \ } else { \ DITHER_COPY(dstPtr2, dstStride[plane] / 2, wfunc, \ srcPtr2, srcStride[plane] / 2, rfunc, \ dither_8x8_64, 6, clip10); \ } if (isBE(c->dstFormat)) { if (isBE(c->srcFormat)) { COPY16TO9_OR_10(AV_RB16, AV_WB16); } else { COPY16TO9_OR_10(AV_RL16, AV_WB16); } } else { if (isBE(c->srcFormat)) { COPY16TO9_OR_10(AV_RB16, AV_WL16); } else { COPY16TO9_OR_10(AV_RL16, AV_WL16); } } } else /* 8bit */ { #define COPY8TO9_OR_10(wfunc) \ for (i = 0; i < height; i++) { \ for (j = 0; j < length; j++) { \ const int srcpx = srcPtr[j]; \ wfunc(&dstPtr2[j], (srcpx << (dst_depth - 8)) | (srcpx >> (16 - dst_depth))); \ } \ dstPtr2 += dstStride[plane] / 2; \ srcPtr += srcStride[plane]; \ } if (isBE(c->dstFormat)) { COPY8TO9_OR_10(AV_WB16); } else { COPY8TO9_OR_10(AV_WL16); } } } else if (is16BPS(c->srcFormat) && !is16BPS(c->dstFormat)) { const uint16_t *srcPtr2 = (const uint16_t *) srcPtr; #define COPY16TO8(rfunc) \ DITHER_COPY(dstPtr, dstStride[plane], W8, \ srcPtr2, srcStride[plane] / 2, rfunc, \ dither_8x8_256, 8, av_clip_uint8); if (isBE(c->srcFormat)) { COPY16TO8(AV_RB16); } else { COPY16TO8(AV_RL16); } } else if (!is16BPS(c->srcFormat) && is16BPS(c->dstFormat)) { for (i = 0; i < height; i++) { for (j = 0; j < length; j++) { dstPtr[ j << 1 ] = srcPtr[j]; dstPtr[(j << 1) + 1] = srcPtr[j]; } srcPtr += srcStride[plane]; dstPtr += dstStride[plane]; } } else if (is16BPS(c->srcFormat) && is16BPS(c->dstFormat) && isBE(c->srcFormat) != isBE(c->dstFormat)) { for (i = 0; i < height; i++) { for (j = 0; j < length; j++) ((uint16_t *) dstPtr)[j] = av_bswap16(((const uint16_t *) srcPtr)[j]); srcPtr += srcStride[plane]; dstPtr += dstStride[plane]; } } else if (dstStride[plane] == srcStride[plane] && srcStride[plane] > 0 && srcStride[plane] == length) { memcpy(dst[plane] + dstStride[plane] * y, src[plane], height * dstStride[plane]); } else { if (is16BPS(c->srcFormat) && is16BPS(c->dstFormat)) length *= 2; else if (!desc_src->comp[0].depth_minus1) length >>= 3; // monowhite/black for (i = 0; i < height; i++) { memcpy(dstPtr, srcPtr, length); srcPtr += srcStride[plane]; dstPtr += dstStride[plane]; } } } } return srcSliceH; }
1
951
static uint64_t pci_read(void *opaque, hwaddr addr, unsigned int size) { AcpiPciHpState *s = opaque; uint32_t val = 0; int bsel = s->hotplug_select; if (bsel < 0 || bsel >= ACPI_PCIHP_MAX_HOTPLUG_BUS) { return 0; } switch (addr) { case PCI_UP_BASE: val = s->acpi_pcihp_pci_status[bsel].up; if (!s->legacy_piix) { s->acpi_pcihp_pci_status[bsel].up = 0; } ACPI_PCIHP_DPRINTF("pci_up_read %" PRIu32 "\n", val); break; case PCI_DOWN_BASE: val = s->acpi_pcihp_pci_status[bsel].down; ACPI_PCIHP_DPRINTF("pci_down_read %" PRIu32 "\n", val); break; case PCI_EJ_BASE: /* No feature defined yet */ ACPI_PCIHP_DPRINTF("pci_features_read %" PRIu32 "\n", val); break; case PCI_RMV_BASE: val = s->acpi_pcihp_pci_status[bsel].hotplug_enable; ACPI_PCIHP_DPRINTF("pci_rmv_read %" PRIu32 "\n", val); break; case PCI_SEL_BASE: val = s->hotplug_select; ACPI_PCIHP_DPRINTF("pci_sel_read %" PRIu32 "\n", val); default: break; } return val; }
1
952
static void free_field_queue(PullupField *head, PullupField **last) { PullupField *f = head; while (f) { av_free(f->diffs); av_free(f->combs); av_free(f->vars); if (f == *last) { av_freep(last); break; } f = f->next; av_freep(&f->prev); } }
1
953
static void notify_guest_bh(void *opaque) { VirtIOBlockDataPlane *s = opaque; unsigned nvqs = s->conf->num_queues; unsigned long bitmap[BITS_TO_LONGS(nvqs)]; unsigned j; memcpy(bitmap, s->batch_notify_vqs, sizeof(bitmap)); memset(s->batch_notify_vqs, 0, sizeof(bitmap)); for (j = 0; j < nvqs; j += BITS_PER_LONG) { unsigned long bits = bitmap[j]; while (bits != 0) { unsigned i = j + ctzl(bits); VirtQueue *vq = virtio_get_queue(s->vdev, i); if (virtio_should_notify(s->vdev, vq)) { event_notifier_set(virtio_queue_get_guest_notifier(vq)); } bits &= bits - 1; /* clear right-most bit */ } } }
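notify_guest_bh walks set bits with ctzl plus the bits &= bits - 1 trick. A standalone demo of that idiom, assuming the GCC/Clang __builtin_ctzl builtin:

#include <stdio.h>

/* __builtin_ctzl finds the lowest set bit; bits &= bits - 1 clears it,
 * so each set bit is visited exactly once and the loop skips zeros. */
int main(void)
{
    unsigned long bits = 0x8442UL; /* bits 1, 6, 10 and 15 set */
    while (bits != 0) {
        printf("vq %d needs a notify\n", __builtin_ctzl(bits));
        bits &= bits - 1; /* clear the lowest set bit */
    }
    return 0;
}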
1
954
static char *doubles2str(double *dp, int count, const char *sep) { int i; char *ap, *ap0; int component_len; if (!sep) sep = ", "; component_len = 15 + strlen(sep); ap = av_malloc(component_len * count); if (!ap) return NULL; ap0 = ap; ap[0] = '\0'; for (i = 0; i < count; i++) { unsigned l = snprintf(ap, component_len, "%f%s", dp[i], sep); if (l >= component_len) { av_free(ap0); return NULL; } ap += l; } ap0[strlen(ap0) - strlen(sep)] = '\0'; return ap0; }
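For reference, a portable re-sketch of the join logic above using plain malloc/snprintf, keeping the overflow guard and the trailing-separator trim (illustrative only; the original uses av_malloc and a 15-byte component budget, 24 here is an arbitrary roomier choice):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *join_doubles(const double *dp, int count, const char *sep)
{
    size_t comp, seplen;
    char *buf, *p;
    int i;
    if (!sep)
        sep = ", ";
    seplen = strlen(sep);
    comp = 24 + seplen;
    buf = p = malloc(comp * count + 1);
    if (!buf)
        return NULL;
    *p = '\0';
    for (i = 0; i < count; i++) {
        int l = snprintf(p, comp, "%f%s", dp[i], sep);
        if (l < 0 || (size_t)l >= comp) { /* component would not fit */
            free(buf);
            return NULL;
        }
        p += l;
    }
    if (p != buf)
        *(p - seplen) = '\0'; /* trim the trailing separator */
    return buf;
}

int main(void)
{
    double v[3] = { 1.0, 2.5, 3.25 };
    char *s = join_doubles(v, 3, "; ");
    if (s) {
        printf("%s\n", s);
        free(s);
    }
    return 0;
}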
1
955
static uint32_t arm_v7m_load_vector(ARMCPU *cpu) { CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; MemTxResult result; hwaddr vec = env->v7m.vecbase + env->v7m.exception * 4; uint32_t addr; addr = address_space_ldl(cs->as, vec, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { /* Architecturally this should cause a HardFault setting HSFR.VECTTBL, * which would then be immediately followed by our failing to load * the entry vector for that HardFault, which is a Lockup case. * Since we don't model Lockup, we just report this guest error * via cpu_abort(). */ cpu_abort(cs, "Failed to read from exception vector table " "entry %08x\n", (unsigned)vec); } return addr; }
1
956
static void htab_save_first_pass(QEMUFile *f, sPAPREnvironment *spapr, int64_t max_ns) { int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; int index = spapr->htab_save_index; int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); assert(spapr->htab_first_pass); do { int chunkstart; /* Consume invalid HPTEs */ while ((index < htabslots) && !HPTE_VALID(HPTE(spapr->htab, index))) { CLEAN_HPTE(HPTE(spapr->htab, index)); index++; } /* Consume valid HPTEs */ chunkstart = index; while ((index < htabslots) && HPTE_VALID(HPTE(spapr->htab, index))) { CLEAN_HPTE(HPTE(spapr->htab, index)); index++; } if (index > chunkstart) { int n_valid = index - chunkstart; qemu_put_be32(f, chunkstart); qemu_put_be16(f, n_valid); qemu_put_be16(f, 0); qemu_put_buffer(f, HPTE(spapr->htab, chunkstart), HASH_PTE_SIZE_64 * n_valid); if ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) { break; } } } while ((index < htabslots) && !qemu_file_rate_limit(f)); if (index >= htabslots) { assert(index == htabslots); index = 0; spapr->htab_first_pass = false; } spapr->htab_save_index = index; }
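The first pass above is a classic two-phase run scan: skip a run of invalid entries, then gather the following run of valid entries and emit it as one chunk. A minimal standalone version of that scan:

#include <stdio.h>

/* Skip a run of invalid entries, then collect the following run of
 * valid entries and report it as a single chunk. */
static void emit_chunks(const int *valid, int n)
{
    int index = 0;
    while (index < n) {
        int chunkstart;
        while (index < n && !valid[index]) /* consume invalid entries */
            index++;
        chunkstart = index;
        while (index < n && valid[index])  /* consume valid entries */
            index++;
        if (index > chunkstart)
            printf("chunk at %d, %d entries\n", chunkstart, index - chunkstart);
    }
}

int main(void)
{
    int v[] = { 0, 1, 1, 0, 0, 1, 0, 1, 1, 1 };
    emit_chunks(v, 10); /* chunks at 1 (2), 5 (1), 7 (3) */
    return 0;
}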
1
957
build_dsdt(GArray *table_data, BIOSLinker *linker, AcpiPmInfo *pm, AcpiMiscInfo *misc, Range *pci_hole, Range *pci_hole64, MachineState *machine) { CrsRangeEntry *entry; Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs; CrsRangeSet crs_range_set; PCMachineState *pcms = PC_MACHINE(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(machine); uint32_t nr_mem = machine->ram_slots; int root_bus_limit = 0xFF; PCIBus *bus = NULL; int i; dsdt = init_aml_allocator(); /* Reserve space for header */ acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader)); build_dbg_aml(dsdt); if (misc->is_piix4) { sb_scope = aml_scope("_SB"); dev = aml_device("PCI0"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_ADR", aml_int(0))); aml_append(dev, aml_name_decl("_UID", aml_int(1))); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); build_hpet_aml(dsdt); build_piix4_pm(dsdt); build_piix4_isa_bridge(dsdt); build_isa_devices_aml(dsdt); build_piix4_pci_hotplug(dsdt); build_piix4_pci0_int(dsdt); } else { sb_scope = aml_scope("_SB"); dev = aml_device("PCI0"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_ADR", aml_int(0))); aml_append(dev, aml_name_decl("_UID", aml_int(1))); aml_append(dev, build_q35_osc_method()); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); build_hpet_aml(dsdt); build_q35_isa_bridge(dsdt); build_isa_devices_aml(dsdt); build_q35_pci0_int(dsdt); } if (pcmc->legacy_cpu_hotplug) { build_legacy_cpu_hotplug_aml(dsdt, machine, pm->cpu_hp_io_base); } else { CPUHotplugFeatures opts = { .acpi_1_compatible = true, .has_legacy_cphp = true }; build_cpus_aml(dsdt, machine, opts, pm->cpu_hp_io_base, "\\_SB.PCI0", "\\_GPE._E02"); } build_memory_hotplug_aml(dsdt, nr_mem, "\\_SB.PCI0", "\\_GPE._E03"); scope = aml_scope("_GPE"); { aml_append(scope, aml_name_decl("_HID", aml_string("ACPI0006"))); if (misc->is_piix4) { method = aml_method("_E01", 0, AML_NOTSERIALIZED); aml_append(method, aml_acquire(aml_name("\\_SB.PCI0.BLCK"), 0xFFFF)); aml_append(method, aml_call0("\\_SB.PCI0.PCNT")); aml_append(method, aml_release(aml_name("\\_SB.PCI0.BLCK"))); aml_append(scope, method); } if (pcms->acpi_nvdimm_state.is_enabled) { method = aml_method("_E04", 0, AML_NOTSERIALIZED); aml_append(method, aml_notify(aml_name("\\_SB.NVDR"), aml_int(0x80))); aml_append(scope, method); } } aml_append(dsdt, scope); crs_range_set_init(&crs_range_set); bus = PC_MACHINE(machine)->bus; if (bus) { QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); /* look only for expander root buses */ if (!pci_bus_is_root(bus)) { continue; } if (bus_num < root_bus_limit) { root_bus_limit = bus_num - 1; } scope = aml_scope("\\_SB"); dev = aml_device("PC%.02X", bus_num); aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); if (pci_bus_is_express(bus)) { aml_append(dev, build_q35_osc_method()); } if (numa_node != NUMA_NODE_UNASSIGNED) { aml_append(dev, aml_name_decl("_PXM", aml_int(numa_node))); } aml_append(dev, build_prt(false)); crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), &crs_range_set); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(dsdt, scope); } } scope = aml_scope("\\_SB.PCI0"); /* build PCI0._CRS */ crs = aml_resource_template();
aml_append(crs, aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, 0x0000, 0x0, root_bus_limit, 0x0000, root_bus_limit + 1)); aml_append(crs, aml_io(AML_DECODE16, 0x0CF8, 0x0CF8, 0x01, 0x08)); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, 0x0000, 0x0CF7, 0x0000, 0x0CF8)); crs_replace_with_free_ranges(crs_range_set.io_ranges, 0x0D00, 0xFFFF); for (i = 0; i < crs_range_set.io_ranges->len; i++) { entry = g_ptr_array_index(crs_range_set.io_ranges, i); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, entry->base, entry->limit, 0x0000, entry->limit - entry->base + 1)); } aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, 0x000A0000, 0x000BFFFF, 0, 0x00020000)); crs_replace_with_free_ranges(crs_range_set.mem_ranges, range_lob(pci_hole), range_upb(pci_hole)); for (i = 0; i < crs_range_set.mem_ranges->len; i++) { entry = g_ptr_array_index(crs_range_set.mem_ranges, i); aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0, entry->base, entry->limit, 0, entry->limit - entry->base + 1)); } if (!range_is_empty(pci_hole64)) { crs_replace_with_free_ranges(crs_range_set.mem_64bit_ranges, range_lob(pci_hole64), range_upb(pci_hole64)); for (i = 0; i < crs_range_set.mem_64bit_ranges->len; i++) { entry = g_ptr_array_index(crs_range_set.mem_64bit_ranges, i); aml_append(crs, aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, entry->base, entry->limit, 0, entry->limit - entry->base + 1)); } } if (misc->tpm_version != TPM_VERSION_UNSPEC) { aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE, TPM_TIS_ADDR_SIZE, AML_READ_WRITE)); } aml_append(scope, aml_name_decl("_CRS", crs)); /* reserve GPE0 block resources */ dev = aml_device("GPE0"); aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); aml_append(dev, aml_name_decl("_UID", aml_string("GPE0 resources"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->gpe0_blk, pm->gpe0_blk, 1, pm->gpe0_blk_len) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); crs_range_set_free(&crs_range_set); /* reserve PCIHP resources */ if (pm->pcihp_io_len) { dev = aml_device("PHPR"); aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); aml_append(dev, aml_name_decl("_UID", aml_string("PCI Hotplug resources"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->pcihp_io_base, pm->pcihp_io_base, 1, pm->pcihp_io_len) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); } aml_append(dsdt, scope); /* create S3_ / S4_ / S5_ packages if necessary */ scope = aml_scope("\\"); if (!pm->s3_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(1)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(1)); /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S3", pkg)); } if (!pm->s4_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(pm->s4_val)); /* PM1a_CNT.SLP_TYP */ /* PM1b_CNT.SLP_TYP, FIXME: not impl. 
*/ aml_append(pkg, aml_int(pm->s4_val)); aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S4", pkg)); } pkg = aml_package(4); aml_append(pkg, aml_int(0)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(0)); /* PM1b_CNT.SLP_TYP not impl. */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S5", pkg)); aml_append(dsdt, scope); /* create fw_cfg node, unconditionally */ { /* when using port i/o, the 8-bit data register *always* overlaps * with half of the 16-bit control register. Hence, the total size * of the i/o region used is FW_CFG_CTL_SIZE; when using DMA, the * DMA control register is located at FW_CFG_DMA_IO_BASE + 4 */ uint8_t io_size = object_property_get_bool(OBJECT(pcms->fw_cfg), "dma_enabled", NULL) ? ROUND_UP(FW_CFG_CTL_SIZE, 4) + sizeof(dma_addr_t) : FW_CFG_CTL_SIZE; scope = aml_scope("\\_SB.PCI0"); dev = aml_device("FWCF"); aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, FW_CFG_IO_BASE, FW_CFG_IO_BASE, 0x01, io_size) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(dsdt, scope); } if (misc->applesmc_io_base) { scope = aml_scope("\\_SB.PCI0.ISA"); dev = aml_device("SMC"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("APP0001"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->applesmc_io_base, misc->applesmc_io_base, 0x01, APPLESMC_MAX_DATA_LENGTH) ); aml_append(crs, aml_irq_no_flags(6)); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(dsdt, scope); } if (misc->pvpanic_port) { scope = aml_scope("\\_SB.PCI0.ISA"); dev = aml_device("PEVT"); aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0001"))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->pvpanic_port, misc->pvpanic_port, 1, 1) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO, aml_int(misc->pvpanic_port), 1)); field = aml_field("PEOR", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); aml_append(field, aml_named_field("PEPT", 8)); aml_append(dev, field); /* device present, functioning, decoding, shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); method = aml_method("RDPT", 0, AML_NOTSERIALIZED); aml_append(method, aml_store(aml_name("PEPT"), aml_local(0))); aml_append(method, aml_return(aml_local(0))); aml_append(dev, method); method = aml_method("WRPT", 1, AML_NOTSERIALIZED); aml_append(method, aml_store(aml_arg(0), aml_name("PEPT"))); aml_append(dev, method); aml_append(scope, dev); aml_append(dsdt, scope); } sb_scope = aml_scope("\\_SB"); { Object *pci_host; PCIBus *bus = NULL; pci_host = acpi_get_i386_pci_host(); if (pci_host) { bus = PCI_HOST_BRIDGE(pci_host)->bus; } if (bus) { Aml *scope = aml_scope("PCI0"); /* Scan all PCI buses. Generate tables to support hotplug. 
*/ build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en); if (misc->tpm_version != TPM_VERSION_UNSPEC) { dev = aml_device("ISA.TPM"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C31"))); aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); crs = aml_resource_template(); aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE, TPM_TIS_ADDR_SIZE, AML_READ_WRITE)); /* FIXME: TPM_TIS_IRQ=5 conflicts with PNP0C0F irqs, Rewrite to take IRQ from TPM device model and fix default IRQ value there to use some unused IRQ */ /* aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); */ aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); } aml_append(sb_scope, scope); } } aml_append(dsdt, sb_scope); /* copy AML table into ACPI tables blob and patch header there */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); build_header(linker, table_data, (void *)(table_data->data + table_data->len - dsdt->buf->len), "DSDT", dsdt->buf->len, 1, NULL, NULL); free_aml_allocator(); }
1
958
int64_t cpu_get_clock(void) { int64_t ti; if (!timers_state.cpu_ticks_enabled) { return timers_state.cpu_clock_offset; } else { ti = get_clock(); return ti + timers_state.cpu_clock_offset; } }
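cpu_get_clock implements a pausable clock: while ticking it returns raw clock plus an offset, while stopped it returns the frozen offset. A self-contained sketch of the same pattern on top of POSIX clock_gettime (helper names are hypothetical):

#include <stdio.h>
#include <time.h>

static struct { int enabled; long long offset; } st;

static long long raw_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* enabled: raw + offset keeps counting; disabled: offset is the frozen value */
static long long get_clock_ns(void) { return st.enabled ? raw_ns() + st.offset : st.offset; }
static void clock_pause(void)  { st.offset = get_clock_ns(); st.enabled = 0; }
static void clock_resume(void) { st.offset -= raw_ns(); st.enabled = 1; }

int main(void)
{
    clock_resume();
    long long a = get_clock_ns();
    clock_pause();
    long long b = get_clock_ns(); /* stays frozen until the next resume */
    printf("running=%lld paused=%lld\n", a, b);
    return 0;
}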
1
959
static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, int16_t *block, int n) { int level, dc, diff, j, run; int component; RLTable *rl; uint8_t * scantable = s->intra_scantable.permutated; const uint16_t *quant_matrix; const int qscale = s->qscale; /* DC coefficient */ if (n < 4) { quant_matrix = s->intra_matrix; component = 0; } else { quant_matrix = s->chroma_intra_matrix; component = (n & 1) + 1; } diff = decode_dc(&s->gb, component); if (diff >= 0xffff) return -1; dc = s->last_dc[component]; dc += diff; s->last_dc[component] = dc; block[0] = dc << (3 - s->intra_dc_precision); if (s->intra_vlc_format) rl = &ff_rl_mpeg2; else rl = &ff_rl_mpeg1; { OPEN_READER(re, &s->gb); /* now quantify & encode AC coefficients */ for (;;) { UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); if (level == 127) { break; } else if (level != 0) { scantable += run; j = *scantable; level = (level * qscale * quant_matrix[j]) >> 4; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } else { /* escape */ run = SHOW_UBITS(re, &s->gb, 6) + 1; LAST_SKIP_BITS(re, &s->gb, 6); UPDATE_CACHE(re, &s->gb); level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12); scantable += run; j = *scantable; if (level < 0) { level = (-level * qscale * quant_matrix[j]) >> 4; level = -level; } else { level = (level * qscale * quant_matrix[j]) >> 4; } } block[j] = level; } CLOSE_READER(re, &s->gb); } s->block_last_index[n] = scantable - s->intra_scantable.permutated; return 0; }
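The AC-coefficient loop above applies the sign bit with (level ^ s) - s, where s is 0 or -1 straight from SHOW_SBITS. A one-file demo of that branchless conditional negation:

#include <stdio.h>

/* (x ^ s) - s with s == 0 is the identity; with s == -1 (all ones) it is
 * two's-complement negation, so one mask word selects the sign with no
 * branch. */
static int apply_sign(int x, int s)
{
    return (x ^ s) - s;
}

int main(void)
{
    printf("%d %d\n", apply_sign(42, 0), apply_sign(42, -1)); /* 42 -42 */
    return 0;
}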
1
960
abi_long do_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, abi_long arg7, abi_long arg8) { CPUState *cpu = ENV_GET_CPU(cpu_env); abi_long ret; struct stat st; struct statfs stfs; void *p; #ifdef DEBUG gemu_log("syscall %d", num); #endif if(do_strace) print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); switch(num) { case TARGET_NR_exit: /* In old applications this may be used to implement _exit(2). However in threaded applications it is used for thread termination, and _exit_group is used for application termination. Do thread termination if we have more than one thread. */ /* FIXME: This probably breaks if a signal arrives. We should probably be disabling signals. */ if (CPU_NEXT(first_cpu)) { TaskState *ts; cpu_list_lock(); /* Remove the CPU from the list. */ QTAILQ_REMOVE(&cpus, cpu, node); cpu_list_unlock(); ts = cpu->opaque; if (ts->child_tidptr) { put_user_u32(0, ts->child_tidptr); sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, NULL, NULL, 0); } thread_cpu = NULL; object_unref(OBJECT(cpu)); g_free(ts); pthread_exit(NULL); } #ifdef TARGET_GPROF _mcleanup(); #endif gdb_exit(cpu_env, arg1); _exit(arg1); ret = 0; /* avoid warning */ break; case TARGET_NR_read: if (arg3 == 0) ret = 0; else { if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) goto efault; ret = get_errno(read(arg1, p, arg3)); unlock_user(p, arg2, ret); } break; case TARGET_NR_write: if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) goto efault; ret = get_errno(write(arg1, p, arg3)); unlock_user(p, arg2, 0); break; case TARGET_NR_open: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(do_open(cpu_env, p, target_to_host_bitmask(arg2, fcntl_flags_tbl), arg3)); unlock_user(p, arg1, 0); break; #if defined(TARGET_NR_openat) && defined(__NR_openat) case TARGET_NR_openat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_openat(arg1, path(p), target_to_host_bitmask(arg3, fcntl_flags_tbl), arg4)); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_close: ret = get_errno(close(arg1)); break; case TARGET_NR_brk: ret = do_brk(arg1); break; case TARGET_NR_fork: ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); break; #ifdef TARGET_NR_waitpid case TARGET_NR_waitpid: { int status; ret = get_errno(waitpid(arg1, &status, arg3)); if (!is_error(ret) && arg2 && ret && put_user_s32(host_to_target_waitstatus(status), arg2)) goto efault; } break; #endif #ifdef TARGET_NR_waitid case TARGET_NR_waitid: { siginfo_t info; info.si_pid = 0; ret = get_errno(waitid(arg1, arg2, &info, arg4)); if (!is_error(ret) && arg3 && info.si_pid != 0) { if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) goto efault; host_to_target_siginfo(p, &info); unlock_user(p, arg3, sizeof(target_siginfo_t)); } } break; #endif #ifdef TARGET_NR_creat /* not on alpha */ case TARGET_NR_creat: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(creat(p, arg2)); unlock_user(p, arg1, 0); break; #endif case TARGET_NR_link: { void * p2; p = lock_user_string(arg1); p2 = lock_user_string(arg2); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(link(p, p2)); unlock_user(p2, arg2, 0); unlock_user(p, arg1, 0); } break; #if defined(TARGET_NR_linkat) case TARGET_NR_linkat: { void * p2 = NULL; if (!arg2 || !arg4) goto efault; p = lock_user_string(arg2); p2 = lock_user_string(arg4); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); unlock_user(p, arg2, 0); unlock_user(p2, arg4, 0); } break; #endif case
TARGET_NR_unlink: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(unlink(p)); unlock_user(p, arg1, 0); break; #if defined(TARGET_NR_unlinkat) case TARGET_NR_unlinkat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(unlinkat(arg1, p, arg3)); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_execve: { char **argp, **envp; int argc, envc; abi_ulong gp; abi_ulong guest_argp; abi_ulong guest_envp; abi_ulong addr; char **q; int total_size = 0; argc = 0; guest_argp = arg2; for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { if (get_user_ual(addr, gp)) goto efault; if (!addr) break; argc++; } envc = 0; guest_envp = arg3; for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { if (get_user_ual(addr, gp)) goto efault; if (!addr) break; envc++; } argp = alloca((argc + 1) * sizeof(void *)); envp = alloca((envc + 1) * sizeof(void *)); for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp)) goto execve_efault; if (!addr) break; if (!(*q = lock_user_string(addr))) goto execve_efault; total_size += strlen(*q) + 1; } *q = NULL; for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp)) goto execve_efault; if (!addr) break; if (!(*q = lock_user_string(addr))) goto execve_efault; total_size += strlen(*q) + 1; } *q = NULL; /* This case will not be caught by the host's execve() if its page size is bigger than the target's. */ if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) { ret = -TARGET_E2BIG; goto execve_end; } if (!(p = lock_user_string(arg1))) goto execve_efault; ret = get_errno(execve(p, argp, envp)); unlock_user(p, arg1, 0); goto execve_end; execve_efault: ret = -TARGET_EFAULT; execve_end: for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp) || !addr) break; unlock_user(*q, addr, 0); } for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp) || !addr) break; unlock_user(*q, addr, 0); } } break; case TARGET_NR_chdir: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chdir(p)); unlock_user(p, arg1, 0); break; #ifdef TARGET_NR_time case TARGET_NR_time: { time_t host_time; ret = get_errno(time(&host_time)); if (!is_error(ret) && arg1 && put_user_sal(host_time, arg1)) goto efault; } break; #endif case TARGET_NR_mknod: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(mknod(p, arg2, arg3)); unlock_user(p, arg1, 0); break; #if defined(TARGET_NR_mknodat) case TARGET_NR_mknodat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(mknodat(arg1, p, arg3, arg4)); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_chmod: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chmod(p, arg2)); unlock_user(p, arg1, 0); break; #ifdef TARGET_NR_break case TARGET_NR_break: goto unimplemented; #endif #ifdef TARGET_NR_oldstat case TARGET_NR_oldstat: goto unimplemented; #endif case TARGET_NR_lseek: ret = get_errno(lseek(arg1, arg2, arg3)); break; #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_getxpid: ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); ret = get_errno(getpid()); break; #endif #ifdef TARGET_NR_getpid case TARGET_NR_getpid: ret = get_errno(getpid()); break; #endif case TARGET_NR_mount: { /* need to look at the data field */ void *p2, *p3; p = lock_user_string(arg1); p2 = lock_user_string(arg2); p3 = lock_user_string(arg3); if (!p || !p2 || !p3) ret = -TARGET_EFAULT; else { /* FIXME - arg5 should be locked, but it isn't clear 
how to * do that since it's not guaranteed to be a NULL-terminated * string. */ if ( ! arg5 ) ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL)); else ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5))); } unlock_user(p, arg1, 0); unlock_user(p2, arg2, 0); unlock_user(p3, arg3, 0); break; } #ifdef TARGET_NR_umount case TARGET_NR_umount: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(umount(p)); unlock_user(p, arg1, 0); break; #endif #ifdef TARGET_NR_stime /* not on alpha */ case TARGET_NR_stime: { time_t host_time; if (get_user_sal(host_time, arg1)) goto efault; ret = get_errno(stime(&host_time)); } break; #endif case TARGET_NR_ptrace: goto unimplemented; #ifdef TARGET_NR_alarm /* not on alpha */ case TARGET_NR_alarm: ret = alarm(arg1); break; #endif #ifdef TARGET_NR_oldfstat case TARGET_NR_oldfstat: goto unimplemented; #endif #ifdef TARGET_NR_pause /* not on alpha */ case TARGET_NR_pause: ret = get_errno(pause()); break; #endif #ifdef TARGET_NR_utime case TARGET_NR_utime: { struct utimbuf tbuf, *host_tbuf; struct target_utimbuf *target_tbuf; if (arg2) { if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) goto efault; tbuf.actime = tswapal(target_tbuf->actime); tbuf.modtime = tswapal(target_tbuf->modtime); unlock_user_struct(target_tbuf, arg2, 0); host_tbuf = &tbuf; } else { host_tbuf = NULL; } if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(utime(p, host_tbuf)); unlock_user(p, arg1, 0); } break; #endif case TARGET_NR_utimes: { struct timeval *tvp, tv[2]; if (arg2) { if (copy_from_user_timeval(&tv[0], arg2) || copy_from_user_timeval(&tv[1], arg2 + sizeof(struct target_timeval))) goto efault; tvp = tv; } else { tvp = NULL; } if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(utimes(p, tvp)); unlock_user(p, arg1, 0); } break; #if defined(TARGET_NR_futimesat) case TARGET_NR_futimesat: { struct timeval *tvp, tv[2]; if (arg3) { if (copy_from_user_timeval(&tv[0], arg3) || copy_from_user_timeval(&tv[1], arg3 + sizeof(struct target_timeval))) goto efault; tvp = tv; } else { tvp = NULL; } if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(futimesat(arg1, path(p), tvp)); unlock_user(p, arg2, 0); } break; #endif #ifdef TARGET_NR_stty case TARGET_NR_stty: goto unimplemented; #endif #ifdef TARGET_NR_gtty case TARGET_NR_gtty: goto unimplemented; #endif case TARGET_NR_access: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(access(path(p), arg2)); unlock_user(p, arg1, 0); break; #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) case TARGET_NR_faccessat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(faccessat(arg1, p, arg3, 0)); unlock_user(p, arg2, 0); break; #endif #ifdef TARGET_NR_nice /* not on alpha */ case TARGET_NR_nice: ret = get_errno(nice(arg1)); break; #endif #ifdef TARGET_NR_ftime case TARGET_NR_ftime: goto unimplemented; #endif case TARGET_NR_sync: sync(); break; case TARGET_NR_kill: ret = get_errno(kill(arg1, target_to_host_signal(arg2))); break; case TARGET_NR_rename: { void *p2; p = lock_user_string(arg1); p2 = lock_user_string(arg2); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(rename(p, p2)); unlock_user(p2, arg2, 0); unlock_user(p, arg1, 0); } break; #if defined(TARGET_NR_renameat) case TARGET_NR_renameat: { void *p2; p = lock_user_string(arg2); p2 = lock_user_string(arg4); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(renameat(arg1, p, arg3, p2)); unlock_user(p2, arg4, 0); unlock_user(p, arg2, 0); } break; #endif case 
TARGET_NR_mkdir: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(mkdir(p, arg2)); unlock_user(p, arg1, 0); break; #if defined(TARGET_NR_mkdirat) case TARGET_NR_mkdirat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(mkdirat(arg1, p, arg3)); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_rmdir: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(rmdir(p)); unlock_user(p, arg1, 0); break; case TARGET_NR_dup: ret = get_errno(dup(arg1)); break; case TARGET_NR_pipe: ret = do_pipe(cpu_env, arg1, 0, 0); break; #ifdef TARGET_NR_pipe2 case TARGET_NR_pipe2: ret = do_pipe(cpu_env, arg1, target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); break; #endif case TARGET_NR_times: { struct target_tms *tmsp; struct tms tms; ret = get_errno(times(&tms)); if (arg1) { tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); if (!tmsp) goto efault; tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); } if (!is_error(ret)) ret = host_to_target_clock_t(ret); } break; #ifdef TARGET_NR_prof case TARGET_NR_prof: goto unimplemented; #endif #ifdef TARGET_NR_signal case TARGET_NR_signal: goto unimplemented; #endif case TARGET_NR_acct: if (arg1 == 0) { ret = get_errno(acct(NULL)); } else { if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(acct(path(p))); unlock_user(p, arg1, 0); } break; #ifdef TARGET_NR_umount2 case TARGET_NR_umount2: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(umount2(p, arg2)); unlock_user(p, arg1, 0); break; #endif #ifdef TARGET_NR_lock case TARGET_NR_lock: goto unimplemented; #endif case TARGET_NR_ioctl: ret = do_ioctl(arg1, arg2, arg3); break; case TARGET_NR_fcntl: ret = do_fcntl(arg1, arg2, arg3); break; #ifdef TARGET_NR_mpx case TARGET_NR_mpx: goto unimplemented; #endif case TARGET_NR_setpgid: ret = get_errno(setpgid(arg1, arg2)); break; #ifdef TARGET_NR_ulimit case TARGET_NR_ulimit: goto unimplemented; #endif #ifdef TARGET_NR_oldolduname case TARGET_NR_oldolduname: goto unimplemented; #endif case TARGET_NR_umask: ret = get_errno(umask(arg1)); break; case TARGET_NR_chroot: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chroot(p)); unlock_user(p, arg1, 0); break; case TARGET_NR_ustat: goto unimplemented; case TARGET_NR_dup2: ret = get_errno(dup2(arg1, arg2)); break; #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) case TARGET_NR_dup3: ret = get_errno(dup3(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_getppid /* not on alpha */ case TARGET_NR_getppid: ret = get_errno(getppid()); break; #endif case TARGET_NR_getpgrp: ret = get_errno(getpgrp()); break; case TARGET_NR_setsid: ret = get_errno(setsid()); break; #ifdef TARGET_NR_sigaction case TARGET_NR_sigaction: { #if defined(TARGET_ALPHA) struct target_sigaction act, oact, *pact = 0; struct target_old_sigaction *old_act; if (arg2) { if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) goto efault; act._sa_handler = old_act->_sa_handler; target_siginitset(&act.sa_mask, old_act->sa_mask); act.sa_flags = old_act->sa_flags; act.sa_restorer = 0; unlock_user_struct(old_act, arg2, 0); pact = &act; } ret = get_errno(do_sigaction(arg1, pact, &oact)); if (!is_error(ret) && arg3) { if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) goto efault; old_act->_sa_handler = oact._sa_handler; old_act->sa_mask = oact.sa_mask.sig[0]; 
old_act->sa_flags = oact.sa_flags; unlock_user_struct(old_act, arg3, 1); } #elif defined(TARGET_MIPS) struct target_sigaction act, oact, *pact, *old_act; if (arg2) { if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) goto efault; act._sa_handler = old_act->_sa_handler; target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); act.sa_flags = old_act->sa_flags; unlock_user_struct(old_act, arg2, 0); pact = &act; } else { pact = NULL; } ret = get_errno(do_sigaction(arg1, pact, &oact)); if (!is_error(ret) && arg3) { if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) goto efault; old_act->_sa_handler = oact._sa_handler; old_act->sa_flags = oact.sa_flags; old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; old_act->sa_mask.sig[1] = 0; old_act->sa_mask.sig[2] = 0; old_act->sa_mask.sig[3] = 0; unlock_user_struct(old_act, arg3, 1); } #else struct target_old_sigaction *old_act; struct target_sigaction act, oact, *pact; if (arg2) { if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) goto efault; act._sa_handler = old_act->_sa_handler; target_siginitset(&act.sa_mask, old_act->sa_mask); act.sa_flags = old_act->sa_flags; act.sa_restorer = old_act->sa_restorer; unlock_user_struct(old_act, arg2, 0); pact = &act; } else { pact = NULL; } ret = get_errno(do_sigaction(arg1, pact, &oact)); if (!is_error(ret) && arg3) { if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) goto efault; old_act->_sa_handler = oact._sa_handler; old_act->sa_mask = oact.sa_mask.sig[0]; old_act->sa_flags = oact.sa_flags; old_act->sa_restorer = oact.sa_restorer; unlock_user_struct(old_act, arg3, 1); } #endif } break; #endif case TARGET_NR_rt_sigaction: { #if defined(TARGET_ALPHA) struct target_sigaction act, oact, *pact = 0; struct target_rt_sigaction *rt_act; /* ??? arg4 == sizeof(sigset_t). 
*/ if (arg2) { if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) goto efault; act._sa_handler = rt_act->_sa_handler; act.sa_mask = rt_act->sa_mask; act.sa_flags = rt_act->sa_flags; act.sa_restorer = arg5; unlock_user_struct(rt_act, arg2, 0); pact = &act; } ret = get_errno(do_sigaction(arg1, pact, &oact)); if (!is_error(ret) && arg3) { if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) goto efault; rt_act->_sa_handler = oact._sa_handler; rt_act->sa_mask = oact.sa_mask; rt_act->sa_flags = oact.sa_flags; unlock_user_struct(rt_act, arg3, 1); } #else struct target_sigaction *act; struct target_sigaction *oact; if (arg2) { if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) goto efault; } else act = NULL; if (arg3) { if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { ret = -TARGET_EFAULT; goto rt_sigaction_fail; } } else oact = NULL; ret = get_errno(do_sigaction(arg1, act, oact)); rt_sigaction_fail: if (act) unlock_user_struct(act, arg2, 0); if (oact) unlock_user_struct(oact, arg3, 1); #endif } break; #ifdef TARGET_NR_sgetmask /* not on alpha */ case TARGET_NR_sgetmask: { sigset_t cur_set; abi_ulong target_set; sigprocmask(0, NULL, &cur_set); host_to_target_old_sigset(&target_set, &cur_set); ret = target_set; } break; #endif #ifdef TARGET_NR_ssetmask /* not on alpha */ case TARGET_NR_ssetmask: { sigset_t set, oset, cur_set; abi_ulong target_set = arg1; sigprocmask(0, NULL, &cur_set); target_to_host_old_sigset(&set, &target_set); sigorset(&set, &set, &cur_set); sigprocmask(SIG_SETMASK, &set, &oset); host_to_target_old_sigset(&target_set, &oset); ret = target_set; } break; #endif #ifdef TARGET_NR_sigprocmask case TARGET_NR_sigprocmask: { #if defined(TARGET_ALPHA) sigset_t set, oldset; abi_ulong mask; int how; switch (arg1) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; break; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; break; case TARGET_SIG_SETMASK: how = SIG_SETMASK; break; default: ret = -TARGET_EINVAL; goto fail; } mask = arg2; target_to_host_old_sigset(&set, &mask); ret = get_errno(sigprocmask(how, &set, &oldset)); if (!is_error(ret)) { host_to_target_old_sigset(&mask, &oldset); ret = mask; ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ } #else sigset_t set, oldset, *set_ptr; int how; if (arg2) { switch (arg1) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; break; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; break; case TARGET_SIG_SETMASK: how = SIG_SETMASK; break; default: ret = -TARGET_EINVAL; goto fail; } if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) goto efault; target_to_host_old_sigset(&set, p); unlock_user(p, arg2, 0); set_ptr = &set; } else { how = 0; set_ptr = NULL; } ret = get_errno(sigprocmask(how, set_ptr, &oldset)); if (!is_error(ret) && arg3) { if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) goto efault; host_to_target_old_sigset(p, &oldset); unlock_user(p, arg3, sizeof(target_sigset_t)); } #endif } break; #endif case TARGET_NR_rt_sigprocmask: { int how = arg1; sigset_t set, oldset, *set_ptr; if (arg2) { switch(how) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; break; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; break; case TARGET_SIG_SETMASK: how = SIG_SETMASK; break; default: ret = -TARGET_EINVAL; goto fail; } if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) goto efault; target_to_host_sigset(&set, p); unlock_user(p, arg2, 0); set_ptr = &set; } else { how = 0; set_ptr = NULL; } ret = get_errno(sigprocmask(how, set_ptr, &oldset)); if (!is_error(ret) && arg3) { if (!(p = lock_user(VERIFY_WRITE, arg3, 
sizeof(target_sigset_t), 0))) goto efault; host_to_target_sigset(p, &oldset); unlock_user(p, arg3, sizeof(target_sigset_t)); } } break; #ifdef TARGET_NR_sigpending case TARGET_NR_sigpending: { sigset_t set; ret = get_errno(sigpending(&set)); if (!is_error(ret)) { if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) goto efault; host_to_target_old_sigset(p, &set); unlock_user(p, arg1, sizeof(target_sigset_t)); } } break; #endif case TARGET_NR_rt_sigpending: { sigset_t set; ret = get_errno(sigpending(&set)); if (!is_error(ret)) { if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) goto efault; host_to_target_sigset(p, &set); unlock_user(p, arg1, sizeof(target_sigset_t)); } } break; #ifdef TARGET_NR_sigsuspend case TARGET_NR_sigsuspend: { sigset_t set; #if defined(TARGET_ALPHA) abi_ulong mask = arg1; target_to_host_old_sigset(&set, &mask); #else if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) goto efault; target_to_host_old_sigset(&set, p); unlock_user(p, arg1, 0); #endif ret = get_errno(sigsuspend(&set)); } break; #endif case TARGET_NR_rt_sigsuspend: { sigset_t set; if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) goto efault; target_to_host_sigset(&set, p); unlock_user(p, arg1, 0); ret = get_errno(sigsuspend(&set)); } break; case TARGET_NR_rt_sigtimedwait: { sigset_t set; struct timespec uts, *puts; siginfo_t uinfo; if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) goto efault; target_to_host_sigset(&set, p); unlock_user(p, arg1, 0); if (arg3) { puts = &uts; target_to_host_timespec(puts, arg3); } else { puts = NULL; } ret = get_errno(sigtimedwait(&set, &uinfo, puts)); if (!is_error(ret)) { if (arg2) { p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0); if (!p) { goto efault; } host_to_target_siginfo(p, &uinfo); unlock_user(p, arg2, sizeof(target_siginfo_t)); } ret = host_to_target_signal(ret); } } break; case TARGET_NR_rt_sigqueueinfo: { siginfo_t uinfo; if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1))) goto efault; target_to_host_siginfo(&uinfo, p); unlock_user(p, arg3, 0); ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); } break; #ifdef TARGET_NR_sigreturn case TARGET_NR_sigreturn: /* NOTE: ret is eax, so no transcoding is needed */ ret = do_sigreturn(cpu_env); break; #endif case TARGET_NR_rt_sigreturn: /* NOTE: ret is eax, so no transcoding is needed */ ret = do_rt_sigreturn(cpu_env); break; case TARGET_NR_sethostname: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(sethostname(p, arg2)); unlock_user(p, arg1, 0); break; case TARGET_NR_setrlimit: { int resource = target_to_host_resource(arg1); struct target_rlimit *target_rlim; struct rlimit rlim; if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) goto efault; rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); unlock_user_struct(target_rlim, arg2, 0); ret = get_errno(setrlimit(resource, &rlim)); } break; case TARGET_NR_getrlimit: { int resource = target_to_host_resource(arg1); struct target_rlimit *target_rlim; struct rlimit rlim; ret = get_errno(getrlimit(resource, &rlim)); if (!is_error(ret)) { if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) goto efault; target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); unlock_user_struct(target_rlim, arg2, 1); } } break; case TARGET_NR_getrusage: { struct rusage rusage; ret =
get_errno(getrusage(arg1, &rusage)); if (!is_error(ret)) { host_to_target_rusage(arg2, &rusage); } } break; case TARGET_NR_gettimeofday: { struct timeval tv; ret = get_errno(gettimeofday(&tv, NULL)); if (!is_error(ret)) { if (copy_to_user_timeval(arg1, &tv)) goto efault; } } break; case TARGET_NR_settimeofday: { struct timeval tv; if (copy_from_user_timeval(&tv, arg1)) goto efault; ret = get_errno(settimeofday(&tv, NULL)); } break; #if defined(TARGET_NR_select) case TARGET_NR_select: #if defined(TARGET_S390X) || defined(TARGET_ALPHA) ret = do_select(arg1, arg2, arg3, arg4, arg5); #else { struct target_sel_arg_struct *sel; abi_ulong inp, outp, exp, tvp; long nsel; if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) goto efault; nsel = tswapal(sel->n); inp = tswapal(sel->inp); outp = tswapal(sel->outp); exp = tswapal(sel->exp); tvp = tswapal(sel->tvp); unlock_user_struct(sel, arg1, 0); ret = do_select(nsel, inp, outp, exp, tvp); } #endif break; #endif #ifdef TARGET_NR_pselect6 case TARGET_NR_pselect6: { abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; fd_set rfds, wfds, efds; fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; struct timespec ts, *ts_ptr; /* * The 6th arg is actually two args smashed together, * so we cannot use the C library. */ sigset_t set; struct { sigset_t *set; size_t size; } sig, *sig_ptr; abi_ulong arg_sigset, arg_sigsize, *arg7; target_sigset_t *target_sigset; n = arg1; rfd_addr = arg2; wfd_addr = arg3; efd_addr = arg4; ts_addr = arg5; ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); if (ret) { goto fail; } ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); if (ret) { goto fail; } ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); if (ret) { goto fail; } /* * This takes a timespec, and not a timeval, so we cannot * use the do_select() helper ... 
*/ if (ts_addr) { if (target_to_host_timespec(&ts, ts_addr)) { goto efault; } ts_ptr = &ts; } else { ts_ptr = NULL; } /* Extract the two packed args for the sigset */ if (arg6) { sig_ptr = &sig; sig.size = _NSIG / 8; arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); if (!arg7) { goto efault; } arg_sigset = tswapal(arg7[0]); arg_sigsize = tswapal(arg7[1]); unlock_user(arg7, arg6, 0); if (arg_sigset) { sig.set = &set; if (arg_sigsize != sizeof(*target_sigset)) { /* Like the kernel, we enforce correct size sigsets */ ret = -TARGET_EINVAL; goto fail; } target_sigset = lock_user(VERIFY_READ, arg_sigset, sizeof(*target_sigset), 1); if (!target_sigset) { goto efault; } target_to_host_sigset(&set, target_sigset); unlock_user(target_sigset, arg_sigset, 0); } else { sig.set = NULL; } } else { sig_ptr = NULL; } ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, ts_ptr, sig_ptr)); if (!is_error(ret)) { if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) goto efault; if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) goto efault; if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) goto efault; if (ts_addr && host_to_target_timespec(ts_addr, &ts)) goto efault; } } break; #endif case TARGET_NR_symlink: { void *p2; p = lock_user_string(arg1); p2 = lock_user_string(arg2); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(symlink(p, p2)); unlock_user(p2, arg2, 0); unlock_user(p, arg1, 0); } break; #if defined(TARGET_NR_symlinkat) case TARGET_NR_symlinkat: { void *p2; p = lock_user_string(arg1); p2 = lock_user_string(arg3); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(symlinkat(p, arg2, p2)); unlock_user(p2, arg3, 0); unlock_user(p, arg1, 0); } break; #endif #ifdef TARGET_NR_oldlstat case TARGET_NR_oldlstat: goto unimplemented; #endif case TARGET_NR_readlink: { void *p2; p = lock_user_string(arg1); p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); if (!p || !p2) { ret = -TARGET_EFAULT; } else if (is_proc_myself((const char *)p, "exe")) { char real[PATH_MAX], *temp; temp = realpath(exec_path, real); ret = temp == NULL ? get_errno(-1) : strlen(real) ; snprintf((char *)p2, arg3, "%s", real); } else { ret = get_errno(readlink(path(p), p2, arg3)); } unlock_user(p2, arg2, ret); unlock_user(p, arg1, 0); } break; #if defined(TARGET_NR_readlinkat) case TARGET_NR_readlinkat: { void *p2; p = lock_user_string(arg2); p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); if (!p || !p2) { ret = -TARGET_EFAULT; } else if (is_proc_myself((const char *)p, "exe")) { char real[PATH_MAX], *temp; temp = realpath(exec_path, real); ret = temp == NULL ? 
get_errno(-1) : strlen(real) ; snprintf((char *)p2, arg4, "%s", real); } else { ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); } unlock_user(p2, arg3, ret); unlock_user(p, arg2, 0); } break; #endif #ifdef TARGET_NR_uselib case TARGET_NR_uselib: goto unimplemented; #endif #ifdef TARGET_NR_swapon case TARGET_NR_swapon: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(swapon(p, arg2)); unlock_user(p, arg1, 0); break; #endif case TARGET_NR_reboot: if (arg3 == LINUX_REBOOT_CMD_RESTART2) { /* arg4 must be ignored in all other cases */ p = lock_user_string(arg4); if (!p) { goto efault; } ret = get_errno(reboot(arg1, arg2, arg3, p)); unlock_user(p, arg4, 0); } else { ret = get_errno(reboot(arg1, arg2, arg3, NULL)); } break; #ifdef TARGET_NR_readdir case TARGET_NR_readdir: goto unimplemented; #endif #ifdef TARGET_NR_mmap case TARGET_NR_mmap: #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ || defined(TARGET_S390X) { abi_ulong *v; abi_ulong v1, v2, v3, v4, v5, v6; if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) goto efault; v1 = tswapal(v[0]); v2 = tswapal(v[1]); v3 = tswapal(v[2]); v4 = tswapal(v[3]); v5 = tswapal(v[4]); v6 = tswapal(v[5]); unlock_user(v, arg1, 0); ret = get_errno(target_mmap(v1, v2, v3, target_to_host_bitmask(v4, mmap_flags_tbl), v5, v6)); } #else ret = get_errno(target_mmap(arg1, arg2, arg3, target_to_host_bitmask(arg4, mmap_flags_tbl), arg5, arg6)); #endif break; #endif #ifdef TARGET_NR_mmap2 case TARGET_NR_mmap2: #ifndef MMAP_SHIFT #define MMAP_SHIFT 12 #endif ret = get_errno(target_mmap(arg1, arg2, arg3, target_to_host_bitmask(arg4, mmap_flags_tbl), arg5, arg6 << MMAP_SHIFT)); break; #endif case TARGET_NR_munmap: ret = get_errno(target_munmap(arg1, arg2)); break; case TARGET_NR_mprotect: { TaskState *ts = cpu->opaque; /* Special hack to detect libc making the stack executable. */ if ((arg3 & PROT_GROWSDOWN) && arg1 >= ts->info->stack_limit && arg1 <= ts->info->start_stack) { arg3 &= ~PROT_GROWSDOWN; arg2 = arg2 + arg1 - ts->info->stack_limit; arg1 = ts->info->stack_limit; } } ret = get_errno(target_mprotect(arg1, arg2, arg3)); break; #ifdef TARGET_NR_mremap case TARGET_NR_mremap: ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); break; #endif /* ??? msync/mlock/munlock are broken for softmmu. */ #ifdef TARGET_NR_msync case TARGET_NR_msync: ret = get_errno(msync(g2h(arg1), arg2, arg3)); break; #endif #ifdef TARGET_NR_mlock case TARGET_NR_mlock: ret = get_errno(mlock(g2h(arg1), arg2)); break; #endif #ifdef TARGET_NR_munlock case TARGET_NR_munlock: ret = get_errno(munlock(g2h(arg1), arg2)); break; #endif #ifdef TARGET_NR_mlockall case TARGET_NR_mlockall: ret = get_errno(mlockall(arg1)); break; #endif #ifdef TARGET_NR_munlockall case TARGET_NR_munlockall: ret = get_errno(munlockall()); break; #endif case TARGET_NR_truncate: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(truncate(p, arg2)); unlock_user(p, arg1, 0); break; case TARGET_NR_ftruncate: ret = get_errno(ftruncate(arg1, arg2)); break; case TARGET_NR_fchmod: ret = get_errno(fchmod(arg1, arg2)); break; #if defined(TARGET_NR_fchmodat) case TARGET_NR_fchmodat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(fchmodat(arg1, p, arg3, 0)); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_getpriority: /* Note that negative values are valid for getpriority, so we must differentiate based on errno settings. 
*/ errno = 0; ret = getpriority(arg1, arg2); if (ret == -1 && errno != 0) { ret = -host_to_target_errno(errno); break; } #ifdef TARGET_ALPHA /* Return value is the unbiased priority. Signal no error. */ ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; #else /* Return value is a biased priority to avoid negative numbers. */ ret = 20 - ret; #endif break; case TARGET_NR_setpriority: ret = get_errno(setpriority(arg1, arg2, arg3)); break; #ifdef TARGET_NR_profil case TARGET_NR_profil: goto unimplemented; #endif case TARGET_NR_statfs: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(statfs(path(p), &stfs)); unlock_user(p, arg1, 0); convert_statfs: if (!is_error(ret)) { struct target_statfs *target_stfs; if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) goto efault; __put_user(stfs.f_type, &target_stfs->f_type); __put_user(stfs.f_bsize, &target_stfs->f_bsize); __put_user(stfs.f_blocks, &target_stfs->f_blocks); __put_user(stfs.f_bfree, &target_stfs->f_bfree); __put_user(stfs.f_bavail, &target_stfs->f_bavail); __put_user(stfs.f_files, &target_stfs->f_files); __put_user(stfs.f_ffree, &target_stfs->f_ffree); __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); __put_user(stfs.f_namelen, &target_stfs->f_namelen); __put_user(stfs.f_frsize, &target_stfs->f_frsize); memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); unlock_user_struct(target_stfs, arg2, 1); } break; case TARGET_NR_fstatfs: ret = get_errno(fstatfs(arg1, &stfs)); goto convert_statfs; #ifdef TARGET_NR_statfs64 case TARGET_NR_statfs64: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(statfs(path(p), &stfs)); unlock_user(p, arg1, 0); convert_statfs64: if (!is_error(ret)) { struct target_statfs64 *target_stfs; if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) goto efault; __put_user(stfs.f_type, &target_stfs->f_type); __put_user(stfs.f_bsize, &target_stfs->f_bsize); __put_user(stfs.f_blocks, &target_stfs->f_blocks); __put_user(stfs.f_bfree, &target_stfs->f_bfree); __put_user(stfs.f_bavail, &target_stfs->f_bavail); __put_user(stfs.f_files, &target_stfs->f_files); __put_user(stfs.f_ffree, &target_stfs->f_ffree); __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); __put_user(stfs.f_namelen, &target_stfs->f_namelen); __put_user(stfs.f_frsize, &target_stfs->f_frsize); memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); unlock_user_struct(target_stfs, arg3, 1); } break; case TARGET_NR_fstatfs64: ret = get_errno(fstatfs(arg1, &stfs)); goto convert_statfs64; #endif #ifdef TARGET_NR_ioperm case TARGET_NR_ioperm: goto unimplemented; #endif #ifdef TARGET_NR_socketcall case TARGET_NR_socketcall: ret = do_socketcall(arg1, arg2); break; #endif #ifdef TARGET_NR_accept case TARGET_NR_accept: ret = do_accept4(arg1, arg2, arg3, 0); break; #endif #ifdef TARGET_NR_accept4 case TARGET_NR_accept4: #ifdef CONFIG_ACCEPT4 ret = do_accept4(arg1, arg2, arg3, arg4); #else goto unimplemented; #endif break; #endif #ifdef TARGET_NR_bind case TARGET_NR_bind: ret = do_bind(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_connect case TARGET_NR_connect: ret = do_connect(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_getpeername case TARGET_NR_getpeername: ret = do_getpeername(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_getsockname case TARGET_NR_getsockname: ret = do_getsockname(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_getsockopt case 
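/* Socket option levels and names differ between guest and host ABIs; do_getsockopt() handles that translation. */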
TARGET_NR_getsockopt: ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); break; #endif #ifdef TARGET_NR_listen case TARGET_NR_listen: ret = get_errno(listen(arg1, arg2)); break; #endif #ifdef TARGET_NR_recv case TARGET_NR_recv: ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); break; #endif #ifdef TARGET_NR_recvfrom case TARGET_NR_recvfrom: ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); break; #endif #ifdef TARGET_NR_recvmsg case TARGET_NR_recvmsg: ret = do_sendrecvmsg(arg1, arg2, arg3, 0); break; #endif #ifdef TARGET_NR_send case TARGET_NR_send: ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); break; #endif #ifdef TARGET_NR_sendmsg case TARGET_NR_sendmsg: ret = do_sendrecvmsg(arg1, arg2, arg3, 1); break; #endif #ifdef TARGET_NR_sendmmsg case TARGET_NR_sendmmsg: ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); break; case TARGET_NR_recvmmsg: ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); break; #endif #ifdef TARGET_NR_sendto case TARGET_NR_sendto: ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); break; #endif #ifdef TARGET_NR_shutdown case TARGET_NR_shutdown: ret = get_errno(shutdown(arg1, arg2)); break; #endif #ifdef TARGET_NR_socket case TARGET_NR_socket: ret = do_socket(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_socketpair case TARGET_NR_socketpair: ret = do_socketpair(arg1, arg2, arg3, arg4); break; #endif #ifdef TARGET_NR_setsockopt case TARGET_NR_setsockopt: ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); break; #endif case TARGET_NR_syslog: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); unlock_user(p, arg2, 0); break; case TARGET_NR_setitimer: { struct itimerval value, ovalue, *pvalue; if (arg2) { pvalue = &value; if (copy_from_user_timeval(&pvalue->it_interval, arg2) || copy_from_user_timeval(&pvalue->it_value, arg2 + sizeof(struct target_timeval))) goto efault; } else { pvalue = NULL; } ret = get_errno(setitimer(arg1, pvalue, &ovalue)); if (!is_error(ret) && arg3) { if (copy_to_user_timeval(arg3, &ovalue.it_interval) || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), &ovalue.it_value)) goto efault; } } break; case TARGET_NR_getitimer: { struct itimerval value; ret = get_errno(getitimer(arg1, &value)); if (!is_error(ret) && arg2) { if (copy_to_user_timeval(arg2, &value.it_interval) || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), &value.it_value)) goto efault; } } break; case TARGET_NR_stat: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(stat(path(p), &st)); unlock_user(p, arg1, 0); goto do_stat; case TARGET_NR_lstat: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lstat(path(p), &st)); unlock_user(p, arg1, 0); goto do_stat; case TARGET_NR_fstat: { ret = get_errno(fstat(arg1, &st)); do_stat: if (!is_error(ret)) { struct target_stat *target_st; if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) goto efault; memset(target_st, 0, sizeof(*target_st)); __put_user(st.st_dev, &target_st->st_dev); __put_user(st.st_ino, &target_st->st_ino); __put_user(st.st_mode, &target_st->st_mode); __put_user(st.st_uid, &target_st->st_uid); __put_user(st.st_gid, &target_st->st_gid); __put_user(st.st_nlink, &target_st->st_nlink); __put_user(st.st_rdev, &target_st->st_rdev); __put_user(st.st_size, &target_st->st_size); __put_user(st.st_blksize, &target_st->st_blksize); __put_user(st.st_blocks, &target_st->st_blocks); __put_user(st.st_atime, &target_st->target_st_atime); __put_user(st.st_mtime, &target_st->target_st_mtime); __put_user(st.st_ctime, 
&target_st->target_st_ctime); unlock_user_struct(target_st, arg2, 1); } } break; #ifdef TARGET_NR_olduname case TARGET_NR_olduname: goto unimplemented; #endif #ifdef TARGET_NR_iopl case TARGET_NR_iopl: goto unimplemented; #endif case TARGET_NR_vhangup: ret = get_errno(vhangup()); break; #ifdef TARGET_NR_idle case TARGET_NR_idle: goto unimplemented; #endif #ifdef TARGET_NR_syscall case TARGET_NR_syscall: ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, arg6, arg7, arg8, 0); break; #endif case TARGET_NR_wait4: { int status; abi_long status_ptr = arg2; struct rusage rusage, *rusage_ptr; abi_ulong target_rusage = arg4; if (target_rusage) rusage_ptr = &rusage; else rusage_ptr = NULL; ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr)); if (!is_error(ret)) { if (status_ptr && ret) { status = host_to_target_waitstatus(status); if (put_user_s32(status, status_ptr)) goto efault; } if (target_rusage) host_to_target_rusage(target_rusage, &rusage); } } break; #ifdef TARGET_NR_swapoff case TARGET_NR_swapoff: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(swapoff(p)); unlock_user(p, arg1, 0); break; #endif case TARGET_NR_sysinfo: { struct target_sysinfo *target_value; struct sysinfo value; ret = get_errno(sysinfo(&value)); if (!is_error(ret) && arg1) { if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) goto efault; __put_user(value.uptime, &target_value->uptime); __put_user(value.loads[0], &target_value->loads[0]); __put_user(value.loads[1], &target_value->loads[1]); __put_user(value.loads[2], &target_value->loads[2]); __put_user(value.totalram, &target_value->totalram); __put_user(value.freeram, &target_value->freeram); __put_user(value.sharedram, &target_value->sharedram); __put_user(value.bufferram, &target_value->bufferram); __put_user(value.totalswap, &target_value->totalswap); __put_user(value.freeswap, &target_value->freeswap); __put_user(value.procs, &target_value->procs); __put_user(value.totalhigh, &target_value->totalhigh); __put_user(value.freehigh, &target_value->freehigh); __put_user(value.mem_unit, &target_value->mem_unit); unlock_user_struct(target_value, arg1, 1); } } break; #ifdef TARGET_NR_ipc case TARGET_NR_ipc: ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); break; #endif #ifdef TARGET_NR_semget case TARGET_NR_semget: ret = get_errno(semget(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_semop case TARGET_NR_semop: ret = do_semop(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_semctl case TARGET_NR_semctl: ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4); break; #endif #ifdef TARGET_NR_msgctl case TARGET_NR_msgctl: ret = do_msgctl(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_msgget case TARGET_NR_msgget: ret = get_errno(msgget(arg1, arg2)); break; #endif #ifdef TARGET_NR_msgrcv case TARGET_NR_msgrcv: ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); break; #endif #ifdef TARGET_NR_msgsnd case TARGET_NR_msgsnd: ret = do_msgsnd(arg1, arg2, arg3, arg4); break; #endif #ifdef TARGET_NR_shmget case TARGET_NR_shmget: ret = get_errno(shmget(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_shmctl case TARGET_NR_shmctl: ret = do_shmctl(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_shmat case TARGET_NR_shmat: ret = do_shmat(arg1, arg2, arg3); break; #endif #ifdef TARGET_NR_shmdt case TARGET_NR_shmdt: ret = do_shmdt(arg1); break; #endif case TARGET_NR_fsync: ret = get_errno(fsync(arg1)); break; case TARGET_NR_clone: /* Linux manages to have three different orderings for its * arguments to clone(); the BACKWARDS 
and BACKWARDS2 defines * match the kernel's CONFIG_CLONE_* settings. * Microblaze is further special in that it uses a sixth * implicit argument to clone for the TLS pointer. */ #if defined(TARGET_MICROBLAZE) ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); #elif defined(TARGET_CLONE_BACKWARDS) ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); #elif defined(TARGET_CLONE_BACKWARDS2) ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); #else ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); #endif break; #ifdef __NR_exit_group /* new thread calls */ case TARGET_NR_exit_group: #ifdef TARGET_GPROF _mcleanup(); #endif gdb_exit(cpu_env, arg1); ret = get_errno(exit_group(arg1)); break; #endif case TARGET_NR_setdomainname: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(setdomainname(p, arg2)); unlock_user(p, arg1, 0); break; case TARGET_NR_uname: /* no need to transcode because we use the linux syscall */ { struct new_utsname * buf; if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) goto efault; ret = get_errno(sys_uname(buf)); if (!is_error(ret)) { /* Overwrite the native machine name with whatever is being emulated. */ strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); /* Allow the user to override the reported release. */ if (qemu_uname_release && *qemu_uname_release) strcpy (buf->release, qemu_uname_release); } unlock_user_struct(buf, arg1, 1); } break; #ifdef TARGET_I386 case TARGET_NR_modify_ldt: ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); break; #if !defined(TARGET_X86_64) case TARGET_NR_vm86old: goto unimplemented; case TARGET_NR_vm86: ret = do_vm86(cpu_env, arg1, arg2); break; #endif #endif case TARGET_NR_adjtimex: goto unimplemented; #ifdef TARGET_NR_create_module case TARGET_NR_create_module: #endif case TARGET_NR_init_module: case TARGET_NR_delete_module: #ifdef TARGET_NR_get_kernel_syms case TARGET_NR_get_kernel_syms: #endif goto unimplemented; case TARGET_NR_quotactl: goto unimplemented; case TARGET_NR_getpgid: ret = get_errno(getpgid(arg1)); break; case TARGET_NR_fchdir: ret = get_errno(fchdir(arg1)); break; #ifdef TARGET_NR_bdflush /* not on x86_64 */ case TARGET_NR_bdflush: goto unimplemented; #endif #ifdef TARGET_NR_sysfs case TARGET_NR_sysfs: goto unimplemented; #endif case TARGET_NR_personality: ret = get_errno(personality(arg1)); break; #ifdef TARGET_NR_afs_syscall case TARGET_NR_afs_syscall: goto unimplemented; #endif #ifdef TARGET_NR__llseek /* Not on alpha */ case TARGET_NR__llseek: { int64_t res; #if !defined(__NR_llseek) res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5); if (res == -1) { ret = get_errno(res); } else { ret = 0; } #else ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); #endif if ((ret == 0) && put_user_s64(res, arg4)) { goto efault; } } break; #endif case TARGET_NR_getdents: #ifdef __NR_getdents #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 { struct target_dirent *target_dirp; struct linux_dirent *dirp; abi_long count = arg3; dirp = malloc(count); if (!dirp) { ret = -TARGET_ENOMEM; goto fail; } ret = get_errno(sys_getdents(arg1, dirp, count)); if (!is_error(ret)) { struct linux_dirent *de; struct target_dirent *tde; int len = ret; int reclen, treclen; int count1, tnamelen; count1 = 0; de = dirp; if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) goto efault; tde = target_dirp; while (len > 0) { reclen = de->d_reclen; tnamelen = reclen - offsetof(struct linux_dirent, d_name); assert(tnamelen >= 0); treclen = tnamelen + offsetof(struct target_dirent, d_name);
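/* The repacked records must still fit in the guest buffer: on this 32-bit-target/64-bit-host path a target_dirent is never larger than the corresponding host linux_dirent. */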
assert(count1 + treclen <= count); tde->d_reclen = tswap16(treclen); tde->d_ino = tswapal(de->d_ino); tde->d_off = tswapal(de->d_off); memcpy(tde->d_name, de->d_name, tnamelen); de = (struct linux_dirent *)((char *)de + reclen); len -= reclen; tde = (struct target_dirent *)((char *)tde + treclen); count1 += treclen; } ret = count1; unlock_user(target_dirp, arg2, ret); } free(dirp); } #else { struct linux_dirent *dirp; abi_long count = arg3; if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) goto efault; ret = get_errno(sys_getdents(arg1, dirp, count)); if (!is_error(ret)) { struct linux_dirent *de; int len = ret; int reclen; de = dirp; while (len > 0) { reclen = de->d_reclen; if (reclen > len) break; de->d_reclen = tswap16(reclen); tswapls(&de->d_ino); tswapls(&de->d_off); de = (struct linux_dirent *)((char *)de + reclen); len -= reclen; } } unlock_user(dirp, arg2, ret); } #endif #else /* Implement getdents in terms of getdents64 */ { struct linux_dirent64 *dirp; abi_long count = arg3; dirp = lock_user(VERIFY_WRITE, arg2, count, 0); if (!dirp) { goto efault; } ret = get_errno(sys_getdents64(arg1, dirp, count)); if (!is_error(ret)) { /* Convert the dirent64 structs to target dirent. We do this * in-place, since we can guarantee that a target_dirent is no * larger than a dirent64; however this means we have to be * careful to read everything before writing in the new format. */ struct linux_dirent64 *de; struct target_dirent *tde; int len = ret; int tlen = 0; de = dirp; tde = (struct target_dirent *)dirp; while (len > 0) { int namelen, treclen; int reclen = de->d_reclen; uint64_t ino = de->d_ino; int64_t off = de->d_off; uint8_t type = de->d_type; namelen = strlen(de->d_name); treclen = offsetof(struct target_dirent, d_name) + namelen + 2; treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); memmove(tde->d_name, de->d_name, namelen + 1); tde->d_ino = tswapal(ino); tde->d_off = tswapal(off); tde->d_reclen = tswap16(treclen); /* The target_dirent type is in what was formerly a padding * byte at the end of the structure: */ *(((char *)tde) + treclen - 1) = type; de = (struct linux_dirent64 *)((char *)de + reclen); tde = (struct target_dirent *)((char *)tde + treclen); len -= reclen; tlen += treclen; } ret = tlen; } unlock_user(dirp, arg2, ret); } #endif break; #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) case TARGET_NR_getdents64: { struct linux_dirent64 *dirp; abi_long count = arg3; if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) goto efault; ret = get_errno(sys_getdents64(arg1, dirp, count)); if (!is_error(ret)) { struct linux_dirent64 *de; int len = ret; int reclen; de = dirp; while (len > 0) { reclen = de->d_reclen; if (reclen > len) break; de->d_reclen = tswap16(reclen); tswap64s((uint64_t *)&de->d_ino); tswap64s((uint64_t *)&de->d_off); de = (struct linux_dirent64 *)((char *)de + reclen); len -= reclen; } } unlock_user(dirp, arg2, ret); } break; #endif /* TARGET_NR_getdents64 */ #if defined(TARGET_NR__newselect) case TARGET_NR__newselect: ret = do_select(arg1, arg2, arg3, arg4, arg5); break; #endif #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) # ifdef TARGET_NR_poll case TARGET_NR_poll: # endif # ifdef TARGET_NR_ppoll case TARGET_NR_ppoll: # endif { struct target_pollfd *target_pfd; unsigned int nfds = arg2; int timeout = arg3; struct pollfd *pfd; unsigned int i; target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); if (!target_pfd) goto efault; pfd = alloca(sizeof(struct pollfd) * nfds); for(i = 0; i < nfds; i++) { 
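/* Swap each guest pollfd into host byte order before handing the array to the host poll()/ppoll(). */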
pfd[i].fd = tswap32(target_pfd[i].fd); pfd[i].events = tswap16(target_pfd[i].events); } # ifdef TARGET_NR_ppoll if (num == TARGET_NR_ppoll) { struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; target_sigset_t *target_set; sigset_t _set, *set = &_set; if (arg3) { if (target_to_host_timespec(timeout_ts, arg3)) { unlock_user(target_pfd, arg1, 0); goto efault; } } else { timeout_ts = NULL; } if (arg4) { target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); if (!target_set) { unlock_user(target_pfd, arg1, 0); goto efault; } target_to_host_sigset(set, target_set); } else { set = NULL; } ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8)); if (!is_error(ret) && arg3) { host_to_target_timespec(arg3, timeout_ts); } if (arg4) { unlock_user(target_set, arg4, 0); } } else # endif ret = get_errno(poll(pfd, nfds, timeout)); if (!is_error(ret)) { for(i = 0; i < nfds; i++) { target_pfd[i].revents = tswap16(pfd[i].revents); } } unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); } break; #endif case TARGET_NR_flock: /* NOTE: the flock constant seems to be the same for every Linux platform */ ret = get_errno(flock(arg1, arg2)); break; case TARGET_NR_readv: { struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); if (vec != NULL) { ret = get_errno(readv(arg1, vec, arg3)); unlock_iovec(vec, arg2, arg3, 1); } else { ret = -host_to_target_errno(errno); } } break; case TARGET_NR_writev: { struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); if (vec != NULL) { ret = get_errno(writev(arg1, vec, arg3)); unlock_iovec(vec, arg2, arg3, 0); } else { ret = -host_to_target_errno(errno); } } break; case TARGET_NR_getsid: ret = get_errno(getsid(arg1)); break; #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ case TARGET_NR_fdatasync: ret = get_errno(fdatasync(arg1)); break; #endif case TARGET_NR__sysctl: /* We don't implement this, but ENOTDIR is always a safe return value. */ ret = -TARGET_ENOTDIR; break; case TARGET_NR_sched_getaffinity: { unsigned int mask_size; unsigned long *mask; /* * sched_getaffinity needs multiples of ulong, so need to take * care of mismatches between target ulong and host ulong sizes. */ if (arg2 & (sizeof(abi_ulong) - 1)) { ret = -TARGET_EINVAL; break; } mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); mask = alloca(mask_size); ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); if (!is_error(ret)) { if (copy_to_user(arg3, mask, ret)) { goto efault; } } } break; case TARGET_NR_sched_setaffinity: { unsigned int mask_size; unsigned long *mask; /* * sched_setaffinity needs multiples of ulong, so need to take * care of mismatches between target ulong and host ulong sizes. 
*/ if (arg2 & (sizeof(abi_ulong) - 1)) { ret = -TARGET_EINVAL; break; } mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); mask = alloca(mask_size); if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { goto efault; } memcpy(mask, p, arg2); unlock_user_struct(p, arg3, 0); ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); } break; case TARGET_NR_sched_setparam: { struct sched_param *target_schp; struct sched_param schp; if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) goto efault; schp.sched_priority = tswap32(target_schp->sched_priority); unlock_user_struct(target_schp, arg2, 0); ret = get_errno(sched_setparam(arg1, &schp)); } break; case TARGET_NR_sched_getparam: { struct sched_param *target_schp; struct sched_param schp; ret = get_errno(sched_getparam(arg1, &schp)); if (!is_error(ret)) { if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) goto efault; target_schp->sched_priority = tswap32(schp.sched_priority); unlock_user_struct(target_schp, arg2, 1); } } break; case TARGET_NR_sched_setscheduler: { struct sched_param *target_schp; struct sched_param schp; if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) goto efault; schp.sched_priority = tswap32(target_schp->sched_priority); unlock_user_struct(target_schp, arg3, 0); ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); } break; case TARGET_NR_sched_getscheduler: ret = get_errno(sched_getscheduler(arg1)); break; case TARGET_NR_sched_yield: ret = get_errno(sched_yield()); break; case TARGET_NR_sched_get_priority_max: ret = get_errno(sched_get_priority_max(arg1)); break; case TARGET_NR_sched_get_priority_min: ret = get_errno(sched_get_priority_min(arg1)); break; case TARGET_NR_sched_rr_get_interval: { struct timespec ts; ret = get_errno(sched_rr_get_interval(arg1, &ts)); if (!is_error(ret)) { host_to_target_timespec(arg2, &ts); } } break; case TARGET_NR_nanosleep: { struct timespec req, rem; target_to_host_timespec(&req, arg1); ret = get_errno(nanosleep(&req, &rem)); if (is_error(ret) && arg2) { host_to_target_timespec(arg2, &rem); } } break; #ifdef TARGET_NR_query_module case TARGET_NR_query_module: goto unimplemented; #endif #ifdef TARGET_NR_nfsservctl case TARGET_NR_nfsservctl: goto unimplemented; #endif case TARGET_NR_prctl: switch (arg1) { case PR_GET_PDEATHSIG: { int deathsig; ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); if (!is_error(ret) && arg2 && put_user_ual(deathsig, arg2)) { goto efault; } break; } #ifdef PR_GET_NAME case PR_GET_NAME: { void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); if (!name) { goto efault; } ret = get_errno(prctl(arg1, (unsigned long)name, arg3, arg4, arg5)); unlock_user(name, arg2, 16); break; } case PR_SET_NAME: { void *name = lock_user(VERIFY_READ, arg2, 16, 1); if (!name) { goto efault; } ret = get_errno(prctl(arg1, (unsigned long)name, arg3, arg4, arg5)); unlock_user(name, arg2, 0); break; } #endif default: /* Most prctl options have no pointer arguments */ ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); break; } break; #ifdef TARGET_NR_arch_prctl case TARGET_NR_arch_prctl: #if defined(TARGET_I386) && !defined(TARGET_ABI32) ret = do_arch_prctl(cpu_env, arg1, arg2); break; #else goto unimplemented; #endif #endif #ifdef TARGET_NR_pread64 case TARGET_NR_pread64: if (regpairs_aligned(cpu_env)) { arg4 = arg5; arg5 = arg6; } if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) goto efault; ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); unlock_user(p, arg2, ret); break; case TARGET_NR_pwrite64: if
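/* As with pread64 above: ABIs that pass 64-bit values in aligned register pairs deliver the offset words one argument slot later. */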
(regpairs_aligned(cpu_env)) { arg4 = arg5; arg5 = arg6; } if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) goto efault; ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); unlock_user(p, arg2, 0); break; #endif case TARGET_NR_getcwd: if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) goto efault; ret = get_errno(sys_getcwd1(p, arg2)); unlock_user(p, arg1, ret); break; case TARGET_NR_capget: goto unimplemented; case TARGET_NR_capset: goto unimplemented; case TARGET_NR_sigaltstack: #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \ defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC) ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); break; #else goto unimplemented; #endif #ifdef CONFIG_SENDFILE case TARGET_NR_sendfile: { off_t *offp = NULL; off_t off; if (arg3) { ret = get_user_sal(off, arg3); if (is_error(ret)) { break; } offp = &off; } ret = get_errno(sendfile(arg1, arg2, offp, arg4)); if (!is_error(ret) && arg3) { abi_long ret2 = put_user_sal(off, arg3); if (is_error(ret2)) { ret = ret2; } } break; } #ifdef TARGET_NR_sendfile64 case TARGET_NR_sendfile64: { off_t *offp = NULL; off_t off; if (arg3) { ret = get_user_s64(off, arg3); if (is_error(ret)) { break; } offp = &off; } ret = get_errno(sendfile(arg1, arg2, offp, arg4)); if (!is_error(ret) && arg3) { abi_long ret2 = put_user_s64(off, arg3); if (is_error(ret2)) { ret = ret2; } } break; } #endif #else case TARGET_NR_sendfile: #ifdef TARGET_NR_sendfile64 case TARGET_NR_sendfile64: #endif goto unimplemented; #endif #ifdef TARGET_NR_getpmsg case TARGET_NR_getpmsg: goto unimplemented; #endif #ifdef TARGET_NR_putpmsg case TARGET_NR_putpmsg: goto unimplemented; #endif #ifdef TARGET_NR_vfork case TARGET_NR_vfork: ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 0, 0, 0)); break; #endif #ifdef TARGET_NR_ugetrlimit case TARGET_NR_ugetrlimit: { struct rlimit rlim; int resource = target_to_host_resource(arg1); ret = get_errno(getrlimit(resource, &rlim)); if (!is_error(ret)) { struct target_rlimit *target_rlim; if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) goto efault; target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); unlock_user_struct(target_rlim, arg2, 1); } break; } #endif #ifdef TARGET_NR_truncate64 case TARGET_NR_truncate64: if (!(p = lock_user_string(arg1))) goto efault; ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); unlock_user(p, arg1, 0); break; #endif #ifdef TARGET_NR_ftruncate64 case TARGET_NR_ftruncate64: ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); break; #endif #ifdef TARGET_NR_stat64 case TARGET_NR_stat64: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(stat(path(p), &st)); unlock_user(p, arg1, 0); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg2, &st); break; #endif #ifdef TARGET_NR_lstat64 case TARGET_NR_lstat64: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lstat(path(p), &st)); unlock_user(p, arg1, 0); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg2, &st); break; #endif #ifdef TARGET_NR_fstat64 case TARGET_NR_fstat64: ret = get_errno(fstat(arg1, &st)); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg2, &st); break; #endif #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) #ifdef TARGET_NR_fstatat64 case TARGET_NR_fstatat64: #endif #ifdef 
TARGET_NR_newfstatat case TARGET_NR_newfstatat: #endif if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(fstatat(arg1, path(p), &st, arg4)); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg3, &st); break; #endif case TARGET_NR_lchown: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); unlock_user(p, arg1, 0); break; #ifdef TARGET_NR_getuid case TARGET_NR_getuid: ret = get_errno(high2lowuid(getuid())); break; #endif #ifdef TARGET_NR_getgid case TARGET_NR_getgid: ret = get_errno(high2lowgid(getgid())); break; #endif #ifdef TARGET_NR_geteuid case TARGET_NR_geteuid: ret = get_errno(high2lowuid(geteuid())); break; #endif #ifdef TARGET_NR_getegid case TARGET_NR_getegid: ret = get_errno(high2lowgid(getegid())); break; #endif case TARGET_NR_setreuid: ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); break; case TARGET_NR_setregid: ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); break; case TARGET_NR_getgroups: { int gidsetsize = arg1; target_id *target_grouplist; gid_t *grouplist; int i; grouplist = alloca(gidsetsize * sizeof(gid_t)); ret = get_errno(getgroups(gidsetsize, grouplist)); if (gidsetsize == 0) break; if (!is_error(ret)) { target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); if (!target_grouplist) goto efault; for(i = 0;i < ret; i++) target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); } } break; case TARGET_NR_setgroups: { int gidsetsize = arg1; target_id *target_grouplist; gid_t *grouplist = NULL; int i; if (gidsetsize) { grouplist = alloca(gidsetsize * sizeof(gid_t)); target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); if (!target_grouplist) { ret = -TARGET_EFAULT; goto fail; } for (i = 0; i < gidsetsize; i++) { grouplist[i] = low2highgid(tswapid(target_grouplist[i])); } unlock_user(target_grouplist, arg2, 0); } ret = get_errno(setgroups(gidsetsize, grouplist)); } break; case TARGET_NR_fchown: ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); break; #if defined(TARGET_NR_fchownat) case TARGET_NR_fchownat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); unlock_user(p, arg2, 0); break; #endif #ifdef TARGET_NR_setresuid case TARGET_NR_setresuid: ret = get_errno(setresuid(low2highuid(arg1), low2highuid(arg2), low2highuid(arg3))); break; #endif #ifdef TARGET_NR_getresuid case TARGET_NR_getresuid: { uid_t ruid, euid, suid; ret = get_errno(getresuid(&ruid, &euid, &suid)); if (!is_error(ret)) { if (put_user_id(high2lowuid(ruid), arg1) || put_user_id(high2lowuid(euid), arg2) || put_user_id(high2lowuid(suid), arg3)) goto efault; } } break; #endif #ifdef TARGET_NR_setresgid case TARGET_NR_setresgid: ret = get_errno(setresgid(low2highgid(arg1), low2highgid(arg2), low2highgid(arg3))); break; #endif #ifdef TARGET_NR_getresgid case TARGET_NR_getresgid: { gid_t rgid, egid, sgid; ret = get_errno(getresgid(&rgid, &egid, &sgid)); if (!is_error(ret)) { if (put_user_id(high2lowgid(rgid), arg1) || put_user_id(high2lowgid(egid), arg2) || put_user_id(high2lowgid(sgid), arg3)) goto efault; } } break; #endif case TARGET_NR_chown: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); unlock_user(p, arg1, 0); break; case TARGET_NR_setuid: ret = get_errno(setuid(low2highuid(arg1))); break; case
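/* Guest IDs may be legacy 16-bit values; low2highgid() widens them before the host call. */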
TARGET_NR_setgid: ret = get_errno(setgid(low2highgid(arg1))); break; case TARGET_NR_setfsuid: ret = get_errno(setfsuid(arg1)); break; case TARGET_NR_setfsgid: ret = get_errno(setfsgid(arg1)); break; #ifdef TARGET_NR_lchown32 case TARGET_NR_lchown32: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lchown(p, arg2, arg3)); unlock_user(p, arg1, 0); break; #endif #ifdef TARGET_NR_getuid32 case TARGET_NR_getuid32: ret = get_errno(getuid()); break; #endif #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_getxuid: { uid_t euid; euid=geteuid(); ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; } ret = get_errno(getuid()); break; #endif #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_getxgid: { uid_t egid; egid=getegid(); ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; } ret = get_errno(getgid()); break; #endif #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_osf_getsysinfo: ret = -TARGET_EOPNOTSUPP; switch (arg1) { case TARGET_GSI_IEEE_FP_CONTROL: { uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); /* Copied from linux ieee_fpcr_to_swcr. */ swcr = (fpcr >> 35) & SWCR_STATUS_MASK; swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV | SWCR_TRAP_ENABLE_DZE | SWCR_TRAP_ENABLE_OVF); swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF | SWCR_TRAP_ENABLE_INE); swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; if (put_user_u64 (swcr, arg2)) goto efault; } break; /* case GSI_IEEE_STATE_AT_SIGNAL: -- Not implemented in linux kernel. case GSI_UACPROC: -- Retrieves current unaligned access state; not much used. case GSI_PROC_TYPE: -- Retrieves implver information; surely not used. case GSI_GET_HWRPB: -- Grabs a copy of the HWRPB; surely not used. */ } break; #endif #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_osf_setsysinfo: ret = -TARGET_EOPNOTSUPP; switch (arg1) { case TARGET_SSI_IEEE_FP_CONTROL: { uint64_t swcr, fpcr, orig_fpcr; if (get_user_u64 (swcr, arg2)) { goto efault; } orig_fpcr = cpu_alpha_load_fpcr(cpu_env); fpcr = orig_fpcr & FPCR_DYN_MASK; /* Copied from linux ieee_swcr_to_fpcr. */ fpcr |= (swcr & SWCR_STATUS_MASK) << 35; fpcr |= (swcr & SWCR_MAP_DMZ) << 36; fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV | SWCR_TRAP_ENABLE_DZE | SWCR_TRAP_ENABLE_OVF)) << 48; fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF | SWCR_TRAP_ENABLE_INE)) << 57; fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; cpu_alpha_store_fpcr(cpu_env, fpcr); } break; case TARGET_SSI_IEEE_RAISE_EXCEPTION: { uint64_t exc, fpcr, orig_fpcr; int si_code; if (get_user_u64(exc, arg2)) { goto efault; } orig_fpcr = cpu_alpha_load_fpcr(cpu_env); /* We only add to the exception status here. */ fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); cpu_alpha_store_fpcr(cpu_env, fpcr); /* Old exceptions are not signaled. */ fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); /* If any exceptions set by this call, and are unmasked, send a signal. 
*/ si_code = 0; if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { si_code = TARGET_FPE_FLTRES; } if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { si_code = TARGET_FPE_FLTUND; } if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { si_code = TARGET_FPE_FLTOVF; } if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { si_code = TARGET_FPE_FLTDIV; } if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { si_code = TARGET_FPE_FLTINV; } if (si_code != 0) { target_siginfo_t info; info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = si_code; info._sifields._sigfault._addr = ((CPUArchState *)cpu_env)->pc; queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); } } break; /* case SSI_NVPAIRS: -- Used with SSIN_UACPROC to enable unaligned accesses. case SSI_IEEE_STATE_AT_SIGNAL: case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: -- Not implemented in linux kernel */ } break; #endif #ifdef TARGET_NR_osf_sigprocmask /* Alpha specific. */ case TARGET_NR_osf_sigprocmask: { abi_ulong mask; int how; sigset_t set, oldset; switch(arg1) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; break; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; break; case TARGET_SIG_SETMASK: how = SIG_SETMASK; break; default: ret = -TARGET_EINVAL; goto fail; } mask = arg2; target_to_host_old_sigset(&set, &mask); sigprocmask(how, &set, &oldset); host_to_target_old_sigset(&mask, &oldset); ret = mask; } break; #endif #ifdef TARGET_NR_getgid32 case TARGET_NR_getgid32: ret = get_errno(getgid()); break; #endif #ifdef TARGET_NR_geteuid32 case TARGET_NR_geteuid32: ret = get_errno(geteuid()); break; #endif #ifdef TARGET_NR_getegid32 case TARGET_NR_getegid32: ret = get_errno(getegid()); break; #endif #ifdef TARGET_NR_setreuid32 case TARGET_NR_setreuid32: ret = get_errno(setreuid(arg1, arg2)); break; #endif #ifdef TARGET_NR_setregid32 case TARGET_NR_setregid32: ret = get_errno(setregid(arg1, arg2)); break; #endif #ifdef TARGET_NR_getgroups32 case TARGET_NR_getgroups32: { int gidsetsize = arg1; uint32_t *target_grouplist; gid_t *grouplist; int i; grouplist = alloca(gidsetsize * sizeof(gid_t)); ret = get_errno(getgroups(gidsetsize, grouplist)); if (gidsetsize == 0) break; if (!is_error(ret)) { target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); if (!target_grouplist) { ret = -TARGET_EFAULT; goto fail; } for(i = 0;i < ret; i++) target_grouplist[i] = tswap32(grouplist[i]); unlock_user(target_grouplist, arg2, gidsetsize * 4); } } break; #endif #ifdef TARGET_NR_setgroups32 case TARGET_NR_setgroups32: { int gidsetsize = arg1; uint32_t *target_grouplist; gid_t *grouplist; int i; grouplist = alloca(gidsetsize * sizeof(gid_t)); target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); if (!target_grouplist) { ret = -TARGET_EFAULT; goto fail; } for(i = 0;i < gidsetsize; i++) grouplist[i] = tswap32(target_grouplist[i]); unlock_user(target_grouplist, arg2, 0); ret = get_errno(setgroups(gidsetsize, grouplist)); } break; #endif #ifdef TARGET_NR_fchown32 case TARGET_NR_fchown32: ret = get_errno(fchown(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_setresuid32 case TARGET_NR_setresuid32: ret = get_errno(setresuid(arg1, arg2, arg3)); break; #endif #ifdef TARGET_NR_getresuid32 case TARGET_NR_getresuid32: { uid_t ruid, euid, suid; ret = get_errno(getresuid(&ruid, &euid, &suid)); if (!is_error(ret)) { if (put_user_u32(ruid, arg1) || put_user_u32(euid, arg2) || put_user_u32(suid, arg3)) goto efault; } } break; #endif #ifdef TARGET_NR_setresgid32 case TARGET_NR_setresgid32: ret = get_errno(setresgid(arg1, arg2, arg3)); break; #endif #ifdef 
TARGET_NR_getresgid32 case TARGET_NR_getresgid32: { gid_t rgid, egid, sgid; ret = get_errno(getresgid(&rgid, &egid, &sgid)); if (!is_error(ret)) { if (put_user_u32(rgid, arg1) || put_user_u32(egid, arg2) || put_user_u32(sgid, arg3)) goto efault; } } break; #endif #ifdef TARGET_NR_chown32 case TARGET_NR_chown32: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chown(p, arg2, arg3)); unlock_user(p, arg1, 0); break; #endif #ifdef TARGET_NR_setuid32 case TARGET_NR_setuid32: ret = get_errno(setuid(arg1)); break; #endif #ifdef TARGET_NR_setgid32 case TARGET_NR_setgid32: ret = get_errno(setgid(arg1)); break; #endif #ifdef TARGET_NR_setfsuid32 case TARGET_NR_setfsuid32: ret = get_errno(setfsuid(arg1)); break; #endif #ifdef TARGET_NR_setfsgid32 case TARGET_NR_setfsgid32: ret = get_errno(setfsgid(arg1)); break; #endif case TARGET_NR_pivot_root: goto unimplemented; #ifdef TARGET_NR_mincore case TARGET_NR_mincore: { void *a; ret = -TARGET_EFAULT; if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) goto efault; if (!(p = lock_user_string(arg3))) goto mincore_fail; ret = get_errno(mincore(a, arg2, p)); unlock_user(p, arg3, ret); mincore_fail: unlock_user(a, arg1, 0); } break; #endif #ifdef TARGET_NR_arm_fadvise64_64 case TARGET_NR_arm_fadvise64_64: { /* * arm_fadvise64_64 looks like fadvise64_64 but * with different argument order */ abi_long temp; temp = arg3; arg3 = arg4; arg4 = temp; } #endif #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64) #ifdef TARGET_NR_fadvise64_64 case TARGET_NR_fadvise64_64: #endif #ifdef TARGET_NR_fadvise64 case TARGET_NR_fadvise64: #endif #ifdef TARGET_S390X switch (arg4) { case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ case 6: arg4 = POSIX_FADV_DONTNEED; break; case 7: arg4 = POSIX_FADV_NOREUSE; break; default: break; } #endif ret = -posix_fadvise(arg1, arg2, arg3, arg4); break; #endif #ifdef TARGET_NR_madvise case TARGET_NR_madvise: /* A straight passthrough may not be safe because qemu sometimes turns private file-backed mappings into anonymous mappings. This will break MADV_DONTNEED. This is a hint, so ignoring and returning success is ok. 
*/ ret = get_errno(0); break; #endif #if TARGET_ABI_BITS == 32 case TARGET_NR_fcntl64: { int cmd; struct flock64 fl; struct target_flock64 *target_fl; #ifdef TARGET_ARM struct target_eabi_flock64 *target_efl; #endif cmd = target_to_host_fcntl_cmd(arg2); if (cmd == -TARGET_EINVAL) { ret = cmd; break; } switch(arg2) { case TARGET_F_GETLK64: #ifdef TARGET_ARM if (((CPUARMState *)cpu_env)->eabi) { if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) goto efault; fl.l_type = tswap16(target_efl->l_type); fl.l_whence = tswap16(target_efl->l_whence); fl.l_start = tswap64(target_efl->l_start); fl.l_len = tswap64(target_efl->l_len); fl.l_pid = tswap32(target_efl->l_pid); unlock_user_struct(target_efl, arg3, 0); } else #endif { if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) goto efault; fl.l_type = tswap16(target_fl->l_type); fl.l_whence = tswap16(target_fl->l_whence); fl.l_start = tswap64(target_fl->l_start); fl.l_len = tswap64(target_fl->l_len); fl.l_pid = tswap32(target_fl->l_pid); unlock_user_struct(target_fl, arg3, 0); } ret = get_errno(fcntl(arg1, cmd, &fl)); if (ret == 0) { #ifdef TARGET_ARM if (((CPUARMState *)cpu_env)->eabi) { if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0)) goto efault; target_efl->l_type = tswap16(fl.l_type); target_efl->l_whence = tswap16(fl.l_whence); target_efl->l_start = tswap64(fl.l_start); target_efl->l_len = tswap64(fl.l_len); target_efl->l_pid = tswap32(fl.l_pid); unlock_user_struct(target_efl, arg3, 1); } else #endif { if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0)) goto efault; target_fl->l_type = tswap16(fl.l_type); target_fl->l_whence = tswap16(fl.l_whence); target_fl->l_start = tswap64(fl.l_start); target_fl->l_len = tswap64(fl.l_len); target_fl->l_pid = tswap32(fl.l_pid); unlock_user_struct(target_fl, arg3, 1); } } break; case TARGET_F_SETLK64: case TARGET_F_SETLKW64: #ifdef TARGET_ARM if (((CPUARMState *)cpu_env)->eabi) { if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1)) goto efault; fl.l_type = tswap16(target_efl->l_type); fl.l_whence = tswap16(target_efl->l_whence); fl.l_start = tswap64(target_efl->l_start); fl.l_len = tswap64(target_efl->l_len); fl.l_pid = tswap32(target_efl->l_pid); unlock_user_struct(target_efl, arg3, 0); } else #endif { if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1)) goto efault; fl.l_type = tswap16(target_fl->l_type); fl.l_whence = tswap16(target_fl->l_whence); fl.l_start = tswap64(target_fl->l_start); fl.l_len = tswap64(target_fl->l_len); fl.l_pid = tswap32(target_fl->l_pid); unlock_user_struct(target_fl, arg3, 0); } ret = get_errno(fcntl(arg1, cmd, &fl)); break; default: ret = do_fcntl(arg1, arg2, arg3); break; } break; } #endif #ifdef TARGET_NR_cacheflush case TARGET_NR_cacheflush: /* self-modifying code is handled automatically, so nothing needed */ break; #endif #ifdef TARGET_NR_security case TARGET_NR_security: goto unimplemented; #endif #ifdef TARGET_NR_getpagesize case TARGET_NR_getpagesize: ret = TARGET_PAGE_SIZE; break; #endif case TARGET_NR_gettid: ret = get_errno(gettid()); break; #ifdef TARGET_NR_readahead case TARGET_NR_readahead: #if TARGET_ABI_BITS == 32 if (regpairs_aligned(cpu_env)) { arg2 = arg3; arg3 = arg4; arg4 = arg5; } ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); #else ret = get_errno(readahead(arg1, arg2, arg3)); #endif break; #endif #ifdef CONFIG_ATTR #ifdef TARGET_NR_setxattr case TARGET_NR_listxattr: case TARGET_NR_llistxattr: { void *p, *b = 0; if (arg2) { b = lock_user(VERIFY_WRITE, arg2, arg3, 0); if (!b) { ret = -TARGET_EFAULT; 
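/* The guest's list buffer could not be mapped for writing. */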
            break;
        }
    }
    p = lock_user_string(arg1);
    if (p) {
        if (num == TARGET_NR_listxattr) {
            ret = get_errno(listxattr(p, b, arg3));
        } else {
            ret = get_errno(llistxattr(p, b, arg3));
        }
    } else {
        ret = -TARGET_EFAULT;
    }
    unlock_user(p, arg1, 0);
    unlock_user(b, arg2, arg3);
    break;
}
case TARGET_NR_flistxattr:
{
    void *b = 0;
    if (arg2) {
        b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
        if (!b) {
            ret = -TARGET_EFAULT;
            break;
        }
    }
    ret = get_errno(flistxattr(arg1, b, arg3));
    unlock_user(b, arg2, arg3);
    break;
}
case TARGET_NR_setxattr:
case TARGET_NR_lsetxattr:
{
    void *p, *n, *v = 0;
    if (arg3) {
        v = lock_user(VERIFY_READ, arg3, arg4, 1);
        if (!v) {
            ret = -TARGET_EFAULT;
            break;
        }
    }
    p = lock_user_string(arg1);
    n = lock_user_string(arg2);
    if (p && n) {
        if (num == TARGET_NR_setxattr) {
            ret = get_errno(setxattr(p, n, v, arg4, arg5));
        } else {
            ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
        }
    } else {
        ret = -TARGET_EFAULT;
    }
    unlock_user(p, arg1, 0);
    unlock_user(n, arg2, 0);
    unlock_user(v, arg3, 0);
}
break;
case TARGET_NR_fsetxattr:
{
    void *n, *v = 0;
    if (arg3) {
        v = lock_user(VERIFY_READ, arg3, arg4, 1);
        if (!v) {
            ret = -TARGET_EFAULT;
            break;
        }
    }
    n = lock_user_string(arg2);
    if (n) {
        ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
    } else {
        ret = -TARGET_EFAULT;
    }
    unlock_user(n, arg2, 0);
    unlock_user(v, arg3, 0);
}
break;
case TARGET_NR_getxattr:
case TARGET_NR_lgetxattr:
{
    void *p, *n, *v = 0;
    if (arg3) {
        v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
        if (!v) {
            ret = -TARGET_EFAULT;
            break;
        }
    }
    p = lock_user_string(arg1);
    n = lock_user_string(arg2);
    if (p && n) {
        if (num == TARGET_NR_getxattr) {
            ret = get_errno(getxattr(p, n, v, arg4));
        } else {
            ret = get_errno(lgetxattr(p, n, v, arg4));
        }
    } else {
        ret = -TARGET_EFAULT;
    }
    unlock_user(p, arg1, 0);
    unlock_user(n, arg2, 0);
    unlock_user(v, arg3, arg4);
}
break;
case TARGET_NR_fgetxattr:
{
    void *n, *v = 0;
    if (arg3) {
        v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
        if (!v) {
            ret = -TARGET_EFAULT;
            break;
        }
    }
    n = lock_user_string(arg2);
    if (n) {
        ret = get_errno(fgetxattr(arg1, n, v, arg4));
    } else {
        ret = -TARGET_EFAULT;
    }
    unlock_user(n, arg2, 0);
    unlock_user(v, arg3, arg4);
}
break;
case TARGET_NR_removexattr:
case TARGET_NR_lremovexattr:
{
    void *p, *n;
    p = lock_user_string(arg1);
    n = lock_user_string(arg2);
    if (p && n) {
        if (num == TARGET_NR_removexattr) {
            ret = get_errno(removexattr(p, n));
        } else {
            ret = get_errno(lremovexattr(p, n));
        }
    } else {
        ret = -TARGET_EFAULT;
    }
    unlock_user(p, arg1, 0);
    unlock_user(n, arg2, 0);
}
break;
case TARGET_NR_fremovexattr:
{
    void *n;
    n = lock_user_string(arg2);
    if (n) {
        ret = get_errno(fremovexattr(arg1, n));
    } else {
        ret = -TARGET_EFAULT;
    }
    unlock_user(n, arg2, 0);
}
break;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
    ((CPUMIPSState *) cpu_env)->tls_value = arg1;
    break;
#elif defined(TARGET_CRIS)
    if (arg1 & 0xff)
        ret = -TARGET_EINVAL;
    else {
        ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
    }
    break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
    ret = do_set_thread_area(cpu_env, arg1);
    break;
#elif defined(TARGET_M68K)
    {
        TaskState *ts = cpu->opaque;
        ts->tp_value = arg1;
        break;
    }
#else
    goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
    ret = do_get_thread_area(cpu_env, arg1);
    break;
#elif defined(TARGET_M68K)
    {
        TaskState *ts = cpu->opaque;
        ret = ts->tp_value;
        break;
    }
#else
    goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
case
TARGET_NR_getdomainname:
    goto unimplemented_nowarn;
#endif

#ifdef TARGET_NR_clock_gettime
case TARGET_NR_clock_gettime:
{
    struct timespec ts;
    ret = get_errno(clock_gettime(arg1, &ts));
    if (!is_error(ret)) {
        host_to_target_timespec(arg2, &ts);
    }
    break;
}
#endif
#ifdef TARGET_NR_clock_getres
case TARGET_NR_clock_getres:
{
    struct timespec ts;
    ret = get_errno(clock_getres(arg1, &ts));
    if (!is_error(ret)) {
        host_to_target_timespec(arg2, &ts);
    }
    break;
}
#endif
#ifdef TARGET_NR_clock_nanosleep
case TARGET_NR_clock_nanosleep:
{
    struct timespec ts;
    target_to_host_timespec(&ts, arg3);
    ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
    if (arg4)
        host_to_target_timespec(arg4, &ts);
    break;
}
#endif

#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
case TARGET_NR_set_tid_address:
    ret = get_errno(set_tid_address((int *)g2h(arg1)));
    break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
case TARGET_NR_tkill:
    ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
    break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
case TARGET_NR_tgkill:
    ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                    target_to_host_signal(arg3)));
    break;
#endif

#ifdef TARGET_NR_set_robust_list
case TARGET_NR_set_robust_list:
case TARGET_NR_get_robust_list:
    /* The ABI for supporting robust futexes has userspace pass
     * the kernel a pointer to a linked list which is updated by
     * userspace after the syscall; the list is walked by the kernel
     * when the thread exits. Since the linked list in QEMU guest
     * memory isn't a valid linked list for the host and we have
     * no way to reliably intercept the thread-death event, we can't
     * support these. Silently return ENOSYS so that guest userspace
     * falls back to a non-robust futex implementation (which should
     * be OK except in the corner case of the guest crashing while
     * holding a mutex that is shared with another process via
     * shared memory).
     */
    goto unimplemented_nowarn;
#endif

#if defined(TARGET_NR_utimensat)
case TARGET_NR_utimensat:
    {
        struct timespec *tsp, ts[2];
        if (!arg3) {
            tsp = NULL;
        } else {
            target_to_host_timespec(ts, arg3);
            target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
            tsp = ts;
        }
        if (!arg2)
            ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
        else {
            if (!(p = lock_user_string(arg2))) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
            unlock_user(p, arg2, 0);
        }
    }
    break;
#endif
case TARGET_NR_futex:
    ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
    break;
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
case TARGET_NR_inotify_init:
    ret = get_errno(sys_inotify_init());
    break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
case TARGET_NR_inotify_init1:
    ret = get_errno(sys_inotify_init1(arg1));
    break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
case TARGET_NR_inotify_add_watch:
    p = lock_user_string(arg2);
    ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
    unlock_user(p, arg2, 0);
    break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
case TARGET_NR_inotify_rm_watch:
    ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
    break;
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
case TARGET_NR_mq_open:
    {
        struct mq_attr posix_mq_attr;

        p = lock_user_string(arg1 - 1);
        if (arg4 != 0)
            copy_from_user_mq_attr (&posix_mq_attr, arg4);
        ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
        unlock_user (p, arg1, 0);
    }
    break;

case TARGET_NR_mq_unlink:
    p = lock_user_string(arg1 - 1);
    ret = get_errno(mq_unlink(p));
    unlock_user (p, arg1, 0);
    break;

case TARGET_NR_mq_timedsend:
    {
        struct timespec ts;

        p = lock_user (VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
            host_to_target_timespec(arg5, &ts);
        }
        else
            ret = get_errno(mq_send(arg1, p, arg3, arg4));
        unlock_user (p, arg2, arg3);
    }
    break;

case TARGET_NR_mq_timedreceive:
    {
        struct timespec ts;
        unsigned int prio;

        p = lock_user (VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
            host_to_target_timespec(arg5, &ts);
        }
        else
            ret = get_errno(mq_receive(arg1, p, arg3, &prio));
        unlock_user (p, arg2, arg3);
        if (arg4 != 0)
            put_user_u32(prio, arg4);
    }
    break;

    /* Not implemented for now...
     */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

case TARGET_NR_mq_getsetattr:
    {
        struct mq_attr posix_mq_attr_in, posix_mq_attr_out;

        if (arg3 != 0) {
            ret = mq_getattr(arg1, &posix_mq_attr_out);
            copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
        }

        if (arg2 != 0) {
            copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
            ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
        }
    }
    break;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
case TARGET_NR_tee:
    {
        ret = get_errno(tee(arg1,arg2,arg3,arg4));
    }
    break;
#endif
#ifdef TARGET_NR_splice
case TARGET_NR_splice:
    {
        loff_t loff_in, loff_out;
        loff_t *ploff_in = NULL, *ploff_out = NULL;
        if(arg2) {
            get_user_u64(loff_in, arg2);
            ploff_in = &loff_in;
        }
        if(arg4) {
            get_user_u64(loff_out, arg2);
            ploff_out = &loff_out;
        }
        ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
    }
    break;
#endif
#ifdef TARGET_NR_vmsplice
case TARGET_NR_vmsplice:
    {
        struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
        if (vec != NULL) {
            ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
            unlock_iovec(vec, arg2, arg3, 0);
        } else {
            ret = -host_to_target_errno(errno);
        }
    }
    break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
case TARGET_NR_eventfd:
    ret = get_errno(eventfd(arg1, 0));
    break;
#endif
#if defined(TARGET_NR_eventfd2)
case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
    ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                              target_offset64(arg5, arg6)));
#else
    ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
    break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
    ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                    target_offset64(arg5, arg6), arg7));
#else
    ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                    target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
    ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
    break;
#endif
#if defined(TARGET_NR_sync_file_range2)
case TARGET_NR_sync_file_range2:
    /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
    ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                    target_offset64(arg5, arg6), arg2));
#else
    ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
    break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
case TARGET_NR_epoll_create:
    ret = get_errno(epoll_create(arg1));
    break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
case TARGET_NR_epoll_create1:
    ret = get_errno(epoll_create1(arg1));
    break;
#endif
#if defined(TARGET_NR_epoll_ctl)
case TARGET_NR_epoll_ctl:
{
    struct epoll_event ep;
    struct epoll_event *epp = 0;
    if (arg4) {
        struct target_epoll_event *target_ep;
        if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
            goto efault;
        }
        ep.events = tswap32(target_ep->events);
        /* The epoll_data_t union is just opaque data to the kernel,
         * so we transfer all 64 bits across and need not worry what
         * actual data type it is.
         */
        ep.data.u64 = tswap64(target_ep->data.u64);
        unlock_user_struct(target_ep, arg4, 0);
        epp = &ep;
    }
    ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    break;
}
#endif

#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
case TARGET_NR_epoll_pwait:
#endif
{
    struct target_epoll_event *target_ep;
    struct epoll_event *ep;
    int epfd = arg1;
    int maxevents = arg3;
    int timeout = arg4;

    target_ep = lock_user(VERIFY_WRITE, arg2,
                          maxevents * sizeof(struct target_epoll_event), 1);
    if (!target_ep) {
        goto efault;
    }

    ep = alloca(maxevents * sizeof(struct epoll_event));

    switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
    {
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg5) {
            target_set = lock_user(VERIFY_READ, arg5,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_ep, arg2, 0);
                goto efault;
            }
            target_to_host_sigset(set, target_set);
            unlock_user(target_set, arg5, 0);
        } else {
            set = NULL;
        }

        ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
        break;
    }
#endif
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
        ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
        break;
#endif
    default:
        ret = -TARGET_ENOSYS;
    }
    if (!is_error(ret)) {
        int i;
        for (i = 0; i < ret; i++) {
            target_ep[i].events = tswap32(ep[i].events);
            target_ep[i].data.u64 = tswap64(ep[i].data.u64);
        }
    }
    unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
    break;
}
#endif
#endif
#ifdef TARGET_NR_prlimit64
case TARGET_NR_prlimit64:
{
    /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
    struct target_rlimit64 *target_rnew, *target_rold;
    struct host_rlimit64 rnew, rold, *rnewp = 0;
    if (arg3) {
        if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
            goto efault;
        }
        rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
        rnew.rlim_max = tswap64(target_rnew->rlim_max);
        unlock_user_struct(target_rnew, arg3, 0);
        rnewp = &rnew;
    }

    ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
    if (!is_error(ret) && arg4) {
        if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
            goto efault;
        }
        target_rold->rlim_cur = tswap64(rold.rlim_cur);
        target_rold->rlim_max = tswap64(rold.rlim_max);
        unlock_user_struct(target_rold, arg4, 1);
    }
    break;
}
#endif
#ifdef TARGET_NR_gethostname
case TARGET_NR_gethostname:
{
    char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
    if (name) {
        ret = get_errno(gethostname(name, arg2));
        unlock_user(name, arg1, arg2);
    } else {
        ret = -TARGET_EFAULT;
    }
    break;
}
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
case TARGET_NR_atomic_cmpxchg_32:
{
    /* should use start_exclusive from main.c */
    abi_ulong mem_value;
    if (get_user_u32(mem_value, arg6)) {
        target_siginfo_t info;
        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        info.si_code = TARGET_SEGV_MAPERR;
        info._sifields._sigfault._addr = arg6;
        queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
        ret = 0xdeadbeef;
    }
    if (mem_value == arg2)
        put_user_u32(arg1, arg6);
    ret = mem_value;
    break;
}
#endif
#ifdef TARGET_NR_atomic_barrier
case TARGET_NR_atomic_barrier:
{
    /* Like the kernel implementation and the qemu arm barrier, no-op this?
     */
    break;
}
#endif

#ifdef TARGET_NR_timer_create
case TARGET_NR_timer_create:
{
    /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

    struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
    struct target_sigevent *ptarget_sevp;
    struct target_timer_t *ptarget_timer;

    int clkid = arg1;
    int timer_index = next_free_host_timer();

    if (timer_index < 0) {
        ret = -TARGET_EAGAIN;
    } else {
        timer_t *phtimer = g_posix_timers + timer_index;

        if (arg2) {
            if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
                goto efault;
            }

            host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
            host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);

            phost_sevp = &host_sevp;
        }

        ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
        if (ret) {
            phtimer = NULL;
        } else {
            if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
                goto efault;
            }
            ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
            unlock_user_struct(ptarget_timer, arg3, 1);
        }
    }
    break;
}
#endif

#ifdef TARGET_NR_timer_settime
case TARGET_NR_timer_settime:
{
    /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
     * struct itimerspec * old_value */
    arg1 &= 0xffff;
    if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
        ret = -TARGET_EINVAL;
    } else {
        timer_t htimer = g_posix_timers[arg1];
        struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

        target_to_host_itimerspec(&hspec_new, arg3);
        ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
        host_to_target_itimerspec(arg2, &hspec_old);
    }
    break;
}
#endif

#ifdef TARGET_NR_timer_gettime
case TARGET_NR_timer_gettime:
{
    /* args: timer_t timerid, struct itimerspec *curr_value */
    arg1 &= 0xffff;
    if (!arg2) {
        return -TARGET_EFAULT;
    } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
        ret = -TARGET_EINVAL;
    } else {
        timer_t htimer = g_posix_timers[arg1];
        struct itimerspec hspec;
        ret = get_errno(timer_gettime(htimer, &hspec));

        if (host_to_target_itimerspec(arg2, &hspec)) {
            ret = -TARGET_EFAULT;
        }
    }
    break;
}
#endif

#ifdef TARGET_NR_timer_getoverrun
case TARGET_NR_timer_getoverrun:
{
    /* args: timer_t timerid */
    arg1 &= 0xffff;
    if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
        ret = -TARGET_EINVAL;
    } else {
        timer_t htimer = g_posix_timers[arg1];
        ret = get_errno(timer_getoverrun(htimer));
    }
    break;
}
#endif

#ifdef TARGET_NR_timer_delete
case TARGET_NR_timer_delete:
{
    /* args: timer_t timerid */
    arg1 &= 0xffff;
    if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
        ret = -TARGET_EINVAL;
    } else {
        timer_t htimer = g_posix_timers[arg1];
        ret = get_errno(timer_delete(htimer));
        g_posix_timers[arg1] = 0;
    }
    break;
}
#endif

default:
unimplemented:
    gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
unimplemented_nowarn:
#endif
    ret = -TARGET_ENOSYS;
    break;
}
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}
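The fragment above marshals every guest-visible struct through the same lock/swap/unlock pattern. Below is a minimal standalone sketch of that idea with invented struct and helper names; QEMU's real lock_user_struct()/tswap16()/tswap64() additionally validate and map guest memory, which this toy version omits.

#include <stdint.h>
#include <stdio.h>

struct guest_flock { uint16_t l_type; uint64_t l_start; };  /* invented */
struct host_flock  { uint16_t l_type; uint64_t l_start; };  /* invented */

static uint16_t swap16(uint16_t v) { return (uint16_t)((v >> 8) | (v << 8)); }
static uint64_t swap64(uint64_t v)
{
    v = ((v & 0x00ff00ff00ff00ffULL) << 8)  | ((v >> 8)  & 0x00ff00ff00ff00ffULL);
    v = ((v & 0x0000ffff0000ffffULL) << 16) | ((v >> 16) & 0x0000ffff0000ffffULL);
    return (v << 32) | (v >> 32);
}

static void guest_to_host(struct host_flock *h, const struct guest_flock *g)
{
    h->l_type  = swap16(g->l_type);   /* plays the role of tswap16() above */
    h->l_start = swap64(g->l_start);  /* plays the role of tswap64() above */
}

int main(void)
{
    struct guest_flock g = { swap16(1), swap64(4096) };  /* "guest-endian" */
    struct host_flock h;
    guest_to_host(&h, &g);
    printf("type=%u start=%llu\n", h.l_type, (unsigned long long)h.l_start);
    return 0;
}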
1
962
static int dxv_decompress_dxt1(AVCodecContext *avctx)
{
    DXVContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    uint32_t value, prev, op;
    int idx = 0, state = 0;
    int pos = 2;

    /* Copy the first two elements */
    AV_WL32(ctx->tex_data, bytestream2_get_le32(gbc));
    AV_WL32(ctx->tex_data + 4, bytestream2_get_le32(gbc));

    /* Process input until the whole texture has been filled */
    while (pos < ctx->tex_size / 4) {
        CHECKPOINT(2);

        /* Copy two elements from a previous offset or from the input buffer */
        if (op) {
            prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;

            prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;
        } else {
            CHECKPOINT(2);

            if (op)
                prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            else
                prev = bytestream2_get_le32(gbc);
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;

            CHECKPOINT(2);

            if (op)
                prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            else
                prev = bytestream2_get_le32(gbc);
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;
        }
    }

    return 0;
}
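dxv_decompress_dxt1() interleaves literal 32-bit words from the input with back-references into already-decoded output at distance idx, and the code visible here never validates that pos - idx stays inside the buffer. Below is a self-contained sketch of the back-reference step with the bounds checks spelled out; the names are invented and this is not FFmpeg's API.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Copy one 32-bit word from `dist` words back, rejecting invalid offsets. */
static int backref_copy(uint32_t *out, size_t pos, size_t cap, size_t dist)
{
    if (dist == 0 || dist > pos || pos >= cap)
        return -1;                  /* would read before out[0] or write past cap */
    out[pos] = out[pos - dist];     /* the AV_RL32/AV_WL32 pair above */
    return 0;
}

int main(void)
{
    uint32_t tex[4] = { 0x11111111u, 0x22222222u };
    if (backref_copy(tex, 2, 4, 1) == 0)
        printf("tex[2] = 0x%08x\n", tex[2]);   /* repeats tex[1] */
    return 0;
}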
0
964
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        AVPacket *avpkt)
{
    JvContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end = buf + avpkt->size;
    int video_size, video_type, i, j;

    video_size = AV_RL32(buf);
    video_type = buf[4];
    buf += 5;

    if (video_size) {
        if (video_size < 0 || video_size > avpkt->size - 5) {
            av_log(avctx, AV_LOG_ERROR, "video size %d invalid\n", video_size);
            return AVERROR_INVALIDDATA;
        }
        if (avctx->reget_buffer(avctx, &s->frame) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;
        }

        if (video_type == 0 || video_type == 1) {
            GetBitContext gb;
            init_get_bits(&gb, buf, 8 * video_size);

            for (j = 0; j < avctx->height; j += 8)
                for (i = 0; i < avctx->width; i += 8)
                    decode8x8(&gb, s->frame.data[0] + j*s->frame.linesize[0] + i,
                              s->frame.linesize[0], &s->dsp);

            buf += video_size;
        } else if (video_type == 2) {
            if (buf + 1 <= buf_end) {
                int v = *buf++;
                for (j = 0; j < avctx->height; j++)
                    memset(s->frame.data[0] + j*s->frame.linesize[0], v, avctx->width);
            }
        } else {
            av_log(avctx, AV_LOG_WARNING, "unsupported frame type %i\n", video_type);
            return AVERROR_INVALIDDATA;
        }
    }

    if (buf_end - buf >= AVPALETTE_COUNT * 3) {
        for (i = 0; i < AVPALETTE_COUNT; i++) {
            uint32_t pal = AV_RB24(buf);
            s->palette[i] = 0xFF << 24 | pal << 2 | ((pal >> 4) & 0x30303);
            buf += 3;
        }
        s->palette_has_changed = 1;
    }

    if (video_size) {
        s->frame.key_frame           = 1;
        s->frame.pict_type           = AV_PICTURE_TYPE_I;
        s->frame.palette_has_changed = s->palette_has_changed;
        s->palette_has_changed       = 0;
        memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);

        *data_size = sizeof(AVFrame);
        *(AVFrame*)data = s->frame;
    }

    return avpkt->size;
}
0
965
static av_cold int asv_encode_close(AVCodecContext *avctx)
{
    av_frame_free(&avctx->coded_frame);

    return 0;
}
1
966
static void megasas_reset_frames(MegasasState *s)
{
    PCIDevice *pcid = PCI_DEVICE(s);
    int i;
    MegasasCmd *cmd;

    for (i = 0; i < s->fw_cmds; i++) {
        cmd = &s->frames[i];
        if (cmd->pa) {
            pci_dma_unmap(pcid, cmd->frame, cmd->pa_size, 0, 0);
            cmd->frame = NULL;
            cmd->pa = 0;
        }
    }
}
1
967
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque)
{
    AioHandler *node;
    bool is_new = false;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
            is_new = true;
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->opaque = opaque;
        node->is_external = is_external;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_epoll_update(ctx, node, is_new);
    aio_notify(ctx);
}
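aio_set_fd_handler() defers node removal while the handler list is being walked. The following standalone sketch illustrates that defer-then-reap pattern under the assumption of a single-threaded walker; the list layout and names are invented, not QEMU's.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int fd; bool deleted; struct node *next; };

static struct node *head;
static int walking;   /* plays the role of ctx->walking_handlers */

static void remove_fd(int fd)
{
    for (struct node **p = &head; *p; p = &(*p)->next) {
        if ((*p)->fd == fd) {
            if (walking) {
                (*p)->deleted = true;   /* defer: a walker holds the list */
            } else {
                struct node *n = *p;    /* no walker: unlink and free now */
                *p = n->next;
                free(n);
            }
            return;
        }
    }
}

static void walk(void)
{
    walking++;
    for (struct node *n = head; n; n = n->next)
        if (!n->deleted)
            printf("dispatch fd %d\n", n->fd);
    walking--;
    if (!walking) {                     /* reap nodes flagged during the walk */
        for (struct node **p = &head; *p; ) {
            if ((*p)->deleted) { struct node *n = *p; *p = n->next; free(n); }
            else p = &(*p)->next;
        }
    }
}

int main(void)
{
    struct node *a = calloc(1, sizeof *a); a->fd = 3; a->next = head; head = a;
    struct node *b = calloc(1, sizeof *b); b->fd = 4; b->next = head; head = b;
    remove_fd(3);   /* no walk in progress: freed immediately */
    walk();         /* dispatches fd 4 */
    return 0;
}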
1
968
unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        cur = hbi->cur[--i];
        pos >>= BITS_PER_LEVEL;
    } while (cur == 0);

    /* Check for end of iteration.  We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel.  The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */
    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        pos = (pos << BITS_PER_LEVEL) + ffsl(cur) - 1;
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration.  */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}
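The iterator above leans on two classic bit tricks: ffsl(cur) - 1 yields the index of the least significant set bit, and cur & (cur - 1) clears it. A small standalone demonstration, using glibc's ffsl and an invented driver:

#define _GNU_SOURCE   /* ffsl is a glibc extension declared in <strings.h> */
#include <stdio.h>
#include <strings.h>

int main(void)
{
    unsigned long cur = 0x28;            /* bits 3 and 5 set */
    while (cur) {
        printf("lowest set bit: %d\n", ffsl((long)cur) - 1);
        cur &= cur - 1;                  /* drop it, as hbi->cur[i] does */
    }
    return 0;                            /* prints 3, then 5 */
}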
1
969
static int intel_hda_exit(PCIDevice *pci)
{
    IntelHDAState *d = DO_UPCAST(IntelHDAState, pci, pci);

    if (d->msi) {
        msi_uninit(&d->pci);
    }
    cpu_unregister_io_memory(d->mmio_addr);
    return 0;
}
1
970
static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s    = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels   = inlink->channels;
    const int nb_samples = frame->nb_samples;
    AVFrame *out_frame;
    int chan, i;

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(inlink, nb_samples);
        if (!out_frame)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out_frame, frame);
    }

    for (chan = 0; chan < channels; chan++) {
        const double *src = (double *)frame->extended_data[chan];
        double *dst       = (double *)out_frame->extended_data[chan];
        ChanParam *cp     = &s->channels[chan];

        for (i = 0; i < nb_samples; i++) {
            update_volume(cp, fabs(src[i]));

            dst[i] = av_clipd(src[i] * get_volume(s, cp->volume), -1, 1);
        }
    }

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}
1
972
static void pfpu_start(MilkymistPFPUState *s)
{
    int x, y;
    int i;

    for (y = 0; y <= s->regs[R_VMESHLAST]; y++) {
        for (x = 0; x <= s->regs[R_HMESHLAST]; x++) {
            D_EXEC(qemu_log("\nprocessing x=%d y=%d\n", x, y));

            /* set current position */
            s->gp_regs[GPR_X] = x;
            s->gp_regs[GPR_Y] = y;

            /* run microcode on this position */
            i = 0;
            while (pfpu_decode_insn(s)) {
                /* decode at most MICROCODE_WORDS instructions */
                if (i++ >= MICROCODE_WORDS) {
                    error_report("milkymist_pfpu: too many instructions "
                            "executed in microcode. No VECTOUT?");
                    break;
                }
            }

            /* reset pc for next run */
            s->regs[R_PC] = 0;
        }
    }

    s->regs[R_VERTICES] = x * y;

    trace_milkymist_pfpu_pulse_irq();
    qemu_irq_pulse(s->irq);
}
1
973
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp)
{
    if (bpp == 8) {
        ff_vp9dsp_init_8(dsp);
    } else if (bpp == 10) {
        ff_vp9dsp_init_10(dsp);
    } else {
        av_assert0(bpp == 12);
        ff_vp9dsp_init_12(dsp);
    }

    if (ARCH_X86) ff_vp9dsp_init_x86(dsp, bpp);
    if (ARCH_MIPS) ff_vp9dsp_init_mips(dsp, bpp);
}
1
974
static int mov_read_mdhd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
{
    AVStream *st = c->fc->streams[c->fc->nb_streams-1];

    print_atom("mdhd", atom);

    get_byte(pb); /* version */

    get_byte(pb); get_byte(pb);
    get_byte(pb); /* flags */

    get_be32(pb); /* creation time */
    get_be32(pb); /* modification time */

    c->streams[c->total_streams]->time_scale = get_be32(pb);
#ifdef DEBUG
    printf("track[%i].time_scale = %i\n",
           c->fc->nb_streams-1, c->streams[c->total_streams]->time_scale); /* time scale */
#endif
    get_be32(pb); /* duration */

    get_be16(pb); /* language */
    get_be16(pb); /* quality */

    return 0;
}
1
975
static int svq1_encode_plane(SVQ1Context *s, int plane,
                             unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    int x, y;
    int i;
    int block_width, block_height;
    int level;
    int threshold[6];
    const int lambda= (s->picture.quality*s->picture.quality) >> (2*FF_LAMBDA_SHIFT);

    /* figure out the acceptable level thresholds in advance */
    threshold[5] = QUALITY_THRESHOLD;
    for (level = 4; level >= 0; level--)
        threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;

    block_width  = (width  + 15) / 16;
    block_height = (height + 15) / 16;

    if(s->picture.pict_type == FF_P_TYPE){
        s->m.avctx= s->avctx;
        s->m.current_picture_ptr= &s->m.current_picture;
        s->m.last_picture_ptr   = &s->m.last_picture;
        s->m.last_picture.data[0]= ref_plane;
        s->m.linesize=
        s->m.last_picture.linesize[0]=
        s->m.new_picture.linesize[0]=
        s->m.current_picture.linesize[0]= stride;
        s->m.width= width;
        s->m.height= height;
        s->m.mb_width= block_width;
        s->m.mb_height= block_height;
        s->m.mb_stride= s->m.mb_width+1;
        s->m.b8_stride= 2*s->m.mb_width+1;
        s->m.f_code=1;
        s->m.pict_type= s->picture.pict_type;
        s->m.me_method= s->avctx->me_method;
        s->m.me.scene_change_score=0;
        s->m.flags= s->avctx->flags;
//        s->m.out_format = FMT_H263;
//        s->m.unrestricted_mv= 1;

        s->m.lambda= s->picture.quality;
        s->m.qscale= (s->m.lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
        s->m.lambda2= (s->m.lambda*s->m.lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;

        if(!s->motion_val8[plane]){
            s->motion_val8 [plane]= av_mallocz((s->m.b8_stride*block_height*2 + 2)*2*sizeof(int16_t));
            s->motion_val16[plane]= av_mallocz((s->m.mb_stride*(block_height + 2) + 1)*2*sizeof(int16_t));
        }

        s->m.mb_type= s->mb_type;

        //dummies, to avoid segfaults
        s->m.current_picture.mb_mean=   (uint8_t *)s->dummy;
        s->m.current_picture.mb_var=    (uint16_t*)s->dummy;
        s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy;
        s->m.current_picture.mb_type= s->dummy;

        s->m.current_picture.motion_val[0]= s->motion_val8[plane] + 2;
        s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1;
        s->m.dsp= s->dsp; //move
        ff_init_me(&s->m);

        s->m.me.dia_size= s->avctx->dia_size;
        s->m.first_slice_line=1;
        for (y = 0; y < block_height; y++) {
            uint8_t src[stride*16];

            s->m.new_picture.data[0]= src - y*16*stride; //ugly
            s->m.mb_y= y;

            for(i=0; i<16 && i + 16*y<height; i++){
                memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
                for(x=width; x<16*block_width; x++)
                    src[i*stride+x]= src[i*stride+x-1];
            }
            for(; i<16 && i + 16*y<16*block_height; i++)
                memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width);

            for (x = 0; x < block_width; x++) {
                s->m.mb_x= x;
                ff_init_block_index(&s->m);
                ff_update_block_index(&s->m);

                ff_estimate_p_frame_motion(&s->m, x, y);
            }
            s->m.first_slice_line=0;
        }

        ff_fix_long_p_mvs(&s->m);
        ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code, CANDIDATE_MB_TYPE_INTER, 0);
    }

    s->m.first_slice_line=1;
    for (y = 0; y < block_height; y++) {
        uint8_t src[stride*16];

        for(i=0; i<16 && i + 16*y<height; i++){
            memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
            for(x=width; x<16*block_width; x++)
                src[i*stride+x]= src[i*stride+x-1];
        }
        for(; i<16 && i + 16*y<16*block_height; i++)
            memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width);

        s->m.mb_y= y;
        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[3][6][7*32];
            int count[3][6];
            int offset = y * 16 * stride + x * 16;
            uint8_t *decoded= decoded_plane + offset;
            uint8_t *ref= ref_plane + offset;
            int score[4]={0,0,0,0}, best;
            uint8_t *temp = s->scratchbuf;

            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3000){ //FIXME check size
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }

            s->m.mb_x= x;
            ff_init_block_index(&s->m);
            ff_update_block_index(&s->m);

            if(s->picture.pict_type == FF_I_TYPE || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){
                for(i=0; i<6; i++){
                    init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], 7*32);
                }
                if(s->picture.pict_type == FF_P_TYPE){
                    const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                    score[0]= vlc[1]*lambda;
                }
                score[0]+= encode_block(s, src+16*x, NULL, temp, stride, 5, 64, lambda, 1);
                for(i=0; i<6; i++){
                    count[0][i]= put_bits_count(&s->reorder_pb[i]);
                    flush_put_bits(&s->reorder_pb[i]);
                }
            }else
                score[0]= INT_MAX;

            best=0;

            if(s->picture.pict_type == FF_P_TYPE){
                const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                int mx, my, pred_x, pred_y, dxy;
                int16_t *motion_ptr;

                motion_ptr= h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
                if(s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTER){
                    for(i=0; i<6; i++)
                        init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7*32);

                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    s->m.pb= s->reorder_pb[5];
                    mx= motion_ptr[0];
                    my= motion_ptr[1];
                    assert(mx>=-32 && mx<=31);
                    assert(my>=-32 && my<=31);
                    assert(pred_x>=-32 && pred_x<=31);
                    assert(pred_y>=-32 && pred_y<=31);
                    ff_h263_encode_motion(&s->m, mx - pred_x, 1);
                    ff_h263_encode_motion(&s->m, my - pred_y, 1);
                    s->reorder_pb[5]= s->m.pb;
                    score[1] += lambda*put_bits_count(&s->reorder_pb[5]);

                    dxy= (mx&1) + 2*(my&1);

                    s->dsp.put_pixels_tab[0][dxy](temp+16, ref + (mx>>1) + stride*(my>>1), stride, 16);

                    score[1]+= encode_block(s, src+16*x, temp+16, decoded, stride, 5, 64, lambda, 0);
                    best= score[1] <= score[0];

                    vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
                    score[2]= s->dsp.sse[0](NULL, src+16*x, ref, stride, 16);
                    score[2]+= vlc[1]*lambda;
                    if(score[2] < score[best] && mx==0 && my==0){
                        best=2;
                        s->dsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
                        for(i=0; i<6; i++){
                            count[2][i]=0;
                        }
                        put_bits(&s->pb, vlc[1], vlc[0]);
                    }
                }

                if(best==1){
                    for(i=0; i<6; i++){
                        count[1][i]= put_bits_count(&s->reorder_pb[i]);
                        flush_put_bits(&s->reorder_pb[i]);
                    }
                }else{
                    motion_ptr[0                 ] = motion_ptr[1                 ]=
                    motion_ptr[2                 ] = motion_ptr[3                 ]=
                    motion_ptr[0+2*s->m.b8_stride] = motion_ptr[1+2*s->m.b8_stride]=
                    motion_ptr[2+2*s->m.b8_stride] = motion_ptr[3+2*s->m.b8_stride]=0;
                }
            }

            s->rd_total += score[best];

            for(i=5; i>=0; i--){
                ff_copy_bits(&s->pb, reorder_buffer[best][i], count[best][i]);
            }
            if(best==0){
                s->dsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
            }
        }
        s->m.first_slice_line=0;
    }
    return 0;
}
1
976
static int commit_direntries(BDRVVVFATState* s,
        int dir_index, int parent_mapping_index)
{
    direntry_t* direntry = array_get(&(s->directory), dir_index);
    uint32_t first_cluster = dir_index == 0 ? 0 : begin_of_direntry(direntry);
    mapping_t* mapping = find_mapping_for_cluster(s, first_cluster);

    int factor = 0x10 * s->sectors_per_cluster;
    int old_cluster_count, new_cluster_count;
    int current_dir_index = mapping->info.dir.first_dir_index;
    int first_dir_index = current_dir_index;
    int ret, i;
    uint32_t c;

    DLOG(fprintf(stderr, "commit_direntries for %s, parent_mapping_index %d\n",
            mapping->path, parent_mapping_index));

    assert(direntry);
    assert(mapping);
    assert(mapping->begin == first_cluster);
    assert(mapping->info.dir.first_dir_index < s->directory.next);
    assert(mapping->mode & MODE_DIRECTORY);
    assert(dir_index == 0 || is_directory(direntry));

    mapping->info.dir.parent_mapping_index = parent_mapping_index;

    if (first_cluster == 0) {
        old_cluster_count = new_cluster_count =
            s->last_cluster_of_root_directory;
    } else {
        for (old_cluster_count = 0, c = first_cluster; !fat_eof(s, c);
                c = fat_get(s, c))
            old_cluster_count++;

        for (new_cluster_count = 0, c = first_cluster; !fat_eof(s, c);
                c = modified_fat_get(s, c))
            new_cluster_count++;
    }

    if (new_cluster_count > old_cluster_count) {
        if (insert_direntries(s,
                current_dir_index + factor * old_cluster_count,
                factor * (new_cluster_count - old_cluster_count)) == NULL)
            return -1;
    } else if (new_cluster_count < old_cluster_count)
        remove_direntries(s,
                current_dir_index + factor * new_cluster_count,
                factor * (old_cluster_count - new_cluster_count));

    for (c = first_cluster; !fat_eof(s, c); c = modified_fat_get(s, c)) {
        void* direntry = array_get(&(s->directory), current_dir_index);
        int ret = vvfat_read(s->bs, cluster2sector(s, c), direntry,
                s->sectors_per_cluster);
        if (ret)
            return ret;
        assert(!strncmp(s->directory.pointer, "QEMU", 4));
        current_dir_index += factor;
    }

    ret = commit_mappings(s, first_cluster, dir_index);
    if (ret)
        return ret;

    /* recurse */
    for (i = 0; i < factor * new_cluster_count; i++) {
        direntry = array_get(&(s->directory), first_dir_index + i);
        if (is_directory(direntry) && !is_dot(direntry)) {
            mapping = find_mapping_for_cluster(s, first_cluster);
            assert(mapping->mode & MODE_DIRECTORY);
            ret = commit_direntries(s, first_dir_index + i,
                array_index(&(s->mapping), mapping));
            if (ret)
                return ret;
        }
    }

    return 0;
}
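commit_direntries() counts a directory's clusters by walking the FAT chain until an end-of-file marker. A minimal standalone sketch of that walk, with a toy FAT table standing in for fat_get()/fat_eof() (both names and values are invented here):

#include <stdint.h>
#include <stdio.h>

#define FAT_EOF 0x0fffffff

static uint32_t fat[] = { FAT_EOF, 2, 3, FAT_EOF };  /* chain: 1 -> 2 -> 3 */

/* Follow next-cluster links from `first` until the chain terminates. */
static int chain_length(uint32_t first)
{
    int n = 0;
    for (uint32_t c = first; c != FAT_EOF; c = fat[c])
        n++;
    return n;
}

int main(void)
{
    printf("clusters: %d\n", chain_length(1));   /* prints 3 */
    return 0;
}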
1
977
void vfio_put_group(VFIOGroup *group)
{
    if (!QLIST_EMPTY(&group->device_list)) {
        return;
    }

    vfio_kvm_device_del_group(group);
    vfio_disconnect_container(group);
    QLIST_REMOVE(group, next);
    trace_vfio_put_group(group->fd);
    close(group->fd);
    g_free(group);

    if (QLIST_EMPTY(&vfio_group_list)) {
        qemu_unregister_reset(vfio_reset_handler, NULL);
    }
}
1
978
void do_POWER_div (void)
{
    uint64_t tmp;

    if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
        T0 = (long)((-1) * (T0 >> 31));
        env->spr[SPR_MQ] = 0;
    } else {
        tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
        env->spr[SPR_MQ] = tmp % T1;
        T0 = tmp / (int32_t)T1;
    }
}
0
980
void qemu_chr_info(Monitor *mon)
{
    CharDriverState *chr;

    TAILQ_FOREACH(chr, &chardevs, next) {
        monitor_printf(mon, "%s: filename=%s\n", chr->label, chr->filename);
    }
}
0
981
void lm32_debug_excp_handler(CPUState *cs)
{
    LM32CPU *cpu = LM32_CPU(cs);
    CPULM32State *env = &cpu->env;
    CPUBreakpoint *bp;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_watchpoints(env)) {
                raise_exception(env, EXCP_WATCHPOINT);
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == env->pc) {
                if (bp->flags & BP_CPU) {
                    raise_exception(env, EXCP_BREAKPOINT);
                }
                break;
            }
        }
    }
}
0
983
int qemu_get_byte(QEMUFile *f)
{
    if (f->is_write)
        abort();

    if (f->buf_index >= f->buf_size) {
        qemu_fill_buffer(f);
        if (f->buf_index >= f->buf_size)
            return 0;
    }
    return f->buf[f->buf_index++];
}
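qemu_get_byte() implements the usual refill-on-empty buffered read: serve from the buffer, refill when the index catches up to the size, and return a default when no data remains. A standalone sketch of the same pattern with invented names and a pretend backing store:

#include <stdio.h>
#include <string.h>

static unsigned char buf[4];
static int buf_index, buf_size;

static void fill_buffer(void)
{
    static const char src[] = "ab";   /* pretend backing store */
    static int served;

    buf_index = 0;
    buf_size = 0;
    if (!served) {                    /* deliver the data exactly once */
        memcpy(buf, src, 2);
        buf_size = 2;
        served = 1;
    }
}

static int get_byte(void)
{
    if (buf_index >= buf_size) {
        fill_buffer();
        if (buf_index >= buf_size)
            return 0;                 /* mirrors the code above */
    }
    return buf[buf_index++];
}

int main(void)
{
    int a = get_byte();
    int b = get_byte();
    int c = get_byte();
    printf("%c %c %d\n", a, b, c);    /* prints: a b 0 */
    return 0;
}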