{"target": 1, "idx": 8, "func": "static void filter_mirror_setup(NetFilterState *nf, Error **errp) { MirrorState *s = FILTER_MIRROR(nf); Chardev *chr; chr = qemu_chr_find(s->outdev); if (chr == NULL) { error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, \"Device '%s' not found\", s->outdev); qemu_chr_fe_init(&s->chr_out, chr, errp);"} {"target": 1, "idx": 10, "func": "static inline int64_t sub64(const int64_t a, const int64_t b) { return a - b; }"} {"target": 1, "idx": 15, "func": "void fw_cfg_add_callback(FWCfgState *s, uint16_t key, FWCfgCallback callback, void *callback_opaque, uint8_t *data, size_t len) { int arch = !!(key & FW_CFG_ARCH_LOCAL); assert(key & FW_CFG_WRITE_CHANNEL); key &= FW_CFG_ENTRY_MASK; assert(key < FW_CFG_MAX_ENTRY && len <= 65535); s->entries[arch][key].data = data; s->entries[arch][key].len = len; s->entries[arch][key].callback_opaque = callback_opaque; s->entries[arch][key].callback = callback; }"} {"target": 1, "idx": 22, "func": "static void emulated_push_error(EmulatedState *card, uint64_t code) { EmulEvent *event = (EmulEvent *)g_malloc(sizeof(EmulEvent)); assert(event); event->p.error.type = EMUL_ERROR; event->p.error.code = code; emulated_push_event(card, event); }"} {"target": 0, "idx": 51, "func": "static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid) { int32_t datalen; int lun; DPRINTF(\"do_busid_cmd: busid 0x%x\\n\", busid); lun = busid & 7; s->current_req = scsi_req_new(s->current_dev, 0, lun, NULL); datalen = scsi_req_enqueue(s->current_req, buf); s->ti_size = datalen; if (datalen != 0) { s->rregs[ESP_RSTAT] = STAT_TC; s->dma_left = 0; s->dma_counter = 0; if (datalen > 0) { s->rregs[ESP_RSTAT] |= STAT_DI; } else { s->rregs[ESP_RSTAT] |= STAT_DO; } scsi_req_continue(s->current_req); } s->rregs[ESP_RINTR] = INTR_BS | INTR_FC; s->rregs[ESP_RSEQ] = SEQ_CD; esp_raise_irq(s); }"} {"target": 0, "idx": 62, "func": "static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { uint32_t config_addr = rtas_ld(args, 0); uint64_t buid = rtas_ldq(args, 1); unsigned int func = rtas_ld(args, 3); unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */ unsigned int seq_num = rtas_ld(args, 5); unsigned int ret_intr_type; unsigned int irq, max_irqs = 0, num = 0; sPAPRPHBState *phb = NULL; PCIDevice *pdev = NULL; spapr_pci_msi *msi; int *config_addr_key; switch (func) { case RTAS_CHANGE_MSI_FN: case RTAS_CHANGE_FN: ret_intr_type = RTAS_TYPE_MSI; break; case RTAS_CHANGE_MSIX_FN: ret_intr_type = RTAS_TYPE_MSIX; break; default: error_report(\"rtas_ibm_change_msi(%u) is not implemented\", func); rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } /* Fins sPAPRPHBState */ phb = spapr_pci_find_phb(spapr, buid); if (phb) { pdev = spapr_pci_find_dev(spapr, buid, config_addr); } if (!phb || !pdev) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } /* Releasing MSIs */ if (!req_num) { msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr); if (!msi) { trace_spapr_pci_msi(\"Releasing wrong config\", config_addr); rtas_st(rets, 0, RTAS_OUT_HW_ERROR); return; } xics_free(spapr->icp, msi->first_irq, msi->num); if (msi_present(pdev)) { spapr_msi_setmsg(pdev, 0, false, 0, num); } if (msix_present(pdev)) { spapr_msi_setmsg(pdev, 0, true, 0, num); } g_hash_table_remove(phb->msi, &config_addr); trace_spapr_pci_msi(\"Released MSIs\", config_addr); rtas_st(rets, 0, RTAS_OUT_SUCCESS); rtas_st(rets, 1, 0); return; } /* Enabling MSI */ /* Check if the device supports as 
many IRQs as requested */ if (ret_intr_type == RTAS_TYPE_MSI) { max_irqs = msi_nr_vectors_allocated(pdev); } else if (ret_intr_type == RTAS_TYPE_MSIX) { max_irqs = pdev->msix_entries_nr; } if (!max_irqs) { error_report(\"Requested interrupt type %d is not enabled for device %x\", ret_intr_type, config_addr); rtas_st(rets, 0, -1); /* Hardware error */ return; } /* Correct the number if the guest asked for too many */ if (req_num > max_irqs) { trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs); req_num = max_irqs; irq = 0; /* to avoid misleading trace */ goto out; } /* Allocate MSIs */ irq = xics_alloc_block(spapr->icp, 0, req_num, false, ret_intr_type == RTAS_TYPE_MSI); if (!irq) { error_report(\"Cannot allocate MSIs for device %x\", config_addr); rtas_st(rets, 0, RTAS_OUT_HW_ERROR); return; } /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */ spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX, irq, req_num); /* Add MSI device to cache */ msi = g_new(spapr_pci_msi, 1); msi->first_irq = irq; msi->num = req_num; config_addr_key = g_new(int, 1); *config_addr_key = config_addr; g_hash_table_insert(phb->msi, config_addr_key, msi); out: rtas_st(rets, 0, RTAS_OUT_SUCCESS); rtas_st(rets, 1, req_num); rtas_st(rets, 2, ++seq_num); if (nret > 3) { rtas_st(rets, 3, ret_intr_type); } trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq); }"} {"target": 0, "idx": 71, "func": "void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...) { va_list va; char buf[256]; char *p; target_ulong addr; uint64_t i64; GDBState *s; s = gdbserver_state; if (!s) return; gdb_current_syscall_cb = cb; s->state = RS_SYSCALL; #ifndef CONFIG_USER_ONLY vm_stop(EXCP_DEBUG); #endif s->state = RS_IDLE; va_start(va, fmt); p = buf; *(p++) = 'F'; while (*fmt) { if (*fmt == '%') { fmt++; switch (*fmt++) { case 'x': addr = va_arg(va, target_ulong); p += snprintf(p, &buf[sizeof(buf)] - p, TARGET_FMT_lx, addr); break; case 'l': if (*(fmt++) != 'x') goto bad_format; i64 = va_arg(va, uint64_t); p += snprintf(p, &buf[sizeof(buf)] - p, \"%\" PRIx64, i64); break; case 's': addr = va_arg(va, target_ulong); p += snprintf(p, &buf[sizeof(buf)] - p, TARGET_FMT_lx \"/%x\", addr, va_arg(va, int)); break; default: bad_format: fprintf(stderr, \"gdbstub: Bad syscall format string '%s'\\n\", fmt - 1); break; } } else { *(p++) = *(fmt++); } } *p = 0; va_end(va); put_packet(s, buf); #ifdef CONFIG_USER_ONLY gdb_handlesig(s->c_cpu, 0); #else cpu_interrupt(s->c_cpu, CPU_INTERRUPT_EXIT); #endif }"} {"target": 0, "idx": 75, "func": "static void gen_cp0 (CPUState *env, DisasContext *ctx, uint32_t opc, int rt, int rd) { const char *opn = \"ldst\"; switch (opc) { case OPC_MFC0: if (rt == 0) { /* Treat as NOP */ return; } gen_mfc0(ctx, rd, ctx->opcode & 0x7); gen_op_store_T0_gpr(rt); opn = \"mfc0\"; break; case OPC_MTC0: GEN_LOAD_REG_TN(T0, rt); gen_mtc0(ctx, rd, ctx->opcode & 0x7); opn = \"mtc0\"; break; #ifdef TARGET_MIPS64 case OPC_DMFC0: if (rt == 0) { /* Treat as NOP */ return; } gen_dmfc0(ctx, rd, ctx->opcode & 0x7); gen_op_store_T0_gpr(rt); opn = \"dmfc0\"; break; case OPC_DMTC0: GEN_LOAD_REG_TN(T0, rt); gen_dmtc0(ctx, rd, ctx->opcode & 0x7); opn = \"dmtc0\"; break; #endif case OPC_TLBWI: opn = \"tlbwi\"; if (!env->do_tlbwi) goto die; gen_op_tlbwi(); break; case OPC_TLBWR: opn = \"tlbwr\"; if (!env->do_tlbwr) goto die; gen_op_tlbwr(); break; case OPC_TLBP: opn = \"tlbp\"; if (!env->do_tlbp) goto die; gen_op_tlbp(); break; case OPC_TLBR: opn = \"tlbr\"; if (!env->do_tlbr) goto die; 
gen_op_tlbr(); break; case OPC_ERET: opn = \"eret\"; save_cpu_state(ctx, 0); gen_op_eret(); ctx->bstate = BS_EXCP; break; case OPC_DERET: opn = \"deret\"; if (!(ctx->hflags & MIPS_HFLAG_DM)) { MIPS_INVAL(opn); generate_exception(ctx, EXCP_RI); } else { save_cpu_state(ctx, 0); gen_op_deret(); ctx->bstate = BS_EXCP; } break; case OPC_WAIT: opn = \"wait\"; /* If we get an exception, we want to restart at next instruction */ ctx->pc += 4; save_cpu_state(ctx, 1); ctx->pc -= 4; gen_op_wait(); ctx->bstate = BS_EXCP; break; default: die: MIPS_INVAL(opn); generate_exception(ctx, EXCP_RI); return; } MIPS_DEBUG(\"%s %s %d\", opn, regnames[rt], rd); }"} {"target": 1, "idx": 88, "func": "static void tricore_cpu_initfn(Object *obj) { CPUState *cs = CPU(obj); TriCoreCPU *cpu = TRICORE_CPU(obj); CPUTriCoreState *env = &cpu->env; cs->env_ptr = env; cpu_exec_init(cs, &error_abort); if (tcg_enabled()) { tricore_tcg_init(); } }"} {"target": 1, "idx": 125, "func": "static int mxf_read_content_storage(MXFContext *mxf, ByteIOContext *pb, int tag) { switch (tag) { case 0x1901: mxf->packages_count = get_be32(pb); if (mxf->packages_count >= UINT_MAX / sizeof(UID)) return -1; mxf->packages_refs = av_malloc(mxf->packages_count * sizeof(UID)); if (!mxf->packages_refs) return -1; url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */ get_buffer(pb, (uint8_t *)mxf->packages_refs, mxf->packages_count * sizeof(UID)); break; } return 0; }"} {"target": 0, "idx": 127, "func": "int av_reallocp_array(void *ptr, size_t nmemb, size_t size) { void **ptrptr = ptr; void *ret; if (size <= 0 || nmemb >= INT_MAX / size) return AVERROR(ENOMEM); if (nmemb <= 0) { av_freep(ptr); return 0; } ret = av_realloc(*ptrptr, nmemb * size); if (!ret) { av_freep(ptr); return AVERROR(ENOMEM); } *ptrptr = ret; return 0; }"} {"target": 0, "idx": 137, "func": "void ff_put_h264_qpel16_mc31_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_hv_qrt_16w_msa(src - 2, src - (stride * 2) + sizeof(uint8_t), stride, dst, stride, 16); }"} {"target": 1, "idx": 143, "func": "struct vhost_net *vhost_net_init(VLANClientState *backend, int devfd, bool force) { return NULL; }"} {"target": 1, "idx": 146, "func": "static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2, ptrdiff_t stride, int h) { const uint8_t *scantable = s->intra_scantable.permutated; LOCAL_ALIGNED_16(int16_t, temp, [64]); int i, last, run, bits, level, start_i; const int esc_length = s->ac_esc_length; uint8_t *length, *last_length; av_assert2(h == 8); s->pdsp.diff_pixels(temp, src1, src2, stride); s->block_last_index[0 /* FIXME */] = last = s->fast_dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i); bits = 0; if (s->mb_intra) { start_i = 1; length = s->intra_ac_vlc_length; last_length = s->intra_ac_vlc_last_length; bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma } else { start_i = 0; length = s->inter_ac_vlc_length; last_length = s->inter_ac_vlc_last_length; } if (last >= start_i) { run = 0; for (i = start_i; i < last; i++) { int j = scantable[i]; level = temp[j]; if (level) { level += 64; if ((level & (~127)) == 0) bits += length[UNI_AC_ENC_INDEX(run, level)]; else bits += esc_length; run = 0; } else run++; } i = scantable[last]; level = temp[i] + 64; av_assert2(level - 64); if ((level & (~127)) == 0) bits += last_length[UNI_AC_ENC_INDEX(run, level)]; else bits += esc_length; } return bits; }"} {"target": 1, "idx": 155, "func": "static void ehci_mem_writew(void *ptr, target_phys_addr_t addr, uint32_t val) { fprintf(stderr, \"EHCI 
doesn't handle 16-bit writes to MMIO\\n\"); exit(1); }"} {"target": 1, "idx": 161, "func": "static void xendev_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->props = xendev_properties; set_bit(DEVICE_CATEGORY_MISC, dc->categories); }"} {"target": 1, "idx": 162, "func": "static uint64_t qdev_get_prop_mask64(Property *prop) { assert(prop->info == &qdev_prop_bit); return 0x1 << prop->bitnr; }"} {"target": 0, "idx": 166, "func": "static int create_ppc_opcodes (CPUPPCState *env, ppc_def_t *def) { opcode_t *opc, *start, *end; fill_new_table(env->opcodes, 0x40); #if defined(PPC_DUMP_CPU) printf(\"* PowerPC instructions for PVR %08x: %s flags %016\" PRIx64 \" %08x\\n\", def->pvr, def->name, def->insns_flags, def->flags); #endif if (&opc_start < &opc_end) { start = &opc_start; end = &opc_end; } else { start = &opc_end; end = &opc_start; } for (opc = start + 1; opc != end; opc++) { if ((opc->handler.type & def->insns_flags) != 0) { if (register_insn(env->opcodes, opc) < 0) { printf(\"*** ERROR initializing PowerPC instruction \" \"0x%02x 0x%02x 0x%02x\\n\", opc->opc1, opc->opc2, opc->opc3); return -1; } #if defined(PPC_DUMP_CPU) if (opc1 != 0x00) { if (opc->opc3 == 0xFF) { if (opc->opc2 == 0xFF) { printf(\" %02x -- -- (%2d ----) : %s\\n\", opc->opc1, opc->opc1, opc->oname); } else { printf(\" %02x %02x -- (%2d %4d) : %s\\n\", opc->opc1, opc->opc2, opc->opc1, opc->opc2, opc->oname); } } else { printf(\" %02x %02x %02x (%2d %4d) : %s\\n\", opc->opc1, opc->opc2, opc->opc3, opc->opc1, (opc->opc3 << 5) | opc->opc2, opc->oname); } } #endif } } fix_opcode_tables(env->opcodes); fflush(stdout); fflush(stderr); return 0; }"} {"target": 0, "idx": 174, "func": "static void smp_parse(const char *optarg) { int smp, sockets = 0, threads = 0, cores = 0; char *endptr; char option[128]; smp = strtoul(optarg, &endptr, 10); if (endptr != optarg) { if (*endptr == ',') { endptr++; } } if (get_param_value(option, 128, \"sockets\", endptr) != 0) sockets = strtoull(option, NULL, 10); if (get_param_value(option, 128, \"cores\", endptr) != 0) cores = strtoull(option, NULL, 10); if (get_param_value(option, 128, \"threads\", endptr) != 0) threads = strtoull(option, NULL, 10); if (get_param_value(option, 128, \"maxcpus\", endptr) != 0) max_cpus = strtoull(option, NULL, 10); /* compute missing values, prefer sockets over cores over threads */ if (smp == 0 || sockets == 0) { sockets = sockets > 0 ? sockets : 1; cores = cores > 0 ? cores : 1; threads = threads > 0 ? threads : 1; if (smp == 0) { smp = cores * threads * sockets; } } else { if (cores == 0) { threads = threads > 0 ? threads : 1; cores = smp / (sockets * threads); } else { threads = smp / (cores * sockets); } } smp_cpus = smp; smp_cores = cores > 0 ? cores : 1; smp_threads = threads > 0 ? 
threads : 1; if (max_cpus == 0) max_cpus = smp_cpus; }"} {"target": 0, "idx": 183, "func": "static inline void mix_2f_1r_to_dolby(AC3DecodeContext *ctx) { int i; float (*output)[256] = ctx->audio_block.block_output; for (i = 0; i < 256; i++) { output[1][i] -= output[3][i]; output[2][i] += output[3][i]; } memset(output[3], 0, sizeof(output[3])); }"} {"target": 0, "idx": 188, "func": "static int config_props(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; AVFilterLink *inlink = outlink->src->inputs[0]; ScaleContext *scale = ctx->priv; int64_t w, h; if (!(w = scale->w)) w = inlink->w; if (!(h = scale->h)) h = inlink->h; if (w == -1) w = av_rescale(h, inlink->w, inlink->h); if (h == -1) h = av_rescale(w, inlink->h, inlink->w); if (w > INT_MAX || h > INT_MAX || (h * inlink->w) > INT_MAX || (w * inlink->h) > INT_MAX) av_log(ctx, AV_LOG_ERROR, \"Rescaled value for width or height is too big.\\n\"); outlink->w = w; outlink->h = h; /* TODO: make algorithm configurable */ scale->sws = sws_getContext(inlink ->w, inlink ->h, inlink ->format, outlink->w, outlink->h, outlink->format, SWS_BILINEAR, NULL, NULL, NULL); av_log(ctx, AV_LOG_INFO, \"w:%d h:%d fmt:%s\\n\", outlink->w, outlink->h, av_pix_fmt_descriptors[outlink->format].name); scale->input_is_pal = inlink->format == PIX_FMT_PAL8 || inlink->format == PIX_FMT_BGR4_BYTE || inlink->format == PIX_FMT_RGB4_BYTE || inlink->format == PIX_FMT_BGR8 || inlink->format == PIX_FMT_RGB8; return !scale->sws; }"} {"target": 0, "idx": 227, "func": "static uint64_t omap_pwt_read(void *opaque, target_phys_addr_t addr, unsigned size) { struct omap_pwt_s *s = (struct omap_pwt_s *) opaque; int offset = addr & OMAP_MPUI_REG_MASK; if (size != 1) { return omap_badwidth_read8(opaque, addr); } switch (offset) { case 0x00: /* FRC */ return s->frc; case 0x04: /* VCR */ return s->vrc; case 0x08: /* GCR */ return s->gcr; } OMAP_BAD_REG(addr); return 0; }"} {"target": 0, "idx": 235, "func": "uint32_t lduw_be_phys(target_phys_addr_t addr) { return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN); }"} {"target": 0, "idx": 253, "func": "static int find_stream_index(AVFormatContext *s) { int i; AVStream *st; if (s->nb_streams <= 0) return -1; for(i = 0; i < s->nb_streams; i++) { st = s->streams[i]; if (st->codec.codec_type == CODEC_TYPE_VIDEO) { return i; } } return 0; }"} {"target": 1, "idx": 255, "func": "gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb, bool search_pc) { CPUState *cs = CPU(cpu); CPUMIPSState *env = &cpu->env; DisasContext ctx; target_ulong pc_start; uint16_t *gen_opc_end; CPUBreakpoint *bp; int j, lj = -1; int num_insns; int max_insns; int insn_bytes; int is_branch; if (search_pc) qemu_log(\"search pc %d\\n\", search_pc); pc_start = tb->pc; gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE; ctx.pc = pc_start; ctx.saved_pc = -1; ctx.singlestep_enabled = cs->singlestep_enabled; ctx.insn_flags = env->insn_flags; ctx.tb = tb; ctx.bstate = BS_NONE; /* Restore delay slot state from the tb context. */ ctx.hflags = (uint32_t)tb->flags; /* FIXME: maybe use 64 bits here? 
*/ restore_cpu_state(env, &ctx); #ifdef CONFIG_USER_ONLY ctx.mem_idx = MIPS_HFLAG_UM; #else ctx.mem_idx = ctx.hflags & MIPS_HFLAG_KSU; #endif num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) max_insns = CF_COUNT_MASK; LOG_DISAS(\"\\ntb %p idx %d hflags %04x\\n\", tb, ctx.mem_idx, ctx.hflags); gen_tb_start(); while (ctx.bstate == BS_NONE) { if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { QTAILQ_FOREACH(bp, &env->breakpoints, entry) { if (bp->pc == ctx.pc) { save_cpu_state(&ctx, 1); ctx.bstate = BS_BRANCH; gen_helper_0e0i(raise_exception, EXCP_DEBUG); /* Include the breakpoint location or the tb won't * be flushed when it must be. */ ctx.pc += 4; goto done_generating; } } } if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; if (lj < j) { lj++; while (lj < j) tcg_ctx.gen_opc_instr_start[lj++] = 0; } tcg_ctx.gen_opc_pc[lj] = ctx.pc; gen_opc_hflags[lj] = ctx.hflags & MIPS_HFLAG_BMASK; gen_opc_btarget[lj] = ctx.btarget; tcg_ctx.gen_opc_instr_start[lj] = 1; tcg_ctx.gen_opc_icount[lj] = num_insns; } if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) gen_io_start(); is_branch = 0; if (!(ctx.hflags & MIPS_HFLAG_M16)) { ctx.opcode = cpu_ldl_code(env, ctx.pc); insn_bytes = 4; decode_opc(env, &ctx, &is_branch); } else if (ctx.insn_flags & ASE_MICROMIPS) { ctx.opcode = cpu_lduw_code(env, ctx.pc); insn_bytes = decode_micromips_opc(env, &ctx, &is_branch); } else if (ctx.insn_flags & ASE_MIPS16) { ctx.opcode = cpu_lduw_code(env, ctx.pc); insn_bytes = decode_mips16_opc(env, &ctx, &is_branch); } else { generate_exception(&ctx, EXCP_RI); ctx.bstate = BS_STOP; break; } if (!is_branch) { handle_delay_slot(&ctx, insn_bytes); } ctx.pc += insn_bytes; num_insns++; /* Execute a branch and its delay slot as a single instruction. This is what GDB expects and is consistent with what the hardware does (e.g. if a delay slot instruction faults, the reported PC is the PC of the branch). 
*/ if (cs->singlestep_enabled && (ctx.hflags & MIPS_HFLAG_BMASK) == 0) { break; } if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) break; if (tcg_ctx.gen_opc_ptr >= gen_opc_end) { break; } if (num_insns >= max_insns) break; if (singlestep) break; } if (tb->cflags & CF_LAST_IO) { gen_io_end(); } if (cs->singlestep_enabled && ctx.bstate != BS_BRANCH) { save_cpu_state(&ctx, ctx.bstate == BS_NONE); gen_helper_0e0i(raise_exception, EXCP_DEBUG); } else { switch (ctx.bstate) { case BS_STOP: gen_goto_tb(&ctx, 0, ctx.pc); break; case BS_NONE: save_cpu_state(&ctx, 0); gen_goto_tb(&ctx, 0, ctx.pc); break; case BS_EXCP: tcg_gen_exit_tb(0); break; case BS_BRANCH: default: break; } } done_generating: gen_tb_end(tb, num_insns); *tcg_ctx.gen_opc_ptr = INDEX_op_end; if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; lj++; while (lj <= j) tcg_ctx.gen_opc_instr_start[lj++] = 0; } else { tb->size = ctx.pc - pc_start; tb->icount = num_insns; } #ifdef DEBUG_DISAS LOG_DISAS(\"\\n\"); if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log(\"IN: %s\\n\", lookup_symbol(pc_start)); log_target_disas(env, pc_start, ctx.pc - pc_start, 0); qemu_log(\"\\n\"); } #endif }"} {"target": 1, "idx": 257, "func": "static void parse_type_bool(Visitor *v, const char *name, bool *obj, Error **errp) { StringInputVisitor *siv = to_siv(v); if (siv->string) { if (!strcasecmp(siv->string, \"on\") || !strcasecmp(siv->string, \"yes\") || !strcasecmp(siv->string, \"true\")) { *obj = true; return; } if (!strcasecmp(siv->string, \"off\") || !strcasecmp(siv->string, \"no\") || !strcasecmp(siv->string, \"false\")) { *obj = false; return; } } error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : \"null\", \"boolean\"); }"} {"target": 1, "idx": 268, "func": "void ff_rtsp_undo_setup(AVFormatContext *s) { RTSPState *rt = s->priv_data; int i; for (i = 0; i < rt->nb_rtsp_streams; i++) { RTSPStream *rtsp_st = rt->rtsp_streams[i]; if (!rtsp_st) continue; if (rtsp_st->transport_priv) { if (s->oformat) { AVFormatContext *rtpctx = rtsp_st->transport_priv; av_write_trailer(rtpctx); if (rt->lower_transport == RTSP_LOWER_TRANSPORT_TCP) { uint8_t *ptr; url_close_dyn_buf(rtpctx->pb, &ptr); av_free(ptr); } else { url_fclose(rtpctx->pb); } av_metadata_free(&rtpctx->streams[0]->metadata); av_metadata_free(&rtpctx->metadata); av_free(rtpctx->streams[0]); av_free(rtpctx); } else if (rt->transport == RTSP_TRANSPORT_RDT && CONFIG_RTPDEC) ff_rdt_parse_close(rtsp_st->transport_priv); else if (CONFIG_RTPDEC) rtp_parse_close(rtsp_st->transport_priv); } rtsp_st->transport_priv = NULL; if (rtsp_st->rtp_handle) url_close(rtsp_st->rtp_handle); rtsp_st->rtp_handle = NULL; } }"} {"target": 1, "idx": 274, "func": "static void musb_rx_req(MUSBState *s, int epnum) { MUSBEndPoint *ep = s->ep + epnum; int total; /* If we already have a packet, which didn't fit into the * 64 bytes of the FIFO, only move the FIFO start and return. 
(Obsolete) */ if (ep->packey[1].p.pid == USB_TOKEN_IN && ep->status[1] >= 0 && (ep->fifostart[1]) + ep->rxcount < ep->packey[1].p.len) { TRACE(\"0x%08x, %d\", ep->fifostart[1], ep->rxcount ); ep->fifostart[1] += ep->rxcount; ep->fifolen[1] = 0; ep->rxcount = MIN(ep->packey[0].p.len - (ep->fifostart[1]), ep->maxp[1]); ep->csr[1] &= ~MGC_M_RXCSR_H_REQPKT; if (!epnum) ep->csr[0] &= ~MGC_M_CSR0_H_REQPKT; /* Clear all of the error bits first */ ep->csr[1] &= ~(MGC_M_RXCSR_H_ERROR | MGC_M_RXCSR_H_RXSTALL | MGC_M_RXCSR_DATAERROR); if (!epnum) ep->csr[0] &= ~(MGC_M_CSR0_H_ERROR | MGC_M_CSR0_H_RXSTALL | MGC_M_CSR0_H_NAKTIMEOUT | MGC_M_CSR0_H_NO_PING); ep->csr[1] |= MGC_M_RXCSR_FIFOFULL | MGC_M_RXCSR_RXPKTRDY; if (!epnum) ep->csr[0] |= MGC_M_CSR0_RXPKTRDY; musb_rx_intr_set(s, epnum, 1); return; } /* The driver sets maxp[1] to 64 or less because it knows the hardware * FIFO is this deep. Bigger packets get split in * usb_generic_handle_packet but we can also do the splitting locally * for performance. It turns out we can also have a bigger FIFO and * ignore the limit set in ep->maxp[1]. The Linux MUSB driver deals * OK with single packets of even 32KB and we avoid splitting, however * usb_msd.c sometimes sends a packet bigger than what Linux expects * (e.g. 8192 bytes instead of 4096) and we get an OVERRUN. Splitting * hides this overrun from Linux. Up to 4096 everything is fine * though. Currently this is disabled. * * XXX: mind ep->fifosize. */ total = MIN(ep->maxp[1] & 0x3ff, sizeof(s->buf)); #ifdef SETUPLEN_HACK /* Why should *we* do that instead of Linux? */ if (!epnum) { if (ep->packey[0].p.devaddr == 2) { total = MIN(s->setup_len, 8); } else { total = MIN(s->setup_len, 64); } s->setup_len -= total; } #endif return musb_packet(s, ep, epnum, USB_TOKEN_IN, total, musb_rx_packet_complete, 1); }"} {"target": 0, "idx": 292, "func": "static uint64_t sp804_read(void *opaque, target_phys_addr_t offset, unsigned size) { sp804_state *s = (sp804_state *)opaque; if (offset < 0x20) { return arm_timer_read(s->timer[0], offset); } if (offset < 0x40) { return arm_timer_read(s->timer[1], offset - 0x20); } /* TimerPeriphID */ if (offset >= 0xfe0 && offset <= 0xffc) { return sp804_ids[(offset - 0xfe0) >> 2]; } switch (offset) { /* Integration Test control registers, which we won't support */ case 0xf00: /* TimerITCR */ case 0xf04: /* TimerITOP (strictly write only but..) */ return 0; } hw_error(\"%s: Bad offset %x\\n\", __func__, (int)offset); return 0; }"} {"target": 0, "idx": 298, "func": "static void timerlist_rearm(QEMUTimerList *timer_list) { /* Interrupt execution to force deadline recalculation. 
*/ if (timer_list->clock->type == QEMU_CLOCK_VIRTUAL) { qemu_start_warp_timer(); } timerlist_notify(timer_list); }"} {"target": 0, "idx": 300, "func": "CharDriverState *text_console_init(DisplayState *ds, const char *p) { CharDriverState *chr; TextConsole *s; unsigned width; unsigned height; static int color_inited; chr = qemu_mallocz(sizeof(CharDriverState)); if (!chr) return NULL; s = new_console(ds, TEXT_CONSOLE); if (!s) { free(chr); return NULL; } if (!p) p = DEFAULT_MONITOR_SIZE; chr->opaque = s; chr->chr_write = console_puts; chr->chr_send_event = console_send_event; s->chr = chr; s->out_fifo.buf = s->out_fifo_buf; s->out_fifo.buf_size = sizeof(s->out_fifo_buf); s->kbd_timer = qemu_new_timer(rt_clock, kbd_send_chars, s); if (!color_inited) { color_inited = 1; console_color_init(s->ds); } s->y_displayed = 0; s->y_base = 0; s->total_height = DEFAULT_BACKSCROLL; s->x = 0; s->y = 0; width = s->ds->width; height = s->ds->height; if (p != 0) { width = strtoul(p, (char **)&p, 10); if (*p == 'C') { p++; width *= FONT_WIDTH; } if (*p == 'x') { p++; height = strtoul(p, (char **)&p, 10); if (*p == 'C') { p++; height *= FONT_HEIGHT; } } } s->g_width = width; s->g_height = height; s->hw_invalidate = text_console_invalidate; s->hw_text_update = text_console_update; s->hw = s; /* Set text attribute defaults */ s->t_attrib_default.bold = 0; s->t_attrib_default.uline = 0; s->t_attrib_default.blink = 0; s->t_attrib_default.invers = 0; s->t_attrib_default.unvisible = 0; s->t_attrib_default.fgcol = COLOR_WHITE; s->t_attrib_default.bgcol = COLOR_BLACK; /* set current text attributes to default */ s->t_attrib = s->t_attrib_default; text_console_resize(s); qemu_chr_reset(chr); return chr; }"} {"target": 0, "idx": 301, "func": "static void tcg_liveness_analysis(TCGContext *s) { uint8_t *dead_temps, *mem_temps; int oi, oi_prev, nb_ops; nb_ops = s->gen_next_op_idx; s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t)); s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t)); dead_temps = tcg_malloc(s->nb_temps); mem_temps = tcg_malloc(s->nb_temps); tcg_la_func_end(s, dead_temps, mem_temps); for (oi = s->gen_last_op_idx; oi >= 0; oi = oi_prev) { int i, nb_iargs, nb_oargs; TCGOpcode opc_new, opc_new2; bool have_opc_new2; uint16_t dead_args; uint8_t sync_args; TCGArg arg; TCGOp * const op = &s->gen_op_buf[oi]; TCGArg * const args = &s->gen_opparam_buf[op->args]; TCGOpcode opc = op->opc; const TCGOpDef *def = &tcg_op_defs[opc]; oi_prev = op->prev; switch (opc) { case INDEX_op_call: { int call_flags; nb_oargs = op->callo; nb_iargs = op->calli; call_flags = args[nb_oargs + nb_iargs + 1]; /* pure functions can be removed if their result is unused */ if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) { for (i = 0; i < nb_oargs; i++) { arg = args[i]; if (!dead_temps[arg] || mem_temps[arg]) { goto do_not_remove_call; } } goto do_remove; } else { do_not_remove_call: /* output args are dead */ dead_args = 0; sync_args = 0; for (i = 0; i < nb_oargs; i++) { arg = args[i]; if (dead_temps[arg]) { dead_args |= (1 << i); } if (mem_temps[arg]) { sync_args |= (1 << i); } dead_temps[arg] = 1; mem_temps[arg] = 0; } if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) { /* globals should be synced to memory */ memset(mem_temps, 1, s->nb_globals); } if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS | TCG_CALL_NO_READ_GLOBALS))) { /* globals should go back to memory */ memset(dead_temps, 1, s->nb_globals); } /* input args are live */ for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { arg = args[i]; if (arg != TCG_CALL_DUMMY_ARG) { if 
(dead_temps[arg]) { dead_args |= (1 << i); } dead_temps[arg] = 0; } } s->op_dead_args[oi] = dead_args; s->op_sync_args[oi] = sync_args; } } break; case INDEX_op_debug_insn_start: break; case INDEX_op_discard: /* mark the temporary as dead */ dead_temps[args[0]] = 1; mem_temps[args[0]] = 0; break; case INDEX_op_add2_i32: opc_new = INDEX_op_add_i32; goto do_addsub2; case INDEX_op_sub2_i32: opc_new = INDEX_op_sub_i32; goto do_addsub2; case INDEX_op_add2_i64: opc_new = INDEX_op_add_i64; goto do_addsub2; case INDEX_op_sub2_i64: opc_new = INDEX_op_sub_i64; do_addsub2: nb_iargs = 4; nb_oargs = 2; /* Test if the high part of the operation is dead, but not the low part. The result can be optimized to a simple add or sub. This happens often for x86_64 guest when the cpu mode is set to 32 bit. */ if (dead_temps[args[1]] && !mem_temps[args[1]]) { if (dead_temps[args[0]] && !mem_temps[args[0]]) { goto do_remove; } /* Replace the opcode and adjust the args in place, leaving 3 unused args at the end. */ op->opc = opc = opc_new; args[1] = args[2]; args[2] = args[4]; /* Fall through and mark the single-word operation live. */ nb_iargs = 2; nb_oargs = 1; } goto do_not_remove; case INDEX_op_mulu2_i32: opc_new = INDEX_op_mul_i32; opc_new2 = INDEX_op_muluh_i32; have_opc_new2 = TCG_TARGET_HAS_muluh_i32; goto do_mul2; case INDEX_op_muls2_i32: opc_new = INDEX_op_mul_i32; opc_new2 = INDEX_op_mulsh_i32; have_opc_new2 = TCG_TARGET_HAS_mulsh_i32; goto do_mul2; case INDEX_op_mulu2_i64: opc_new = INDEX_op_mul_i64; opc_new2 = INDEX_op_muluh_i64; have_opc_new2 = TCG_TARGET_HAS_muluh_i64; goto do_mul2; case INDEX_op_muls2_i64: opc_new = INDEX_op_mul_i64; opc_new2 = INDEX_op_mulsh_i64; have_opc_new2 = TCG_TARGET_HAS_mulsh_i64; goto do_mul2; do_mul2: nb_iargs = 2; nb_oargs = 2; if (dead_temps[args[1]] && !mem_temps[args[1]]) { if (dead_temps[args[0]] && !mem_temps[args[0]]) { /* Both parts of the operation are dead. */ goto do_remove; } /* The high part of the operation is dead; generate the low. */ op->opc = opc = opc_new; args[1] = args[2]; args[2] = args[3]; } else if (have_opc_new2 && dead_temps[args[0]] && !mem_temps[args[0]]) { /* The low part of the operation is dead; generate the high. */ op->opc = opc = opc_new2; args[0] = args[1]; args[1] = args[2]; args[2] = args[3]; } else { goto do_not_remove; } /* Mark the single-word operation live. */ nb_oargs = 1; goto do_not_remove; default: /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */ nb_iargs = def->nb_iargs; nb_oargs = def->nb_oargs; /* Test if the operation can be removed because all its outputs are dead. 
We assume that nb_oargs == 0 implies side effects */ if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) { for (i = 0; i < nb_oargs; i++) { arg = args[i]; if (!dead_temps[arg] || mem_temps[arg]) { goto do_not_remove; } } do_remove: tcg_op_remove(s, op); } else { do_not_remove: /* output args are dead */ dead_args = 0; sync_args = 0; for (i = 0; i < nb_oargs; i++) { arg = args[i]; if (dead_temps[arg]) { dead_args |= (1 << i); } if (mem_temps[arg]) { sync_args |= (1 << i); } dead_temps[arg] = 1; mem_temps[arg] = 0; } /* if end of basic block, update */ if (def->flags & TCG_OPF_BB_END) { tcg_la_bb_end(s, dead_temps, mem_temps); } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { /* globals should be synced to memory */ memset(mem_temps, 1, s->nb_globals); } /* input args are live */ for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { arg = args[i]; if (dead_temps[arg]) { dead_args |= (1 << i); } dead_temps[arg] = 0; } s->op_dead_args[oi] = dead_args; s->op_sync_args[oi] = sync_args; } break; } } }"} {"target": 0, "idx": 311, "func": "bool error_is_type(Error *err, const char *fmt) { const char *error_class; char *ptr; char *end; if (!err) { return false; } ptr = strstr(fmt, \"'class': '\"); assert(ptr != NULL); ptr += strlen(\"'class': '\"); end = strchr(ptr, '\\''); assert(end != NULL); error_class = error_get_field(err, \"class\"); if (strlen(error_class) != end - ptr) { return false; } return strncmp(ptr, error_class, end - ptr) == 0; }"} {"target": 0, "idx": 319, "func": "static inline int onenand_load_main(OneNANDState *s, int sec, int secn, void *dest) { if (s->bdrv_cur) return bdrv_read(s->bdrv_cur, sec, dest, secn) < 0; else if (sec + secn > s->secs_cur) return 1; memcpy(dest, s->current + (sec << 9), secn << 9); return 0; }"} {"target": 1, "idx": 342, "func": "static void generate_offset_lut(DiracGolombLUT *lut, int off) { int idx; for (idx = 0; idx < LUT_SIZE; idx++) { DiracGolombLUT *l = &lut[idx]; INIT_RESIDUE(res); SET_RESIDUE(res, idx, LUT_BITS); l->preamble = CONVERT_TO_RESIDUE(res >> (RSIZE_BITS - off), off); l->preamble_bits = off; l->sign = ((l->preamble >> (RSIZE_BITS - l->preamble_bits)) & 1) ? 
-1 : +1; search_for_golomb(l, res << off, LUT_BITS - off); } }"} {"target": 1, "idx": 345, "func": "vmxnet3_pop_next_tx_descr(VMXNET3State *s, int qidx, struct Vmxnet3_TxDesc *txd, uint32_t *descr_idx) { Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring; PCIDevice *d = PCI_DEVICE(s); vmxnet3_ring_read_curr_cell(d, ring, txd); if (txd->gen == vmxnet3_ring_curr_gen(ring)) { /* Only read after generation field verification */ smp_rmb(); /* Re-read to be sure we got the latest version */ vmxnet3_ring_read_curr_cell(d, ring, txd); VMXNET3_RING_DUMP(VMW_RIPRN, \"TX\", qidx, ring); *descr_idx = vmxnet3_ring_curr_cell_idx(ring); vmxnet3_inc_tx_consumption_counter(s, qidx); return true; } return false; }"} {"target": 1, "idx": 346, "func": "static void init_excp_BookE (CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000000; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000000; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000000; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000000; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000000; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000000; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000000; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000000; env->excp_vectors[POWERPC_EXCP_APU] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000000; env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000000; env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000; env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000; env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000; env->excp_prefix = 0x00000000; env->ivor_mask = 0x0000FFE0; env->ivpr_mask = 0xFFFF0000; /* Hardware reset vector */ env->hreset_vector = 0xFFFFFFFCUL; #endif }"} {"target": 1, "idx": 354, "func": "DISAS_INSN(divw) { TCGv reg; TCGv tmp; TCGv src; int sign; sign = (insn & 0x100) != 0; reg = DREG(insn, 9); if (sign) { tcg_gen_ext16s_i32(QREG_DIV1, reg); } else { tcg_gen_ext16u_i32(QREG_DIV1, reg); } SRC_EA(env, src, OS_WORD, sign, NULL); tcg_gen_mov_i32(QREG_DIV2, src); if (sign) { gen_helper_divs(cpu_env, tcg_const_i32(1)); } else { gen_helper_divu(cpu_env, tcg_const_i32(1)); } tmp = tcg_temp_new(); src = tcg_temp_new(); tcg_gen_ext16u_i32(tmp, QREG_DIV1); tcg_gen_shli_i32(src, QREG_DIV2, 16); tcg_gen_or_i32(reg, tmp, src); set_cc_op(s, CC_OP_FLAGS); }"} {"target": 1, "idx": 355, "func": "static int msrle_decode_8_16_24_32(AVCodecContext *avctx, AVPicture *pic, int depth, const uint8_t *data, int srcsize) { uint8_t *output, *output_end; const uint8_t* src = data; int p1, p2, line=avctx->height, pos=0, i; uint16_t pix16; uint32_t pix32; output = pic->data[0] + (avctx->height - 1) * pic->linesize[0]; output_end = pic->data[0] + (avctx->height) * pic->linesize[0]; while(src < data + srcsize) { p1 = *src++; if(p1 == 0) { //Escape code p2 = *src++; if(p2 == 0) { //End-of-line output = pic->data[0] + (--line) * pic->linesize[0]; if (line < 0){ av_log(avctx, AV_LOG_ERROR, \"Next line is beyond picture bounds\\n\"); return -1; } pos = 0; continue; } else if(p2 == 1) { //End-of-picture return 0; } else if(p2 == 2) { //Skip p1 = *src++; p2 = *src++; line -= p2; if (line < 0){ av_log(avctx, AV_LOG_ERROR, \"Skip beyond picture bounds\\n\"); return -1; } pos += p1; output = pic->data[0] + line * pic->linesize[0] + pos * (depth >> 3); continue; } // Copy data if (output + p2 * (depth >> 3) > output_end) { src += p2 * (depth >> 3); continue; } if ((depth == 8) || (depth == 24)) { for(i = 
0; i < p2 * (depth >> 3); i++) { *output++ = *src++; } // RLE8 copy is actually padded - and runs are not! if(depth == 8 && (p2 & 1)) { src++; } } else if (depth == 16) { for(i = 0; i < p2; i++) { pix16 = AV_RL16(src); src += 2; *(uint16_t*)output = pix16; output += 2; } } else if (depth == 32) { for(i = 0; i < p2; i++) { pix32 = AV_RL32(src); src += 4; *(uint32_t*)output = pix32; output += 4; } } pos += p2; } else { //Run of pixels uint8_t pix[3]; //original pixel switch(depth){ case 8: pix[0] = *src++; break; case 16: pix16 = AV_RL16(src); src += 2; break; case 24: pix[0] = *src++; pix[1] = *src++; pix[2] = *src++; break; case 32: pix32 = AV_RL32(src); src += 4; break; } if (output + p1 * (depth >> 3) > output_end) continue; for(i = 0; i < p1; i++) { switch(depth){ case 8: *output++ = pix[0]; break; case 16: *(uint16_t*)output = pix16; output += 2; break; case 24: *output++ = pix[0]; *output++ = pix[1]; *output++ = pix[2]; break; case 32: *(uint32_t*)output = pix32; output += 4; break; } } pos += p1; } } av_log(avctx, AV_LOG_WARNING, \"MS RLE warning: no End-of-picture code\\n\"); return 0; }"} {"target": 1, "idx": 368, "func": "static void RENAME(vertical_compose53iL0)(uint8_t *_b0, uint8_t *_b1, uint8_t *_b2, int width) { int i; TYPE *b0 = (TYPE *)_b0; TYPE *b1 = (TYPE *)_b1; TYPE *b2 = (TYPE *)_b2; for (i = 0; i < width; i++) b1[i] -= (b0[i] + b2[i] + 2) >> 2; }"} {"target": 1, "idx": 378, "func": "static void handle_user_command(Monitor *mon, const char *cmdline) { QDict *qdict; const mon_cmd_t *cmd; qdict = qdict_new(); cmd = monitor_parse_command(mon, cmdline, 0, mon->cmd_table, qdict); if (!cmd) goto out; if (handler_is_async(cmd)) { user_async_cmd_handler(mon, cmd, qdict); } else if (handler_is_qobject(cmd)) { QObject *data = NULL; /* XXX: ignores the error code */ cmd->mhandler.cmd_new(mon, qdict, &data); assert(!monitor_has_error(mon)); if (data) { cmd->user_print(mon, data); qobject_decref(data); } } else { cmd->mhandler.cmd(mon, qdict); } out: QDECREF(qdict); }"} {"target": 1, "idx": 390, "func": "static void gen_exception_return(DisasContext *s, TCGv pc) { TCGv tmp; store_reg(s, 15, pc); tmp = load_cpu_field(spsr); gen_set_cpsr(tmp, 0xffffffff); dead_tmp(tmp); s->is_jmp = DISAS_UPDATE; }"} {"target": 1, "idx": 395, "func": "static int fourxm_read_packet(AVFormatContext *s, AVPacket *pkt) { FourxmDemuxContext *fourxm = s->priv_data; ByteIOContext *pb = &s->pb; unsigned int fourcc_tag; unsigned int size, out_size; int ret = 0; int track_number; int packet_read = 0; unsigned char header[8]; int64_t pts_inc; int audio_frame_count; while (!packet_read) { if ((ret = get_buffer(&s->pb, header, 8)) < 0) return ret; fourcc_tag = LE_32(&header[0]); size = LE_32(&header[4]); if (url_feof(pb)) return AVERROR_IO; switch (fourcc_tag) { case LIST_TAG: /* this is a good time to bump the video pts */ fourxm->video_pts += fourxm->video_pts_inc; /* skip the LIST-* tag and move on to the next fourcc */ get_le32(pb); break; case ifrm_TAG: case pfrm_TAG: case cfrm_TAG:{ /* allocate 8 more bytes than 'size' to account for fourcc * and size */ if (av_new_packet(pkt, size + 8)) return AVERROR_IO; pkt->stream_index = fourxm->video_stream_index; pkt->pts = fourxm->video_pts; memcpy(pkt->data, header, 8); ret = get_buffer(&s->pb, &pkt->data[8], size); if (ret < 0) av_free_packet(pkt); else packet_read = 1; break; } case snd__TAG: track_number = get_le32(pb); out_size= get_le32(pb); size-=8; if (track_number == fourxm->selected_track) { if (av_new_packet(pkt, size)) return AVERROR_IO; pkt->stream_index 
= fourxm->tracks[fourxm->selected_track].stream_index; pkt->pts = fourxm->audio_pts; ret = get_buffer(&s->pb, pkt->data, size); if (ret < 0) av_free_packet(pkt); else packet_read = 1; /* pts accounting */ audio_frame_count = size; if (fourxm->tracks[fourxm->selected_track].adpcm) audio_frame_count -= 2 * (fourxm->tracks[fourxm->selected_track].channels); audio_frame_count /= fourxm->tracks[fourxm->selected_track].channels; if (fourxm->tracks[fourxm->selected_track].adpcm) audio_frame_count *= 2; else audio_frame_count /= (fourxm->tracks[fourxm->selected_track].bits / 8); pts_inc = audio_frame_count; pts_inc *= 90000; pts_inc /= fourxm->tracks[fourxm->selected_track].sample_rate; fourxm->audio_pts += pts_inc; } else { url_fseek(pb, size, SEEK_CUR); } break; default: url_fseek(pb, size, SEEK_CUR); break; } } return ret; }"} {"target": 1, "idx": 396, "func": "static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin) { BdrvChild *child, *tmp; bool waited; /* Ensure any pending metadata writes are submitted to bs->file. */ bdrv_drain_invoke(bs, begin); /* Wait for drained requests to finish */ waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0); QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) { BlockDriverState *bs = child->bs; bool in_main_loop = qemu_get_current_aio_context() == qemu_get_aio_context(); assert(bs->refcnt > 0); if (in_main_loop) { /* In case the recursive bdrv_drain_recurse processes a * block_job_defer_to_main_loop BH and modifies the graph, * let's hold a reference to bs until we are done. * * IOThread doesn't have such a BH, and it is not safe to call * bdrv_unref without BQL, so skip doing it there. */ bdrv_ref(bs); } waited |= bdrv_drain_recurse(bs, begin); if (in_main_loop) { bdrv_unref(bs); } } return waited; }"} {"target": 0, "idx": 420, "func": "static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos, int *current) { int num_pos_channels = 0; int first_cpe = 0; int sce_parity = 0; int i; for (i = *current; i < tags; i++) { if (layout_map[i][2] != pos) break; if (layout_map[i][0] == TYPE_CPE) { if (sce_parity) { if (pos == AAC_CHANNEL_FRONT || !first_cpe) { sce_parity = 0; } else { return -1; } } num_pos_channels += 2; first_cpe = 1; } else { num_pos_channels++; sce_parity ^= 1; } } if (sce_parity && ((pos == AAC_CHANNEL_FRONT && first_cpe) || pos == AAC_CHANNEL_SIDE)) return -1; *current = i; return num_pos_channels; }"} {"target": 1, "idx": 427, "func": "uint64_t helper_sublv (uint64_t op1, uint64_t op2) { uint64_t tmp = op1; op1 = (uint32_t)(op1 - op2); if (unlikely(((~tmp) ^ op1 ^ (-1UL)) & ((~tmp) ^ op2) & (1UL << 31))) { helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW); } return op1; }"} {"target": 1, "idx": 443, "func": "static int decode_slice(struct AVCodecContext *avctx, void *arg) { H264Context *h = *(void **)arg; int lf_x_start = h->mb_x; h->mb_skip_run = -1; av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * h->linesize * ((scan8[15] - scan8[0]) >> 3)); h->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME || avctx->codec_id != AV_CODEC_ID_H264 || (CONFIG_GRAY && (h->flags & CODEC_FLAG_GRAY)); if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME) { const int start_i = av_clip(h->resync_mb_x + h->resync_mb_y * h->mb_width, 0, h->mb_num - 1); if (start_i) { int prev_status = h->er.error_status_table[h->er.mb_index2xy[start_i - 1]]; prev_status &= ~ VP_START; if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END)) 
h->er.error_occurred = 1; } } if (h->pps.cabac) { /* realign */ align_get_bits(&h->gb); /* init cabac */ ff_init_cabac_decoder(&h->cabac, h->gb.buffer + get_bits_count(&h->gb) / 8, (get_bits_left(&h->gb) + 7) / 8); ff_h264_init_cabac_states(h); for (;;) { // START_TIMER int ret = ff_h264_decode_mb_cabac(h); int eos; // STOP_TIMER(\"decode_mb_cabac\") if (ret >= 0) ff_h264_hl_decode_mb(h); // FIXME optimal? or let mb_decode decode 16x32 ? if (ret >= 0 && FRAME_MBAFF(h)) { h->mb_y++; ret = ff_h264_decode_mb_cabac(h); if (ret >= 0) ff_h264_hl_decode_mb(h); h->mb_y--; } eos = get_cabac_terminate(&h->cabac); if ((h->workaround_bugs & FF_BUG_TRUNCATED) && h->cabac.bytestream > h->cabac.bytestream_end + 2) { er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1, h->mb_y, ER_MB_END); if (h->mb_x >= lf_x_start) loop_filter(h, lf_x_start, h->mb_x + 1); return 0; } if (h->cabac.bytestream > h->cabac.bytestream_end + 2 ) av_log(h->avctx, AV_LOG_DEBUG, \"bytestream overread %td\\n\", h->cabac.bytestream_end - h->cabac.bytestream); if (ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 4) { av_log(h->avctx, AV_LOG_ERROR, \"error while decoding MB %d %d, bytestream (%td)\\n\", h->mb_x, h->mb_y, h->cabac.bytestream_end - h->cabac.bytestream); er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x, h->mb_y, ER_MB_ERROR); return -1; } if (++h->mb_x >= h->mb_width) { loop_filter(h, lf_x_start, h->mb_x); h->mb_x = lf_x_start = 0; decode_finish_row(h); ++h->mb_y; if (FIELD_OR_MBAFF_PICTURE(h)) { ++h->mb_y; if (FRAME_MBAFF(h) && h->mb_y < h->mb_height) predict_field_decoding_flag(h); } } if (eos || h->mb_y >= h->mb_height) { tprintf(h->avctx, \"slice end %d %d\\n\", get_bits_count(&h->gb), h->gb.size_in_bits); er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1, h->mb_y, ER_MB_END); if (h->mb_x > lf_x_start) loop_filter(h, lf_x_start, h->mb_x); return 0; } } } else { for (;;) { int ret = ff_h264_decode_mb_cavlc(h); if (ret >= 0) ff_h264_hl_decode_mb(h); // FIXME optimal? or let mb_decode decode 16x32 ? 
if (ret >= 0 && FRAME_MBAFF(h)) { h->mb_y++; ret = ff_h264_decode_mb_cavlc(h); if (ret >= 0) ff_h264_hl_decode_mb(h); h->mb_y--; } if (ret < 0) { av_log(h->avctx, AV_LOG_ERROR, \"error while decoding MB %d %d\\n\", h->mb_x, h->mb_y); er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x, h->mb_y, ER_MB_ERROR); return -1; } if (++h->mb_x >= h->mb_width) { loop_filter(h, lf_x_start, h->mb_x); h->mb_x = lf_x_start = 0; decode_finish_row(h); ++h->mb_y; if (FIELD_OR_MBAFF_PICTURE(h)) { ++h->mb_y; if (FRAME_MBAFF(h) && h->mb_y < h->mb_height) predict_field_decoding_flag(h); } if (h->mb_y >= h->mb_height) { tprintf(h->avctx, \"slice end %d %d\\n\", get_bits_count(&h->gb), h->gb.size_in_bits); if ( get_bits_left(&h->gb) == 0 || get_bits_left(&h->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) { er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1, h->mb_y, ER_MB_END); return 0; } else { er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x, h->mb_y, ER_MB_END); return -1; } } } if (get_bits_left(&h->gb) <= 0 && h->mb_skip_run <= 0) { tprintf(h->avctx, \"slice end %d %d\\n\", get_bits_count(&h->gb), h->gb.size_in_bits); if (get_bits_left(&h->gb) == 0) { er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1, h->mb_y, ER_MB_END); if (h->mb_x > lf_x_start) loop_filter(h, lf_x_start, h->mb_x); return 0; } else { er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x, h->mb_y, ER_MB_ERROR); return -1; } } } } }"} {"target": 1, "idx": 446, "func": "static int img_read_packet(AVFormatContext *s1, AVPacket *pkt) { VideoData *s = s1->priv_data; char filename[1024]; int ret; ByteIOContext f1, *f; if (get_frame_filename(filename, sizeof(filename), s->path, s->img_number) < 0) return -EIO; if (!s->is_pipe) { f = &f1; if (url_fopen(f, filename, URL_RDONLY) < 0) return -EIO; } else { f = &s1->pb; if (url_feof(f)) return -EIO; } av_new_packet(pkt, s->img_size); pkt->stream_index = 0; switch(s->img_fmt) { case IMGFMT_PGMYUV: ret = pgm_read(s, f, pkt->data, pkt->size, 1); break; case IMGFMT_PGM: ret = pgm_read(s, f, pkt->data, pkt->size, 0); break; case IMGFMT_YUV: ret = yuv_read(s, filename, pkt->data, pkt->size); break; case IMGFMT_PPM: ret = ppm_read(s, f, pkt->data, pkt->size); break; default: return -EIO; } if (!s->is_pipe) { url_fclose(f); } if (ret < 0) { av_free_packet(pkt); return -EIO; /* signal EOF */ } else { s->img_number++; return 0; } }"} {"target": 1, "idx": 449, "func": "void configure_alarms(char const *opt) { int i; int cur = 0; int count = ARRAY_SIZE(alarm_timers) - 1; char *arg; char *name; struct qemu_alarm_timer tmp; if (!strcmp(opt, \"?\")) { show_available_alarms(); exit(0); } arg = g_strdup(opt); /* Reorder the array */ name = strtok(arg, \",\"); while (name) { for (i = 0; i < count && alarm_timers[i].name; i++) { if (!strcmp(alarm_timers[i].name, name)) break; } if (i == count) { fprintf(stderr, \"Unknown clock %s\\n\", name); goto next; } if (i < cur) /* Ignore */ goto next; /* Swap */ tmp = alarm_timers[i]; alarm_timers[i] = alarm_timers[cur]; alarm_timers[cur] = tmp; cur++; next: name = strtok(NULL, \",\"); } g_free(arg); if (cur) { /* Disable remaining timers */ for (i = cur; i < count; i++) alarm_timers[i].name = NULL; } else { show_available_alarms(); exit(1); } }"} {"target": 0, "idx": 452, "func": "static int motion_inter_4v_block (bit_buffer_t *bitbuf, uint8_t *current, uint8_t *previous, int pitch, svq1_pmv_t *motion,int x, int y) { uint8_t *src; uint8_t *dst; svq1_pmv_t mv; svq1_pmv_t *pmv[4]; int i, result; /* predict and decode motion vector (0) 
*/ pmv[0] = &motion[0]; pmv[1] = &motion[(x / 8) + 2]; pmv[2] = &motion[(x / 8) + 4]; if (y == 0) { pmv[1] = pmv[0]; pmv[2] = pmv[0]; } result = decode_motion_vector (bitbuf, &mv, pmv); if (result != 0) return result; /* predict and decode motion vector (1) */ pmv[0] = &mv; pmv[1] = &motion[(x / 8) + 3]; if (y == 0) { pmv[1] = pmv[0]; pmv[2] = pmv[0]; } result = decode_motion_vector (bitbuf, &motion[0], pmv); if (result != 0) return result; /* predict and decode motion vector (2) */ pmv[1] = &motion[0]; pmv[2] = &motion[(x / 8) + 1]; result = decode_motion_vector (bitbuf, &motion[(x / 8) + 2], pmv); if (result != 0) return result; /* predict and decode motion vector (3) */ pmv[2] = &motion[(x / 8) + 2]; pmv[3] = &motion[(x / 8) + 3]; result = decode_motion_vector (bitbuf, pmv[3], pmv); if (result != 0) return result; /* form predictions */ for (i=0; i < 4; i++) { src = &previous[(x + (pmv[i]->x >> 1)) + (y + (pmv[i]->y >> 1))*pitch]; dst = current; put_pixels_tab[((pmv[i]->y & 1) << 1) | (pmv[i]->x & 1)](dst,src,pitch,8); /* select next block */ if (i & 1) { current += 8*(pitch - 1); previous += 8*(pitch - 1); } else { current += 8; previous += 8; } } return 0; }"} {"target": 0, "idx": 497, "func": "void ide_sector_read(IDEState *s) { int64_t sector_num; int ret, n; s->status = READY_STAT | SEEK_STAT; s->error = 0; /* not needed by IDE spec, but needed by Windows */ sector_num = ide_get_sector(s); n = s->nsector; if (n == 0) { /* no more sector to read from disk */ ide_transfer_stop(s); } else { #if defined(DEBUG_IDE) printf(\"read sector=%\" PRId64 \"\\n\", sector_num); #endif if (n > s->req_nb_sectors) n = s->req_nb_sectors; bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ); ret = bdrv_read(s->bs, sector_num, s->io_buffer, n); bdrv_acct_done(s->bs, &s->acct); if (ret != 0) { if (ide_handle_rw_error(s, -ret, BM_STATUS_PIO_RETRY | BM_STATUS_RETRY_READ)) { return; } } ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_read); ide_set_irq(s->bus); ide_set_sector(s, sector_num + n); s->nsector -= n; } }"} {"target": 0, "idx": 498, "func": "static void cpu_ioreq_pio(ioreq_t *req) { int i; if (req->dir == IOREQ_READ) { if (!req->data_is_ptr) { req->data = do_inp(req->addr, req->size); } else { uint32_t tmp; for (i = 0; i < req->count; i++) { tmp = do_inp(req->addr, req->size); write_phys_req_item(req->data, req, i, &tmp); } } } else if (req->dir == IOREQ_WRITE) { if (!req->data_is_ptr) { do_outp(req->addr, req->size, req->data); } else { for (i = 0; i < req->count; i++) { uint32_t tmp = 0; read_phys_req_item(req->data, req, i, &tmp); do_outp(req->addr, req->size, tmp); } } } }"} {"target": 0, "idx": 512, "func": "static void rtas_display_character(sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { uint8_t c = rtas_ld(args, 0); VIOsPAPRDevice *sdev = vty_lookup(spapr, 0); if (!sdev) { rtas_st(rets, 0, -1); } else { vty_putchars(sdev, &c, sizeof(c)); rtas_st(rets, 0, 0); } }"} {"target": 0, "idx": 514, "func": "static int xmv_read_header(AVFormatContext *s) { XMVDemuxContext *xmv = s->priv_data; AVIOContext *pb = s->pb; uint32_t file_version; uint32_t this_packet_size; uint16_t audio_track; int ret; s->ctx_flags |= AVFMTCTX_NOHEADER; avio_skip(pb, 4); /* Next packet size */ this_packet_size = avio_rl32(pb); avio_skip(pb, 4); /* Max packet size */ avio_skip(pb, 4); /* \"xobX\" */ file_version = avio_rl32(pb); if ((file_version != 4) && (file_version != 2)) avpriv_request_sample(s, \"Uncommon version 
%\"PRIu32\"\", file_version); /* Video tracks */ xmv->video_width = avio_rl32(pb); xmv->video_height = avio_rl32(pb); xmv->video_duration = avio_rl32(pb); /* Audio tracks */ xmv->audio_track_count = avio_rl16(pb); avio_skip(pb, 2); /* Unknown (padding?) */ xmv->audio = av_mallocz_array(xmv->audio_track_count, sizeof(XMVAudioPacket)); if (!xmv->audio) { ret = AVERROR(ENOMEM); goto fail; } for (audio_track = 0; audio_track < xmv->audio_track_count; audio_track++) { XMVAudioPacket *packet = &xmv->audio[audio_track]; packet->compression = avio_rl16(pb); packet->channels = avio_rl16(pb); packet->sample_rate = avio_rl32(pb); packet->bits_per_sample = avio_rl16(pb); packet->flags = avio_rl16(pb); packet->bit_rate = packet->bits_per_sample * packet->sample_rate * packet->channels; packet->block_align = XMV_BLOCK_ALIGN_SIZE * packet->channels; packet->block_samples = 64; packet->codec_id = ff_wav_codec_get_id(packet->compression, packet->bits_per_sample); packet->stream_index = -1; packet->frame_size = 0; packet->block_count = 0; /* TODO: ADPCM'd 5.1 sound is encoded in three separate streams. * Those need to be interleaved to a proper 5.1 stream. */ if (packet->flags & XMV_AUDIO_ADPCM51) av_log(s, AV_LOG_WARNING, \"Unsupported 5.1 ADPCM audio stream \" \"(0x%04X)\\n\", packet->flags); if (!packet->channels || !packet->sample_rate || packet->channels >= UINT16_MAX / XMV_BLOCK_ALIGN_SIZE) { av_log(s, AV_LOG_ERROR, \"Invalid parameters for audio track %\"PRIu16\".\\n\", audio_track); ret = AVERROR_INVALIDDATA; goto fail; } } /* Initialize the packet context */ xmv->next_packet_offset = avio_tell(pb); xmv->next_packet_size = this_packet_size - xmv->next_packet_offset; xmv->stream_count = xmv->audio_track_count + 1; return 0; fail: xmv_read_close(s); return ret; }"} {"target": 1, "idx": 521, "func": "static int bdrv_rd_badreq_sectors(BlockDriverState *bs, int64_t sector_num, int nb_sectors) { return nb_sectors < 0 || sector_num < 0 || nb_sectors > bs->total_sectors || sector_num > bs->total_sectors - nb_sectors; }"} {"target": 1, "idx": 526, "func": "static int handle_update_file_cred(int dirfd, const char *name, FsCred *credp) { int fd, ret; fd = openat(dirfd, name, O_NONBLOCK | O_NOFOLLOW); if (fd < 0) { return fd; } ret = fchmod(fd, credp->fc_mode & 07777); if (ret < 0) { goto err_out; } ret = fchownat(fd, \"\", credp->fc_uid, credp->fc_gid, AT_EMPTY_PATH); err_out: close(fd); return ret; }"} {"target": 0, "idx": 534, "func": "static av_cold int ffv1_encode_init(AVCodecContext *avctx) { FFV1Context *s = avctx->priv_data; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt); int i, j, k, m, ret; ffv1_common_init(avctx); s->version = 0; if ((avctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) || avctx->slices > 1) s->version = FFMAX(s->version, 2); if (avctx->level == 3) { s->version = 3; } if (s->ec < 0) { s->ec = (s->version >= 3); } if (s->version >= 2 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { av_log(avctx, AV_LOG_ERROR, \"Version %d requested, please set -strict experimental in \" \"order to enable it\\n\", s->version); return AVERROR(ENOSYS); } s->ac = avctx->coder_type > 0 ? 
2 : 0; s->plane_count = 3; switch (avctx->pix_fmt) { case AV_PIX_FMT_YUV444P9: case AV_PIX_FMT_YUV422P9: case AV_PIX_FMT_YUV420P9: if (!avctx->bits_per_raw_sample) s->bits_per_raw_sample = 9; case AV_PIX_FMT_YUV444P10: case AV_PIX_FMT_YUV420P10: case AV_PIX_FMT_YUV422P10: s->packed_at_lsb = 1; if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) s->bits_per_raw_sample = 10; case AV_PIX_FMT_GRAY16: case AV_PIX_FMT_YUV444P16: case AV_PIX_FMT_YUV422P16: case AV_PIX_FMT_YUV420P16: if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) { s->bits_per_raw_sample = 16; } else if (!s->bits_per_raw_sample) { s->bits_per_raw_sample = avctx->bits_per_raw_sample; } if (s->bits_per_raw_sample <= 8) { av_log(avctx, AV_LOG_ERROR, \"bits_per_raw_sample invalid\\n\"); return AVERROR_INVALIDDATA; } if (!s->ac && avctx->coder_type == -1) { av_log(avctx, AV_LOG_INFO, \"bits_per_raw_sample > 8, forcing coder 1\\n\"); s->ac = 2; } if (!s->ac) { av_log( avctx, AV_LOG_ERROR, \"bits_per_raw_sample of more than 8 needs -coder 1 currently\\n\"); return AVERROR_INVALIDDATA; } s->version = FFMAX(s->version, 1); case AV_PIX_FMT_GRAY8: case AV_PIX_FMT_YUV444P: case AV_PIX_FMT_YUV440P: case AV_PIX_FMT_YUV422P: case AV_PIX_FMT_YUV420P: case AV_PIX_FMT_YUV411P: case AV_PIX_FMT_YUV410P: s->chroma_planes = desc->nb_components < 3 ? 0 : 1; s->colorspace = 0; break; case AV_PIX_FMT_YUVA444P: case AV_PIX_FMT_YUVA422P: case AV_PIX_FMT_YUVA420P: s->chroma_planes = 1; s->colorspace = 0; s->transparency = 1; break; case AV_PIX_FMT_RGB32: s->colorspace = 1; s->transparency = 1; break; case AV_PIX_FMT_GBRP9: if (!avctx->bits_per_raw_sample) s->bits_per_raw_sample = 9; case AV_PIX_FMT_GBRP10: if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) s->bits_per_raw_sample = 10; case AV_PIX_FMT_GBRP16: if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) s->bits_per_raw_sample = 16; else if (!s->bits_per_raw_sample) s->bits_per_raw_sample = avctx->bits_per_raw_sample; s->colorspace = 1; s->chroma_planes = 1; s->version = FFMAX(s->version, 1); break; default: av_log(avctx, AV_LOG_ERROR, \"format not supported\\n\"); return AVERROR_INVALIDDATA; } if (s->transparency) { av_log( avctx, AV_LOG_WARNING, \"Storing alpha plane, this will require a recent FFV1 decoder to playback!\\n\"); } if (avctx->context_model > 1U) { av_log(avctx, AV_LOG_ERROR, \"Invalid context model %d, valid values are 0 and 1\\n\", avctx->context_model); return AVERROR(EINVAL); } if (s->ac > 1) for (i = 1; i < 256; i++) s->state_transition[i] = ffv1_ver2_state[i]; for (i = 0; i < 256; i++) { s->quant_table_count = 2; if (s->bits_per_raw_sample <= 8) { s->quant_tables[0][0][i] = ffv1_quant11[i]; s->quant_tables[0][1][i] = ffv1_quant11[i] * 11; s->quant_tables[0][2][i] = ffv1_quant11[i] * 11 * 11; s->quant_tables[1][0][i] = ffv1_quant11[i]; s->quant_tables[1][1][i] = ffv1_quant11[i] * 11; s->quant_tables[1][2][i] = ffv1_quant5[i] * 11 * 11; s->quant_tables[1][3][i] = ffv1_quant5[i] * 5 * 11 * 11; s->quant_tables[1][4][i] = ffv1_quant5[i] * 5 * 5 * 11 * 11; } else { s->quant_tables[0][0][i] = ffv1_quant9_10bit[i]; s->quant_tables[0][1][i] = ffv1_quant9_10bit[i] * 11; s->quant_tables[0][2][i] = ffv1_quant9_10bit[i] * 11 * 11; s->quant_tables[1][0][i] = ffv1_quant9_10bit[i]; s->quant_tables[1][1][i] = ffv1_quant9_10bit[i] * 11; s->quant_tables[1][2][i] = ffv1_quant5_10bit[i] * 11 * 11; s->quant_tables[1][3][i] = ffv1_quant5_10bit[i] * 5 * 11 * 11; s->quant_tables[1][4][i] = ffv1_quant5_10bit[i] * 5 * 5 * 11 * 11; } } s->context_count[0] = (11 * 11 * 11 + 
1) / 2; s->context_count[1] = (11 * 11 * 5 * 5 * 5 + 1) / 2; memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table)); for (i = 0; i < s->plane_count; i++) { PlaneContext *const p = &s->plane[i]; memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table)); p->quant_table_index = avctx->context_model; p->context_count = s->context_count[p->quant_table_index]; } if ((ret = ffv1_allocate_initial_states(s)) < 0) return ret; avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) return AVERROR(ENOMEM); avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; if (!s->transparency) s->plane_count = 2; av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift); s->picture_number = 0; if (avctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) { for (i = 0; i < s->quant_table_count; i++) { s->rc_stat2[i] = av_mallocz(s->context_count[i] * sizeof(*s->rc_stat2[i])); if (!s->rc_stat2[i]) return AVERROR(ENOMEM); } } if (avctx->stats_in) { char *p = avctx->stats_in; uint8_t best_state[256][256]; int gob_count = 0; char *next; av_assert0(s->version >= 2); for (;; ) { for (j = 0; j < 256; j++) for (i = 0; i < 2; i++) { s->rc_stat[j][i] = strtol(p, &next, 0); if (next == p) { av_log(avctx, AV_LOG_ERROR, \"2Pass file invalid at %d %d [%s]\\n\", j, i, p); return AVERROR_INVALIDDATA; } p = next; } for (i = 0; i < s->quant_table_count; i++) for (j = 0; j < s->context_count[i]; j++) { for (k = 0; k < 32; k++) for (m = 0; m < 2; m++) { s->rc_stat2[i][j][k][m] = strtol(p, &next, 0); if (next == p) { av_log(avctx, AV_LOG_ERROR, \"2Pass file invalid at %d %d %d %d [%s]\\n\", i, j, k, m, p); return AVERROR_INVALIDDATA; } p = next; } } gob_count = strtol(p, &next, 0); if (next == p || gob_count <= 0) { av_log(avctx, AV_LOG_ERROR, \"2Pass file invalid\\n\"); return AVERROR_INVALIDDATA; } p = next; while (*p == '\\n' || *p == ' ') p++; if (p[0] == 0) break; } sort_stt(s, s->state_transition); find_best_state(best_state, s->state_transition); for (i = 0; i < s->quant_table_count; i++) { for (j = 0; j < s->context_count[i]; j++) for (k = 0; k < 32; k++) { double p = 128; if (s->rc_stat2[i][j][k][0] + s->rc_stat2[i][j][k][1]) { p = 256.0 * s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0] + s->rc_stat2[i][j][k][1]); } s->initial_states[i][j][k] = best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0] + s->rc_stat2[i][j][k][1]) / gob_count, 0, 255)]; } } } if (s->version > 1) { for (s->num_v_slices = 2; s->num_v_slices < 9; s->num_v_slices++) for (s->num_h_slices = s->num_v_slices; s->num_h_slices < 2 * s->num_v_slices; s->num_h_slices++) if (avctx->slices == s->num_h_slices * s->num_v_slices && avctx->slices <= 64 || !avctx->slices) goto slices_ok; av_log(avctx, AV_LOG_ERROR, \"Unsupported number %d of slices requested, please specify a \" \"supported number with -slices (ex:4,6,9,12,16, ...)\\n\", avctx->slices); return AVERROR(ENOSYS); slices_ok: write_extradata(s); } if ((ret = ffv1_init_slice_contexts(s)) < 0) return ret; if ((ret = init_slices_state(s)) < 0) return ret; #define STATS_OUT_SIZE 1024 * 1024 * 6 if (avctx->flags & CODEC_FLAG_PASS1) { avctx->stats_out = av_mallocz(STATS_OUT_SIZE); for (i = 0; i < s->quant_table_count; i++) for (j = 0; j < s->slice_count; j++) { FFV1Context *sf = s->slice_context[j]; av_assert0(!sf->rc_stat2[i]); sf->rc_stat2[i] = av_mallocz(s->context_count[i] * sizeof(*sf->rc_stat2[i])); if (!sf->rc_stat2[i]) return AVERROR(ENOMEM); } } return 0; }"} {"target": 1, "idx": 536, "func": "int 
qemu_devtree_setprop(void *fdt, const char *node_path, const char *property, void *val_array, int size) { int offset; offset = fdt_path_offset(fdt, node_path); if (offset < 0) return offset; return fdt_setprop(fdt, offset, property, val_array, size); }"} {"target": 0, "idx": 555, "func": "static int mp3lame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { LAMEContext *s = avctx->priv_data; MPADecodeHeader hdr; int len, ret, ch; int lame_result; uint32_t h; if (frame) { switch (avctx->sample_fmt) { case AV_SAMPLE_FMT_S16P: ENCODE_BUFFER(lame_encode_buffer, int16_t, frame->data); break; case AV_SAMPLE_FMT_S32P: ENCODE_BUFFER(lame_encode_buffer_int, int32_t, frame->data); break; case AV_SAMPLE_FMT_FLTP: if (frame->linesize[0] < 4 * FFALIGN(frame->nb_samples, 8)) { av_log(avctx, AV_LOG_ERROR, \"inadequate AVFrame plane padding\\n\"); return AVERROR(EINVAL); } for (ch = 0; ch < avctx->channels; ch++) { s->fdsp.vector_fmul_scalar(s->samples_flt[ch], (const float *)frame->data[ch], 32768.0f, FFALIGN(frame->nb_samples, 8)); } ENCODE_BUFFER(lame_encode_buffer_float, float, s->samples_flt); break; default: return AVERROR_BUG; } } else { lame_result = lame_encode_flush(s->gfp, s->buffer + s->buffer_index, s->buffer_size - s->buffer_index); } if (lame_result < 0) { if (lame_result == -1) { av_log(avctx, AV_LOG_ERROR, \"lame: output buffer too small (buffer index: %d, free bytes: %d)\\n\", s->buffer_index, s->buffer_size - s->buffer_index); } return -1; } s->buffer_index += lame_result; ret = realloc_buffer(s); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"error reallocating output buffer\\n\"); return ret; } /* add current frame to the queue */ if (frame) { if ((ret = ff_af_queue_add(&s->afq, frame)) < 0) return ret; } /* Move 1 frame from the LAME buffer to the output packet, if available. We have to parse the first frame header in the output buffer to determine the frame size. 
*/ if (s->buffer_index < 4) return 0; h = AV_RB32(s->buffer); if (ff_mpa_check_header(h) < 0) { av_log(avctx, AV_LOG_ERROR, \"Invalid mp3 header at start of buffer\\n\"); return AVERROR_BUG; } if (avpriv_mpegaudio_decode_header(&hdr, h)) { av_log(avctx, AV_LOG_ERROR, \"free format output not supported\\n\"); return -1; } len = hdr.frame_size; ff_dlog(avctx, \"in:%d packet-len:%d index:%d\\n\", avctx->frame_size, len, s->buffer_index); if (len <= s->buffer_index) { if ((ret = ff_alloc_packet(avpkt, len))) { av_log(avctx, AV_LOG_ERROR, \"Error getting output packet\\n\"); return ret; } memcpy(avpkt->data, s->buffer, len); s->buffer_index -= len; memmove(s->buffer, s->buffer + len, s->buffer_index); /* Get the next frame pts/duration */ ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts, &avpkt->duration); avpkt->size = len; *got_packet_ptr = 1; } return 0; }"} {"target": 1, "idx": 578, "func": "static int tta_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; TTAContext *s = avctx->priv_data; int i; init_get_bits(&s->gb, buf, buf_size*8); { int32_t predictors[s->channels]; TTAFilter filters[s->channels]; TTARice rices[s->channels]; int cur_chan = 0, framelen = s->frame_length; int32_t *p; // FIXME: seeking s->total_frames--; if (!s->total_frames && s->last_frame_length) framelen = s->last_frame_length; // init per channel states for (i = 0; i < s->channels; i++) { predictors[i] = 0; ttafilter_init(&(filters[i]), ttafilter_configs[s->bps-1][0], ttafilter_configs[s->bps-1][1]); rice_init(&(rices[i]), 10, 10); } for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++) { int32_t *predictor = &(predictors[cur_chan]); TTAFilter *filter = &(filters[cur_chan]); TTARice *rice = &(rices[cur_chan]); uint32_t unary, depth, k; int32_t value; unary = tta_get_unary(&s->gb); if (unary == 0) { depth = 0; k = rice->k0; } else { depth = 1; k = rice->k1; unary--; } if (k) value = (unary << k) + get_bits(&s->gb, k); else value = unary; // FIXME: copy paste from original switch (depth) { case 1: rice->sum1 += value - (rice->sum1 >> 4); if (rice->k1 > 0 && rice->sum1 < shift_16[rice->k1]) rice->k1--; else if(rice->sum1 > shift_16[rice->k1 + 1]) rice->k1++; value += shift_1[rice->k0]; default: rice->sum0 += value - (rice->sum0 >> 4); if (rice->k0 > 0 && rice->sum0 < shift_16[rice->k0]) rice->k0--; else if(rice->sum0 > shift_16[rice->k0 + 1]) rice->k0++; } // extract coded value #define UNFOLD(x) (((x)&1) ? (++(x)>>1) : (-(x)>>1)) *p = UNFOLD(value); // run hybrid filter ttafilter_process(filter, p, 0); // fixed order prediction #define PRED(x, k) (int32_t)((((uint64_t)x << k) - x) >> k) switch (s->bps) { case 1: *p += PRED(*predictor, 4); break; case 2: case 3: *p += PRED(*predictor, 5); break; case 4: *p += *predictor; break; } *predictor = *p; #if 0 // extract 32bit float from last two int samples if (s->is_float && ((p - data) & 1)) { uint32_t neg = *p & 0x80000000; uint32_t hi = *(p - 1); uint32_t lo = abs(*p) - 1; hi += (hi || lo) ? 
0x3f80 : 0; // SWAP16: swap all the 16 bits *(p - 1) = (hi << 16) | SWAP16(lo) | neg; } #endif /*if ((get_bits_count(&s->gb)+7)/8 > buf_size) { av_log(NULL, AV_LOG_INFO, \"overread!!\\n\"); break; }*/ // flip channels if (cur_chan < (s->channels-1)) cur_chan++; else { // decorrelate in case of stereo integer if (!s->is_float && (s->channels > 1)) { int32_t *r = p - 1; for (*p += *r / 2; r > p - s->channels; r--) *r = *(r + 1) - *r; } cur_chan = 0; } } skip_bits(&s->gb, 32); // frame crc // convert to output buffer switch(s->bps) { case 2: { uint16_t *samples = data; for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++) { // *samples++ = (unsigned char)*p; // *samples++ = (unsigned char)(*p >> 8); *samples++ = *p; } *data_size = (uint8_t *)samples - (uint8_t *)data; break; } default: av_log(s->avctx, AV_LOG_ERROR, \"Error, only 16bit samples supported!\\n\"); } } // return get_bits_count(&s->gb)+7)/8; return buf_size; }"} {"target": 0, "idx": 583, "func": "yuv2rgba64_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes) { const int32_t *buf0 = buf[0], *buf1 = buf[1], *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], *vbuf0 = vbuf[0], *vbuf1 = vbuf[1], *abuf0 = hasAlpha ? abuf[0] : NULL, *abuf1 = hasAlpha ? abuf[1] : NULL; int yalpha1 = 4096 - yalpha; int uvalpha1 = 4096 - uvalpha; int i; int A1 = 0xffff<<14, A2 = 0xffff<<14; for (i = 0; i < ((dstW + 1) >> 1); i++) { int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14; int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14; int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14; int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14; int R, G, B; Y1 -= c->yuv2rgb_y_offset; Y2 -= c->yuv2rgb_y_offset; Y1 *= c->yuv2rgb_y_coeff; Y2 *= c->yuv2rgb_y_coeff; Y1 += 1 << 13; Y2 += 1 << 13; R = V * c->yuv2rgb_v2r_coeff; G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; if (hasAlpha) { A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 1; A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 1; A1 += 1 << 13; A2 += 1 << 13; } output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14); output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14); output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14); output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14); output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14); dest += 8; } else { output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); dest += 6; } } }"} {"target": 1, "idx": 646, "func": "static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){ #define COPY(a) bak->a= src->a COPY(allocated_edge_emu_buffer); COPY(edge_emu_buffer); COPY(me.scratchpad); COPY(me.temp); COPY(rd_scratchpad); COPY(b_scratchpad); COPY(obmc_scratchpad); COPY(me.map); COPY(me.score_map); COPY(blocks); COPY(block); COPY(start_mb_y); COPY(end_mb_y); COPY(me.map_generation); COPY(pb); COPY(dct_error_sum); COPY(dct_count[0]); COPY(dct_count[1]); COPY(ac_val_base); COPY(ac_val[0]); 
COPY(ac_val[1]); COPY(ac_val[2]); #undef COPY }"} {"target": 0, "idx": 653, "func": "static av_cold void free_frame_buffers(Indeo3DecodeContext *ctx) { int p; ctx->width= ctx->height= 0; for (p = 0; p < 3; p++) { av_freep(&ctx->planes[p].buffers[0]); av_freep(&ctx->planes[p].buffers[1]); ctx->planes[p].pixels[0] = ctx->planes[p].pixels[1] = 0; } }"} {"target": 0, "idx": 668, "func": "static AVInputFormat *probe_input_format(AVProbeData *pd, int is_opened) { AVInputFormat *fmt1, *fmt; int score, score_max; fmt = NULL; score_max = 0; for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) { if (!is_opened && !(fmt1->flags & AVFMT_NOFILE)) continue; score = 0; if (fmt1->extensions) { if (match_ext(pd->filename, fmt1->extensions)) { score = 50; } } else if (fmt1->read_probe) { score = fmt1->read_probe(pd); } if (score > score_max) { score_max = score; fmt = fmt1; } } return fmt; }"} {"target": 1, "idx": 670, "func": "static void pc_dimm_check_memdev_is_busy(Object *obj, const char *name, Object *val, Error **errp) { MemoryRegion *mr; mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), errp); if (memory_region_is_mapped(mr)) { char *path = object_get_canonical_path_component(val); error_setg(errp, \"can't use already busy memdev: %s\", path); g_free(path); } else { qdev_prop_allow_set_link_before_realize(obj, name, val, errp); } }"} {"target": 1, "idx": 682, "func": "int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { int ret; AVStream *st; ff_read_frame_flush(s); if(flags & AVSEEK_FLAG_BYTE) return av_seek_frame_byte(s, stream_index, timestamp, flags); if(stream_index < 0){ stream_index= av_find_default_stream_index(s); if(stream_index < 0) return -1; st= s->streams[stream_index]; /* timestamp for default must be expressed in AV_TIME_BASE units */ timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num); } /* first, we try the format specific seek */ if (s->iformat->read_seek) ret = s->iformat->read_seek(s, stream_index, timestamp, flags); else ret = -1; if (ret >= 0) { return 0; } if(s->iformat->read_timestamp) return av_seek_frame_binary(s, stream_index, timestamp, flags); else return av_seek_frame_generic(s, stream_index, timestamp, flags); }"} {"target": 0, "idx": 691, "func": "static int mpeg_mux_end(AVFormatContext *ctx) { StreamInfo *stream; int i; /* flush each packet */ for(i=0;i<ctx->nb_streams;i++) { stream = ctx->streams[i]->priv_data; if (stream->buffer_ptr > 0) { if (i == (ctx->nb_streams - 1)) flush_packet(ctx, i, 1); else flush_packet(ctx, i, 0); } } /* write the end header */ //put_be32(&ctx->pb, ISO_11172_END_CODE); //put_flush_packet(&ctx->pb); for(i=0;i<ctx->nb_streams;i++) av_freep(&ctx->streams[i]->priv_data); return 0; }"} {"target": 1, "idx": 696, "func": "static void virtio_scsi_complete_req(VirtIOSCSIReq *req) { VirtIOSCSI *s = req->dev; VirtQueue *vq = req->vq; VirtIODevice *vdev = VIRTIO_DEVICE(s); qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size); virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size); if (s->dataplane_started && !s->dataplane_fenced) { virtio_scsi_dataplane_notify(vdev, req); } else { virtio_notify(vdev, vq); } if (req->sreq) { req->sreq->hba_private = NULL; scsi_req_unref(req->sreq); } virtio_scsi_free_req(req); }"} {"target": 1, "idx": 705, "func": "static int inet_connect_addr(struct addrinfo *addr, bool block, bool *in_progress) { int sock, rc; if (in_progress) { *in_progress = false; } sock = qemu_socket(addr->ai_family, addr->ai_socktype, 
addr->ai_protocol); if (sock < 0) { fprintf(stderr, \"%s: socket(%s): %s\\n\", __func__, inet_strfamily(addr->ai_family), strerror(errno)); return -1; } setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); if (!block) { socket_set_nonblock(sock); } /* connect to peer */ do { rc = 0; if (connect(sock, addr->ai_addr, addr->ai_addrlen) < 0) { rc = -socket_error(); } } while (rc == -EINTR); if (!block && QEMU_SOCKET_RC_INPROGRESS(rc)) { if (in_progress) { *in_progress = true; } } else if (rc < 0) { closesocket(sock); return -1; } return sock; }"} {"target": 1, "idx": 712, "func": "static void gen_rfci(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } /* Restore CPU state */ gen_helper_rfci(cpu_env); gen_sync_exception(ctx); #endif }"} {"target": 1, "idx": 713, "func": "static int opt_new_stream(const char *opt, const char *arg) { AVFormatContext *oc; if (nb_output_files <= 0) { fprintf(stderr, \"At least one output file must be specified\\n\"); ffmpeg_exit(1); } oc = output_files[nb_output_files - 1]; if (!strcmp(opt, \"newvideo\" )) new_video_stream (oc); else if (!strcmp(opt, \"newaudio\" )) new_audio_stream (oc); else if (!strcmp(opt, \"newsubtitle\")) new_subtitle_stream(oc); else av_assert0(0); return 0; }"} {"target": 0, "idx": 734, "func": "void ff_vp3_v_loop_filter_c(uint8_t *first_pixel, int stride, int *bounding_values) { unsigned char *end; int filter_value; const int nstride= -stride; for (end= first_pixel + 8; first_pixel < end; first_pixel++) { filter_value = (first_pixel[2 * nstride] - first_pixel[ stride]) +3*(first_pixel[0 ] - first_pixel[nstride]); filter_value = bounding_values[(filter_value + 4) >> 3]; first_pixel[nstride] = av_clip_uint8(first_pixel[nstride] + filter_value); first_pixel[0] = av_clip_uint8(first_pixel[0] - filter_value); } }"} {"target": 1, "idx": 747, "func": "void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[4]){ int w_align= 1; int h_align= 1; switch(s->pix_fmt){ case PIX_FMT_YUV420P: case PIX_FMT_YUYV422: case PIX_FMT_UYVY422: case PIX_FMT_YUV422P: case PIX_FMT_YUV440P: case PIX_FMT_YUV444P: case PIX_FMT_GRAY8: case PIX_FMT_GRAY16BE: case PIX_FMT_GRAY16LE: case PIX_FMT_YUVJ420P: case PIX_FMT_YUVJ422P: case PIX_FMT_YUVJ440P: case PIX_FMT_YUVJ444P: case PIX_FMT_YUVA420P: case PIX_FMT_YUV420P9LE: case PIX_FMT_YUV420P9BE: case PIX_FMT_YUV420P10LE: case PIX_FMT_YUV420P10BE: case PIX_FMT_YUV422P10LE: case PIX_FMT_YUV422P10BE: w_align= 16; //FIXME check for non mpeg style codecs and use less alignment h_align= 16; if(s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG || s->codec_id == CODEC_ID_AMV || s->codec_id == CODEC_ID_THP || s->codec_id == CODEC_ID_H264) h_align= 32; // interlaced is rounded up to 2 MBs break; case PIX_FMT_YUV411P: case PIX_FMT_UYYVYY411: w_align=32; h_align=8; break; case PIX_FMT_YUV410P: if(s->codec_id == CODEC_ID_SVQ1){ w_align=64; h_align=64; } case PIX_FMT_RGB555: if(s->codec_id == CODEC_ID_RPZA){ w_align=4; h_align=4; } case PIX_FMT_PAL8: case PIX_FMT_BGR8: case PIX_FMT_RGB8: if(s->codec_id == CODEC_ID_SMC){ w_align=4; h_align=4; } break; case PIX_FMT_BGR24: if((s->codec_id == CODEC_ID_MSZH) || (s->codec_id == CODEC_ID_ZLIB)){ w_align=4; h_align=4; } break; default: w_align= 1; h_align= 1; break; } *width = FFALIGN(*width , w_align); *height= FFALIGN(*height, h_align); if(s->codec_id == CODEC_ID_H264 || s->lowres) *height+=2; 
// some of the optimized chroma MC reads one line too much // which is also done in mpeg decoders with lowres > 0 linesize_align[0] = linesize_align[1] = linesize_align[2] = linesize_align[3] = STRIDE_ALIGN; //STRIDE_ALIGN is 8 for SSE* but this does not work for SVQ1 chroma planes //we could change STRIDE_ALIGN to 16 for x86/sse but it would increase the //picture size unneccessarily in some cases. The solution here is not //pretty and better ideas are welcome! #if HAVE_MMX if(s->codec_id == CODEC_ID_SVQ1 || s->codec_id == CODEC_ID_VP5 || s->codec_id == CODEC_ID_VP6 || s->codec_id == CODEC_ID_VP6F || s->codec_id == CODEC_ID_VP6A) { linesize_align[0] = linesize_align[1] = linesize_align[2] = 16; } #endif }"} {"target": 0, "idx": 754, "func": "static int h264_slice_header_parse(H264Context *h, H264SliceContext *sl, const H2645NAL *nal) { const SPS *sps; const PPS *pps; int ret; unsigned int slice_type, tmp, i; int field_pic_flag, bottom_field_flag; int frame_num, droppable, picture_structure; int mb_aff_frame = 0; sl->first_mb_addr = get_ue_golomb(&sl->gb); if (sl->first_mb_addr == 0) { // FIXME better field boundary detection if (h->current_slice && h->cur_pic_ptr && FIELD_PICTURE(h)) { ff_h264_field_end(h, sl, 1); } h->current_slice = 0; if (!h->first_field) { if (h->cur_pic_ptr && !h->droppable) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); } h->cur_pic_ptr = NULL; } } slice_type = get_ue_golomb_31(&sl->gb); if (slice_type > 9) { av_log(h->avctx, AV_LOG_ERROR, \"slice type %d too large at %d\\n\", slice_type, sl->first_mb_addr); return AVERROR_INVALIDDATA; } if (slice_type > 4) { slice_type -= 5; sl->slice_type_fixed = 1; } else sl->slice_type_fixed = 0; slice_type = ff_h264_golomb_to_pict_type[slice_type]; sl->slice_type = slice_type; sl->slice_type_nos = slice_type & 3; if (nal->type == NAL_IDR_SLICE && sl->slice_type_nos != AV_PICTURE_TYPE_I) { av_log(h->avctx, AV_LOG_ERROR, \"A non-intra slice in an IDR NAL unit.\\n\"); return AVERROR_INVALIDDATA; } sl->pps_id = get_ue_golomb(&sl->gb); if (sl->pps_id >= MAX_PPS_COUNT) { av_log(h->avctx, AV_LOG_ERROR, \"pps_id %u out of range\\n\", sl->pps_id); return AVERROR_INVALIDDATA; } if (!h->ps.pps_list[sl->pps_id]) { av_log(h->avctx, AV_LOG_ERROR, \"non-existing PPS %u referenced\\n\", sl->pps_id); return AVERROR_INVALIDDATA; } if (h->current_slice > 0 && h->ps.pps != (const PPS*)h->ps.pps_list[sl->pps_id]->data) { av_log(h->avctx, AV_LOG_ERROR, \"PPS changed between slices\\n\"); return AVERROR_INVALIDDATA; } pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data; if (!h->ps.sps_list[pps->sps_id]) { av_log(h->avctx, AV_LOG_ERROR, \"non-existing SPS %u referenced\\n\", pps->sps_id); return AVERROR_INVALIDDATA; } sps = (const SPS*)h->ps.sps_list[pps->sps_id]->data; frame_num = get_bits(&sl->gb, sps->log2_max_frame_num); if (!h->setup_finished) h->poc.frame_num = frame_num; sl->mb_mbaff = 0; droppable = nal->ref_idc == 0; if (sps->frame_mbs_only_flag) { picture_structure = PICT_FRAME; } else { field_pic_flag = get_bits1(&sl->gb); if (field_pic_flag) { bottom_field_flag = get_bits1(&sl->gb); picture_structure = PICT_TOP_FIELD + bottom_field_flag; } else { picture_structure = PICT_FRAME; mb_aff_frame = sps->mb_aff; } } if (!h->setup_finished) { h->mb_aff_frame = mb_aff_frame; } sl->picture_structure = picture_structure; sl->mb_field_decoding_flag = picture_structure != PICT_FRAME; if (h->current_slice != 0) { if (h->picture_structure != picture_structure || h->droppable != droppable) { 
av_log(h->avctx, AV_LOG_ERROR, \"Changing field mode (%d -> %d) between slices is not allowed\\n\", h->picture_structure, picture_structure); return AVERROR_INVALIDDATA; } else if (!h->cur_pic_ptr) { av_log(h->avctx, AV_LOG_ERROR, \"unset cur_pic_ptr on slice %d\\n\", h->current_slice + 1); return AVERROR_INVALIDDATA; } } if (picture_structure == PICT_FRAME) { h->curr_pic_num = h->poc.frame_num; h->max_pic_num = 1 << sps->log2_max_frame_num; } else { h->curr_pic_num = 2 * h->poc.frame_num + 1; h->max_pic_num = 1 << (sps->log2_max_frame_num + 1); } if (nal->type == NAL_IDR_SLICE) get_ue_golomb(&sl->gb); /* idr_pic_id */ if (sps->poc_type == 0) { int poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb); if (!h->setup_finished) h->poc.poc_lsb = poc_lsb; if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME) { int delta_poc_bottom = get_se_golomb(&sl->gb); if (!h->setup_finished) h->poc.delta_poc_bottom = delta_poc_bottom; } } if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) { int delta_poc = get_se_golomb(&sl->gb); if (!h->setup_finished) h->poc.delta_poc[0] = delta_poc; if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME) { delta_poc = get_se_golomb(&sl->gb); if (!h->setup_finished) h->poc.delta_poc[1] = delta_poc; } } if (pps->redundant_pic_cnt_present) sl->redundant_pic_count = get_ue_golomb(&sl->gb); if (sl->slice_type_nos == AV_PICTURE_TYPE_B) sl->direct_spatial_mv_pred = get_bits1(&sl->gb); ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count, &sl->gb, pps, sl->slice_type_nos, picture_structure); if (ret < 0) return ret; if (sl->slice_type_nos != AV_PICTURE_TYPE_I) { ret = ff_h264_decode_ref_pic_list_reordering(h, sl); if (ret < 0) { sl->ref_count[1] = sl->ref_count[0] = 0; return ret; } } sl->pwt.use_weight = 0; for (i = 0; i < 2; i++) { sl->pwt.luma_weight_flag[i] = 0; sl->pwt.chroma_weight_flag[i] = 0; } if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) || (pps->weighted_bipred_idc == 1 && sl->slice_type_nos == AV_PICTURE_TYPE_B)) ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count, sl->slice_type_nos, &sl->pwt); sl->explicit_ref_marking = 0; if (nal->ref_idc) { ret = ff_h264_decode_ref_pic_marking(h, sl, &sl->gb); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return AVERROR_INVALIDDATA; } if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) { tmp = get_ue_golomb_31(&sl->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, \"cabac_init_idc %u overflow\\n\", tmp); return AVERROR_INVALIDDATA; } sl->cabac_init_idc = tmp; } sl->last_qscale_diff = 0; tmp = pps->init_qp + get_se_golomb(&sl->gb); if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) { av_log(h->avctx, AV_LOG_ERROR, \"QP %u out of range\\n\", tmp); return AVERROR_INVALIDDATA; } sl->qscale = tmp; sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale); sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale); // FIXME qscale / qp ... 
stuff if (sl->slice_type == AV_PICTURE_TYPE_SP) get_bits1(&sl->gb); /* sp_for_switch_flag */ if (sl->slice_type == AV_PICTURE_TYPE_SP || sl->slice_type == AV_PICTURE_TYPE_SI) get_se_golomb(&sl->gb); /* slice_qs_delta */ sl->deblocking_filter = 1; sl->slice_alpha_c0_offset = 0; sl->slice_beta_offset = 0; if (pps->deblocking_filter_parameters_present) { tmp = get_ue_golomb_31(&sl->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, \"deblocking_filter_idc %u out of range\\n\", tmp); return AVERROR_INVALIDDATA; } sl->deblocking_filter = tmp; if (sl->deblocking_filter < 2) sl->deblocking_filter ^= 1; // 1<->0 if (sl->deblocking_filter) { sl->slice_alpha_c0_offset = get_se_golomb(&sl->gb) * 2; sl->slice_beta_offset = get_se_golomb(&sl->gb) * 2; if (sl->slice_alpha_c0_offset > 12 || sl->slice_alpha_c0_offset < -12 || sl->slice_beta_offset > 12 || sl->slice_beta_offset < -12) { av_log(h->avctx, AV_LOG_ERROR, \"deblocking filter parameters %d %d out of range\\n\", sl->slice_alpha_c0_offset, sl->slice_beta_offset); return AVERROR_INVALIDDATA; } } } return 0; }"} {"target": 0, "idx": 757, "func": "static inline void mct_decode(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile) { int i, csize = 1; void *src[3]; for (i = 1; i < 3; i++) if (tile->codsty[0].transform != tile->codsty[i].transform) { av_log(s->avctx, AV_LOG_ERROR, \"Transforms mismatch, MCT not supported\\n\"); return; } for (i = 0; i < 3; i++) if (tile->codsty[0].transform == FF_DWT97) src[i] = tile->comp[i].f_data; else src[i] = tile->comp[i].i_data; for (i = 0; i < 2; i++) csize *= tile->comp[0].coord[i][1] - tile->comp[0].coord[i][0]; s->dsp.mct_decode[tile->codsty[0].transform](src[0], src[1], src[2], csize); }"} {"target": 1, "idx": 768, "func": "static inline void RENAME(rgb2rgb_init)(void) { #if !COMPILE_TEMPLATE_SSE2 #if !COMPILE_TEMPLATE_AMD3DNOW rgb15to16 = RENAME(rgb15to16); rgb15tobgr24 = RENAME(rgb15tobgr24); rgb15to32 = RENAME(rgb15to32); rgb16tobgr24 = RENAME(rgb16tobgr24); rgb16to32 = RENAME(rgb16to32); rgb16to15 = RENAME(rgb16to15); rgb24tobgr16 = RENAME(rgb24tobgr16); rgb24tobgr15 = RENAME(rgb24tobgr15); rgb24tobgr32 = RENAME(rgb24tobgr32); rgb32to16 = RENAME(rgb32to16); rgb32to15 = RENAME(rgb32to15); rgb32tobgr24 = RENAME(rgb32tobgr24); rgb24to15 = RENAME(rgb24to15); rgb24to16 = RENAME(rgb24to16); rgb24tobgr24 = RENAME(rgb24tobgr24); shuffle_bytes_2103 = RENAME(shuffle_bytes_2103); rgb32tobgr16 = RENAME(rgb32tobgr16); rgb32tobgr15 = RENAME(rgb32tobgr15); yv12toyuy2 = RENAME(yv12toyuy2); yv12touyvy = RENAME(yv12touyvy); yuv422ptoyuy2 = RENAME(yuv422ptoyuy2); yuv422ptouyvy = RENAME(yuv422ptouyvy); yuy2toyv12 = RENAME(yuy2toyv12); vu9_to_vu12 = RENAME(vu9_to_vu12); yvu9_to_yuy2 = RENAME(yvu9_to_yuy2); uyvytoyuv422 = RENAME(uyvytoyuv422); yuyvtoyuv422 = RENAME(yuyvtoyuv422); #endif /* !COMPILE_TEMPLATE_AMD3DNOW */ #if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW planar2x = RENAME(planar2x); #endif /* COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW */ rgb24toyv12 = RENAME(rgb24toyv12); yuyvtoyuv420 = RENAME(yuyvtoyuv420); uyvytoyuv420 = RENAME(uyvytoyuv420); #endif /* !COMPILE_TEMPLATE_SSE2 */ #if !COMPILE_TEMPLATE_AMD3DNOW interleaveBytes = RENAME(interleaveBytes); #endif /* !COMPILE_TEMPLATE_AMD3DNOW */ }"} {"target": 1, "idx": 771, "func": "static int hls_write_header(AVFormatContext *s) { HLSContext *hls = s->priv_data; int ret, i; char *p; const char *pattern = \"%d.ts\"; int basename_size = strlen(s->filename) + strlen(pattern); hls->number = 0; hls->recording_time = hls->time * 1000000; hls->start_pts = 
AV_NOPTS_VALUE; for (i = 0; i < s->nb_streams; i++) hls->has_video += s->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO; if (hls->has_video > 1) av_log(s, AV_LOG_WARNING, \"More than a single video stream present, \" \"expect issues decoding it.\\n\"); hls->oformat = av_guess_format(\"mpegts\", NULL, NULL); if (!hls->oformat) { ret = AVERROR_MUXER_NOT_FOUND; goto fail; } hls->basename = av_malloc(basename_size); if (!hls->basename) { ret = AVERROR(ENOMEM); goto fail; } strcpy(hls->basename, s->filename); p = strrchr(hls->basename, '.'); if (p) *p = '\\0'; av_strlcat(hls->basename, \"%d.ts\", basename_size); if ((ret = hls_mux_init(s)) < 0) goto fail; if ((ret = hls_start(s)) < 0) goto fail; if ((ret = avformat_write_header(hls->avf, NULL)) < 0) return ret; fail: if (ret) { av_free(hls->basename); if (hls->avf) avformat_free_context(hls->avf); } return ret; }"} {"target": 1, "idx": 772, "func": "static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width) { #ifdef HAVE_MMX asm volatile( \"mov %2, %%\"REG_a\" \\n\\t\" \"movq \"MANGLE(bgr2YCoeff)\", %%mm6 \\n\\t\" \"movq \"MANGLE(w1111)\", %%mm5 \\n\\t\" \"pxor %%mm7, %%mm7 \\n\\t\" \"lea (%%\"REG_a\", %%\"REG_a\", 2), %%\"REG_d\"\\n\\t\" ASMALIGN(4) \"1: \\n\\t\" PREFETCH\" 64(%0, %%\"REG_d\") \\n\\t\" \"movd (%0, %%\"REG_d\"), %%mm0 \\n\\t\" \"movd 3(%0, %%\"REG_d\"), %%mm1 \\n\\t\" \"punpcklbw %%mm7, %%mm0 \\n\\t\" \"punpcklbw %%mm7, %%mm1 \\n\\t\" \"movd 6(%0, %%\"REG_d\"), %%mm2 \\n\\t\" \"movd 9(%0, %%\"REG_d\"), %%mm3 \\n\\t\" \"punpcklbw %%mm7, %%mm2 \\n\\t\" \"punpcklbw %%mm7, %%mm3 \\n\\t\" \"pmaddwd %%mm6, %%mm0 \\n\\t\" \"pmaddwd %%mm6, %%mm1 \\n\\t\" \"pmaddwd %%mm6, %%mm2 \\n\\t\" \"pmaddwd %%mm6, %%mm3 \\n\\t\" #ifndef FAST_BGR2YV12 \"psrad $8, %%mm0 \\n\\t\" \"psrad $8, %%mm1 \\n\\t\" \"psrad $8, %%mm2 \\n\\t\" \"psrad $8, %%mm3 \\n\\t\" #endif \"packssdw %%mm1, %%mm0 \\n\\t\" \"packssdw %%mm3, %%mm2 \\n\\t\" \"pmaddwd %%mm5, %%mm0 \\n\\t\" \"pmaddwd %%mm5, %%mm2 \\n\\t\" \"packssdw %%mm2, %%mm0 \\n\\t\" \"psraw $7, %%mm0 \\n\\t\" \"movd 12(%0, %%\"REG_d\"), %%mm4 \\n\\t\" \"movd 15(%0, %%\"REG_d\"), %%mm1 \\n\\t\" \"punpcklbw %%mm7, %%mm4 \\n\\t\" \"punpcklbw %%mm7, %%mm1 \\n\\t\" \"movd 18(%0, %%\"REG_d\"), %%mm2 \\n\\t\" \"movd 21(%0, %%\"REG_d\"), %%mm3 \\n\\t\" \"punpcklbw %%mm7, %%mm2 \\n\\t\" \"punpcklbw %%mm7, %%mm3 \\n\\t\" \"pmaddwd %%mm6, %%mm4 \\n\\t\" \"pmaddwd %%mm6, %%mm1 \\n\\t\" \"pmaddwd %%mm6, %%mm2 \\n\\t\" \"pmaddwd %%mm6, %%mm3 \\n\\t\" #ifndef FAST_BGR2YV12 \"psrad $8, %%mm4 \\n\\t\" \"psrad $8, %%mm1 \\n\\t\" \"psrad $8, %%mm2 \\n\\t\" \"psrad $8, %%mm3 \\n\\t\" #endif \"packssdw %%mm1, %%mm4 \\n\\t\" \"packssdw %%mm3, %%mm2 \\n\\t\" \"pmaddwd %%mm5, %%mm4 \\n\\t\" \"pmaddwd %%mm5, %%mm2 \\n\\t\" \"add $24, %%\"REG_d\" \\n\\t\" \"packssdw %%mm2, %%mm4 \\n\\t\" \"psraw $7, %%mm4 \\n\\t\" \"packuswb %%mm4, %%mm0 \\n\\t\" \"paddusb \"MANGLE(bgr2YOffset)\", %%mm0 \\n\\t\" \"movq %%mm0, (%1, %%\"REG_a\") \\n\\t\" \"add $8, %%\"REG_a\" \\n\\t\" \" js 1b \\n\\t\" : : \"r\" (src+width*3), \"r\" (dst+width), \"g\" (-width) : \"%\"REG_a, \"%\"REG_d ); #else int i; for(i=0; i<width; i++) { int b= src[i*3+0]; int g= src[i*3+1]; int r= src[i*3+2]; dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT); } #endif }"} {"target": 1, "idx": 782, "func": "static void vvfat_close(BlockDriverState *bs) { BDRVVVFATState *s = bs->opaque; vvfat_close_current_file(s); array_free(&(s->fat)); array_free(&(s->directory)); array_free(&(s->mapping)); if(s->cluster_buffer) free(s->cluster_buffer); }"} {"target": 0, "idx": 795, "func": "static int ivi_mc(ivi_mc_func mc, int16_t *buf, const int16_t *ref_buf, int offs, int 
mv_x, int mv_y, uint32_t pitch, int mc_type) { int ref_offs = offs + mv_y * pitch + mv_x; if (offs < 0 || ref_offs < 0 || !ref_buf) return AVERROR_INVALIDDATA; mc(buf + offs, ref_buf + ref_offs, pitch, mc_type); return 0; }"} {"target": 0, "idx": 814, "func": "static void test_io_channel_setup_sync(SocketAddressLegacy *listen_addr, SocketAddressLegacy *connect_addr, QIOChannel **src, QIOChannel **dst) { QIOChannelSocket *lioc; lioc = qio_channel_socket_new(); qio_channel_socket_listen_sync(lioc, listen_addr, &error_abort); if (listen_addr->type == SOCKET_ADDRESS_LEGACY_KIND_INET) { SocketAddressLegacy *laddr = qio_channel_socket_get_local_address( lioc, &error_abort); g_free(connect_addr->u.inet.data->port); connect_addr->u.inet.data->port = g_strdup(laddr->u.inet.data->port); qapi_free_SocketAddressLegacy(laddr); } *src = QIO_CHANNEL(qio_channel_socket_new()); qio_channel_socket_connect_sync( QIO_CHANNEL_SOCKET(*src), connect_addr, &error_abort); qio_channel_set_delay(*src, false); qio_channel_wait(QIO_CHANNEL(lioc), G_IO_IN); *dst = QIO_CHANNEL(qio_channel_socket_accept(lioc, &error_abort)); g_assert(*dst); test_io_channel_set_socket_bufs(*src, *dst); object_unref(OBJECT(lioc)); }"} {"target": 0, "idx": 815, "func": "static void monitor_readline(const char *prompt, int is_password, char *buf, int buf_size) { readline_start(prompt, is_password, monitor_readline_cb, NULL); readline_show_prompt(); monitor_readline_buf = buf; monitor_readline_buf_size = buf_size; monitor_readline_started = 1; while (monitor_readline_started) { main_loop_wait(10); } }"} {"target": 1, "idx": 838, "func": "static int cllc_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt) { CLLCContext *ctx = avctx->priv_data; AVFrame *pic = data; ThreadFrame frame = { .f = data }; uint8_t *src = avpkt->data; uint32_t info_tag, info_offset; int data_size; GetBitContext gb; int coding_type, ret; if (avpkt->size < 4 + 4) { av_log(avctx, AV_LOG_ERROR, \"Frame is too small %d.\\n\", avpkt->size); } info_offset = 0; info_tag = AV_RL32(src); if (info_tag == MKTAG('I', 'N', 'F', 'O')) { info_offset = AV_RL32(src + 4); if (info_offset > UINT32_MAX - 8 || info_offset + 8 > avpkt->size) { av_log(avctx, AV_LOG_ERROR, \"Invalid INFO header offset: 0x%08\"PRIX32\" is too large.\\n\", info_offset); } ff_canopus_parse_info_tag(avctx, src + 8, info_offset); info_offset += 8; src += info_offset; } data_size = (avpkt->size - info_offset) & ~1; /* Make sure our bswap16'd buffer is big enough */ av_fast_padded_malloc(&ctx->swapped_buf, &ctx->swapped_buf_size, data_size); if (!ctx->swapped_buf) { av_log(avctx, AV_LOG_ERROR, \"Could not allocate swapped buffer.\\n\"); return AVERROR(ENOMEM); } /* bswap16 the buffer since CLLC's bitreader works in 16-bit words */ ctx->bdsp.bswap16_buf((uint16_t *) ctx->swapped_buf, (uint16_t *) src, data_size / 2); if ((ret = init_get_bits8(&gb, ctx->swapped_buf, data_size)) < 0) return ret; /* * Read in coding type. 
The types are as follows: * * 0 - YUY2 * 1 - BGR24 (Triples) * 2 - BGR24 (Quads) * 3 - BGRA */ coding_type = (AV_RL32(src) >> 8) & 0xFF; av_log(avctx, AV_LOG_DEBUG, \"Frame coding type: %d\\n\", coding_type); switch (coding_type) { case 0: avctx->pix_fmt = AV_PIX_FMT_YUV422P; avctx->bits_per_raw_sample = 8; if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) return ret; ret = decode_yuv_frame(ctx, &gb, pic); if (ret < 0) return ret; break; case 1: case 2: avctx->pix_fmt = AV_PIX_FMT_RGB24; avctx->bits_per_raw_sample = 8; if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) return ret; ret = decode_rgb24_frame(ctx, &gb, pic); if (ret < 0) return ret; break; case 3: avctx->pix_fmt = AV_PIX_FMT_ARGB; avctx->bits_per_raw_sample = 8; if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) return ret; ret = decode_argb_frame(ctx, &gb, pic); if (ret < 0) return ret; break; default: av_log(avctx, AV_LOG_ERROR, \"Unknown coding type: %d.\\n\", coding_type); } pic->key_frame = 1; pic->pict_type = AV_PICTURE_TYPE_I; *got_picture_ptr = 1; return avpkt->size; }"} {"target": 1, "idx": 850, "func": "static av_always_inline void decode_dc_coeffs(GetBitContext *gb, int16_t *out, int blocks_per_slice) { int16_t prev_dc; int code, i, sign; OPEN_READER(re, gb); DECODE_CODEWORD(code, FIRST_DC_CB); prev_dc = TOSIGNED(code); out[0] = prev_dc; out += 64; // dc coeff for the next block code = 5; sign = 0; for (i = 1; i < blocks_per_slice; i++, out += 64) { DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6U)]); if(code) sign ^= -(code & 1); else sign = 0; prev_dc += (((code + 1) >> 1) ^ sign) - sign; out[0] = prev_dc; } CLOSE_READER(re, gb); }"} {"target": 0, "idx": 854, "func": "static void avc_luma_midh_qrt_and_aver_dst_8w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height, uint8_t horiz_offset) { uint32_t multiple8_cnt; for (multiple8_cnt = 2; multiple8_cnt--;) { avc_luma_midh_qrt_and_aver_dst_4w_msa(src, src_stride, dst, dst_stride, height, horiz_offset); src += 4; dst += 4; } }"} {"target": 1, "idx": 859, "func": "static int mp_pacl_removexattr(FsContext *ctx, const char *path, const char *name) { int ret; char *buffer; buffer = rpath(ctx, path); ret = lremovexattr(buffer, MAP_ACL_ACCESS); if (ret == -1 && errno == ENODATA) { /* * We don't get ENODATA error when trying to remove a * posix acl that is not present. 
So don't throw the error * even in case of mapped security model */ errno = 0; ret = 0; } g_free(buffer); return ret; }"} {"target": 1, "idx": 863, "func": "static av_cold OMXContext *omx_init(void *logctx, const char *libname, const char *prefix) { static const char * const libnames[] = { \"libOMX_Core.so\", \"libOmxCore.so\", NULL }; const char* const* nameptr; int ret = AVERROR_ENCODER_NOT_FOUND; OMXContext *omx_context; omx_context = av_mallocz(sizeof(*omx_context)); if (!omx_context) return NULL; if (libname) { ret = omx_try_load(omx_context, logctx, libname, prefix); if (ret < 0) { av_free(omx_context); return NULL; } } else { for (nameptr = libnames; *nameptr; nameptr++) if (!(ret = omx_try_load(omx_context, logctx, *nameptr, prefix))) break; if (!*nameptr) { av_free(omx_context); return NULL; } } omx_context->ptr_Init(); return omx_context; }"} {"target": 0, "idx": 867, "func": "int av_get_channel_layout_nb_channels(int64_t channel_layout) { int count; uint64_t x = channel_layout; for (count = 0; x; count++) x &= x-1; // unset lowest set bit return count; }"} {"target": 0, "idx": 882, "func": "static void openpic_load_IRQ_queue(QEMUFile* f, IRQQueue *q) { unsigned int i; for (i = 0; i < BF_WIDTH(MAX_IRQ); i++) qemu_get_be32s(f, &q->queue[i]); qemu_get_sbe32s(f, &q->next); qemu_get_sbe32s(f, &q->priority); }"} {"target": 0, "idx": 914, "func": "static void cs_write (void *opaque, target_phys_addr_t addr, uint64_t val64, unsigned size) { CSState *s = opaque; uint32_t saddr, iaddr, val; saddr = addr; val = val64; switch (saddr) { case Index_Address: if (!(s->regs[Index_Address] & MCE) && (val & MCE) && (s->dregs[Interface_Configuration] & (3 << 3))) s->aci_counter = conf.aci_counter; s->regs[Index_Address] = val & ~(1 << 7); break; case Index_Data: if (!(s->dregs[MODE_And_ID] & MODE2)) iaddr = s->regs[Index_Address] & 0x0f; else iaddr = s->regs[Index_Address] & 0x1f; switch (iaddr) { case RESERVED: case RESERVED_2: case RESERVED_3: lwarn (\"attempt to write %#x to reserved indirect register %d\\n\", val, iaddr); break; case FS_And_Playback_Data_Format: if (s->regs[Index_Address] & MCE) { cs_reset_voices (s, val); } else { if (s->dregs[Alternate_Feature_Status] & PMCE) { val = (val & ~0x0f) | (s->dregs[iaddr] & 0x0f); cs_reset_voices (s, val); } else { lwarn (\"[P]MCE(%#x, %#x) is not set, val=%#x\\n\", s->regs[Index_Address], s->dregs[Alternate_Feature_Status], val); break; } } s->dregs[iaddr] = val; break; case Interface_Configuration: val &= ~(1 << 5); /* D5 is reserved */ s->dregs[iaddr] = val; if (val & PPIO) { lwarn (\"PIO is not supported (%#x)\\n\", val); break; } if (val & PEN) { if (!s->dma_running) { cs_reset_voices (s, s->dregs[FS_And_Playback_Data_Format]); } } else { if (s->dma_running) { DMA_release_DREQ (s->dma); AUD_set_active_out (s->voice, 0); s->dma_running = 0; } } break; case Error_Status_And_Initialization: lwarn (\"attempt to write to read only register %d\\n\", iaddr); break; case MODE_And_ID: dolog (\"val=%#x\\n\", val); if (val & MODE2) s->dregs[iaddr] |= MODE2; else s->dregs[iaddr] &= ~MODE2; break; case Alternate_Feature_Enable_I: if (val & TE) lerr (\"timer is not yet supported\\n\"); s->dregs[iaddr] = val; break; case Alternate_Feature_Status: if ((s->dregs[iaddr] & PI) && !(val & PI)) { /* XXX: TI CI */ qemu_irq_lower (s->pic); s->regs[Status] &= ~INT; } s->dregs[iaddr] = val; break; case Version_Chip_ID: lwarn (\"write to Version_Chip_ID register %#x\\n\", val); s->dregs[iaddr] = val; break; default: s->dregs[iaddr] = val; break; } dolog (\"written 
value %#x to indirect register %d\\n\", val, iaddr); break; case Status: if (s->regs[Status] & INT) { qemu_irq_lower (s->pic); } s->regs[Status] &= ~INT; s->dregs[Alternate_Feature_Status] &= ~(PI | CI | TI); break; case PIO_Data: lwarn (\"attempt to write value %#x to PIO register\\n\", val); break; } }"} {"target": 0, "idx": 979, "func": "static uint64_t omap_pin_cfg_read(void *opaque, target_phys_addr_t addr, unsigned size) { struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; if (size != 4) { return omap_badwidth_read32(opaque, addr); } switch (addr) { case 0x00: /* FUNC_MUX_CTRL_0 */ case 0x04: /* FUNC_MUX_CTRL_1 */ case 0x08: /* FUNC_MUX_CTRL_2 */ return s->func_mux_ctrl[addr >> 2]; case 0x0c: /* COMP_MODE_CTRL_0 */ return s->comp_mode_ctrl[0]; case 0x10: /* FUNC_MUX_CTRL_3 */ case 0x14: /* FUNC_MUX_CTRL_4 */ case 0x18: /* FUNC_MUX_CTRL_5 */ case 0x1c: /* FUNC_MUX_CTRL_6 */ case 0x20: /* FUNC_MUX_CTRL_7 */ case 0x24: /* FUNC_MUX_CTRL_8 */ case 0x28: /* FUNC_MUX_CTRL_9 */ case 0x2c: /* FUNC_MUX_CTRL_A */ case 0x30: /* FUNC_MUX_CTRL_B */ case 0x34: /* FUNC_MUX_CTRL_C */ case 0x38: /* FUNC_MUX_CTRL_D */ return s->func_mux_ctrl[(addr >> 2) - 1]; case 0x40: /* PULL_DWN_CTRL_0 */ case 0x44: /* PULL_DWN_CTRL_1 */ case 0x48: /* PULL_DWN_CTRL_2 */ case 0x4c: /* PULL_DWN_CTRL_3 */ return s->pull_dwn_ctrl[(addr & 0xf) >> 2]; case 0x50: /* GATE_INH_CTRL_0 */ return s->gate_inh_ctrl[0]; case 0x60: /* VOLTAGE_CTRL_0 */ return s->voltage_ctrl[0]; case 0x70: /* TEST_DBG_CTRL_0 */ return s->test_dbg_ctrl[0]; case 0x80: /* MOD_CONF_CTRL_0 */ return s->mod_conf_ctrl[0]; } OMAP_BAD_REG(addr); return 0; }"} {"target": 1, "idx": 1003, "func": "static int decode0(GetByteContext *gb, RangeCoder *rc, unsigned cumFreq, unsigned freq, unsigned total_freq) { int t; if (total_freq == 0) return AVERROR_INVALIDDATA; t = rc->range * (uint64_t)cumFreq / total_freq; rc->code1 += t + 1; rc->range = rc->range * (uint64_t)(freq + cumFreq) / total_freq - (t + 1); while (rc->range < TOP && bytestream2_get_bytes_left(gb) > 0) { unsigned byte = bytestream2_get_byte(gb); rc->code = (rc->code << 8) | byte; rc->code1 <<= 8; rc->range <<= 8; } return 0; }"} {"target": 1, "idx": 1018, "func": "static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, AVPacket *avpkt) { int ret; int new_extradata_size; uint8_t *new_extradata; HEVCContext *s = avctx->priv_data; if (!avpkt->size) { ret = ff_hevc_output_frame(s, data, 1); if (ret < 0) return ret; *got_output = ret; return 0; } new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &new_extradata_size); if (new_extradata && new_extradata_size > 0) { ret = hevc_decode_extradata(s, new_extradata, new_extradata_size); if (ret < 0) return ret; } s->ref = NULL; ret = decode_nal_units(s, avpkt->data, avpkt->size); if (ret < 0) return ret; if (avctx->hwaccel) { if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) { av_log(avctx, AV_LOG_ERROR, \"hardware accelerator failed to decode picture\\n\"); ff_hevc_unref_frame(s, s->ref, ~0); return ret; } } else { /* verify the SEI checksum */ if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded && s->sei.picture_hash.is_md5) { ret = verify_md5(s, s->ref->frame); if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) { ff_hevc_unref_frame(s, s->ref, ~0); return ret; } } } s->sei.picture_hash.is_md5 = 0; if (s->is_decoded) { av_log(avctx, AV_LOG_DEBUG, \"Decoded frame with POC %d.\\n\", s->poc); s->is_decoded = 0; } if (s->output_frame->buf[0]) { av_frame_move_ref(data, 
s->output_frame); *got_output = 1; } return avpkt->size; }"} {"target": 1, "idx": 1021, "func": "static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *bguf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y) { const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster __asm__ volatile( \"mov %%\"REG_b\", \"ESP_OFFSET\"(%5) \\n\\t\" \"mov %4, %%\"REG_b\" \\n\\t\" \"push %%\"REG_BP\" \\n\\t\" YSCALEYUV2RGB1(%%REGBP, %5) \"pxor %%mm7, %%mm7 \\n\\t\" WRITEBGR24(%%REGb, 8280(%5), %%REGBP) \"pop %%\"REG_BP\" \\n\\t\" \"mov \"ESP_OFFSET\"(%5), %%\"REG_b\" \\n\\t\" :: \"c\" (buf0), \"d\" (buf1), \"S\" (ubuf0), \"D\" (ubuf1), \"m\" (dest), \"a\" (&c->redDither) ); } else { __asm__ volatile( \"mov %%\"REG_b\", \"ESP_OFFSET\"(%5) \\n\\t\" \"mov %4, %%\"REG_b\" \\n\\t\" \"push %%\"REG_BP\" \\n\\t\" YSCALEYUV2RGB1b(%%REGBP, %5) \"pxor %%mm7, %%mm7 \\n\\t\" WRITEBGR24(%%REGb, 8280(%5), %%REGBP) \"pop %%\"REG_BP\" \\n\\t\" \"mov \"ESP_OFFSET\"(%5), %%\"REG_b\" \\n\\t\" :: \"c\" (buf0), \"d\" (buf1), \"S\" (ubuf0), \"D\" (ubuf1), \"m\" (dest), \"a\" (&c->redDither) ); } }"} {"target": 1, "idx": 1033, "func": "static always_inline void gen_rldimi (DisasContext *ctx, int mbn, int shn) { uint64_t mask; uint32_t sh, mb; sh = SH(ctx->opcode) | (shn << 5); mb = MB(ctx->opcode) | (mbn << 5); if (likely(sh == 0)) { if (likely(mb == 0)) { gen_op_load_gpr_T0(rS(ctx->opcode)); goto do_store; } else if (likely(mb == 63)) { gen_op_load_gpr_T0(rA(ctx->opcode)); goto do_store; } gen_op_load_gpr_T0(rS(ctx->opcode)); gen_op_load_gpr_T1(rA(ctx->opcode)); goto do_mask; } gen_op_load_gpr_T0(rS(ctx->opcode)); gen_op_load_gpr_T1(rA(ctx->opcode)); gen_op_rotli64_T0(sh); do_mask: mask = MASK(mb, 63 - sh); gen_andi_T0_64(ctx, mask); gen_andi_T1_64(ctx, ~mask); gen_op_or(); do_store: gen_op_store_T0_gpr(rA(ctx->opcode)); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx); }"} {"target": 1, "idx": 1042, "func": "static int dx2_decode_slice_410(GetBitContext *gb, AVFrame *frame, int line, int left, uint8_t lru[3][8]) { int x, y, i, j; int width = frame->width; int ystride = frame->linesize[0]; int ustride = frame->linesize[1]; int vstride = frame->linesize[2]; uint8_t *Y = frame->data[0] + ystride * line; uint8_t *U = frame->data[1] + (ustride >> 2) * line; uint8_t *V = frame->data[2] + (vstride >> 2) * line; for (y = 0; y < left - 3 && get_bits_left(gb) > 16; y += 4) { for (x = 0; x < width; x += 4) { for (j = 0; j < 4; j++) for (i = 0; i < 4; i++) Y[x + i + j * ystride] = decode_sym(gb, lru[0]); U[x >> 2] = decode_sym(gb, lru[1]) ^ 0x80; V[x >> 2] = decode_sym(gb, lru[2]) ^ 0x80; } Y += ystride << 2; U += ustride; V += vstride; } return y; }"} {"target": 0, "idx": 1064, "func": "void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr) { LOG_INT(\"kqemu_set_notdirty: addr=%08lx\\n\", (unsigned long)ram_addr); /* we only track transitions to dirty state */ if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff) return; if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE) nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL; else ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr; }"} {"target": 0, "idx": 1065, "func": "static void put_no_rnd_pixels_y2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h) { UINT8 *p; const UINT8 *pix; p = block; pix = pixels; MOVQ_ZERO(mm7); 
JUMPALIGN(); do { __asm __volatile( \"movq %1, %%mm0\\n\\t\" \"movq %2, %%mm1\\n\\t\" \"movq %%mm0, %%mm2\\n\\t\" \"movq %%mm1, %%mm3\\n\\t\" \"punpcklbw %%mm7, %%mm0\\n\\t\" \"punpcklbw %%mm7, %%mm1\\n\\t\" \"punpckhbw %%mm7, %%mm2\\n\\t\" \"punpckhbw %%mm7, %%mm3\\n\\t\" \"paddusw %%mm1, %%mm0\\n\\t\" \"paddusw %%mm3, %%mm2\\n\\t\" \"psrlw $1, %%mm0\\n\\t\" \"psrlw $1, %%mm2\\n\\t\" \"packuswb %%mm2, %%mm0\\n\\t\" \"movq %%mm0, %0\\n\\t\" :\"=m\"(*p) :\"m\"(*pix), \"m\"(*(pix+line_size)) :\"memory\"); pix += line_size; p += line_size; } while(--h); }"} {"target": 0, "idx": 1075, "func": "static int iscsi_open(BlockDriverState *bs, const char *filename, int flags) { IscsiLun *iscsilun = bs->opaque; struct iscsi_context *iscsi = NULL; struct iscsi_url *iscsi_url = NULL; struct IscsiTask task; char *initiator_name = NULL; int ret; if ((BDRV_SECTOR_SIZE % 512) != 0) { error_report(\"iSCSI: Invalid BDRV_SECTOR_SIZE. \" \"BDRV_SECTOR_SIZE(%lld) is not a multiple \" \"of 512\", BDRV_SECTOR_SIZE); return -EINVAL; } iscsi_url = iscsi_parse_full_url(iscsi, filename); if (iscsi_url == NULL) { error_report(\"Failed to parse URL : %s %s\", filename, iscsi_get_error(iscsi)); ret = -EINVAL; goto out; } memset(iscsilun, 0, sizeof(IscsiLun)); initiator_name = parse_initiator_name(iscsi_url->target); iscsi = iscsi_create_context(initiator_name); if (iscsi == NULL) { error_report(\"iSCSI: Failed to create iSCSI context.\"); ret = -ENOMEM; goto out; } if (iscsi_set_targetname(iscsi, iscsi_url->target)) { error_report(\"iSCSI: Failed to set target name.\"); ret = -EINVAL; goto out; } if (iscsi_url->user != NULL) { ret = iscsi_set_initiator_username_pwd(iscsi, iscsi_url->user, iscsi_url->passwd); if (ret != 0) { error_report(\"Failed to set initiator username and password\"); ret = -EINVAL; goto out; } } /* check if we got CHAP username/password via the options */ if (parse_chap(iscsi, iscsi_url->target) != 0) { error_report(\"iSCSI: Failed to set CHAP user/password\"); ret = -EINVAL; goto out; } if (iscsi_set_session_type(iscsi, ISCSI_SESSION_NORMAL) != 0) { error_report(\"iSCSI: Failed to set session type to normal.\"); ret = -EINVAL; goto out; } iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE_CRC32C); /* check if we got HEADER_DIGEST via the options */ parse_header_digest(iscsi, iscsi_url->target); task.iscsilun = iscsilun; task.status = 0; task.complete = 0; task.bs = bs; iscsilun->iscsi = iscsi; iscsilun->lun = iscsi_url->lun; if (iscsi_full_connect_async(iscsi, iscsi_url->portal, iscsi_url->lun, iscsi_connect_cb, &task) != 0) { error_report(\"iSCSI: Failed to start async connect.\"); ret = -EINVAL; goto out; } while (!task.complete) { iscsi_set_events(iscsilun); qemu_aio_wait(); } if (task.status != 0) { error_report(\"iSCSI: Failed to connect to LUN : %s\", iscsi_get_error(iscsi)); ret = -EINVAL; goto out; } /* Medium changer or tape. We dont have any emulation for this so this must * be sg ioctl compatible. We force it to be sg, otherwise qemu will try * to read from the device to guess the image format. 
*/ if (iscsilun->type == TYPE_MEDIUM_CHANGER || iscsilun->type == TYPE_TAPE) { bs->sg = 1; } ret = 0; out: if (initiator_name != NULL) { g_free(initiator_name); } if (iscsi_url != NULL) { iscsi_destroy_url(iscsi_url); } if (ret) { if (iscsi != NULL) { iscsi_destroy_context(iscsi); } memset(iscsilun, 0, sizeof(IscsiLun)); } return ret; }"} {"target": 0, "idx": 1076, "func": "static int encode_frame(AVCodecContext * avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet) { TiffEncoderContext *s = avctx->priv_data; AVFrame *const p = &s->picture; int i; uint8_t *ptr; uint8_t *offset; uint32_t strips; uint32_t *strip_sizes = NULL; uint32_t *strip_offsets = NULL; int bytes_per_row; uint32_t res[2] = { 72, 1 }; // image resolution (72/1) uint16_t bpp_tab[] = { 8, 8, 8, 8 }; int ret; int is_yuv = 0; uint8_t *yuv_line = NULL; int shift_h, shift_v; const AVPixFmtDescriptor* pfd; s->avctx = avctx; *p = *pict; p->pict_type = AV_PICTURE_TYPE_I; p->key_frame = 1; avctx->coded_frame= &s->picture; s->width = avctx->width; s->height = avctx->height; s->subsampling[0] = 1; s->subsampling[1] = 1; switch (avctx->pix_fmt) { case PIX_FMT_RGB48LE: case PIX_FMT_GRAY16LE: case PIX_FMT_RGB24: case PIX_FMT_GRAY8: case PIX_FMT_PAL8: pfd = &av_pix_fmt_descriptors[avctx->pix_fmt]; s->bpp = av_get_bits_per_pixel(pfd); if (pfd->flags & PIX_FMT_PAL) { s->photometric_interpretation = 3; } else if (pfd->flags & PIX_FMT_RGB) { s->photometric_interpretation = 2; } else { s->photometric_interpretation = 1; } s->bpp_tab_size = pfd->nb_components; for (i = 0; i < s->bpp_tab_size; i++) { bpp_tab[i] = s->bpp / s->bpp_tab_size; } break; case PIX_FMT_MONOBLACK: s->bpp = 1; s->photometric_interpretation = 1; s->bpp_tab_size = 0; break; case PIX_FMT_MONOWHITE: s->bpp = 1; s->photometric_interpretation = 0; s->bpp_tab_size = 0; break; case PIX_FMT_YUV420P: case PIX_FMT_YUV422P: case PIX_FMT_YUV444P: case PIX_FMT_YUV410P: case PIX_FMT_YUV411P: s->photometric_interpretation = 6; avcodec_get_chroma_sub_sample(avctx->pix_fmt, &shift_h, &shift_v); s->bpp = 8 + (16 >> (shift_h + shift_v)); s->subsampling[0] = 1 << shift_h; s->subsampling[1] = 1 << shift_v; s->bpp_tab_size = 3; is_yuv = 1; break; default: av_log(s->avctx, AV_LOG_ERROR, \"This colors format is not supported\\n\"); return -1; } if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE || s->compr == TIFF_LZW) //best choose for DEFLATE s->rps = s->height; else s->rps = FFMAX(8192 / (((s->width * s->bpp) >> 3) + 1), 1); // suggest size of strip s->rps = ((s->rps - 1) / s->subsampling[1] + 1) * s->subsampling[1]; // round rps up strips = (s->height - 1) / s->rps + 1; if (!pkt->data && (ret = av_new_packet(pkt, avctx->width * avctx->height * s->bpp * 2 + avctx->height * 4 + FF_MIN_BUFFER_SIZE)) < 0) { av_log(avctx, AV_LOG_ERROR, \"Error getting output packet.\\n\"); return ret; } ptr = pkt->data; s->buf_start = pkt->data; s->buf = &ptr; s->buf_size = pkt->size; if (check_size(s, 8)) goto fail; // write header bytestream_put_le16(&ptr, 0x4949); bytestream_put_le16(&ptr, 42); offset = ptr; bytestream_put_le32(&ptr, 0); strip_sizes = av_mallocz(sizeof(*strip_sizes) * strips); strip_offsets = av_mallocz(sizeof(*strip_offsets) * strips); bytes_per_row = (((s->width - 1)/s->subsampling[0] + 1) * s->bpp * s->subsampling[0] * s->subsampling[1] + 7) >> 3; if (is_yuv){ yuv_line = av_malloc(bytes_per_row); if (yuv_line == NULL){ av_log(s->avctx, AV_LOG_ERROR, \"Not enough memory\\n\"); goto fail; } } #if CONFIG_ZLIB if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) { 
uint8_t *zbuf; int zlen, zn; int j; zlen = bytes_per_row * s->rps; zbuf = av_malloc(zlen); strip_offsets[0] = ptr - pkt->data; zn = 0; for (j = 0; j < s->rps; j++) { if (is_yuv){ pack_yuv(s, yuv_line, j); memcpy(zbuf + zn, yuv_line, bytes_per_row); j += s->subsampling[1] - 1; } else memcpy(zbuf + j * bytes_per_row, p->data[0] + j * p->linesize[0], bytes_per_row); zn += bytes_per_row; } ret = encode_strip(s, zbuf, ptr, zn, s->compr); av_free(zbuf); if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, \"Encode strip failed\\n\"); goto fail; } ptr += ret; strip_sizes[0] = ptr - pkt->data - strip_offsets[0]; } else #endif { if(s->compr == TIFF_LZW) s->lzws = av_malloc(ff_lzw_encode_state_size); for (i = 0; i < s->height; i++) { if (strip_sizes[i / s->rps] == 0) { if(s->compr == TIFF_LZW){ ff_lzw_encode_init(s->lzws, ptr, s->buf_size - (*s->buf - s->buf_start), 12, FF_LZW_TIFF, put_bits); } strip_offsets[i / s->rps] = ptr - pkt->data; } if (is_yuv){ pack_yuv(s, yuv_line, i); ret = encode_strip(s, yuv_line, ptr, bytes_per_row, s->compr); i += s->subsampling[1] - 1; } else ret = encode_strip(s, p->data[0] + i * p->linesize[0], ptr, bytes_per_row, s->compr); if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, \"Encode strip failed\\n\"); goto fail; } strip_sizes[i / s->rps] += ret; ptr += ret; if(s->compr == TIFF_LZW && (i==s->height-1 || i%s->rps == s->rps-1)){ ret = ff_lzw_encode_flush(s->lzws, flush_put_bits); strip_sizes[(i / s->rps )] += ret ; ptr += ret; } } if(s->compr == TIFF_LZW) av_free(s->lzws); } s->num_entries = 0; add_entry1(s,TIFF_SUBFILE, TIFF_LONG, 0); add_entry1(s,TIFF_WIDTH, TIFF_LONG, s->width); add_entry1(s,TIFF_HEIGHT, TIFF_LONG, s->height); if (s->bpp_tab_size) add_entry(s, TIFF_BPP, TIFF_SHORT, s->bpp_tab_size, bpp_tab); add_entry1(s,TIFF_COMPR, TIFF_SHORT, s->compr); add_entry1(s,TIFF_INVERT, TIFF_SHORT, s->photometric_interpretation); add_entry(s, TIFF_STRIP_OFFS, TIFF_LONG, strips, strip_offsets); if (s->bpp_tab_size) add_entry1(s,TIFF_SAMPLES_PER_PIXEL, TIFF_SHORT, s->bpp_tab_size); add_entry1(s,TIFF_ROWSPERSTRIP, TIFF_LONG, s->rps); add_entry(s, TIFF_STRIP_SIZE, TIFF_LONG, strips, strip_sizes); add_entry(s, TIFF_XRES, TIFF_RATIONAL, 1, res); add_entry(s, TIFF_YRES, TIFF_RATIONAL, 1, res); add_entry1(s,TIFF_RES_UNIT, TIFF_SHORT, 2); if(!(avctx->flags & CODEC_FLAG_BITEXACT)) add_entry(s, TIFF_SOFTWARE_NAME, TIFF_STRING, strlen(LIBAVCODEC_IDENT) + 1, LIBAVCODEC_IDENT); if (avctx->pix_fmt == PIX_FMT_PAL8) { uint16_t pal[256 * 3]; for (i = 0; i < 256; i++) { uint32_t rgb = *(uint32_t *) (p->data[1] + i * 4); pal[i] = ((rgb >> 16) & 0xff) * 257; pal[i + 256] = ((rgb >> 8 ) & 0xff) * 257; pal[i + 512] = ( rgb & 0xff) * 257; } add_entry(s, TIFF_PAL, TIFF_SHORT, 256 * 3, pal); } if (is_yuv){ /** according to CCIR Recommendation 601.1 */ uint32_t refbw[12] = {15, 1, 235, 1, 128, 1, 240, 1, 128, 1, 240, 1}; add_entry(s, TIFF_YCBCR_SUBSAMPLING, TIFF_SHORT, 2, s->subsampling); add_entry(s, TIFF_REFERENCE_BW, TIFF_RATIONAL, 6, refbw); } bytestream_put_le32(&offset, ptr - pkt->data); // write offset to dir if (check_size(s, 6 + s->num_entries * 12)) { ret = AVERROR(EINVAL); goto fail; } bytestream_put_le16(&ptr, s->num_entries); // write tag count bytestream_put_buffer(&ptr, s->entries, s->num_entries * 12); bytestream_put_le32(&ptr, 0); pkt->size = ptr - pkt->data; pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = 1; fail: av_free(strip_sizes); av_free(strip_offsets); av_free(yuv_line); return ret; }"} {"target": 1, "idx": 1088, "func": "static int mxf_parse_structural_metadata(MXFContext *mxf) { 
MXFPackage *material_package = NULL; MXFPackage *temp_package = NULL; int i, j, k; av_dlog(mxf->fc, \"metadata sets count %d\\n\", mxf->metadata_sets_count); /* TODO: handle multiple material packages (OP3x) */ for (i = 0; i < mxf->packages_count; i++) { material_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[i], MaterialPackage); if (material_package) break; } if (!material_package) { av_log(mxf->fc, AV_LOG_ERROR, \"no material package found\\n\"); return AVERROR_INVALIDDATA; } for (i = 0; i < material_package->tracks_count; i++) { MXFPackage *source_package = NULL; MXFTrack *material_track = NULL; MXFTrack *source_track = NULL; MXFTrack *temp_track = NULL; MXFDescriptor *descriptor = NULL; MXFStructuralComponent *component = NULL; UID *essence_container_ul = NULL; const MXFCodecUL *codec_ul = NULL; const MXFCodecUL *container_ul = NULL; AVStream *st; if (!(material_track = mxf_resolve_strong_ref(mxf, &material_package->tracks_refs[i], Track))) { av_log(mxf->fc, AV_LOG_ERROR, \"could not resolve material track strong ref\\n\"); continue; } if (!(material_track->sequence = mxf_resolve_strong_ref(mxf, &material_track->sequence_ref, Sequence))) { av_log(mxf->fc, AV_LOG_ERROR, \"could not resolve material track sequence strong ref\\n\"); continue; } /* TODO: handle multiple source clips */ for (j = 0; j < material_track->sequence->structural_components_count; j++) { /* TODO: handle timecode component */ component = mxf_resolve_strong_ref(mxf, &material_track->sequence->structural_components_refs[j], SourceClip); if (!component) continue; for (k = 0; k < mxf->packages_count; k++) { temp_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[k], SourcePackage); if (!temp_package) continue; if (!memcmp(temp_package->package_uid, component->source_package_uid, 16)) { source_package = temp_package; break; } } if (!source_package) { av_log(mxf->fc, AV_LOG_ERROR, \"material track %d: no corresponding source package found\\n\", material_track->track_id); break; } for (k = 0; k < source_package->tracks_count; k++) { if (!(temp_track = mxf_resolve_strong_ref(mxf, &source_package->tracks_refs[k], Track))) { av_log(mxf->fc, AV_LOG_ERROR, \"could not resolve source track strong ref\\n\"); return AVERROR_INVALIDDATA; } if (temp_track->track_id == component->source_track_id) { source_track = temp_track; break; } } if (!source_track) { av_log(mxf->fc, AV_LOG_ERROR, \"material track %d: no corresponding source track found\\n\", material_track->track_id); break; } } if (!source_track) continue; st = avformat_new_stream(mxf->fc, NULL); if (!st) { av_log(mxf->fc, AV_LOG_ERROR, \"could not allocate stream\\n\"); return AVERROR(ENOMEM); } st->id = source_track->track_id; st->priv_data = source_track; st->duration = component->duration; if (st->duration == -1) st->duration = AV_NOPTS_VALUE; st->start_time = component->start_position; avpriv_set_pts_info(st, 64, material_track->edit_rate.num, material_track->edit_rate.den); if (!(source_track->sequence = mxf_resolve_strong_ref(mxf, &source_track->sequence_ref, Sequence))) { av_log(mxf->fc, AV_LOG_ERROR, \"could not resolve source track sequence strong ref\\n\"); return AVERROR_INVALIDDATA; } PRINT_KEY(mxf->fc, \"data definition ul\", source_track->sequence->data_definition_ul); codec_ul = mxf_get_codec_ul(ff_mxf_data_definition_uls, &source_track->sequence->data_definition_ul); st->codec->codec_type = codec_ul->id; source_package->descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor_ref, AnyType); if (source_package->descriptor) { if 
(source_package->descriptor->type == MultipleDescriptor) { for (j = 0; j < source_package->descriptor->sub_descriptors_count; j++) { MXFDescriptor *sub_descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor->sub_descriptors_refs[j], Descriptor); if (!sub_descriptor) { av_log(mxf->fc, AV_LOG_ERROR, \"could not resolve sub descriptor strong ref\\n\"); continue; } if (sub_descriptor->linked_track_id == source_track->track_id) { descriptor = sub_descriptor; break; } } } else if (source_package->descriptor->type == Descriptor) descriptor = source_package->descriptor; } if (!descriptor) { av_log(mxf->fc, AV_LOG_INFO, \"source track %d: stream %d, no descriptor found\\n\", source_track->track_id, st->index); continue; } PRINT_KEY(mxf->fc, \"essence codec ul\", descriptor->essence_codec_ul); PRINT_KEY(mxf->fc, \"essence container ul\", descriptor->essence_container_ul); essence_container_ul = &descriptor->essence_container_ul; /* HACK: replacing the original key with mxf_encrypted_essence_container * is not allowed according to s429-6, try to find correct information anyway */ if (IS_KLV_KEY(essence_container_ul, mxf_encrypted_essence_container)) { av_log(mxf->fc, AV_LOG_INFO, \"broken encrypted mxf file\\n\"); for (k = 0; k < mxf->metadata_sets_count; k++) { MXFMetadataSet *metadata = mxf->metadata_sets[k]; if (metadata->type == CryptoContext) { essence_container_ul = &((MXFCryptoContext *)metadata)->source_container_ul; break; } } } /* TODO: drop PictureEssenceCoding and SoundEssenceCompression, only check EssenceContainer */ codec_ul = mxf_get_codec_ul(ff_mxf_codec_uls, &descriptor->essence_codec_ul); st->codec->codec_id = codec_ul->id; if (descriptor->extradata) { st->codec->extradata = descriptor->extradata; st->codec->extradata_size = descriptor->extradata_size; } if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { container_ul = mxf_get_codec_ul(mxf_essence_container_uls, essence_container_ul); if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = container_ul->id; st->codec->width = descriptor->width; st->codec->height = descriptor->height; if (st->codec->codec_id == CODEC_ID_RAWVIDEO) st->codec->pix_fmt = descriptor->pix_fmt; st->need_parsing = AVSTREAM_PARSE_HEADERS; } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { container_ul = mxf_get_codec_ul(mxf_essence_container_uls, essence_container_ul); if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = container_ul->id; st->codec->channels = descriptor->channels; st->codec->bits_per_coded_sample = descriptor->bits_per_sample; st->codec->sample_rate = descriptor->sample_rate.num / descriptor->sample_rate.den; /* TODO: implement CODEC_ID_RAWAUDIO */ if (st->codec->codec_id == CODEC_ID_PCM_S16LE) { if (descriptor->bits_per_sample > 16 && descriptor->bits_per_sample <= 24) st->codec->codec_id = CODEC_ID_PCM_S24LE; else if (descriptor->bits_per_sample == 32) st->codec->codec_id = CODEC_ID_PCM_S32LE; } else if (st->codec->codec_id == CODEC_ID_PCM_S16BE) { if (descriptor->bits_per_sample > 16 && descriptor->bits_per_sample <= 24) st->codec->codec_id = CODEC_ID_PCM_S24BE; else if (descriptor->bits_per_sample == 32) st->codec->codec_id = CODEC_ID_PCM_S32BE; } else if (st->codec->codec_id == CODEC_ID_MP2) { st->need_parsing = AVSTREAM_PARSE_FULL; } } if (st->codec->codec_type != AVMEDIA_TYPE_DATA && (*essence_container_ul)[15] > 0x01) { av_log(mxf->fc, AV_LOG_WARNING, \"only frame wrapped mappings are correctly supported\\n\"); st->need_parsing = AVSTREAM_PARSE_FULL; } } return 0; }"} {"target": 1, "idx": 
1090, "func": "static inline int sym_quant(int c, int e, int levels) { int v = ((((levels * c) >> (24 - e)) + 1) >> 1) + (levels >> 1); av_assert2(v >= 0 && v < levels); return v; }"} {"target": 1, "idx": 1091, "func": "static inline int seek_to_sector(BlockDriverState *bs, int64_t sector_num) { BDRVParallelsState *s = bs->opaque; uint32_t index, offset, position; index = sector_num / s->tracks; offset = sector_num % s->tracks; // not allocated if ((index > s->catalog_size) || (s->catalog_bitmap[index] == 0)) return -1; position = (s->catalog_bitmap[index] + offset) * 512; // fprintf(stderr, \"sector: %llx index=%x offset=%x pointer=%x position=%x\\n\", // sector_num, index, offset, s->catalog_bitmap[index], position); if (lseek(s->fd, position, SEEK_SET) != position) return -1; return 0; }"} {"target": 0, "idx": 1101, "func": "static void imx_epit_reload_compare_timer(IMXEPITState *s) { if ((s->cr & (CR_EN | CR_OCIEN)) == (CR_EN | CR_OCIEN)) { /* if the compare feature is on and timers are running */ uint32_t tmp = imx_epit_update_count(s); uint64_t next; if (tmp > s->cmp) { /* It'll fire in this round of the timer */ next = tmp - s->cmp; } else { /* catch it next time around */ next = tmp - s->cmp + ((s->cr & CR_RLD) ? TIMER_MAX : s->lr); } ptimer_set_count(s->timer_cmp, next); } }"} {"target": 1, "idx": 1131, "func": "static int handle_intercept(CPUS390XState *env) { struct kvm_run *run = env->kvm_run; int icpt_code = run->s390_sieic.icptcode; int r = 0; dprintf(\"intercept: 0x%x (at 0x%lx)\\n\", icpt_code, (long)env->kvm_run->psw_addr); switch (icpt_code) { case ICPT_INSTRUCTION: r = handle_instruction(env, run); case ICPT_WAITPSW: case ICPT_CPU_STOP: if (s390_del_running_cpu(env) == 0) { case ICPT_SOFT_INTERCEPT: fprintf(stderr, \"KVM unimplemented icpt SOFT\\n\"); exit(1); case ICPT_IO: fprintf(stderr, \"KVM unimplemented icpt IO\\n\"); exit(1); default: fprintf(stderr, \"Unknown intercept code: %d\\n\", icpt_code); exit(1); return r;"} {"target": 1, "idx": 1132, "func": "void av_cold ff_ivi_free_buffers(IVIPlaneDesc *planes) { int p, b, t; for (p = 0; p < 3; p++) { for (b = 0; b < planes[p].num_bands; b++) { av_freep(&planes[p].bands[b].bufs[0]); av_freep(&planes[p].bands[b].bufs[1]); av_freep(&planes[p].bands[b].bufs[2]); for (t = 0; t < planes[p].bands[b].num_tiles; t++) av_freep(&planes[p].bands[b].tiles[t].mbs); av_freep(&planes[p].bands[b].tiles); } av_freep(&planes[p].bands); } }"} {"target": 0, "idx": 1138, "func": "static void RENAME(extract_odd2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count) { dst0 += count; dst1 += count; src0 += 4*count; src1 += 4*count; count= - count; #ifdef PAVGB if(count <= -8) { count += 7; __asm__ volatile( \"pcmpeqw %%mm7, %%mm7 \\n\\t\" \"psrlw $8, %%mm7 \\n\\t\" \"1: \\n\\t\" \"movq -28(%1, %0, 4), %%mm0 \\n\\t\" \"movq -20(%1, %0, 4), %%mm1 \\n\\t\" \"movq -12(%1, %0, 4), %%mm2 \\n\\t\" \"movq -4(%1, %0, 4), %%mm3 \\n\\t\" PAVGB\" -28(%2, %0, 4), %%mm0 \\n\\t\" PAVGB\" -20(%2, %0, 4), %%mm1 \\n\\t\" PAVGB\" -12(%2, %0, 4), %%mm2 \\n\\t\" PAVGB\" - 4(%2, %0, 4), %%mm3 \\n\\t\" \"psrlw $8, %%mm0 \\n\\t\" \"psrlw $8, %%mm1 \\n\\t\" \"psrlw $8, %%mm2 \\n\\t\" \"psrlw $8, %%mm3 \\n\\t\" \"packuswb %%mm1, %%mm0 \\n\\t\" \"packuswb %%mm3, %%mm2 \\n\\t\" \"movq %%mm0, %%mm1 \\n\\t\" \"movq %%mm2, %%mm3 \\n\\t\" \"psrlw $8, %%mm0 \\n\\t\" \"psrlw $8, %%mm2 \\n\\t\" \"pand %%mm7, %%mm1 \\n\\t\" \"pand %%mm7, %%mm3 \\n\\t\" \"packuswb %%mm2, %%mm0 \\n\\t\" \"packuswb %%mm3, %%mm1 \\n\\t\" MOVNTQ\" %%mm0,- 7(%4, 
%0) \\n\\t\" MOVNTQ\" %%mm1,- 7(%3, %0) \\n\\t\" \"add $8, %0 \\n\\t\" \" js 1b \\n\\t\" : \"+r\"(count) : \"r\"(src0), \"r\"(src1), \"r\"(dst0), \"r\"(dst1) ); count -= 7; } #endif src0++; src1++; while(count<0) { dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1; dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1; count++; } }"} {"target": 1, "idx": 1150, "func": "static gboolean qio_channel_websock_handshake_send(QIOChannel *ioc, GIOCondition condition, gpointer user_data) { QIOTask *task = user_data; QIOChannelWebsock *wioc = QIO_CHANNEL_WEBSOCK( qio_task_get_source(task)); Error *err = NULL; ssize_t ret; ret = qio_channel_write(wioc->master, (char *)wioc->encoutput.buffer, wioc->encoutput.offset, &err); if (ret < 0) { trace_qio_channel_websock_handshake_fail(ioc); qio_task_set_error(task, err); qio_task_complete(task); return FALSE; } buffer_advance(&wioc->encoutput, ret); if (wioc->encoutput.offset == 0) { trace_qio_channel_websock_handshake_complete(ioc); qio_task_complete(task); return FALSE; } trace_qio_channel_websock_handshake_pending(ioc, G_IO_OUT); return TRUE; }"} {"target": 1, "idx": 1151, "func": "static int pix_norm1_altivec(uint8_t *pix, int line_size) { int i, s = 0; const vector unsigned int zero = (const vector unsigned int) vec_splat_u32(0); vector unsigned int sv = (vector unsigned int) vec_splat_u32(0); vector signed int sum; for (i = 0; i < 16; i++) { /* Read the potentially unaligned pixels. */ //vector unsigned char pixl = vec_ld(0, pix); //vector unsigned char pixr = vec_ld(15, pix); //vector unsigned char pixv = vec_perm(pixl, pixr, perm); vector unsigned char pixv = vec_vsx_ld(0, pix); /* Square the values, and add them to our sum. */ sv = vec_msum(pixv, pixv, sv); pix += line_size; } /* Sum up the four partial sums, and put the result into s. */ sum = vec_sums((vector signed int) sv, (vector signed int) zero); sum = vec_splat(sum, 3); vec_vsx_st(sum, 0, &s); return s; }"} {"target": 0, "idx": 1161, "func": "static void test_visitor_in_native_list_uint8(TestInputVisitorData *data, const void *unused) { test_native_list_integer_helper(data, unused, USER_DEF_NATIVE_LIST_UNION_KIND_U8); }"} {"target": 0, "idx": 1180, "func": "static void scsi_disk_class_initfn(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); sc->init = scsi_disk_initfn; sc->destroy = scsi_destroy; sc->alloc_req = scsi_new_request; sc->unit_attention_reported = scsi_disk_unit_attention_reported; dc->fw_name = \"disk\"; dc->desc = \"virtual SCSI disk or CD-ROM (legacy)\"; dc->reset = scsi_disk_reset; dc->props = scsi_disk_properties; dc->vmsd = &vmstate_scsi_disk_state; }"} {"target": 0, "idx": 1193, "func": "static void test_tco2_status_bits(void) { TestData d; uint16_t ticks = 8; uint16_t val; int ret; d.args = \"-watchdog-action none\"; test_init(&d); stop_tco(&d); clear_tco_status(&d); reset_on_second_timeout(true); set_tco_timeout(&d, ticks); load_tco(&d); start_tco(&d); clock_step(ticks * TCO_TICK_NSEC * 2); val = qpci_io_readw(d.dev, d.tco_io_base + TCO2_STS); ret = val & (TCO_SECOND_TO_STS | TCO_BOOT_STS) ? 
1 : 0; g_assert(ret == 1); qpci_io_writew(d.dev, d.tco_io_base + TCO2_STS, val); g_assert_cmpint(qpci_io_readw(d.dev, d.tco_io_base + TCO2_STS), ==, 0); qtest_end(); }"} {"target": 0, "idx": 1199, "func": "sdhci_write(SDHCIState *s, unsigned int offset, uint32_t value, unsigned size) { unsigned shift = 8 * (offset & 0x3); uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift); value <<= shift; switch (offset & ~0x3) { case SDHC_SYSAD: s->sdmasysad = (s->sdmasysad & mask) | value; MASKED_WRITE(s->sdmasysad, mask, value); /* Writing to last byte of sdmasysad might trigger transfer */ if (!(mask & 0xFF000000) && TRANSFERRING_DATA(s->prnsts) && s->blkcnt && s->blksize && SDHC_DMA_TYPE(s->hostctl) == SDHC_CTRL_SDMA) { SDHCI_GET_CLASS(s)->do_sdma_multi(s); } break; case SDHC_BLKSIZE: if (!TRANSFERRING_DATA(s->prnsts)) { MASKED_WRITE(s->blksize, mask, value); MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16); } break; case SDHC_ARGUMENT: MASKED_WRITE(s->argument, mask, value); break; case SDHC_TRNMOD: /* DMA can be enabled only if it is supported as indicated by * capabilities register */ if (!(s->capareg & SDHC_CAN_DO_DMA)) { value &= ~SDHC_TRNS_DMA; } MASKED_WRITE(s->trnmod, mask, value); MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16); /* Writing to the upper byte of CMDREG triggers SD command generation */ if ((mask & 0xFF000000) || !SDHCI_GET_CLASS(s)->can_issue_command(s)) { break; } SDHCI_GET_CLASS(s)->send_command(s); break; case SDHC_BDATA: if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { SDHCI_GET_CLASS(s)->bdata_write(s, value >> shift, size); } break; case SDHC_HOSTCTL: if (!(mask & 0xFF0000)) { sdhci_blkgap_write(s, value >> 16); } MASKED_WRITE(s->hostctl, mask, value); MASKED_WRITE(s->pwrcon, mask >> 8, value >> 8); MASKED_WRITE(s->wakcon, mask >> 24, value >> 24); if (!(s->prnsts & SDHC_CARD_PRESENT) || ((s->pwrcon >> 1) & 0x7) < 5 || !(s->capareg & (1 << (31 - ((s->pwrcon >> 1) & 0x7))))) { s->pwrcon &= ~SDHC_POWER_ON; } break; case SDHC_CLKCON: if (!(mask & 0xFF000000)) { sdhci_reset_write(s, value >> 24); } MASKED_WRITE(s->clkcon, mask, value); MASKED_WRITE(s->timeoutcon, mask >> 16, value >> 16); if (s->clkcon & SDHC_CLOCK_INT_EN) { s->clkcon |= SDHC_CLOCK_INT_STABLE; } else { s->clkcon &= ~SDHC_CLOCK_INT_STABLE; } break; case SDHC_NORINTSTS: if (s->norintstsen & SDHC_NISEN_CARDINT) { value &= ~SDHC_NIS_CARDINT; } s->norintsts &= mask | ~value; s->errintsts &= (mask >> 16) | ~(value >> 16); if (s->errintsts) { s->norintsts |= SDHC_NIS_ERR; } else { s->norintsts &= ~SDHC_NIS_ERR; } sdhci_update_irq(s); break; case SDHC_NORINTSTSEN: MASKED_WRITE(s->norintstsen, mask, value); MASKED_WRITE(s->errintstsen, mask >> 16, value >> 16); s->norintsts &= s->norintstsen; s->errintsts &= s->errintstsen; if (s->errintsts) { s->norintsts |= SDHC_NIS_ERR; } else { s->norintsts &= ~SDHC_NIS_ERR; } sdhci_update_irq(s); break; case SDHC_NORINTSIGEN: MASKED_WRITE(s->norintsigen, mask, value); MASKED_WRITE(s->errintsigen, mask >> 16, value >> 16); sdhci_update_irq(s); break; case SDHC_ADMAERR: MASKED_WRITE(s->admaerr, mask, value); break; case SDHC_ADMASYSADDR: s->admasysaddr = (s->admasysaddr & (0xFFFFFFFF00000000ULL | (uint64_t)mask)) | (uint64_t)value; break; case SDHC_ADMASYSADDR + 4: s->admasysaddr = (s->admasysaddr & (0x00000000FFFFFFFFULL | ((uint64_t)mask << 32))) | ((uint64_t)value << 32); break; case SDHC_FEAER: s->acmd12errsts |= value; s->errintsts |= (value >> 16) & s->errintstsen; if (s->acmd12errsts) { s->errintsts |= SDHC_EIS_CMD12ERR; } if (s->errintsts) { s->norintsts 
|= SDHC_NIS_ERR; } sdhci_update_irq(s); break; default: ERRPRINT(\"bad %ub write offset: addr[0x%04x] <- %u(0x%x)\\n\", size, offset, value >> shift, value >> shift); break; } DPRINT_L2(\"write %ub: addr[0x%04x] <- %u(0x%x)\\n\", size, offset, value >> shift, value >> shift); }"} {"target": 0, "idx": 1211, "func": "build_ssdt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu, AcpiPmInfo *pm, AcpiMiscInfo *misc, PcPciInfo *pci, PcGuestInfo *guest_info) { int acpi_cpus = MIN(0xff, guest_info->apic_id_limit); int ssdt_start = table_data->len; uint8_t *ssdt_ptr; int i; /* Copy header and patch values in the S3_ / S4_ / S5_ packages */ ssdt_ptr = acpi_data_push(table_data, sizeof(ssdp_misc_aml)); memcpy(ssdt_ptr, ssdp_misc_aml, sizeof(ssdp_misc_aml)); if (pm->s3_disabled) { ssdt_ptr[acpi_s3_name[0]] = 'X'; } if (pm->s4_disabled) { ssdt_ptr[acpi_s4_name[0]] = 'X'; } else { ssdt_ptr[acpi_s4_pkg[0] + 1] = ssdt_ptr[acpi_s4_pkg[0] + 3] = pm->s4_val; } patch_pci_windows(pci, ssdt_ptr, sizeof(ssdp_misc_aml)); ACPI_BUILD_SET_LE(ssdt_ptr, sizeof(ssdp_misc_aml), ssdt_isa_pest[0], 16, misc->pvpanic_port); { GArray *sb_scope = build_alloc_array(); uint8_t op = 0x10; /* ScopeOp */ build_append_nameseg(sb_scope, \"_SB_\"); /* build Processor object for each processor */ for (i = 0; i < acpi_cpus; i++) { uint8_t *proc = acpi_data_push(sb_scope, ACPI_PROC_SIZEOF); memcpy(proc, ACPI_PROC_AML, ACPI_PROC_SIZEOF); proc[ACPI_PROC_OFFSET_CPUHEX] = acpi_get_hex(i >> 4); proc[ACPI_PROC_OFFSET_CPUHEX+1] = acpi_get_hex(i); proc[ACPI_PROC_OFFSET_CPUID1] = i; proc[ACPI_PROC_OFFSET_CPUID2] = i; } /* build this code: * Method(NTFY, 2) {If (LEqual(Arg0, 0x00)) {Notify(CP00, Arg1)} ...} */ /* Arg0 = Processor ID = APIC ID */ build_append_notify_method(sb_scope, \"NTFY\", \"CP%0.02X\", acpi_cpus); /* build \"Name(CPON, Package() { One, One, ..., Zero, Zero, ... })\" */ build_append_byte(sb_scope, 0x08); /* NameOp */ build_append_nameseg(sb_scope, \"CPON\"); { GArray *package = build_alloc_array(); uint8_t op = 0x12; /* PackageOp */ build_append_byte(package, acpi_cpus); /* NumElements */ for (i = 0; i < acpi_cpus; i++) { uint8_t b = test_bit(i, cpu->found_cpus) ? 0x01 : 0x00; build_append_byte(package, b); } build_package(package, op, 2); build_append_array(sb_scope, package); build_free_array(package); } { AcpiBuildPciBusHotplugState hotplug_state; Object *pci_host; PCIBus *bus = NULL; bool ambiguous; pci_host = object_resolve_path_type(\"\", TYPE_PCI_HOST_BRIDGE, &ambiguous); if (!ambiguous && pci_host) { bus = PCI_HOST_BRIDGE(pci_host)->bus; } build_pci_bus_state_init(&hotplug_state, NULL); if (bus) { /* Scan all PCI buses. Generate tables to support hotplug. 
*/ pci_for_each_bus_depth_first(bus, build_pci_bus_begin, build_pci_bus_end, &hotplug_state); } build_append_array(sb_scope, hotplug_state.device_table); build_pci_bus_state_cleanup(&hotplug_state); } build_package(sb_scope, op, 3); build_append_array(table_data, sb_scope); build_free_array(sb_scope); } build_header(linker, table_data, (void *)(table_data->data + ssdt_start), ACPI_SSDT_SIGNATURE, table_data->len - ssdt_start, 1); }"} {"target": 0, "idx": 1219, "func": "int bdrv_child_check_perm(BdrvChild *c, uint64_t perm, uint64_t shared, Error **errp) { return bdrv_check_update_perm(c->bs, perm, shared, c, errp); }"} {"target": 0, "idx": 1226, "func": "static int mxf_write_header(AVFormatContext *s) { MXFContext *mxf = s->priv_data; int i, ret; uint8_t present[FF_ARRAY_ELEMS(mxf_essence_container_uls)] = {0}; const MXFSamplesPerFrame *spf = NULL; AVDictionaryEntry *t; int64_t timestamp = 0; if (!s->nb_streams) return -1; if (s->oformat == &ff_mxf_opatom_muxer && s->nb_streams !=1){ av_log(s, AV_LOG_ERROR, \"there must be exactly one stream for mxf opatom\\n\"); return -1; } for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; MXFStreamContext *sc = av_mallocz(sizeof(*sc)); if (!sc) return AVERROR(ENOMEM); st->priv_data = sc; if (((i == 0) ^ (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)) && s->oformat != &ff_mxf_opatom_muxer) { av_log(s, AV_LOG_ERROR, \"there must be exactly one video stream and it must be the first one\\n\"); return -1; } if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(st->codec->pix_fmt); // TODO: should be avg_frame_rate AVRational rate, tbc = st->time_base; // Default component depth to 8 sc->component_depth = 8; sc->h_chroma_sub_sample = 2; sc->color_siting = 0xFF; if (pix_desc) { sc->component_depth = pix_desc->comp[0].depth_minus1 + 1; sc->h_chroma_sub_sample = 1 << pix_desc->log2_chroma_w; } switch (ff_choose_chroma_location(s, st)) { case AVCHROMA_LOC_TOPLEFT: sc->color_siting = 0; break; case AVCHROMA_LOC_LEFT: sc->color_siting = 6; break; case AVCHROMA_LOC_TOP: sc->color_siting = 1; break; case AVCHROMA_LOC_CENTER: sc->color_siting = 3; break; } mxf->timecode_base = (tbc.den + tbc.num/2) / tbc.num; spf = ff_mxf_get_samples_per_frame(s, tbc); if (!spf) { av_log(s, AV_LOG_ERROR, \"Unsupported video frame rate %d/%d\\n\", tbc.den, tbc.num); return AVERROR(EINVAL); } mxf->time_base = spf->time_base; rate = av_inv_q(mxf->time_base); avpriv_set_pts_info(st, 64, mxf->time_base.num, mxf->time_base.den); if((ret = mxf_init_timecode(s, st, rate)) < 0) return ret; sc->video_bit_rate = st->codec->bit_rate ? 
st->codec->bit_rate : st->codec->rc_max_rate; if (s->oformat == &ff_mxf_d10_muxer) { if (sc->video_bit_rate == 50000000) { if (mxf->time_base.den == 25) sc->index = 3; else sc->index = 5; } else if (sc->video_bit_rate == 40000000) { if (mxf->time_base.den == 25) sc->index = 7; else sc->index = 9; } else if (sc->video_bit_rate == 30000000) { if (mxf->time_base.den == 25) sc->index = 11; else sc->index = 13; } else { av_log(s, AV_LOG_ERROR, \"error MXF D-10 only support 30/40/50 mbit/s\\n\"); return -1; } mxf->edit_unit_byte_count = KAG_SIZE; // system element mxf->edit_unit_byte_count += 16 + 4 + (uint64_t)sc->video_bit_rate * mxf->time_base.num / (8*mxf->time_base.den); mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count); mxf->edit_unit_byte_count += 16 + 4 + 4 + spf->samples_per_frame[0]*8*4; mxf->edit_unit_byte_count += klv_fill_size(mxf->edit_unit_byte_count); sc->signal_standard = 1; } } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if (st->codec->sample_rate != 48000) { av_log(s, AV_LOG_ERROR, \"only 48khz is implemented\\n\"); return -1; } avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate); if (s->oformat == &ff_mxf_d10_muxer) { if (st->index != 1) { av_log(s, AV_LOG_ERROR, \"MXF D-10 only support one audio track\\n\"); return -1; } if (st->codec->codec_id != AV_CODEC_ID_PCM_S16LE && st->codec->codec_id != AV_CODEC_ID_PCM_S24LE) { av_log(s, AV_LOG_ERROR, \"MXF D-10 only support 16 or 24 bits le audio\\n\"); } sc->index = ((MXFStreamContext*)s->streams[0]->priv_data)->index + 1; } else if (s->oformat == &ff_mxf_opatom_muxer) { AVRational tbc = av_inv_q(mxf->audio_edit_rate); if (st->codec->codec_id != AV_CODEC_ID_PCM_S16LE && st->codec->codec_id != AV_CODEC_ID_PCM_S24LE) { av_log(s, AV_LOG_ERROR, \"Only pcm_s16le and pcm_s24le audio codecs are implemented\\n\"); return AVERROR_PATCHWELCOME; } if (st->codec->channels != 1) { av_log(s, AV_LOG_ERROR, \"MXF OPAtom only supports single channel audio\\n\"); return AVERROR(EINVAL); } spf = ff_mxf_get_samples_per_frame(s, tbc); if (!spf){ av_log(s, AV_LOG_ERROR, \"Unsupported timecode frame rate %d/%d\\n\", tbc.den, tbc.num); return AVERROR(EINVAL); } mxf->time_base = st->time_base; if((ret = mxf_init_timecode(s, st, av_inv_q(spf->time_base))) < 0) return ret; mxf->timecode_base = (tbc.den + tbc.num/2) / tbc.num; mxf->edit_unit_byte_count = (av_get_bits_per_sample(st->codec->codec_id) * st->codec->channels) >> 3; sc->index = 2; } else { mxf->slice_count = 1; } } if (!sc->index) { sc->index = mxf_get_essence_container_ul_index(st->codec->codec_id); if (sc->index == -1) { av_log(s, AV_LOG_ERROR, \"track %d: could not find essence container ul, \" \"codec not currently supported in container\\n\", i); return -1; } } sc->codec_ul = &mxf_essence_container_uls[sc->index].codec_ul; memcpy(sc->track_essence_element_key, mxf_essence_container_uls[sc->index].element_ul, 15); sc->track_essence_element_key[15] = present[sc->index]; PRINT_KEY(s, \"track essence element key\", sc->track_essence_element_key); if (!present[sc->index]) mxf->essence_container_count++; present[sc->index]++; } if (s->oformat == &ff_mxf_d10_muxer || s->oformat == &ff_mxf_opatom_muxer) { mxf->essence_container_count = 1; } if (!(s->flags & AVFMT_FLAG_BITEXACT)) mxf_gen_umid(s); for (i = 0; i < s->nb_streams; i++) { MXFStreamContext *sc = s->streams[i]->priv_data; // update element count sc->track_essence_element_key[13] = present[sc->index]; if (!memcmp(sc->track_essence_element_key, mxf_essence_container_uls[15].element_ul, 13)) // DV sc->order = 
(0x15 << 24) | AV_RB32(sc->track_essence_element_key+13); else sc->order = AV_RB32(sc->track_essence_element_key+12); } if (t = av_dict_get(s->metadata, \"creation_time\", NULL, 0)) timestamp = ff_iso8601_to_unix_time(t->value); if (timestamp) mxf->timestamp = mxf_parse_timestamp(timestamp); mxf->duration = -1; mxf->timecode_track = av_mallocz(sizeof(*mxf->timecode_track)); if (!mxf->timecode_track) return AVERROR(ENOMEM); mxf->timecode_track->priv_data = av_mallocz(sizeof(MXFStreamContext)); if (!mxf->timecode_track->priv_data) return AVERROR(ENOMEM); mxf->timecode_track->index = -1; if (!spf) spf = ff_mxf_get_samples_per_frame(s, (AVRational){ 1, 25 }); if (ff_audio_interleave_init(s, spf->samples_per_frame, mxf->time_base) < 0) return -1; return 0; }"} {"target": 0, "idx": 1249, "func": "static void ide_flush_cb(void *opaque, int ret) { IDEState *s = opaque; s->pio_aiocb = NULL; if (ret == -ECANCELED) { return; } if (ret < 0) { /* XXX: What sector number to set here? */ if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) { return; } } if (s->bs) { block_acct_done(bdrv_get_stats(s->bs), &s->acct); } s->status = READY_STAT | SEEK_STAT; ide_cmd_done(s); ide_set_irq(s->bus); }"} {"target": 0, "idx": 1265, "func": "static int ehci_init_transfer(EHCIPacket *p) { uint32_t cpage, offset, bytes, plen; dma_addr_t page; USBBus *bus = &p->queue->ehci->bus; BusState *qbus = BUS(bus); cpage = get_field(p->qtd.token, QTD_TOKEN_CPAGE); bytes = get_field(p->qtd.token, QTD_TOKEN_TBYTES); offset = p->qtd.bufptr[0] & ~QTD_BUFPTR_MASK; qemu_sglist_init(&p->sgl, qbus->parent, 5, p->queue->ehci->as); while (bytes > 0) { if (cpage > 4) { fprintf(stderr, \"cpage out of range (%d)\\n\", cpage); return -1; } page = p->qtd.bufptr[cpage] & QTD_BUFPTR_MASK; page += offset; plen = bytes; if (plen > 4096 - offset) { plen = 4096 - offset; offset = 0; cpage++; } qemu_sglist_add(&p->sgl, page, plen); bytes -= plen; } return 0; }"} {"target": 0, "idx": 1274, "func": "int av_opencl_register_kernel_code(const char *kernel_code) { int i, ret = 0; LOCK_OPENCL; if (gpu_env.kernel_code_count >= MAX_KERNEL_CODE_NUM) { av_log(&openclutils, AV_LOG_ERROR, \"Could not register kernel code, maximum number of registered kernel code %d already reached\\n\", MAX_KERNEL_CODE_NUM); ret = AVERROR(EINVAL); goto end; } for (i = 0; i < gpu_env.kernel_code_count; i++) { if (gpu_env.kernel_code[i].kernel_string == kernel_code) { av_log(&openclutils, AV_LOG_WARNING, \"Same kernel code has been registered\\n\"); goto end; } } gpu_env.kernel_code[gpu_env.kernel_code_count].kernel_string = kernel_code; gpu_env.kernel_code[gpu_env.kernel_code_count].is_compiled = 0; gpu_env.kernel_code_count++; end: UNLOCK_OPENCL; return ret; }"} {"target": 1, "idx": 1275, "func": "static void compute_stats(HTTPContext *c) { HTTPContext *c1; FFStream *stream; char *p; time_t ti; int i, len; ByteIOContext pb1, *pb = &pb1; if (url_open_dyn_buf(pb) < 0) { /* XXX: return an error ? */ c->buffer_ptr = c->buffer; c->buffer_end = c->buffer; return; } url_fprintf(pb, \"HTTP/1.0 200 OK\\r\\n\"); url_fprintf(pb, \"Content-type: %s\\r\\n\", \"text/html\"); url_fprintf(pb, \"Pragma: no-cache\\r\\n\"); url_fprintf(pb, \"\\r\\n\"); url_fprintf(pb, \"FFServer Status\\n\"); if (c->stream->feed_filename) url_fprintf(pb, \"\\n\", c->stream->feed_filename); url_fprintf(pb, \"\\n\"); url_fprintf(pb, \"

FFServer Status

\\n\"); /* format status */ url_fprintf(pb, \"

Available Streams

\\n\"); url_fprintf(pb, \"\\n\"); url_fprintf(pb, \"
PathServed
Conns

bytes
FormatBit rate
kbits/s
Video
kbits/s

Codec
Audio
kbits/s

Codec
Feed\\n\"); stream = first_stream; while (stream != NULL) { char sfilename[1024]; char *eosf; if (stream->feed != stream) { av_strlcpy(sfilename, stream->filename, sizeof(sfilename) - 10); eosf = sfilename + strlen(sfilename); if (eosf - sfilename >= 4) { if (strcmp(eosf - 4, \".asf\") == 0) strcpy(eosf - 4, \".asx\"); else if (strcmp(eosf - 3, \".rm\") == 0) strcpy(eosf - 3, \".ram\"); else if (!strcmp(stream->fmt->name, \"rtp\")) { /* generate a sample RTSP director if unicast. Generate an SDP redirector if multicast */ eosf = strrchr(sfilename, '.'); if (!eosf) eosf = sfilename + strlen(sfilename); if (stream->is_multicast) strcpy(eosf, \".sdp\"); else strcpy(eosf, \".rtsp\"); } } url_fprintf(pb, \"
%s \", sfilename, stream->filename); url_fprintf(pb, \" %d \", stream->conns_served); fmt_bytecount(pb, stream->bytes_served); switch(stream->stream_type) { case STREAM_TYPE_LIVE: { int audio_bit_rate = 0; int video_bit_rate = 0; const char *audio_codec_name = \"\"; const char *video_codec_name = \"\"; const char *audio_codec_name_extra = \"\"; const char *video_codec_name_extra = \"\"; for(i=0;inb_streams;i++) { AVStream *st = stream->streams[i]; AVCodec *codec = avcodec_find_encoder(st->codec->codec_id); switch(st->codec->codec_type) { case CODEC_TYPE_AUDIO: audio_bit_rate += st->codec->bit_rate; if (codec) { if (*audio_codec_name) audio_codec_name_extra = \"...\"; audio_codec_name = codec->name; } break; case CODEC_TYPE_VIDEO: video_bit_rate += st->codec->bit_rate; if (codec) { if (*video_codec_name) video_codec_name_extra = \"...\"; video_codec_name = codec->name; } break; case CODEC_TYPE_DATA: video_bit_rate += st->codec->bit_rate; break; default: abort(); } } url_fprintf(pb, \" %s %d %d %s %s %d %s %s\", stream->fmt->name, stream->bandwidth, video_bit_rate / 1000, video_codec_name, video_codec_name_extra, audio_bit_rate / 1000, audio_codec_name, audio_codec_name_extra); if (stream->feed) url_fprintf(pb, \"%s\", stream->feed->filename); else url_fprintf(pb, \"%s\", stream->feed_filename); url_fprintf(pb, \"\\n\"); } break; default: url_fprintf(pb, \" - - - - \\n\"); break; } } stream = stream->next; } url_fprintf(pb, \"
\\n\"); stream = first_stream; while (stream != NULL) { if (stream->feed == stream) { url_fprintf(pb, \"

Feed %s

\", stream->filename); if (stream->pid) { url_fprintf(pb, \"Running as pid %d.\\n\", stream->pid); #if defined(linux) && !defined(CONFIG_NOCUTILS) { FILE *pid_stat; char ps_cmd[64]; /* This is somewhat linux specific I guess */ snprintf(ps_cmd, sizeof(ps_cmd), \"ps -o \\\"%%cpu,cputime\\\" --no-headers %d\", stream->pid); pid_stat = popen(ps_cmd, \"r\"); if (pid_stat) { char cpuperc[10]; char cpuused[64]; if (fscanf(pid_stat, \"%10s %64s\", cpuperc, cpuused) == 2) { url_fprintf(pb, \"Currently using %s%% of the cpu. Total time used %s.\\n\", cpuperc, cpuused); } fclose(pid_stat); } } #endif url_fprintf(pb, \"

\"); } url_fprintf(pb, \"
Streamtypekbits/scodecParameters\\n\"); for (i = 0; i < stream->nb_streams; i++) { AVStream *st = stream->streams[i]; AVCodec *codec = avcodec_find_encoder(st->codec->codec_id); const char *type = \"unknown\"; char parameters[64]; parameters[0] = 0; switch(st->codec->codec_type) { case CODEC_TYPE_AUDIO: type = \"audio\"; snprintf(parameters, sizeof(parameters), \"%d channel(s), %d Hz\", st->codec->channels, st->codec->sample_rate); break; case CODEC_TYPE_VIDEO: type = \"video\"; snprintf(parameters, sizeof(parameters), \"%dx%d, q=%d-%d, fps=%d\", st->codec->width, st->codec->height, st->codec->qmin, st->codec->qmax, st->codec->time_base.den / st->codec->time_base.num); break; default: abort(); } url_fprintf(pb, \"
%d%s%d%s%s\\n\", i, type, st->codec->bit_rate/1000, codec ? codec->name : \"\", parameters); } url_fprintf(pb, \"
\\n\"); } stream = stream->next; } #if 0 { float avg; AVCodecContext *enc; char buf[1024]; /* feed status */ stream = first_feed; while (stream != NULL) { url_fprintf(pb, \"

Feed '%s'

\\n\", stream->filename); url_fprintf(pb, \"\\n\"); url_fprintf(pb, \"
ParametersFrame countSizeAvg bitrate (kbits/s)\\n\"); for(i=0;inb_streams;i++) { AVStream *st = stream->streams[i]; FeedData *fdata = st->priv_data; enc = st->codec; avcodec_string(buf, sizeof(buf), enc); avg = fdata->avg_frame_size * (float)enc->rate * 8.0; if (enc->codec->type == CODEC_TYPE_AUDIO && enc->frame_size > 0) avg /= enc->frame_size; url_fprintf(pb, \"
%s %d %\"PRId64\" %0.1f\\n\", buf, enc->frame_number, fdata->data_count, avg / 1000.0); } url_fprintf(pb, \"
\\n\"); stream = stream->next_feed; } } #endif /* connection status */ url_fprintf(pb, \"

Connection Status

\\n\"); url_fprintf(pb, \"Number of connections: %d / %d
\\n\", nb_connections, nb_max_connections); url_fprintf(pb, \"Bandwidth in use: %dk / %dk
\\n\", current_bandwidth, max_bandwidth); url_fprintf(pb, \"\\n\"); url_fprintf(pb, \"
#FileIPProtoStateTarget bits/secActual bits/secBytes transferred\\n\"); c1 = first_http_ctx; i = 0; while (c1 != NULL) { int bitrate; int j; bitrate = 0; if (c1->stream) { for (j = 0; j < c1->stream->nb_streams; j++) { if (!c1->stream->feed) bitrate += c1->stream->streams[j]->codec->bit_rate; else if (c1->feed_streams[j] >= 0) bitrate += c1->stream->feed->streams[c1->feed_streams[j]]->codec->bit_rate; } } i++; p = inet_ntoa(c1->from_addr.sin_addr); url_fprintf(pb, \"
%d%s%s%s%s%s\", i, c1->stream ? c1->stream->filename : \"\", c1->state == HTTPSTATE_RECEIVE_DATA ? \"(input)\" : \"\", p, c1->protocol, http_state[c1->state]); fmt_bytecount(pb, bitrate); url_fprintf(pb, \"\"); fmt_bytecount(pb, compute_datarate(&c1->datarate, c1->data_count) * 8); url_fprintf(pb, \"\"); fmt_bytecount(pb, c1->data_count); url_fprintf(pb, \"\\n\"); c1 = c1->next; } url_fprintf(pb, \"
\\n\"); /* date */ ti = time(NULL); p = ctime(&ti); url_fprintf(pb, \"
Generated at %s\", p); url_fprintf(pb, \"\\n\\n\"); len = url_close_dyn_buf(pb, &c->pb_buffer); c->buffer_ptr = c->pb_buffer; c->buffer_end = c->pb_buffer + len; }"} {"target": 1, "idx": 1278, "func": "void gen_intermediate_code(CPUState *cs, TranslationBlock *tb) { CPUM68KState *env = cs->env_ptr; DisasContext dc1, *dc = &dc1; target_ulong pc_start; int pc_offset; int num_insns; int max_insns; /* generate intermediate code */ pc_start = tb->pc; dc->tb = tb; dc->env = env; dc->is_jmp = DISAS_NEXT; dc->pc = pc_start; dc->cc_op = CC_OP_DYNAMIC; dc->cc_op_synced = 1; dc->singlestep_enabled = cs->singlestep_enabled; dc->user = (env->sr & SR_S) == 0; dc->done_mac = 0; dc->writeback_mask = 0; num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) { max_insns = CF_COUNT_MASK; } if (max_insns > TCG_MAX_INSNS) { max_insns = TCG_MAX_INSNS; } gen_tb_start(tb); do { pc_offset = dc->pc - pc_start; gen_throws_exception = NULL; tcg_gen_insn_start(dc->pc, dc->cc_op); num_insns++; if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) { gen_exception(dc, dc->pc, EXCP_DEBUG); dc->is_jmp = DISAS_JUMP; /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order to for it to be properly cleared -- thus we increment the PC here so that the logic setting tb->size below does the right thing. */ dc->pc += 2; break; } if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { gen_io_start(); } dc->insn_pc = dc->pc; disas_m68k_insn(env, dc); } while (!dc->is_jmp && !tcg_op_buf_full() && !cs->singlestep_enabled && !singlestep && (pc_offset) < (TARGET_PAGE_SIZE - 32) && num_insns < max_insns); if (tb->cflags & CF_LAST_IO) gen_io_end(); if (unlikely(cs->singlestep_enabled)) { /* Make sure the pc is updated, and raise a debug exception. 
*/ if (!dc->is_jmp) { update_cc_op(dc); tcg_gen_movi_i32(QREG_PC, dc->pc); } gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG)); } else { switch(dc->is_jmp) { case DISAS_NEXT: update_cc_op(dc); gen_jmp_tb(dc, 0, dc->pc); break; default: case DISAS_JUMP: case DISAS_UPDATE: update_cc_op(dc); /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; case DISAS_TB_JUMP: /* nothing more to generate */ break; } } gen_tb_end(tb, num_insns); #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) && qemu_log_in_addr_range(pc_start)) { qemu_log_lock(); qemu_log(\"----------------\\n\"); qemu_log(\"IN: %s\\n\", lookup_symbol(pc_start)); log_target_disas(cs, pc_start, dc->pc - pc_start, 0); qemu_log(\"\\n\"); qemu_log_unlock(); } #endif tb->size = dc->pc - pc_start; tb->icount = num_insns; }"} {"target": 0, "idx": 1292, "func": "BdrvChild *bdrv_open_child(const char *filename, QDict *options, const char *bdref_key, BlockDriverState* parent, const BdrvChildRole *child_role, bool allow_none, Error **errp) { BdrvChild *c = NULL; BlockDriverState *bs; QDict *image_options; int ret; char *bdref_key_dot; const char *reference; assert(child_role != NULL); bdref_key_dot = g_strdup_printf(\"%s.\", bdref_key); qdict_extract_subqdict(options, &image_options, bdref_key_dot); g_free(bdref_key_dot); reference = qdict_get_try_str(options, bdref_key); if (!filename && !reference && !qdict_size(image_options)) { if (!allow_none) { error_setg(errp, \"A block device must be specified for \\\"%s\\\"\", bdref_key); } QDECREF(image_options); goto done; } bs = NULL; ret = bdrv_open_inherit(&bs, filename, reference, image_options, 0, parent, child_role, errp); if (ret < 0) { goto done; } c = bdrv_attach_child(parent, bs, child_role); done: qdict_del(options, bdref_key); return c; }"} {"target": 0, "idx": 1324, "func": "av_cold void ff_videodsp_init_x86(VideoDSPContext *ctx, int bpc) { #if HAVE_YASM int cpu_flags = av_get_cpu_flags(); #if ARCH_X86_32 if (EXTERNAL_MMX(cpu_flags) && bpc <= 8) { ctx->emulated_edge_mc = emulated_edge_mc_mmx; } if (EXTERNAL_AMD3DNOW(cpu_flags)) { ctx->prefetch = ff_prefetch_3dnow; } #endif /* ARCH_X86_32 */ if (EXTERNAL_MMXEXT(cpu_flags)) { ctx->prefetch = ff_prefetch_mmxext; #if ARCH_X86_32 if (bpc <= 8) ctx->emulated_edge_mc = emulated_edge_mc_mmxext; #endif /* ARCH_X86_32 */ } #if ARCH_X86_32 if (EXTERNAL_SSE(cpu_flags) && bpc <= 8) { ctx->emulated_edge_mc = emulated_edge_mc_sse; } #endif /* ARCH_X86_32 */ if (EXTERNAL_SSE2(cpu_flags) && bpc <= 8) { ctx->emulated_edge_mc = emulated_edge_mc_sse2; } #endif /* HAVE_YASM */ }"} {"target": 1, "idx": 1337, "func": "static void vapic_write(void *opaque, hwaddr addr, uint64_t data, unsigned int size) { CPUState *cs = current_cpu; X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; hwaddr rom_paddr; VAPICROMState *s = opaque; cpu_synchronize_state(cs); /* * The VAPIC supports two PIO-based hypercalls, both via port 0x7E. * o 16-bit write access: * Reports the option ROM initialization to the hypervisor. Written * value is the offset of the state structure in the ROM. * o 8-bit write access: * Reactivates the VAPIC after a guest hibernation, i.e. after the * option ROM content has been re-initialized by a guest power cycle. * o 32-bit write access: * Poll for pending IRQs, considering the current VAPIC state. 
*/ switch (size) { case 2: if (s->state == VAPIC_INACTIVE) { rom_paddr = (env->segs[R_CS].base + env->eip) & ROM_BLOCK_MASK; s->rom_state_paddr = rom_paddr + data; s->state = VAPIC_STANDBY; } if (vapic_prepare(s) < 0) { s->state = VAPIC_INACTIVE; s->rom_state_paddr = 0; break; } break; case 1: if (kvm_enabled()) { /* * Disable triggering instruction in ROM by writing a NOP. * * We cannot do this in TCG mode as the reported IP is not * accurate. */ pause_all_vcpus(); patch_byte(cpu, env->eip - 2, 0x66); patch_byte(cpu, env->eip - 1, 0x90); resume_all_vcpus(); } if (s->state == VAPIC_ACTIVE) { break; } if (update_rom_mapping(s, env, env->eip) < 0) { break; } if (find_real_tpr_addr(s, env) < 0) { break; } vapic_enable(s, cpu); break; default: case 4: if (!kvm_irqchip_in_kernel()) { apic_poll_irq(cpu->apic_state); } break; } }"} {"target": 0, "idx": 1347, "func": "static bool riccb_needed(void *opaque) { #ifdef CONFIG_KVM if (kvm_enabled()) { return kvm_s390_get_ri(); } #endif return 0; }"} {"target": 0, "idx": 1356, "func": "static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H264SliceContext *sl, const uint8_t *scan, const uint8_t *scan8x8, int pixel_shift, int mb_type, int cbp, int p) { static const uint8_t ctx_cat[4][3] = {{0,6,10},{1,7,11},{2,8,12},{5,9,13}}; const uint32_t *qmul; int i8x8, i4x4; int qscale = p == 0 ? sl->qscale : sl->chroma_qp[p - 1]; if( IS_INTRA16x16( mb_type ) ) { AV_ZERO128(sl->mb_luma_dc[p]+0); AV_ZERO128(sl->mb_luma_dc[p]+8); AV_ZERO128(sl->mb_luma_dc[p]+16); AV_ZERO128(sl->mb_luma_dc[p]+24); decode_cabac_residual_dc(h, sl, sl->mb_luma_dc[p], ctx_cat[0][p], LUMA_DC_BLOCK_INDEX+p, scan, 16); if( cbp&15 ) { qmul = h->dequant4_coeff[p][qscale]; for( i4x4 = 0; i4x4 < 16; i4x4++ ) { const int index = 16*p + i4x4; decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[1][p], index, scan + 1, qmul, 15); } } else { fill_rectangle(&sl->non_zero_count_cache[scan8[16*p]], 4, 4, 8, 0, 1); } } else { int cqm = (IS_INTRA( mb_type ) ? 
0:3) + p; for( i8x8 = 0; i8x8 < 4; i8x8++ ) { if( cbp & (1<mb + (16*index << pixel_shift), ctx_cat[3][p], index, scan8x8, h->dequant8_coeff[cqm][qscale], 64); } else { qmul = h->dequant4_coeff[cqm][qscale]; for( i4x4 = 0; i4x4 < 4; i4x4++ ) { const int index = 16*p + 4*i8x8 + i4x4; //START_TIMER decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[2][p], index, scan, qmul, 16); //STOP_TIMER(\"decode_residual\") } } } else { fill_rectangle(&sl->non_zero_count_cache[scan8[4*i8x8+16*p]], 2, 2, 8, 0, 1); } } } }"} {"target": 0, "idx": 1361, "func": "static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); CcwDevice *ccw_dev = CCW_DEVICE(d); CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev); SubchDev *s = ccw_dev->sch; VirtIODevice *vdev = virtio_ccw_get_vdev(s); int len; s->driver_data = dev; subch_device_load(s, f); /* Re-fill subch_id after loading the subchannel states.*/ if (ck->refill_ids) { ck->refill_ids(ccw_dev); } len = qemu_get_be32(f); if (len != 0) { dev->indicators = get_indicator(qemu_get_be64(f), len); } else { qemu_get_be64(f); dev->indicators = NULL; } len = qemu_get_be32(f); if (len != 0) { dev->indicators2 = get_indicator(qemu_get_be64(f), len); } else { qemu_get_be64(f); dev->indicators2 = NULL; } len = qemu_get_be32(f); if (len != 0) { dev->summary_indicator = get_indicator(qemu_get_be64(f), len); } else { qemu_get_be64(f); dev->summary_indicator = NULL; } qemu_get_be16s(f, &vdev->config_vector); dev->routes.adapter.ind_offset = qemu_get_be64(f); dev->thinint_isc = qemu_get_byte(f); dev->revision = qemu_get_be32(f); if (s->thinint_active) { return css_register_io_adapter(CSS_IO_ADAPTER_VIRTIO, dev->thinint_isc, true, false, &dev->routes.adapter.adapter_id); } return 0; }"} {"target": 0, "idx": 1366, "func": "PCIBus *pci_pmac_u3_init(qemu_irq *pic, MemoryRegion *address_space_mem, MemoryRegion *address_space_io) { DeviceState *dev; SysBusDevice *s; PCIHostState *h; UNINState *d; /* Uninorth AGP bus */ dev = qdev_create(NULL, TYPE_U3_AGP_HOST_BRIDGE); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); h = PCI_HOST_BRIDGE(dev); d = U3_AGP_HOST_BRIDGE(dev); memory_region_init(&d->pci_mmio, OBJECT(d), \"pci-mmio\", 0x100000000ULL); memory_region_init_alias(&d->pci_hole, OBJECT(d), \"pci-hole\", &d->pci_mmio, 0x80000000ULL, 0x70000000ULL); memory_region_add_subregion(address_space_mem, 0x80000000ULL, &d->pci_hole); h->bus = pci_register_bus(dev, \"pci\", pci_unin_set_irq, pci_unin_map_irq, pic, &d->pci_mmio, address_space_io, PCI_DEVFN(11, 0), 4, TYPE_PCI_BUS); sysbus_mmio_map(s, 0, 0xf0800000); sysbus_mmio_map(s, 1, 0xf0c00000); pci_create_simple(h->bus, 11 << 3, \"u3-agp\"); return h->bus; }"} {"target": 1, "idx": 1372, "func": "static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref) { AVFilterContext *ctx = inlink->dst; int i; for (i = 0; i < ctx->nb_outputs; i++) ff_filter_samples(inlink->dst->outputs[i], avfilter_ref_buffer(samplesref, ~AV_PERM_WRITE)); }"} {"target": 1, "idx": 1385, "func": "void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex) { Coroutine *self = qemu_coroutine_self(); trace_qemu_co_mutex_lock_entry(mutex, self); while (mutex->locked) { qemu_co_queue_wait(&mutex->queue); } mutex->locked = true; trace_qemu_co_mutex_lock_return(mutex, self); }"} {"target": 1, "idx": 1387, "func": "void hmp_info_block(Monitor *mon, const QDict *qdict) { BlockInfoList *block_list, *info; ImageInfo *image_info; const char *device = qdict_get_try_str(qdict, \"device\"); bool 
verbose = qdict_get_try_bool(qdict, \"verbose\", 0); block_list = qmp_query_block(NULL); for (info = block_list; info; info = info->next) { if (device && strcmp(device, info->value->device)) { continue; } if (info != block_list) { monitor_printf(mon, \"\\n\"); } monitor_printf(mon, \"%s\", info->value->device); if (info->value->has_inserted) { monitor_printf(mon, \": %s (%s%s%s)\\n\", info->value->inserted->file, info->value->inserted->drv, info->value->inserted->ro ? \", read-only\" : \"\", info->value->inserted->encrypted ? \", encrypted\" : \"\"); } else { monitor_printf(mon, \": [not inserted]\\n\"); } if (info->value->has_io_status && info->value->io_status != BLOCK_DEVICE_IO_STATUS_OK) { monitor_printf(mon, \" I/O status: %s\\n\", BlockDeviceIoStatus_lookup[info->value->io_status]); } if (info->value->removable) { monitor_printf(mon, \" Removable device: %slocked, tray %s\\n\", info->value->locked ? \"\" : \"not \", info->value->tray_open ? \"open\" : \"closed\"); } if (!info->value->has_inserted) { continue; } if (info->value->inserted->has_backing_file) { monitor_printf(mon, \" Backing file: %s \" \"(chain depth: %\" PRId64 \")\\n\", info->value->inserted->backing_file, info->value->inserted->backing_file_depth); } if (info->value->inserted->bps || info->value->inserted->bps_rd || info->value->inserted->bps_wr || info->value->inserted->iops || info->value->inserted->iops_rd || info->value->inserted->iops_wr) { monitor_printf(mon, \" I/O throttling: bps=%\" PRId64 \" bps_rd=%\" PRId64 \" bps_wr=%\" PRId64 \" iops=%\" PRId64 \" iops_rd=%\" PRId64 \" iops_wr=%\" PRId64 \"\\n\", info->value->inserted->bps, info->value->inserted->bps_rd, info->value->inserted->bps_wr, info->value->inserted->iops, info->value->inserted->iops_rd, info->value->inserted->iops_wr); } if (verbose) { monitor_printf(mon, \"\\nImages:\\n\"); image_info = info->value->inserted->image; while (1) { bdrv_image_info_dump((fprintf_function)monitor_printf, mon, image_info); if (image_info->has_backing_image) { image_info = image_info->backing_image; } else { break; } } } } qapi_free_BlockInfoList(block_list); }"} {"target": 1, "idx": 1388, "func": "static inline int decode_residual_inter(AVSContext *h) { int block; /* get coded block pattern */ int cbp= get_ue_golomb(&h->s.gb); if(cbp > 63){ av_log(h->s.avctx, AV_LOG_ERROR, \"illegal inter cbp\\n\"); return -1; } h->cbp = cbp_tab[cbp][1]; /* get quantizer */ if(h->cbp && !h->qp_fixed) h->qp = (h->qp + get_se_golomb(&h->s.gb)) & 63; for(block=0;block<4;block++) if(h->cbp & (1<s.gb,ff_cavs_inter_dec,0,h->qp, h->cy + h->luma_scan[block], h->l_stride); decode_residual_chroma(h); return 0; }"} {"target": 1, "idx": 1407, "func": "static void fw_cfg_write(FWCfgState *s, uint8_t value) { int arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL); FWCfgEntry *e = &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK]; FW_CFG_DPRINTF(\"write %d\\n\", value); if (s->cur_entry & FW_CFG_WRITE_CHANNEL && s->cur_offset < e->len) { e->data[s->cur_offset++] = value; if (s->cur_offset == e->len) { e->callback(e->callback_opaque, e->data); s->cur_offset = 0; } } }"} {"target": 1, "idx": 1409, "func": "int qemu_peek_buffer(QEMUFile *f, uint8_t *buf, int size, size_t offset) { int pending; int index; assert(!qemu_file_is_writable(f)); index = f->buf_index + offset; pending = f->buf_size - index; if (pending < size) { qemu_fill_buffer(f); index = f->buf_index + offset; pending = f->buf_size - index; } if (pending <= 0) { return 0; } if (size > pending) { size = pending; } memcpy(buf, f->buf + index, 
size); return size; }"} {"target": 1, "idx": 1410, "func": "static int ahci_dma_rw_buf(IDEDMA *dma, int is_write) { AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); IDEState *s = &ad->port.ifs[0]; uint8_t *p = s->io_buffer + s->io_buffer_index; int l = s->io_buffer_size - s->io_buffer_index; if (ahci_populate_sglist(ad, &s->sg)) { return 0; } if (is_write) { dma_buf_read(p, l, &s->sg); } else { dma_buf_write(p, l, &s->sg); } /* update number of transferred bytes */ ad->cur_cmd->status = cpu_to_le32(le32_to_cpu(ad->cur_cmd->status) + l); s->io_buffer_index += l; DPRINTF(ad->port_no, \"len=%#x\\n\", l); return 1; }"} {"target": 1, "idx": 1416, "func": "void cpu_save(QEMUFile *f, void *opaque) { CPUState *env = (CPUState *)opaque; unsigned int i, j; cpu_synchronize_state(env); for (i = 0; i < 32; i++) qemu_put_betls(f, &env->gpr[i]); #if !defined(TARGET_PPC64) for (i = 0; i < 32; i++) qemu_put_betls(f, &env->gprh[i]); #endif qemu_put_betls(f, &env->lr); qemu_put_betls(f, &env->ctr); for (i = 0; i < 8; i++) qemu_put_be32s(f, &env->crf[i]); qemu_put_betls(f, &env->xer); qemu_put_betls(f, &env->reserve_addr); qemu_put_betls(f, &env->msr); for (i = 0; i < 4; i++) qemu_put_betls(f, &env->tgpr[i]); for (i = 0; i < 32; i++) { union { float64 d; uint64_t l; } u; u.d = env->fpr[i]; qemu_put_be64(f, u.l); } qemu_put_be32s(f, &env->fpscr); qemu_put_sbe32s(f, &env->access_type); #if !defined(CONFIG_USER_ONLY) #if defined(TARGET_PPC64) qemu_put_betls(f, &env->asr); qemu_put_sbe32s(f, &env->slb_nr); #endif qemu_put_betls(f, &env->sdr1); for (i = 0; i < 32; i++) qemu_put_betls(f, &env->sr[i]); for (i = 0; i < 2; i++) for (j = 0; j < 8; j++) qemu_put_betls(f, &env->DBAT[i][j]); for (i = 0; i < 2; i++) for (j = 0; j < 8; j++) qemu_put_betls(f, &env->IBAT[i][j]); qemu_put_sbe32s(f, &env->nb_tlb); qemu_put_sbe32s(f, &env->tlb_per_way); qemu_put_sbe32s(f, &env->nb_ways); qemu_put_sbe32s(f, &env->last_way); qemu_put_sbe32s(f, &env->id_tlbs); qemu_put_sbe32s(f, &env->nb_pids); if (env->tlb) { // XXX assumes 6xx for (i = 0; i < env->nb_tlb; i++) { qemu_put_betls(f, &env->tlb[i].tlb6.pte0); qemu_put_betls(f, &env->tlb[i].tlb6.pte1); qemu_put_betls(f, &env->tlb[i].tlb6.EPN); } } for (i = 0; i < 4; i++) qemu_put_betls(f, &env->pb[i]); #endif for (i = 0; i < 1024; i++) qemu_put_betls(f, &env->spr[i]); qemu_put_be32s(f, &env->vscr); qemu_put_be64s(f, &env->spe_acc); qemu_put_be32s(f, &env->spe_fscr); qemu_put_betls(f, &env->msr_mask); qemu_put_be32s(f, &env->flags); qemu_put_sbe32s(f, &env->error_code); qemu_put_be32s(f, &env->pending_interrupts); #if !defined(CONFIG_USER_ONLY) qemu_put_be32s(f, &env->irq_input_state); for (i = 0; i < POWERPC_EXCP_NB; i++) qemu_put_betls(f, &env->excp_vectors[i]); qemu_put_betls(f, &env->excp_prefix); qemu_put_betls(f, &env->hreset_excp_prefix); qemu_put_betls(f, &env->ivor_mask); qemu_put_betls(f, &env->ivpr_mask); qemu_put_betls(f, &env->hreset_vector); #endif qemu_put_betls(f, &env->nip); qemu_put_betls(f, &env->hflags); qemu_put_betls(f, &env->hflags_nmsr); qemu_put_sbe32s(f, &env->mmu_idx); qemu_put_sbe32s(f, &env->power_mode); }"} {"target": 1, "idx": 1418, "func": "static DeviceState *sun4c_intctl_init(target_phys_addr_t addr, qemu_irq *parent_irq) { DeviceState *dev; SysBusDevice *s; unsigned int i; dev = qdev_create(NULL, \"sun4c_intctl\"); qdev_init(dev); s = sysbus_from_qdev(dev); for (i = 0; i < MAX_PILS; i++) { sysbus_connect_irq(s, i, parent_irq[i]); } sysbus_mmio_map(s, 0, addr); return dev; }"} {"target": 1, "idx": 1422, "func": "static int 
gif_image_write_header(AVFormatContext *s, int width, int height, int loop_count, uint32_t *palette) { AVIOContext *pb = s->pb; AVRational sar = s->streams[0]->codec->sample_aspect_ratio; int i, aspect = 0; if (sar.num > 0 && sar.den > 0) { aspect = sar.num * 64 / sar.den - 15; if (aspect < 0 || aspect > 255) aspect = 0; } avio_write(pb, \"GIF\", 3); avio_write(pb, \"89a\", 3); avio_wl16(pb, width); avio_wl16(pb, height); if (palette) { avio_w8(pb, 0xf7); /* flags: global clut, 256 entries */ avio_w8(pb, 0x1f); /* background color index */ avio_w8(pb, aspect); for (i = 0; i < 256; i++) { const uint32_t v = palette[i] & 0xffffff; avio_wb24(pb, v); } } else { avio_w8(pb, 0); /* flags */ avio_w8(pb, 0); /* background color index */ avio_w8(pb, aspect); } if (loop_count >= 0 ) { /* \"NETSCAPE EXTENSION\" for looped animation GIF */ avio_w8(pb, 0x21); /* GIF Extension code */ avio_w8(pb, 0xff); /* Application Extension Label */ avio_w8(pb, 0x0b); /* Length of Application Block */ avio_write(pb, \"NETSCAPE2.0\", sizeof(\"NETSCAPE2.0\") - 1); avio_w8(pb, 0x03); /* Length of Data Sub-Block */ avio_w8(pb, 0x01); avio_wl16(pb, (uint16_t)loop_count); avio_w8(pb, 0x00); /* Data Sub-block Terminator */ } return 0; }"} {"target": 0, "idx": 1441, "func": "static int ipvideo_decode_block_opcode_0xA(IpvideoContext *s) { int x, y; unsigned char P[4]; int flags = 0; /* 4-color encoding for each 4x4 quadrant, or 4-color encoding on * either top and bottom or left and right halves */ CHECK_STREAM_PTR(24); if (s->stream_ptr[0] <= s->stream_ptr[1]) { /* 4-color encoding for each quadrant; need 32 bytes */ CHECK_STREAM_PTR(32); for (y = 0; y < 16; y++) { // new values for each 4x4 block if (!(y & 3)) { memcpy(P, s->stream_ptr, 4); s->stream_ptr += 4; flags = bytestream_get_le32(&s->stream_ptr); } for (x = 0; x < 4; x++, flags >>= 2) *s->pixel_ptr++ = P[flags & 0x03]; s->pixel_ptr += s->stride - 4; // switch to right half if (y == 7) s->pixel_ptr -= 8 * s->stride - 4; } } else { // vertical split? 
int vert = s->stream_ptr[12] <= s->stream_ptr[13]; uint64_t flags = 0; /* 4-color encoding for either left and right or top and bottom * halves */ for (y = 0; y < 16; y++) { // load values for each half if (!(y & 7)) { memcpy(P, s->stream_ptr, 4); s->stream_ptr += 4; flags = bytestream_get_le64(&s->stream_ptr); } for (x = 0; x < 4; x++, flags >>= 2) *s->pixel_ptr++ = P[flags & 0x03]; if (vert) { s->pixel_ptr += s->stride - 4; // switch to right half if (y == 7) s->pixel_ptr -= 8 * s->stride - 4; } else if (y & 1) s->pixel_ptr += s->line_inc; } } /* report success */ return 0; }"} {"target": 0, "idx": 1447, "func": "static inline void dv_decode_video_segment(DVVideoContext *s, const uint8_t *buf_ptr1, const uint16_t *mb_pos_ptr) { int quant, dc, dct_mode, class1, j; int mb_index, mb_x, mb_y, v, last_index; int y_stride, i; DCTELEM *block, *block1; int c_offset; uint8_t *y_ptr; const uint8_t *buf_ptr; PutBitContext pb, vs_pb; GetBitContext gb; BlockInfo mb_data[5 * DV_MAX_BPM], *mb, *mb1; DECLARE_ALIGNED_16(DCTELEM, sblock[5*DV_MAX_BPM][64]); DECLARE_ALIGNED_8(uint8_t, mb_bit_buffer[80 + 4]); /* allow some slack */ DECLARE_ALIGNED_8(uint8_t, vs_bit_buffer[5 * 80 + 4]); /* allow some slack */ const int log2_blocksize= 3-s->avctx->lowres; int is_field_mode[5]; assert((((int)mb_bit_buffer)&7)==0); assert((((int)vs_bit_buffer)&7)==0); memset(sblock, 0, sizeof(sblock)); /* pass 1 : read DC and AC coefficients in blocks */ buf_ptr = buf_ptr1; block1 = &sblock[0][0]; mb1 = mb_data; init_put_bits(&vs_pb, vs_bit_buffer, 5 * 80); for(mb_index = 0; mb_index < 5; mb_index++, mb1 += s->sys->bpm, block1 += s->sys->bpm * 64) { /* skip header */ quant = buf_ptr[3] & 0x0f; buf_ptr += 4; init_put_bits(&pb, mb_bit_buffer, 80); mb = mb1; block = block1; is_field_mode[mb_index] = 0; for(j = 0;j < s->sys->bpm; j++) { last_index = s->sys->block_sizes[j]; init_get_bits(&gb, buf_ptr, last_index); /* get the dc */ dc = get_sbits(&gb, 9); dct_mode = get_bits1(&gb); class1 = get_bits(&gb, 2); if (DV_PROFILE_IS_HD(s->sys)) { mb->idct_put = s->idct_put[0]; mb->scan_table = s->dv_zigzag[0]; mb->factor_table = s->dv100_idct_factor[((s->sys->height == 720)<<1)|(j >= 4)][class1][quant]; is_field_mode[mb_index] |= !j && dct_mode; } else { mb->idct_put = s->idct_put[dct_mode && log2_blocksize==3]; mb->scan_table = s->dv_zigzag[dct_mode]; mb->factor_table = s->dv_idct_factor[class1 == 3][dct_mode] [quant + dv_quant_offset[class1]]; } dc = dc << 2; /* convert to unsigned because 128 is not added in the standard IDCT */ dc += 1024; block[0] = dc; buf_ptr += last_index >> 3; mb->pos = 0; mb->partial_bit_count = 0; #ifdef VLC_DEBUG printf(\"MB block: %d, %d \", mb_index, j); #endif dv_decode_ac(&gb, mb, block); /* write the remaining bits in a new buffer only if the block is finished */ if (mb->pos >= 64) bit_copy(&pb, &gb); block += 64; mb++; } /* pass 2 : we can do it just after */ #ifdef VLC_DEBUG printf(\"***pass 2 size=%d MB#=%d\\n\", put_bits_count(&pb), mb_index); #endif block = block1; mb = mb1; init_get_bits(&gb, mb_bit_buffer, put_bits_count(&pb)); flush_put_bits(&pb); for(j = 0;j < s->sys->bpm; j++, block += 64, mb++) { if (mb->pos < 64 && get_bits_left(&gb) > 0) { dv_decode_ac(&gb, mb, block); /* if still not finished, no need to parse other blocks */ if (mb->pos < 64) break; } } /* all blocks are finished, so the extra bytes can be used at the video segment level */ if (j >= s->sys->bpm) bit_copy(&vs_pb, &gb); } /* we need a pass other the whole video segment */ #ifdef VLC_DEBUG printf(\"***pass 3 size=%d\\n\", 
put_bits_count(&vs_pb)); #endif block = &sblock[0][0]; mb = mb_data; init_get_bits(&gb, vs_bit_buffer, put_bits_count(&vs_pb)); flush_put_bits(&vs_pb); for(mb_index = 0; mb_index < 5; mb_index++) { for(j = 0;j < s->sys->bpm; j++) { if (mb->pos < 64) { #ifdef VLC_DEBUG printf(\"start %d:%d\\n\", mb_index, j); #endif dv_decode_ac(&gb, mb, block); } if (mb->pos >= 64 && mb->pos < 127) av_log(NULL, AV_LOG_ERROR, \"AC EOB marker is absent pos=%d\\n\", mb->pos); block += 64; mb++; } } /* compute idct and place blocks */ block = &sblock[0][0]; mb = mb_data; for(mb_index = 0; mb_index < 5; mb_index++) { v = *mb_pos_ptr++; mb_x = v & 0xff; mb_y = v >> 8; /* We work with 720p frames split in half. The odd half-frame (chan==2,3) is displaced :-( */ if (s->sys->height == 720 && !(s->buf[1]&0x0C)) { mb_y -= (mb_y>17)?18:-72; /* shifting the Y coordinate down by 72/2 macroblocks */ } /* idct_put'ting luminance */ if ((s->sys->pix_fmt == PIX_FMT_YUV420P) || (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) || (s->sys->height >= 720 && mb_y != 134)) { y_stride = (s->picture.linesize[0]<<((!is_field_mode[mb_index])*log2_blocksize)) - (2<picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x)<sys->pix_fmt == PIX_FMT_YUV422P && s->sys->width == 720 && i) y_ptr -= (1<idct_put(y_ptr, s->picture.linesize[0]<>(s->sys->pix_fmt == PIX_FMT_YUV420P)) * s->picture.linesize[1] + (mb_x>>((s->sys->pix_fmt == PIX_FMT_YUV411P)?2:1)))<picture.data[j] + c_offset; if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) { uint64_t aligned_pixels[64/8]; uint8_t *pixels = (uint8_t*)aligned_pixels; uint8_t *c_ptr1, *ptr1; int x, y; mb->idct_put(pixels, 8, block); for(y = 0; y < (1<picture.linesize[j], pixels += 8) { ptr1= pixels + (1<<(log2_blocksize-1)); c_ptr1 = c_ptr + (s->picture.linesize[j]<picture.linesize[j]<<((!is_field_mode[mb_index])*log2_blocksize); for (i=0; i<(1<<(s->sys->bpm==8)); i++, block += 64, mb++, c_ptr += y_stride) mb->idct_put(c_ptr, s->picture.linesize[j]< 1) { return false; } } return true; }"} {"target": 1, "idx": 1459, "func": "static void imc_get_coeffs(AVCodecContext *avctx, IMCContext *q, IMCChannel *chctx) { int i, j, cw_len, cw; for (i = 0; i < BANDS; i++) { if (!chctx->sumLenArr[i]) continue; if (chctx->bandFlagsBuf[i] || chctx->bandWidthT[i]) { for (j = band_tab[i]; j < band_tab[i + 1]; j++) { cw_len = chctx->CWlengthT[j]; cw = 0; if (cw_len && (!chctx->bandFlagsBuf[i] || !chctx->skipFlags[j])) { if (get_bits_count(&q->gb) + cw_len > 512) { av_log(avctx, AV_LOG_WARNING, \"Potential problem on band %i, coefficient %i\" \": cw_len=%i\\n\", i, j, cw_len); } cw = get_bits(&q->gb, cw_len); } chctx->codewords[j] = cw; } } } }"} {"target": 1, "idx": 1509, "func": "static int aio_write_f(int argc, char **argv) { int nr_iov, c; int pattern = 0xcd; struct aio_ctx *ctx = calloc(1, sizeof(struct aio_ctx)); BlockDriverAIOCB *acb; while ((c = getopt(argc, argv, \"CqP:\")) != EOF) { switch (c) { case 'C': ctx->Cflag = 1; break; case 'q': ctx->qflag = 1; break; case 'P': pattern = parse_pattern(optarg); if (pattern < 0) { return 0; } break; default: return command_usage(&aio_write_cmd); } } if (optind > argc - 2) { return command_usage(&aio_write_cmd); } ctx->offset = cvtnum(argv[optind]); if (ctx->offset < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[optind]); return 0; } optind++; if (ctx->offset & 0x1ff) { printf(\"offset %\" PRId64 \" is not sector aligned\\n\", ctx->offset); return 0; } nr_iov = argc - optind; ctx->buf = create_iovec(&ctx->qiov, &argv[optind], nr_iov, pattern); 
gettimeofday(&ctx->t1, NULL); acb = bdrv_aio_writev(bs, ctx->offset >> 9, &ctx->qiov, ctx->qiov.size >> 9, aio_write_done, ctx); if (!acb) { free(ctx->buf); return -EIO; } return 0; }"} {"target": 1, "idx": 1528, "func": "static coroutine_fn int quorum_co_flush(BlockDriverState *bs) { BDRVQuorumState *s = bs->opaque; QuorumVoteVersion *winner = NULL; QuorumVotes error_votes; QuorumVoteValue result_value; int i; int result = 0; QLIST_INIT(&error_votes.vote_list); error_votes.compare = quorum_64bits_compare; for (i = 0; i < s->num_children; i++) { result = bdrv_co_flush(s->children[i]->bs); result_value.l = result; quorum_count_vote(&error_votes, &result_value, i); } winner = quorum_get_vote_winner(&error_votes); result = winner->value.l; quorum_free_vote_list(&error_votes); return result; }"} {"target": 0, "idx": 1563, "func": "static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOpIdx oi) { TCGMemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); TCGReg addrz, param; tcg_insn_unit *func; tcg_insn_unit *label_ptr; addrz = tcg_out_tlb_load(s, addr, memi, memop & MO_SIZE, offsetof(CPUTLBEntry, addr_write)); /* The fast path is exactly one insn. Thus we can perform the entire TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */ /* beq,a,pt %[xi]cc, label0 */ label_ptr = s->code_ptr; tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); /* delay slot */ tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); /* TLB Miss. */ param = TCG_REG_O1; if (!SPARC64 && TARGET_LONG_BITS == 64) { /* Skip the high-part; we'll perform the extract in the trampoline. */ param++; } tcg_out_mov(s, TCG_TYPE_REG, param++, addr); if (!SPARC64 && (memop & MO_SIZE) == MO_64) { /* Skip the high-part; we'll perform the extract in the trampoline. */ param++; } tcg_out_mov(s, TCG_TYPE_REG, param++, data); func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)]; assert(func != NULL); tcg_out_call_nodelay(s, func); /* delay slot */ tcg_out_movi(s, TCG_TYPE_I32, param, oi); *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); #else if (SPARC64 && TARGET_LONG_BITS == 32) { tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); addr = TCG_REG_T1; } tcg_out_ldst_rr(s, data, addr, (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); #endif /* CONFIG_SOFTMMU */ }"} {"target": 0, "idx": 1565, "func": "void r4k_helper_tlbr(CPUMIPSState *env) { r4k_tlb_t *tlb; uint8_t ASID; int idx; ASID = env->CP0_EntryHi & 0xFF; idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; tlb = &env->tlb->mmu.r4k.tlb[idx]; /* If this will change the current ASID, flush qemu's TLB. 
*/ if (ASID != tlb->ASID) cpu_mips_tlb_flush (env, 1); r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); env->CP0_EntryHi = tlb->VPN | tlb->ASID; env->CP0_PageMask = tlb->PageMask; env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) | ((target_ulong)tlb->RI0 << CP0EnLo_RI) | ((target_ulong)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) | (tlb->PFN[0] >> 6); env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) | ((target_ulong)tlb->RI1 << CP0EnLo_RI) | ((target_ulong)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) | (tlb->PFN[1] >> 6); }"} {"target": 0, "idx": 1571, "func": "static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb) { int i; int dc_y_table; int dc_c_table; int ac_y_table; int ac_c_table; int residual_eob_run = 0; /* fetch the DC table indexes */ dc_y_table = get_bits(gb, 4); dc_c_table = get_bits(gb, 4); /* unpack the Y plane DC coefficients */ residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0, 1, residual_eob_run); /* reverse prediction of the Y-plane DC coefficients */ reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height); /* unpack the C plane DC coefficients */ residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0, 0, residual_eob_run); /* reverse prediction of the C-plane DC coefficients */ if (!(s->avctx->flags & CODEC_FLAG_GRAY)) { reverse_dc_prediction(s, s->fragment_start[1], s->fragment_width / 2, s->fragment_height / 2); reverse_dc_prediction(s, s->fragment_start[2], s->fragment_width / 2, s->fragment_height / 2); } /* fetch the AC table indexes */ ac_y_table = get_bits(gb, 4); ac_c_table = get_bits(gb, 4); /* unpack the group 1 AC coefficients (coeffs 1-5) */ for (i = 1; i <= 5; i++) { residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_y_table], i, 1, residual_eob_run); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_c_table], i, 0, residual_eob_run); } /* unpack the group 2 AC coefficients (coeffs 6-14) */ for (i = 6; i <= 14; i++) { residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_y_table], i, 1, residual_eob_run); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_c_table], i, 0, residual_eob_run); } /* unpack the group 3 AC coefficients (coeffs 15-27) */ for (i = 15; i <= 27; i++) { residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_y_table], i, 1, residual_eob_run); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_c_table], i, 0, residual_eob_run); } /* unpack the group 4 AC coefficients (coeffs 28-63) */ for (i = 28; i <= 63; i++) { residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_y_table], i, 1, residual_eob_run); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_c_table], i, 0, residual_eob_run); } return 0; }"} {"target": 0, "idx": 1572, "func": "int kvmppc_fixup_cpu(PowerPCCPU *cpu) { CPUState *cs = CPU(cpu); int smt; /* Adjust cpu index for SMT */ smt = kvmppc_smt_threads(); cs->cpu_index = (cs->cpu_index / smp_threads) * smt + (cs->cpu_index % smp_threads); return 0; }"} {"target": 1, "idx": 1578, "func": "static void *spapr_create_fdt_skel(const char *cpu_model, hwaddr initrd_base, hwaddr initrd_size, hwaddr kernel_size, const char *boot_device, const char *kernel_cmdline, uint32_t epow_irq) { void *fdt; CPUPPCState *env; uint32_t start_prop = cpu_to_be32(initrd_base); uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size); char hypertas_prop[] = \"hcall-pft\\0hcall-term\\0hcall-dabr\\0hcall-interrupt\" \"\\0hcall-tce\\0hcall-vio\\0hcall-splpar\\0hcall-bulk\"; char qemu_hypertas_prop[] = \"hcall-memop1\"; uint32_t refpoints[] = {cpu_to_be32(0x4), 
cpu_to_be32(0x4)}; uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(smp_cpus)}; char *modelname; int i, smt = kvmppc_smt_threads(); unsigned char vec5[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x80}; fdt = g_malloc0(FDT_MAX_SIZE); _FDT((fdt_create(fdt, FDT_MAX_SIZE))); if (kernel_size) { _FDT((fdt_add_reservemap_entry(fdt, KERNEL_LOAD_ADDR, kernel_size))); } if (initrd_size) { _FDT((fdt_add_reservemap_entry(fdt, initrd_base, initrd_size))); } _FDT((fdt_finish_reservemap(fdt))); /* Root node */ _FDT((fdt_begin_node(fdt, \"\"))); _FDT((fdt_property_string(fdt, \"device_type\", \"chrp\"))); _FDT((fdt_property_string(fdt, \"model\", \"IBM pSeries (emulated by qemu)\"))); _FDT((fdt_property_cell(fdt, \"#address-cells\", 0x2))); _FDT((fdt_property_cell(fdt, \"#size-cells\", 0x2))); /* /chosen */ _FDT((fdt_begin_node(fdt, \"chosen\"))); /* Set Form1_affinity */ _FDT((fdt_property(fdt, \"ibm,architecture-vec-5\", vec5, sizeof(vec5)))); _FDT((fdt_property_string(fdt, \"bootargs\", kernel_cmdline))); _FDT((fdt_property(fdt, \"linux,initrd-start\", &start_prop, sizeof(start_prop)))); _FDT((fdt_property(fdt, \"linux,initrd-end\", &end_prop, sizeof(end_prop)))); if (kernel_size) { uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR), cpu_to_be64(kernel_size) }; _FDT((fdt_property(fdt, \"qemu,boot-kernel\", &kprop, sizeof(kprop)))); } _FDT((fdt_property_string(fdt, \"qemu,boot-device\", boot_device))); _FDT((fdt_property_cell(fdt, \"qemu,graphic-width\", graphic_width))); _FDT((fdt_property_cell(fdt, \"qemu,graphic-height\", graphic_height))); _FDT((fdt_property_cell(fdt, \"qemu,graphic-depth\", graphic_depth))); _FDT((fdt_end_node(fdt))); /* cpus */ _FDT((fdt_begin_node(fdt, \"cpus\"))); _FDT((fdt_property_cell(fdt, \"#address-cells\", 0x1))); _FDT((fdt_property_cell(fdt, \"#size-cells\", 0x0))); modelname = g_strdup(cpu_model); for (i = 0; i < strlen(modelname); i++) { modelname[i] = toupper(modelname[i]); } /* This is needed during FDT finalization */ spapr->cpu_model = g_strdup(modelname); for (env = first_cpu; env != NULL; env = env->next_cpu) { CPUState *cpu = CPU(ppc_env_get_cpu(env)); int index = cpu->cpu_index; uint32_t servers_prop[smp_threads]; uint32_t gservers_prop[smp_threads * 2]; char *nodename; uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 0xffffffff, 0xffffffff}; uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() : TIMEBASE_FREQ; uint32_t cpufreq = kvm_enabled() ? 
kvmppc_get_clockfreq() : 1000000000; uint32_t page_sizes_prop[64]; size_t page_sizes_prop_size; if ((index % smt) != 0) { continue; } if (asprintf(&nodename, \"%s@%x\", modelname, index) < 0) { fprintf(stderr, \"Allocation failure\\n\"); exit(1); } _FDT((fdt_begin_node(fdt, nodename))); free(nodename); _FDT((fdt_property_cell(fdt, \"reg\", index))); _FDT((fdt_property_string(fdt, \"device_type\", \"cpu\"))); _FDT((fdt_property_cell(fdt, \"cpu-version\", env->spr[SPR_PVR]))); _FDT((fdt_property_cell(fdt, \"dcache-block-size\", env->dcache_line_size))); _FDT((fdt_property_cell(fdt, \"icache-block-size\", env->icache_line_size))); _FDT((fdt_property_cell(fdt, \"timebase-frequency\", tbfreq))); _FDT((fdt_property_cell(fdt, \"clock-frequency\", cpufreq))); _FDT((fdt_property_cell(fdt, \"ibm,slb-size\", env->slb_nr))); _FDT((fdt_property_string(fdt, \"status\", \"okay\"))); _FDT((fdt_property(fdt, \"64-bit\", NULL, 0))); /* Build interrupt servers and gservers properties */ for (i = 0; i < smp_threads; i++) { servers_prop[i] = cpu_to_be32(index + i); /* Hack, direct the group queues back to cpu 0 */ gservers_prop[i*2] = cpu_to_be32(index + i); gservers_prop[i*2 + 1] = 0; } _FDT((fdt_property(fdt, \"ibm,ppc-interrupt-server#s\", servers_prop, sizeof(servers_prop)))); _FDT((fdt_property(fdt, \"ibm,ppc-interrupt-gserver#s\", gservers_prop, sizeof(gservers_prop)))); if (env->mmu_model & POWERPC_MMU_1TSEG) { _FDT((fdt_property(fdt, \"ibm,processor-segment-sizes\", segs, sizeof(segs)))); } /* Advertise VMX/VSX (vector extensions) if available * 0 / no property == no vector extensions * 1 == VMX / Altivec available * 2 == VSX available */ if (env->insns_flags & PPC_ALTIVEC) { uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 2 : 1; _FDT((fdt_property_cell(fdt, \"ibm,vmx\", vmx))); } /* Advertise DFP (Decimal Floating Point) if available * 0 / no property == no DFP * 1 == DFP available */ if (env->insns_flags2 & PPC2_DFP) { _FDT((fdt_property_cell(fdt, \"ibm,dfp\", 1))); } page_sizes_prop_size = create_page_sizes_prop(env, page_sizes_prop, sizeof(page_sizes_prop)); if (page_sizes_prop_size) { _FDT((fdt_property(fdt, \"ibm,segment-page-sizes\", page_sizes_prop, page_sizes_prop_size))); } _FDT((fdt_end_node(fdt))); } g_free(modelname); _FDT((fdt_end_node(fdt))); /* RTAS */ _FDT((fdt_begin_node(fdt, \"rtas\"))); _FDT((fdt_property(fdt, \"ibm,hypertas-functions\", hypertas_prop, sizeof(hypertas_prop)))); _FDT((fdt_property(fdt, \"qemu,hypertas-functions\", qemu_hypertas_prop, sizeof(qemu_hypertas_prop)))); _FDT((fdt_property(fdt, \"ibm,associativity-reference-points\", refpoints, sizeof(refpoints)))); _FDT((fdt_property_cell(fdt, \"rtas-error-log-max\", RTAS_ERROR_LOG_MAX))); _FDT((fdt_end_node(fdt))); /* interrupt controller */ _FDT((fdt_begin_node(fdt, \"interrupt-controller\"))); _FDT((fdt_property_string(fdt, \"device_type\", \"PowerPC-External-Interrupt-Presentation\"))); _FDT((fdt_property_string(fdt, \"compatible\", \"IBM,ppc-xicp\"))); _FDT((fdt_property(fdt, \"interrupt-controller\", NULL, 0))); _FDT((fdt_property(fdt, \"ibm,interrupt-server-ranges\", interrupt_server_ranges_prop, sizeof(interrupt_server_ranges_prop)))); _FDT((fdt_property_cell(fdt, \"#interrupt-cells\", 2))); _FDT((fdt_property_cell(fdt, \"linux,phandle\", PHANDLE_XICP))); _FDT((fdt_property_cell(fdt, \"phandle\", PHANDLE_XICP))); _FDT((fdt_end_node(fdt))); /* vdevice */ _FDT((fdt_begin_node(fdt, \"vdevice\"))); _FDT((fdt_property_string(fdt, \"device_type\", \"vdevice\"))); _FDT((fdt_property_string(fdt, \"compatible\", 
\"IBM,vdevice\"))); _FDT((fdt_property_cell(fdt, \"#address-cells\", 0x1))); _FDT((fdt_property_cell(fdt, \"#size-cells\", 0x0))); _FDT((fdt_property_cell(fdt, \"#interrupt-cells\", 0x2))); _FDT((fdt_property(fdt, \"interrupt-controller\", NULL, 0))); _FDT((fdt_end_node(fdt))); /* event-sources */ spapr_events_fdt_skel(fdt, epow_irq); _FDT((fdt_end_node(fdt))); /* close root node */ _FDT((fdt_finish(fdt))); return fdt; }"} {"target": 0, "idx": 1608, "func": "static int create_header64(DumpState *s) { int ret = 0; DiskDumpHeader64 *dh = NULL; KdumpSubHeader64 *kh = NULL; size_t size; int endian = s->dump_info.d_endian; uint32_t block_size; uint32_t sub_hdr_size; uint32_t bitmap_blocks; uint32_t status = 0; uint64_t offset_note; /* write common header, the version of kdump-compressed format is 6th */ size = sizeof(DiskDumpHeader64); dh = g_malloc0(size); strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE)); dh->header_version = cpu_convert_to_target32(6, endian); block_size = s->page_size; dh->block_size = cpu_convert_to_target32(block_size, endian); sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size; sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size); dh->sub_hdr_size = cpu_convert_to_target32(sub_hdr_size, endian); /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */ dh->max_mapnr = cpu_convert_to_target32(MIN(s->max_mapnr, UINT_MAX), endian); dh->nr_cpus = cpu_convert_to_target32(s->nr_cpus, endian); bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2; dh->bitmap_blocks = cpu_convert_to_target32(bitmap_blocks, endian); strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine)); if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) { status |= DUMP_DH_COMPRESSED_ZLIB; } #ifdef CONFIG_LZO if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) { status |= DUMP_DH_COMPRESSED_LZO; } #endif #ifdef CONFIG_SNAPPY if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) { status |= DUMP_DH_COMPRESSED_SNAPPY; } #endif dh->status = cpu_convert_to_target32(status, endian); if (write_buffer(s->fd, 0, dh, size) < 0) { dump_error(s, \"dump: failed to write disk dump header.\\n\"); ret = -1; goto out; } /* write sub header */ size = sizeof(KdumpSubHeader64); kh = g_malloc0(size); /* 64bit max_mapnr_64 */ kh->max_mapnr_64 = cpu_convert_to_target64(s->max_mapnr, endian); kh->phys_base = cpu_convert_to_target64(PHYS_BASE, endian); kh->dump_level = cpu_convert_to_target32(DUMP_LEVEL, endian); offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size; kh->offset_note = cpu_convert_to_target64(offset_note, endian); kh->note_size = cpu_convert_to_target64(s->note_size, endian); if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS * block_size, kh, size) < 0) { dump_error(s, \"dump: failed to write kdump sub header.\\n\"); ret = -1; goto out; } /* write note */ s->note_buf = g_malloc0(s->note_size); s->note_buf_offset = 0; /* use s->note_buf to store notes temporarily */ if (write_elf64_notes(buf_write_note, s) < 0) { ret = -1; goto out; } if (write_buffer(s->fd, offset_note, s->note_buf, s->note_size) < 0) { dump_error(s, \"dump: failed to write notes\"); ret = -1; goto out; } /* get offset of dump_bitmap */ s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) * block_size; /* get offset of page */ s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) * block_size; out: g_free(dh); g_free(kh); g_free(s->note_buf); return ret; }"} {"target": 0, "idx": 1609, "func": "unsigned int SingleCPDO(const unsigned int opcode) { FPA11 *fpa11 = 
GET_FPA11(); float32 rFm, rFn = 0; unsigned int Fd, Fm, Fn, nRc = 1; Fm = getFm(opcode); if (CONSTANT_FM(opcode)) { rFm = getSingleConstant(Fm); } else { switch (fpa11->fType[Fm]) { case typeSingle: rFm = fpa11->fpreg[Fm].fSingle; break; default: return 0; } } if (!MONADIC_INSTRUCTION(opcode)) { Fn = getFn(opcode); switch (fpa11->fType[Fn]) { case typeSingle: rFn = fpa11->fpreg[Fn].fSingle; break; default: return 0; } } Fd = getFd(opcode); switch (opcode & MASK_ARITHMETIC_OPCODE) { /* dyadic opcodes */ case ADF_CODE: fpa11->fpreg[Fd].fSingle = float32_add(rFn,rFm, &fpa11->fp_status); break; case MUF_CODE: case FML_CODE: fpa11->fpreg[Fd].fSingle = float32_mul(rFn,rFm, &fpa11->fp_status); break; case SUF_CODE: fpa11->fpreg[Fd].fSingle = float32_sub(rFn,rFm, &fpa11->fp_status); break; case RSF_CODE: fpa11->fpreg[Fd].fSingle = float32_sub(rFm,rFn, &fpa11->fp_status); break; case DVF_CODE: case FDV_CODE: fpa11->fpreg[Fd].fSingle = float32_div(rFn,rFm, &fpa11->fp_status); break; case RDF_CODE: case FRD_CODE: fpa11->fpreg[Fd].fSingle = float32_div(rFm,rFn, &fpa11->fp_status); break; #if 0 case POW_CODE: fpa11->fpreg[Fd].fSingle = float32_pow(rFn,rFm); break; case RPW_CODE: fpa11->fpreg[Fd].fSingle = float32_pow(rFm,rFn); break; #endif case RMF_CODE: fpa11->fpreg[Fd].fSingle = float32_rem(rFn,rFm, &fpa11->fp_status); break; #if 0 case POL_CODE: fpa11->fpreg[Fd].fSingle = float32_pol(rFn,rFm); break; #endif /* monadic opcodes */ case MVF_CODE: fpa11->fpreg[Fd].fSingle = rFm; break; case MNF_CODE: rFm ^= 0x80000000; fpa11->fpreg[Fd].fSingle = rFm; break; case ABS_CODE: rFm &= 0x7fffffff; fpa11->fpreg[Fd].fSingle = rFm; break; case RND_CODE: case URD_CODE: fpa11->fpreg[Fd].fSingle = float32_round_to_int(rFm, &fpa11->fp_status); break; case SQT_CODE: fpa11->fpreg[Fd].fSingle = float32_sqrt(rFm, &fpa11->fp_status); break; #if 0 case LOG_CODE: fpa11->fpreg[Fd].fSingle = float32_log(rFm); break; case LGN_CODE: fpa11->fpreg[Fd].fSingle = float32_ln(rFm); break; case EXP_CODE: fpa11->fpreg[Fd].fSingle = float32_exp(rFm); break; case SIN_CODE: fpa11->fpreg[Fd].fSingle = float32_sin(rFm); break; case COS_CODE: fpa11->fpreg[Fd].fSingle = float32_cos(rFm); break; case TAN_CODE: fpa11->fpreg[Fd].fSingle = float32_tan(rFm); break; case ASN_CODE: fpa11->fpreg[Fd].fSingle = float32_arcsin(rFm); break; case ACS_CODE: fpa11->fpreg[Fd].fSingle = float32_arccos(rFm); break; case ATN_CODE: fpa11->fpreg[Fd].fSingle = float32_arctan(rFm); break; #endif case NRM_CODE: break; default: { nRc = 0; } } if (0 != nRc) fpa11->fType[Fd] = typeSingle; return nRc; }"} {"target": 0, "idx": 1622, "func": "static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target, const char *replaces, int64_t speed, uint32_t granularity, int64_t buf_size, BlockdevOnError on_source_error, BlockdevOnError on_target_error, BlockCompletionFunc *cb, void *opaque, Error **errp, const BlockJobDriver *driver, bool is_none_mode, BlockDriverState *base) { MirrorBlockJob *s; if (granularity == 0) { /* Choose the default granularity based on the target file's cluster * size, clamped between 4k and 64k. 
*/ BlockDriverInfo bdi; if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) { granularity = MAX(4096, bdi.cluster_size); granularity = MIN(65536, granularity); } else { granularity = 65536; } } assert ((granularity & (granularity - 1)) == 0); if ((on_source_error == BLOCKDEV_ON_ERROR_STOP || on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) && !bdrv_iostatus_is_enabled(bs)) { error_set(errp, QERR_INVALID_PARAMETER, \"on-source-error\"); return; } s = block_job_create(driver, bs, speed, cb, opaque, errp); if (!s) { return; } s->replaces = g_strdup(replaces); s->on_source_error = on_source_error; s->on_target_error = on_target_error; s->target = target; s->is_none_mode = is_none_mode; s->base = base; s->granularity = granularity; s->buf_size = MAX(buf_size, granularity); s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); if (!s->dirty_bitmap) { return; } bdrv_set_enable_write_cache(s->target, true); bdrv_set_on_error(s->target, on_target_error, on_target_error); bdrv_iostatus_enable(s->target); s->common.co = qemu_coroutine_create(mirror_run); trace_mirror_start(bs, s, s->common.co, opaque); qemu_coroutine_enter(s->common.co, s); }"} {"target": 1, "idx": 1642, "func": "static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AC3DecodeContext *s = avctx->priv_data; int16_t *out_samples = (int16_t *)data; int blk, ch, err; const uint8_t *channel_map; const float *output[AC3_MAX_CHANNELS]; /* initialize the GetBitContext with the start of valid AC-3 Frame */ if (s->input_buffer) { /* copy input buffer to decoder context to avoid reading past the end of the buffer, which can be caused by a damaged input stream. */ memcpy(s->input_buffer, buf, FFMIN(buf_size, AC3_FRAME_BUFFER_SIZE)); init_get_bits(&s->gbc, s->input_buffer, buf_size * 8); } else { init_get_bits(&s->gbc, buf, buf_size * 8); } /* parse the syncinfo */ *data_size = 0; err = parse_frame_header(s); if (err) { switch(err) { case AAC_AC3_PARSE_ERROR_SYNC: av_log(avctx, AV_LOG_ERROR, \"frame sync error\\n\"); return -1; case AAC_AC3_PARSE_ERROR_BSID: av_log(avctx, AV_LOG_ERROR, \"invalid bitstream id\\n\"); break; case AAC_AC3_PARSE_ERROR_SAMPLE_RATE: av_log(avctx, AV_LOG_ERROR, \"invalid sample rate\\n\"); break; case AAC_AC3_PARSE_ERROR_FRAME_SIZE: av_log(avctx, AV_LOG_ERROR, \"invalid frame size\\n\"); break; case AAC_AC3_PARSE_ERROR_FRAME_TYPE: /* skip frame if CRC is ok. otherwise use error concealment. 
*/ /* TODO: add support for substreams and dependent frames */ if(s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) { av_log(avctx, AV_LOG_ERROR, \"unsupported frame type : skipping frame\\n\"); return s->frame_size; } else { av_log(avctx, AV_LOG_ERROR, \"invalid frame type\\n\"); } break; default: av_log(avctx, AV_LOG_ERROR, \"invalid header\\n\"); break; } } else { /* check that reported frame size fits in input buffer */ if (s->frame_size > buf_size) { av_log(avctx, AV_LOG_ERROR, \"incomplete frame\\n\"); err = AAC_AC3_PARSE_ERROR_FRAME_SIZE; } else if (avctx->error_recognition >= FF_ER_CAREFUL) { /* check for crc mismatch */ if (av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, &buf[2], s->frame_size-2)) { av_log(avctx, AV_LOG_ERROR, \"frame CRC mismatch\\n\"); err = AAC_AC3_PARSE_ERROR_CRC; } } } /* if frame is ok, set audio parameters */ if (!err) { avctx->sample_rate = s->sample_rate; avctx->bit_rate = s->bit_rate; /* channel config */ s->out_channels = s->channels; s->output_mode = s->channel_mode; if(s->lfe_on) s->output_mode |= AC3_OUTPUT_LFEON; if (avctx->request_channels > 0 && avctx->request_channels <= 2 && avctx->request_channels < s->channels) { s->out_channels = avctx->request_channels; s->output_mode = avctx->request_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO; s->channel_layout = ff_ac3_channel_layout_tab[s->output_mode]; } avctx->channels = s->out_channels; avctx->channel_layout = s->channel_layout; /* set downmixing coefficients if needed */ if(s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) && s->fbw_channels == s->out_channels)) { set_downmix_coeffs(s); } } else if (!s->out_channels) { s->out_channels = avctx->channels; if(s->out_channels < s->channels) s->output_mode = s->out_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO; } /* decode the audio blocks */ channel_map = ff_ac3_dec_channel_map[s->output_mode & ~AC3_OUTPUT_LFEON][s->lfe_on]; for (ch = 0; ch < s->out_channels; ch++) output[ch] = s->output[channel_map[ch]]; for (blk = 0; blk < s->num_blocks; blk++) { if (!err && decode_audio_block(s, blk)) { av_log(avctx, AV_LOG_ERROR, \"error decoding the audio block\\n\"); err = 1; } s->fmt_conv.float_to_int16_interleave(out_samples, output, 256, s->out_channels); out_samples += 256 * s->out_channels; } *data_size = s->num_blocks * 256 * avctx->channels * sizeof (int16_t); return FFMIN(buf_size, s->frame_size); }"} {"target": 1, "idx": 1673, "func": "static void v9fs_flush(void *opaque) { int16_t tag; size_t offset = 7; V9fsPDU *cancel_pdu; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; pdu_unmarshal(pdu, offset, \"w\", &tag); QLIST_FOREACH(cancel_pdu, &s->active_list, next) { if (cancel_pdu->tag == tag) { break; } } if (cancel_pdu) { cancel_pdu->cancelled = 1; /* * Wait for pdu to complete. 
*/ qemu_co_queue_wait(&cancel_pdu->complete); cancel_pdu->cancelled = 0; free_pdu(pdu->s, cancel_pdu); } complete_pdu(s, pdu, 7); return; }"} {"target": 1, "idx": 1675, "func": "void qemu_savevm_send_postcopy_advise(QEMUFile *f) { uint64_t tmp[2]; tmp[0] = cpu_to_be64(getpagesize()); tmp[1] = cpu_to_be64(1ul << qemu_target_page_bits()); trace_qemu_savevm_send_postcopy_advise(); qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 16, (uint8_t *)tmp); }"} {"target": 1, "idx": 1677, "func": "static void add_qemu_cpu_model_features(S390FeatBitmap fbm) { static const int feats[] = { S390_FEAT_DAT_ENH, S390_FEAT_IDTE_SEGMENT, S390_FEAT_STFLE, S390_FEAT_SENSE_RUNNING_STATUS, S390_FEAT_EXTENDED_IMMEDIATE, S390_FEAT_EXTENDED_TRANSLATION_2, S390_FEAT_MSA, S390_FEAT_EXTENDED_TRANSLATION_3, S390_FEAT_LONG_DISPLACEMENT, S390_FEAT_LONG_DISPLACEMENT_FAST, S390_FEAT_ETF2_ENH, S390_FEAT_STORE_CLOCK_FAST, S390_FEAT_MOVE_WITH_OPTIONAL_SPEC, S390_FEAT_ETF3_ENH, S390_FEAT_COMPARE_AND_SWAP_AND_STORE, S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2, S390_FEAT_GENERAL_INSTRUCTIONS_EXT, S390_FEAT_EXECUTE_EXT, S390_FEAT_FLOATING_POINT_SUPPPORT_ENH, S390_FEAT_STFLE_45, S390_FEAT_STFLE_49, S390_FEAT_LOCAL_TLB_CLEARING, S390_FEAT_INTERLOCKED_ACCESS_2, S390_FEAT_STFLE_53, S390_FEAT_MSA_EXT_5, S390_FEAT_MSA_EXT_3, S390_FEAT_MSA_EXT_4, }; int i; for (i = 0; i < ARRAY_SIZE(feats); i++) { set_bit(feats[i], fbm); } }"} {"target": 1, "idx": 1683, "func": "static target_long monitor_get_decr (const struct MonitorDef *md, int val) { CPUState *env = mon_get_cpu(); if (!env) return 0; return cpu_ppc_load_decr(env); }"} {"target": 0, "idx": 1688, "func": "START_TEST(qobject_to_qlist_test) { QList *qlist; qlist = qlist_new(); fail_unless(qobject_to_qlist(QOBJECT(qlist)) == qlist); // destroy doesn't exist yet g_free(qlist); }"} {"target": 0, "idx": 1701, "func": "CharDriverState *qemu_chr_find(const char *name) { CharDriverState *chr; TAILQ_FOREACH(chr, &chardevs, next) { if (strcmp(chr->label, name) != 0) continue; return chr; } return NULL; }"} {"target": 0, "idx": 1706, "func": "static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet) { A64Context *c = avctx->priv_data; AVFrame *const p = avctx->coded_frame; int frame; int x, y; int b_height; int b_width; int req_size, ret; uint8_t *buf; int *charmap = c->mc_charmap; uint8_t *colram = c->mc_colram; uint8_t *charset = c->mc_charset; int *meta = c->mc_meta_charset; int *best_cb = c->mc_best_cb; int charset_size = 0x800 * (INTERLACED + 1); int colram_size = 0x100 * c->mc_use_5col; int screen_size; if(CROP_SCREENS) { b_height = FFMIN(avctx->height,C64YRES) >> 3; b_width = FFMIN(avctx->width ,C64XRES) >> 3; screen_size = b_width * b_height; } else { b_height = C64YRES >> 3; b_width = C64XRES >> 3; screen_size = 0x400; } /* no data, means end encoding asap */ if (!pict) { /* all done, end encoding */ if (!c->mc_lifetime) return 0; /* no more frames in queue, prepare to flush remaining frames */ if (!c->mc_frame_counter) { c->mc_lifetime = 0; } /* still frames in queue so limit lifetime to remaining frames */ else c->mc_lifetime = c->mc_frame_counter; /* still new data available */ } else { /* fill up mc_meta_charset with data until lifetime exceeds */ if (c->mc_frame_counter < c->mc_lifetime) { *p = *pict; p->pict_type = AV_PICTURE_TYPE_I; p->key_frame = 1; to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter); c->mc_frame_counter++; if (c->next_pts == AV_NOPTS_VALUE) c->next_pts = pict->pts; /* lifetime is not reached so wait for 
next frame first */ return 0; } } /* lifetime reached so now convert X frames at once */ if (c->mc_frame_counter == c->mc_lifetime) { req_size = 0; /* any frames to encode? */ if (c->mc_lifetime) { req_size = charset_size + c->mc_lifetime*(screen_size + colram_size); if ((ret = ff_alloc_packet(pkt, req_size)) < 0) { av_log(avctx, AV_LOG_ERROR, \"Error getting output packet of size %d.\\n\", req_size); return ret; } buf = pkt->data; /* calc optimal new charset + charmaps */ ret = ff_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx); if (ret < 0) return ret; ret = ff_do_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx); if (ret < 0) return ret; /* create colorram map and a c64 readable charset */ render_charset(avctx, charset, colram); /* copy charset to buf */ memcpy(buf, charset, charset_size); /* advance pointers */ buf += charset_size; charset += charset_size; } /* write x frames to buf */ for (frame = 0; frame < c->mc_lifetime; frame++) { /* copy charmap to buf. buf is uchar*, charmap is int*, so no memcpy here, sorry */ for (y = 0; y < b_height; y++) { for (x = 0; x < b_width; x++) { buf[y * b_width + x] = charmap[y * b_width + x]; } } /* advance pointers */ buf += screen_size; req_size += screen_size; /* compress and copy colram to buf */ if (c->mc_use_5col) { a64_compress_colram(buf, charmap, colram); /* advance pointers */ buf += colram_size; req_size += colram_size; } /* advance to next charmap */ charmap += 1000; } AV_WB32(avctx->extradata + 4, c->mc_frame_counter); AV_WB32(avctx->extradata + 8, charset_size); AV_WB32(avctx->extradata + 12, screen_size + colram_size); /* reset counter */ c->mc_frame_counter = 0; pkt->pts = pkt->dts = c->next_pts; c->next_pts = AV_NOPTS_VALUE; pkt->size = req_size; pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = !!req_size; } return 0; }"} {"target": 1, "idx": 1750, "func": "static void vm_completion(ReadLineState *rs, const char *str) { size_t len; BlockDriverState *bs; BdrvNextIterator *it = NULL; len = strlen(str); readline_set_completion_index(rs, len); while ((it = bdrv_next(it, &bs))) { SnapshotInfoList *snapshots, *snapshot; AioContext *ctx = bdrv_get_aio_context(bs); bool ok = false; aio_context_acquire(ctx); if (bdrv_can_snapshot(bs)) { ok = bdrv_query_snapshot_info_list(bs, &snapshots, NULL) == 0; } aio_context_release(ctx); if (!ok) { continue; } snapshot = snapshots; while (snapshot) { char *completion = snapshot->value->name; if (!strncmp(str, completion, len)) { readline_add_completion(rs, completion); } completion = snapshot->value->id; if (!strncmp(str, completion, len)) { readline_add_completion(rs, completion); } snapshot = snapshot->next; } qapi_free_SnapshotInfoList(snapshots); } }"} {"target": 1, "idx": 1776, "func": "static int mpegps_read_packet(AVFormatContext *s, AVPacket *pkt) { AVStream *st; int len, startcode, i, type, codec_id; int64_t pts, dts; redo: len = mpegps_read_pes_header(s, NULL, &startcode, &pts, &dts, 1); if (len < 0) return len; /* now find stream */ for(i=0;inb_streams;i++) { st = s->streams[i]; if (st->id == startcode) goto found; } if (startcode >= 0x1e0 && startcode <= 0x1ef) { type = CODEC_TYPE_VIDEO; codec_id = CODEC_ID_MPEG1VIDEO; } else if (startcode >= 0x1c0 && startcode <= 0x1df) { type = CODEC_TYPE_AUDIO; codec_id = CODEC_ID_MP2; } else if (startcode >= 0x80 && startcode <= 0x9f) { type = CODEC_TYPE_AUDIO; codec_id = CODEC_ID_AC3; } else if (startcode >= 0xa0 && startcode <= 0xbf) { type = CODEC_TYPE_AUDIO; codec_id = 
CODEC_ID_PCM_S16BE; } else { skip: /* skip packet */ url_fskip(&s->pb, len); goto redo; } /* no stream found: add a new stream */ st = av_new_stream(s, startcode); if (!st) goto skip; st->codec.codec_type = type; st->codec.codec_id = codec_id; if (codec_id != CODEC_ID_PCM_S16BE) st->need_parsing = 1; found: if (startcode >= 0xa0 && startcode <= 0xbf) { int b1, freq; static const int lpcm_freq_tab[4] = { 48000, 96000, 44100, 32000 }; /* for LPCM, we just skip the header and consider it is raw audio data */ if (len <= 3) goto skip; get_byte(&s->pb); /* emphasis (1), muse(1), reserved(1), frame number(5) */ b1 = get_byte(&s->pb); /* quant (2), freq(2), reserved(1), channels(3) */ get_byte(&s->pb); /* dynamic range control (0x80 = off) */ len -= 3; freq = (b1 >> 4) & 3; st->codec.sample_rate = lpcm_freq_tab[freq]; st->codec.channels = 1 + (b1 & 7); st->codec.bit_rate = st->codec.channels * st->codec.sample_rate * 2; } av_new_packet(pkt, len); get_buffer(&s->pb, pkt->data, pkt->size); pkt->pts = pts; pkt->dts = dts; pkt->stream_index = st->index; #if 0 printf(\"%d: pts=%0.3f dts=%0.3f\\n\", pkt->stream_index, pkt->pts / 90000.0, pkt->dts / 90000.0); #endif return 0; }"} {"target": 1, "idx": 1787, "func": "static int check_for_evdev(void) { SDL_SysWMinfo info; XkbDescPtr desc; int has_evdev = 0; const char *keycodes; SDL_VERSION(&info.version); if (!SDL_GetWMInfo(&info)) return 0; desc = XkbGetKeyboard(info.info.x11.display, XkbGBN_AllComponentsMask, XkbUseCoreKbd); if (desc == NULL || desc->names == NULL) return 0; keycodes = XGetAtomName(info.info.x11.display, desc->names->keycodes); if (keycodes == NULL) fprintf(stderr, \"could not lookup keycode name\\n\"); else if (strstart(keycodes, \"evdev\", NULL)) has_evdev = 1; else if (!strstart(keycodes, \"xfree86\", NULL)) fprintf(stderr, \"unknown keycodes `%s', please report to qemu-devel@nongnu.org\\n\", keycodes); XkbFreeClientMap(desc, XkbGBN_AllComponentsMask, True); return has_evdev; }"} {"target": 1, "idx": 1798, "func": "static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr, hwaddr addr, uint64_t *value, unsigned size, unsigned shift, uint64_t mask, MemTxAttrs attrs) { uint64_t tmp; tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); if (mr->subpage) { trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size); } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) { hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); } *value |= (tmp & mask) << shift; return MEMTX_OK; }"} {"target": 1, "idx": 1800, "func": "static int flic_decode_frame_8BPP(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { FlicDecodeContext *s = avctx->priv_data; int stream_ptr = 0; int pixel_ptr; int palette_ptr; unsigned char palette_idx1; unsigned char palette_idx2; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j; int color_packets; int color_changes; int color_shift; unsigned char r, g, b; int lines; int compressed_lines; int starting_line; signed short line_packets; int y_ptr; int byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; unsigned int pixel_limit; s->frame.reference = 3; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame) < 0) { av_log(avctx, AV_LOG_ERROR, \"reget_buffer() failed\\n\"); return -1; } pixels = s->frame.data[0]; pixel_limit = s->avctx->height * 
s->frame.linesize[0]; if (buf_size < 16 || buf_size > INT_MAX - (3 * 256 + FF_INPUT_BUFFER_PADDING_SIZE)) return AVERROR_INVALIDDATA; frame_size = AV_RL32(&buf[stream_ptr]); if (frame_size > buf_size) frame_size = buf_size; stream_ptr += 6; /* skip the magic number */ num_chunks = AV_RL16(&buf[stream_ptr]); stream_ptr += 10; /* skip padding */ frame_size -= 16; /* iterate through the chunks */ while ((frame_size >= 6) && (num_chunks > 0)) { int stream_ptr_after_chunk; chunk_size = AV_RL32(&buf[stream_ptr]); if (chunk_size > frame_size) { av_log(avctx, AV_LOG_WARNING, \"Invalid chunk_size = %u > frame_size = %u\\n\", chunk_size, frame_size); chunk_size = frame_size; } stream_ptr_after_chunk = stream_ptr + chunk_size; stream_ptr += 4; chunk_type = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: /* check special case: If this file is from the Magic Carpet * game and uses 6-bit colors even though it reports 256-color * chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during * initialization) */ if ((chunk_type == FLI_256_COLOR) && (s->fli_type != FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE)) color_shift = 0; else color_shift = 2; /* set up the palette */ color_packets = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; palette_ptr = 0; for (i = 0; i < color_packets; i++) { /* first byte is how many colors to skip */ palette_ptr += buf[stream_ptr++]; /* next byte indicates how many entries to change */ color_changes = buf[stream_ptr++]; /* if there are 0 color changes, there are actually 256 */ if (color_changes == 0) color_changes = 256; if (stream_ptr + color_changes * 3 > stream_ptr_after_chunk) break; for (j = 0; j < color_changes; j++) { unsigned int entry; /* wrap around, for good measure */ if ((unsigned)palette_ptr >= 256) palette_ptr = 0; r = buf[stream_ptr++] << color_shift; g = buf[stream_ptr++] << color_shift; b = buf[stream_ptr++] << color_shift; entry = 0xFF << 24 | r << 16 | g << 8 | b; if (color_shift == 2) entry |= entry >> 6 & 0x30303; if (s->palette[palette_ptr] != entry) s->new_palette = 1; s->palette[palette_ptr++] = entry; } } break; case FLI_DELTA: y_ptr = 0; compressed_lines = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; while (compressed_lines > 0) { if (stream_ptr + 2 > stream_ptr_after_chunk) break; line_packets = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; if ((line_packets & 0xC000) == 0xC000) { // line skip opcode line_packets = -line_packets; y_ptr += line_packets * s->frame.linesize[0]; } else if ((line_packets & 0xC000) == 0x4000) { av_log(avctx, AV_LOG_ERROR, \"Undefined opcode (%x) in DELTA_FLI\\n\", line_packets); } else if ((line_packets & 0xC000) == 0x8000) { // \"last byte\" opcode pixel_ptr= y_ptr + s->frame.linesize[0] - 1; CHECK_PIXEL_PTR(0); pixels[pixel_ptr] = line_packets & 0xff; } else { compressed_lines--; pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { if (stream_ptr + 2 > stream_ptr_after_chunk) break; /* account for the skip bytes */ pixel_skip = buf[stream_ptr++]; pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = (signed char)(buf[stream_ptr++]); if (byte_run < 0) { byte_run = -byte_run; palette_idx1 = buf[stream_ptr++]; palette_idx2 = buf[stream_ptr++]; CHECK_PIXEL_PTR(byte_run * 2); for (j = 0; j < byte_run; j++, pixel_countdown -= 2) { pixels[pixel_ptr++] = palette_idx1; pixels[pixel_ptr++] = palette_idx2; } } else { CHECK_PIXEL_PTR(byte_run * 2); if (stream_ptr + byte_run * 2 > stream_ptr_after_chunk) break; for (j 
= 0; j < byte_run * 2; j++, pixel_countdown--) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; } } } y_ptr += s->frame.linesize[0]; } } break; case FLI_LC: /* line compressed */ starting_line = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; y_ptr = 0; y_ptr += starting_line * s->frame.linesize[0]; compressed_lines = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; while (compressed_lines > 0) { pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; line_packets = buf[stream_ptr++]; if (stream_ptr + 2 * line_packets > stream_ptr_after_chunk) break; if (line_packets > 0) { for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = buf[stream_ptr++]; pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = (signed char)(buf[stream_ptr++]); if (byte_run > 0) { CHECK_PIXEL_PTR(byte_run); if (stream_ptr + byte_run > stream_ptr_after_chunk) break; for (j = 0; j < byte_run; j++, pixel_countdown--) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; } } else if (byte_run < 0) { byte_run = -byte_run; palette_idx1 = buf[stream_ptr++]; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { pixels[pixel_ptr++] = palette_idx1; } } } } y_ptr += s->frame.linesize[0]; compressed_lines--; } break; case FLI_BLACK: /* set the whole frame to color 0 (which is usually black) */ memset(pixels, 0, s->frame.linesize[0] * s->avctx->height); break; case FLI_BRUN: /* Byte run compression: This chunk type only occurs in the first * FLI frame and it will update the entire frame. */ y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ stream_ptr++; pixel_countdown = s->avctx->width; while (pixel_countdown > 0) { if (stream_ptr + 1 > stream_ptr_after_chunk) break; byte_run = (signed char)(buf[stream_ptr++]); if (byte_run > 0) { palette_idx1 = buf[stream_ptr++]; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d) at line %d\\n\", pixel_countdown, lines); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; CHECK_PIXEL_PTR(byte_run); if (stream_ptr + byte_run > stream_ptr_after_chunk) break; for (j = 0; j < byte_run; j++) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d) at line %d\\n\", pixel_countdown, lines); } } } y_ptr += s->frame.linesize[0]; } break; case FLI_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 != s->avctx->width * s->avctx->height) { av_log(avctx, AV_LOG_ERROR, \"In chunk FLI_COPY : source data (%d bytes) \" \\ \"has incorrect size, skipping chunk\\n\", chunk_size - 6); } else { for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height; y_ptr += s->frame.linesize[0]) { memcpy(&pixels[y_ptr], &buf[stream_ptr], s->avctx->width); stream_ptr += s->avctx->width; } } break; case FLI_MINI: /* some sort of a thumbnail? disregard this chunk... 
*/ break; default: av_log(avctx, AV_LOG_ERROR, \"Unrecognized chunk type: %d\\n\", chunk_type); break; } stream_ptr = stream_ptr_after_chunk; frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1, possibly); if it doesn't, issue a warning */ if ((stream_ptr != buf_size) && (stream_ptr != buf_size - 1)) av_log(avctx, AV_LOG_ERROR, \"Processed FLI chunk where chunk size = %d \" \\ \"and final chunk ptr = %d\\n\", buf_size, stream_ptr); /* make the palette available on the way out */ memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); if (s->new_palette) { s->frame.palette_has_changed = 1; s->new_palette = 0; } *data_size=sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; }"} {"target": 1, "idx": 1826, "func": "static int dcstr_read_header(AVFormatContext *s) { unsigned codec, align; AVStream *st; st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->channels = avio_rl32(s->pb); st->codecpar->sample_rate = avio_rl32(s->pb); codec = avio_rl32(s->pb); align = avio_rl32(s->pb); avio_skip(s->pb, 4); st->duration = avio_rl32(s->pb); st->codecpar->channels *= avio_rl32(s->pb); if (!align || align > INT_MAX / st->codecpar->channels) return AVERROR_INVALIDDATA; st->codecpar->block_align = align * st->codecpar->channels; switch (codec) { case 4: st->codecpar->codec_id = AV_CODEC_ID_ADPCM_AICA; break; case 16: st->codecpar->codec_id = AV_CODEC_ID_PCM_S16LE_PLANAR; break; default: avpriv_request_sample(s, \"codec %X\", codec); return AVERROR_PATCHWELCOME; } avio_skip(s->pb, 0x800 - avio_tell(s->pb)); avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate); return 0; }"} {"target": 1, "idx": 1830, "func": "int ff_oss_audio_open(AVFormatContext *s1, int is_output, const char *audio_device) { OSSAudioData *s = s1->priv_data; int audio_fd; int tmp, err; char *flip = getenv(\"AUDIO_FLIP_LEFT\"); if (is_output) audio_fd = avpriv_open(audio_device, O_WRONLY); else audio_fd = avpriv_open(audio_device, O_RDONLY); if (audio_fd < 0) { av_log(s1, AV_LOG_ERROR, \"%s: %s\\n\", audio_device, strerror(errno)); return AVERROR(EIO); } if (flip && *flip == '1') { s->flip_left = 1; } /* non blocking mode */ if (!is_output) fcntl(audio_fd, F_SETFL, O_NONBLOCK); s->frame_size = OSS_AUDIO_BLOCK_SIZE; /* select format : favour native format */ err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp); #if HAVE_BIGENDIAN if (tmp & AFMT_S16_BE) { tmp = AFMT_S16_BE; } else if (tmp & AFMT_S16_LE) { tmp = AFMT_S16_LE; } else { tmp = 0; } #else if (tmp & AFMT_S16_LE) { tmp = AFMT_S16_LE; } else if (tmp & AFMT_S16_BE) { tmp = AFMT_S16_BE; } else { tmp = 0; } #endif switch(tmp) { case AFMT_S16_LE: s->codec_id = AV_CODEC_ID_PCM_S16LE; break; case AFMT_S16_BE: s->codec_id = AV_CODEC_ID_PCM_S16BE; break; default: av_log(s1, AV_LOG_ERROR, \"Soundcard does not support 16 bit sample format\\n\"); close(audio_fd); return AVERROR(EIO); } err=ioctl(audio_fd, SNDCTL_DSP_SETFMT, &tmp); if (err < 0) { av_log(s1, AV_LOG_ERROR, \"SNDCTL_DSP_SETFMT: %s\\n\", strerror(errno)); goto fail; } tmp = (s->channels == 2); err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp); if (err < 0) { av_log(s1, AV_LOG_ERROR, \"SNDCTL_DSP_STEREO: %s\\n\", strerror(errno)); goto fail; } tmp = s->sample_rate; err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp); if (err < 0) { av_log(s1, AV_LOG_ERROR, \"SNDCTL_DSP_SPEED: %s\\n\", strerror(errno)); goto fail; } s->sample_rate = tmp; /* store real sample rate */ s->fd = audio_fd; return 
0; fail: close(audio_fd); return AVERROR(EIO); }"} {"target": 0, "idx": 1836, "func": "static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]) { /* load a few things into local vars to make the code more readable? and faster */ const int srcW= c->srcW; const int dstW= c->dstW; const int dstH= c->dstH; const int chrDstW= c->chrDstW; const int chrSrcW= c->chrSrcW; const int lumXInc= c->lumXInc; const int chrXInc= c->chrXInc; const enum PixelFormat dstFormat= c->dstFormat; const enum PixelFormat srcFormat= c->srcFormat; const int flags= c->flags; int16_t *vLumFilterPos= c->vLumFilterPos; int16_t *vChrFilterPos= c->vChrFilterPos; int16_t *hLumFilterPos= c->hLumFilterPos; int16_t *hChrFilterPos= c->hChrFilterPos; int16_t *vLumFilter= c->vLumFilter; int16_t *vChrFilter= c->vChrFilter; int16_t *hLumFilter= c->hLumFilter; int16_t *hChrFilter= c->hChrFilter; int32_t *lumMmxFilter= c->lumMmxFilter; int32_t *chrMmxFilter= c->chrMmxFilter; int32_t *alpMmxFilter= c->alpMmxFilter; const int vLumFilterSize= c->vLumFilterSize; const int vChrFilterSize= c->vChrFilterSize; const int hLumFilterSize= c->hLumFilterSize; const int hChrFilterSize= c->hChrFilterSize; int16_t **lumPixBuf= c->lumPixBuf; int16_t **chrPixBuf= c->chrPixBuf; int16_t **alpPixBuf= c->alpPixBuf; const int vLumBufSize= c->vLumBufSize; const int vChrBufSize= c->vChrBufSize; uint8_t *formatConvBuffer= c->formatConvBuffer; const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample; const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample); int lastDstY; uint32_t *pal=c->pal_yuv; /* vars which will change and which we need to store back in the context */ int dstY= c->dstY; int lumBufIndex= c->lumBufIndex; int chrBufIndex= c->chrBufIndex; int lastInLumBuf= c->lastInLumBuf; int lastInChrBuf= c->lastInChrBuf; if (isPacked(c->srcFormat)) { src[0]= src[1]= src[2]= src[3]= src[0]; srcStride[0]= srcStride[1]= srcStride[2]= srcStride[3]= srcStride[0]; } srcStride[1]<<= c->vChrDrop; srcStride[2]<<= c->vChrDrop; DEBUG_BUFFERS(\"swScale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\\n\", src[0], srcStride[0], src[1], srcStride[1], src[2], srcStride[2], src[3], srcStride[3], dst[0], dstStride[0], dst[1], dstStride[1], dst[2], dstStride[2], dst[3], dstStride[3]); DEBUG_BUFFERS(\"srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\\n\", srcSliceY, srcSliceH, dstY, dstH); DEBUG_BUFFERS(\"vLumFilterSize: %d vLumBufSize: %d vChrFilterSize: %d vChrBufSize: %d\\n\", vLumFilterSize, vLumBufSize, vChrFilterSize, vChrBufSize); if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0 || dstStride[3]%8 != 0) { static int warnedAlready=0; //FIXME move this into the context perhaps if (flags & SWS_PRINT_INFO && !warnedAlready) { av_log(c, AV_LOG_WARNING, \"Warning: dstStride is not aligned!\\n\" \" ->cannot do aligned memory accesses anymore\\n\"); warnedAlready=1; } } /* Note the user might start scaling the picture in the middle so this will not get executed. This is not really intended but works currently, so people might do it. */ if (srcSliceY ==0) { lumBufIndex=0; chrBufIndex=0; dstY=0; lastInLumBuf= -1; lastInChrBuf= -1; } lastDstY= dstY; for (;dstY < dstH; dstY++) { unsigned char *dest =dst[0]+dstStride[0]*dstY; const int chrDstY= dstY>>c->chrDstVSubSample; unsigned char *uDest=dst[1]+dstStride[1]*chrDstY; unsigned char *vDest=dst[2]+dstStride[2]*chrDstY; unsigned char *aDest=(CONFIG_SWSCALE_ALPHA && alpPixBuf) ? 
dst[3]+dstStride[3]*dstY : NULL; const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input int enough_lines; //handle holes (FAST_BILINEAR & weird filters) if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1; if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1; assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1); assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1); // Do we have enough lines in this slice to output the dstY line enough_lines = lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample); if (!enough_lines) { lastLumSrcY = srcSliceY + srcSliceH - 1; lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1; } DEBUG_BUFFERS(\"dstY: %d\\n\", dstY); DEBUG_BUFFERS(\"\\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\\n\", firstLumSrcY, lastLumSrcY, lastInLumBuf); DEBUG_BUFFERS(\"\\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\\n\", firstChrSrcY, lastChrSrcY, lastInChrBuf); //Do horizontal scaling while(lastInLumBuf < lastLumSrcY) { uint8_t *src1= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; uint8_t *src2= src[3]+(lastInLumBuf + 1 - srcSliceY)*srcStride[3]; lumBufIndex++; DEBUG_BUFFERS(\"\\t\\tlumBufIndex %d: lastInLumBuf: %d\\n\", lumBufIndex, lastInLumBuf); assert(lumBufIndex < 2*vLumBufSize); assert(lastInLumBuf + 1 - srcSliceY < srcSliceH); assert(lastInLumBuf + 1 - srcSliceY >= 0); RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, src1, srcW, lumXInc, flags, hLumFilter, hLumFilterPos, hLumFilterSize, c->srcFormat, formatConvBuffer, pal, 0); if (CONFIG_SWSCALE_ALPHA && alpPixBuf) RENAME(hyscale)(c, alpPixBuf[ lumBufIndex ], dstW, src2, srcW, lumXInc, flags, hLumFilter, hLumFilterPos, hLumFilterSize, c->srcFormat, formatConvBuffer, pal, 1); lastInLumBuf++; } while(lastInChrBuf < lastChrSrcY) { uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1]; uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2]; chrBufIndex++; DEBUG_BUFFERS(\"\\t\\tchrBufIndex %d: lastInChrBuf: %d\\n\", chrBufIndex, lastInChrBuf); assert(chrBufIndex < 2*vChrBufSize); assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH)); assert(lastInChrBuf + 1 - chrSrcSliceY >= 0); //FIXME replace parameters through context struct (some at least) if (!(isGray(srcFormat) || isGray(dstFormat))) RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc, flags, hChrFilter, hChrFilterPos, hChrFilterSize, c->srcFormat, formatConvBuffer, pal); lastInChrBuf++; } //wrap buf index around to stay inside the ring buffer if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize; if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize; if (!enough_lines) break; //we can't output a dstY line so let's try with the next slice #if COMPILE_TEMPLATE_MMX c->blueDither= ff_dither8[dstY&1]; if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555) c->greenDither= ff_dither8[dstY&1]; else c->greenDither= ff_dither4[dstY&1]; c->redDither= ff_dither8[(dstY+1)&1]; #endif if (dstY < dstH-2) { const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; const int16_t **chrSrcPtr= (const int16_t **) chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; const int16_t **alpSrcPtr= 
(CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL; #if COMPILE_TEMPLATE_MMX int i; if (flags & SWS_ACCURATE_RND) { int s= APCK_SIZE / 8; for (i=0; i<vLumFilterSize; i+=2) { *(void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ]; *(void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)]; lumMmxFilter[s*i+APCK_COEF/4 ]= lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ] + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0); if (CONFIG_SWSCALE_ALPHA && alpPixBuf) { *(void**)&alpMmxFilter[s*i ]= alpSrcPtr[i ]; *(void**)&alpMmxFilter[s*i+APCK_PTR2/4 ]= alpSrcPtr[i+(vLumFilterSize>1)]; alpMmxFilter[s*i+APCK_COEF/4 ]= alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4 ]; } } for (i=0; i<vChrFilterSize; i+=2) { *(void**)&chrMmxFilter[s*i ]= chrSrcPtr[i ]; *(void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrSrcPtr[i+(vChrFilterSize>1)]; chrMmxFilter[s*i+APCK_COEF/4 ]= chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ] + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0); } } else { for (i=0; i<vLumFilterSize; i++) { lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i]; lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32; lumMmxFilter[4*i+2]= lumMmxFilter[4*i+3]= ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001; if (CONFIG_SWSCALE_ALPHA && alpPixBuf) { alpMmxFilter[4*i+0]= (int32_t)alpSrcPtr[i]; alpMmxFilter[4*i+1]= (uint64_t)alpSrcPtr[i] >> 32; alpMmxFilter[4*i+2]= alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2]; } } for (i=0; i<vChrFilterSize; i++) { chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i]; chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32; chrMmxFilter[4*i+2]= chrMmxFilter[4*i+3]= ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001; } } #endif if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21) { const int chrSkipMask= (1<<c->chrDstVSubSample)-1; if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi c->yuv2nv12X(c, vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, uDest, dstW, chrDstW, dstFormat); } else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12 like const int chrSkipMask= (1<<c->chrDstVSubSample)-1; if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi if (is16BPS(dstFormat)) { yuv2yuvX16inC( vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, alpSrcPtr, (uint16_t *) dest, (uint16_t *) uDest, (uint16_t *) vDest, (uint16_t *) aDest, dstW, chrDstW, dstFormat); } else if (vLumFilterSize == 1 && vChrFilterSize == 1) { // unscaled YV12 int16_t *lumBuf = lumSrcPtr[0]; int16_t *chrBuf= chrSrcPtr[0]; int16_t *alpBuf= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? alpSrcPtr[0] : NULL; c->yuv2yuv1(c, lumBuf, chrBuf, alpBuf, dest, uDest, vDest, aDest, dstW, chrDstW); } else { //General YV12 c->yuv2yuvX(c, vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW); } } else { assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); if (vLumFilterSize == 1 && vChrFilterSize == 2) { //unscaled RGB int chrAlpha= vChrFilter[2*dstY+1]; if(flags & SWS_FULL_CHR_H_INT) { yuv2rgbXinC_full(c, //FIXME write a packed1_full function vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, alpSrcPtr, dest, dstW, dstY); } else { c->yuv2packed1(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1), alpPixBuf ?
*alpSrcPtr : NULL, dest, dstW, chrAlpha, dstFormat, flags, dstY); } } else if (vLumFilterSize == 2 && vChrFilterSize == 2) { //bilinear upscale RGB int lumAlpha= vLumFilter[2*dstY+1]; int chrAlpha= vChrFilter[2*dstY+1]; lumMmxFilter[2]= lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001; chrMmxFilter[2]= chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001; if(flags & SWS_FULL_CHR_H_INT) { yuv2rgbXinC_full(c, //FIXME write a packed2_full function vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, alpSrcPtr, dest, dstW, dstY); } else { c->yuv2packed2(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1), alpPixBuf ? *alpSrcPtr : NULL, alpPixBuf ? *(alpSrcPtr+1) : NULL, dest, dstW, lumAlpha, chrAlpha, dstY); } } else { //general RGB if(flags & SWS_FULL_CHR_H_INT) { yuv2rgbXinC_full(c, vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, alpSrcPtr, dest, dstW, dstY); } else { c->yuv2packedX(c, vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, alpSrcPtr, dest, dstW, dstY); } } } } else { // hmm looks like we can't use MMX here without overwriting this array's tail const int16_t **lumSrcPtr= (const int16_t **)lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; const int16_t **chrSrcPtr= (const int16_t **)chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **)alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL; if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21) { const int chrSkipMask= (1<chrDstVSubSample)-1; if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi yuv2nv12XinC( vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, uDest, dstW, chrDstW, dstFormat); } else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12 const int chrSkipMask= (1<chrDstVSubSample)-1; if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi if (is16BPS(dstFormat)) { yuv2yuvX16inC( vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, alpSrcPtr, (uint16_t *) dest, (uint16_t *) uDest, (uint16_t *) vDest, (uint16_t *) aDest, dstW, chrDstW, dstFormat); } else { yuv2yuvXinC( vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW); } } else { assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); if(flags & SWS_FULL_CHR_H_INT) { yuv2rgbXinC_full(c, vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, alpSrcPtr, dest, dstW, dstY); } else { yuv2packedXinC(c, vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, alpSrcPtr, dest, dstW, dstY); } } } } if ((dstFormat == PIX_FMT_YUVA420P) && !alpPixBuf) fillPlane(dst[3], dstStride[3], dstW, dstY-lastDstY, lastDstY, 255); #if COMPILE_TEMPLATE_MMX if (flags & SWS_CPU_CAPS_MMX2 ) __asm__ volatile(\"sfence\":::\"memory\"); /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. 
*/ if (flags & SWS_CPU_CAPS_3DNOW) __asm__ volatile(\"femms\" :::\"memory\"); else __asm__ volatile(\"emms\" :::\"memory\"); #endif /* store changed local vars back in the context */ c->dstY= dstY; c->lumBufIndex= lumBufIndex; c->chrBufIndex= chrBufIndex; c->lastInLumBuf= lastInLumBuf; c->lastInChrBuf= lastInChrBuf; return dstY - lastDstY; }"} {"target": 0, "idx": 1851, "func": "static uint32_t bonito_spciconf_readb(void *opaque, target_phys_addr_t addr) { PCIBonitoState *s = opaque; uint32_t pciaddr; uint16_t status; DPRINTF(\"bonito_spciconf_readb \"TARGET_FMT_plx\" \\n\", addr); pciaddr = bonito_sbridge_pciaddr(s, addr); if (pciaddr == 0xffffffff) { return 0xff; } /* set the pci address in s->config_reg */ s->pcihost->config_reg = (pciaddr) | (1u << 31); /* clear PCI_STATUS_REC_MASTER_ABORT and PCI_STATUS_REC_TARGET_ABORT */ status = pci_get_word(s->dev.config + PCI_STATUS); status &= ~(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT); pci_set_word(s->dev.config + PCI_STATUS, status); return pci_data_read(s->pcihost->bus, s->pcihost->config_reg, 1); }"} {"target": 0, "idx": 1856, "func": "static void qdev_get_legacy_property(DeviceState *dev, Visitor *v, void *opaque, const char *name, Error **errp) { Property *prop = opaque; if (prop->info->print) { char buffer[1024]; char *ptr = buffer; prop->info->print(dev, prop, buffer, sizeof(buffer)); visit_type_str(v, &ptr, name, errp); } else { error_set(errp, QERR_PERMISSION_DENIED); } }"} {"target": 0, "idx": 1871, "func": "static char *value_string(char *buf, int buf_size, struct unit_value uv) { double vald; int show_float = 0; if (uv.unit == unit_second_str) { vald = uv.val.d; show_float = 1; } else { vald = uv.val.i; } if (uv.unit == unit_second_str && use_value_sexagesimal_format) { double secs; int hours, mins; secs = vald; mins = (int)secs / 60; secs = secs - mins * 60; hours = mins / 60; mins %= 60; snprintf(buf, buf_size, \"%d:%02d:%09.6f\", hours, mins, secs); } else { const char *prefix_string = \"\"; int l; if (use_value_prefix && vald > 1) { long long int index; if (uv.unit == unit_byte_str && use_byte_value_binary_prefix) { index = (long long int) (log2(vald)) / 10; index = av_clip(index, 0, FF_ARRAY_ELEMS(binary_unit_prefixes) - 1); vald /= exp2(index * 10); prefix_string = binary_unit_prefixes[index]; } else { index = (long long int) (log10(vald)) / 3; index = av_clip(index, 0, FF_ARRAY_ELEMS(decimal_unit_prefixes) - 1); vald /= pow(10, index * 3); prefix_string = decimal_unit_prefixes[index]; } } if (show_float || (use_value_prefix && vald != (long long int)vald)) l = snprintf(buf, buf_size, \"%f\", vald); else l = snprintf(buf, buf_size, \"%lld\", (long long int)vald); snprintf(buf+l, buf_size-l, \"%s%s%s\", *prefix_string || show_value_unit ? \" \" : \"\", prefix_string, show_value_unit ? 
uv.unit : \"\"); } return buf; }"} {"target": 1, "idx": 1872, "func": "static int mjpeg_decode_dht(MJpegDecodeContext *s) { int len, index, i, class, n, v, code_max; uint8_t bits_table[17]; uint8_t val_table[256]; len = get_bits(&s->gb, 16) - 2; while (len > 0) { if (len < 17) return -1; class = get_bits(&s->gb, 4); if (class >= 2) return -1; index = get_bits(&s->gb, 4); if (index >= 4) return -1; n = 0; for(i=1;i<=16;i++) { bits_table[i] = get_bits(&s->gb, 8); n += bits_table[i]; } len -= 17; if (len < n || n > 256) return -1; code_max = 0; for(i=0;igb, 8); if (v > code_max) code_max = v; val_table[i] = v; } len -= n; /* build VLC and flush previous vlc if present */ free_vlc(&s->vlcs[class][index]); dprintf(\"class=%d index=%d nb_codes=%d\\n\", class, index, code_max + 1); if(build_vlc(&s->vlcs[class][index], bits_table, val_table, code_max + 1) < 0){ return -1; } } return 0; }"} {"target": 1, "idx": 1882, "func": "static target_ulong disas_insn(CPUX86State *env, DisasContext *s, target_ulong pc_start) { int b, prefixes; int shift; TCGMemOp ot, aflag, dflag; int modrm, reg, rm, mod, op, opreg, val; target_ulong next_eip, tval; int rex_w, rex_r; s->pc_start = s->pc = pc_start; prefixes = 0; s->override = -1; rex_w = -1; rex_r = 0; #ifdef TARGET_X86_64 s->rex_x = 0; s->rex_b = 0; x86_64_hregs = 0; #endif s->rip_offset = 0; /* for relative ip address */ s->vex_l = 0; s->vex_v = 0; next_byte: /* x86 has an upper limit of 15 bytes for an instruction. Since we * do not want to decode and generate IR for an illegal * instruction, the following check limits the instruction size to * 25 bytes: 14 prefix + 1 opc + 6 (modrm+sib+ofs) + 4 imm */ if (s->pc - pc_start > 14) { goto illegal_op; b = cpu_ldub_code(env, s->pc); s->pc++; /* Collect prefixes. */ switch (b) { case 0xf3: prefixes |= PREFIX_REPZ; goto next_byte; case 0xf2: prefixes |= PREFIX_REPNZ; goto next_byte; case 0xf0: prefixes |= PREFIX_LOCK; goto next_byte; case 0x2e: s->override = R_CS; goto next_byte; case 0x36: s->override = R_SS; goto next_byte; case 0x3e: s->override = R_DS; goto next_byte; case 0x26: s->override = R_ES; goto next_byte; case 0x64: s->override = R_FS; goto next_byte; case 0x65: s->override = R_GS; goto next_byte; case 0x66: prefixes |= PREFIX_DATA; goto next_byte; case 0x67: prefixes |= PREFIX_ADR; goto next_byte; #ifdef TARGET_X86_64 case 0x40 ... 0x4f: if (CODE64(s)) { /* REX prefix */ rex_w = (b >> 3) & 1; rex_r = (b & 0x4) << 1; s->rex_x = (b & 0x2) << 2; REX_B(s) = (b & 0x1) << 3; x86_64_hregs = 1; /* select uniform byte register addressing */ goto next_byte; break; #endif case 0xc5: /* 2-byte VEX */ case 0xc4: /* 3-byte VEX */ /* VEX prefixes cannot be used except in 32-bit mode. Otherwise the instruction is LES or LDS. */ if (s->code32 && !s->vm86) { static const int pp_prefix[4] = { 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ }; int vex3, vex2 = cpu_ldub_code(env, s->pc); if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) { /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b, otherwise the instruction is LES or LDS. */ break; s->pc++; /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. 
*/ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_LOCK | PREFIX_DATA)) { goto illegal_op; #ifdef TARGET_X86_64 if (x86_64_hregs) { goto illegal_op; #endif rex_r = (~vex2 >> 4) & 8; if (b == 0xc5) { vex3 = vex2; b = cpu_ldub_code(env, s->pc++); } else { #ifdef TARGET_X86_64 s->rex_x = (~vex2 >> 3) & 8; s->rex_b = (~vex2 >> 2) & 8; #endif vex3 = cpu_ldub_code(env, s->pc++); rex_w = (vex3 >> 7) & 1; switch (vex2 & 0x1f) { case 0x01: /* Implied 0f leading opcode bytes. */ b = cpu_ldub_code(env, s->pc++) | 0x100; break; case 0x02: /* Implied 0f 38 leading opcode bytes. */ b = 0x138; break; case 0x03: /* Implied 0f 3a leading opcode bytes. */ b = 0x13a; break; default: /* Reserved for future use. */ goto unknown_op; s->vex_v = (~vex3 >> 3) & 0xf; s->vex_l = (vex3 >> 2) & 1; prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX; break; /* Post-process prefixes. */ if (CODE64(s)) { /* In 64-bit mode, the default data size is 32-bit. Select 64-bit data with rex_w, and 16-bit data with 0x66; rex_w takes precedence over 0x66 if both are present. */ dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32); /* In 64-bit mode, 0x67 selects 32-bit addressing. */ aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64); } else { /* In 16/32-bit mode, 0x66 selects the opposite data size. */ if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) { dflag = MO_32; } else { dflag = MO_16; /* In 16/32-bit mode, 0x67 selects the opposite addressing. */ if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) { aflag = MO_32; } else { aflag = MO_16; s->prefix = prefixes; s->aflag = aflag; s->dflag = dflag; /* now check op code */ reswitch: switch(b) { case 0x0f: /**************************/ /* extended op code */ b = cpu_ldub_code(env, s->pc++) | 0x100; goto reswitch; /**************************/ /* arith & logic */ case 0x00 ... 0x05: case 0x08 ... 0x0d: case 0x10 ... 0x15: case 0x18 ... 0x1d: case 0x20 ... 0x25: case 0x28 ... 0x2d: case 0x30 ... 0x35: case 0x38 ... 
0x3d: { int op, f, val; op = (b >> 3) & 7; f = (b >> 1) & 3; ot = mo_b_d(b, dflag); switch(f) { case 0: /* OP Ev, Gv */ modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else if (op == OP_XORL && rm == reg) { xor_zero: /* xor reg, reg optimisation */ set_cc_op(s, CC_OP_CLR); tcg_gen_movi_tl(cpu_T0, 0); gen_op_mov_reg_v(ot, reg, cpu_T0); break; } else { opreg = rm; gen_op_mov_v_reg(ot, cpu_T1, reg); gen_op(s, op, ot, opreg); break; case 1: /* OP Gv, Ev */ modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, cpu_T1, cpu_A0); } else if (op == OP_XORL && rm == reg) { goto xor_zero; } else { gen_op_mov_v_reg(ot, cpu_T1, rm); gen_op(s, op, ot, reg); break; case 2: /* OP A, Iv */ val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T1, val); gen_op(s, op, ot, OR_EAX); break; break; case 0x82: if (CODE64(s)) goto illegal_op; case 0x80: /* GRP1 */ case 0x81: case 0x83: { int val; ot = mo_b_d(b, dflag); modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (b == 0x83) s->rip_offset = 1; else s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = rm; switch(b) { default: case 0x80: case 0x81: case 0x82: val = insn_get(env, s, ot); break; case 0x83: val = (int8_t)insn_get(env, s, MO_8); break; tcg_gen_movi_tl(cpu_T1, val); gen_op(s, op, ot, opreg); break; /**************************/ /* inc, dec, and other misc arith */ case 0x40 ... 0x47: /* inc Gv */ ot = dflag; gen_inc(s, ot, OR_EAX + (b & 7), 1); break; case 0x48 ... 0x4f: /* dec Gv */ ot = dflag; gen_inc(s, ot, OR_EAX + (b & 7), -1); break; case 0xf6: /* GRP3 */ case 0xf7: ot = mo_b_d(b, dflag); modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (op == 0) { s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm); /* For those below that handle locked memory, don't load here. 
*/ if (!(s->prefix & PREFIX_LOCK) || op != 2) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_v_reg(ot, cpu_T0, rm); switch(op) { case 0: /* test */ val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T1, val); gen_op_testl_T0_T1_cc(); set_cc_op(s, CC_OP_LOGICB + ot); break; case 2: /* not */ if (s->prefix & PREFIX_LOCK) { if (mod == 3) { goto illegal_op; tcg_gen_movi_tl(cpu_T0, ~0); tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0, s->mem_index, ot | MO_LE); } else { tcg_gen_not_tl(cpu_T0, cpu_T0); if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); break; case 3: /* neg */ if (s->prefix & PREFIX_LOCK) { TCGLabel *label1; TCGv a0, t0, t1, t2; if (mod == 3) { goto illegal_op; a0 = tcg_temp_local_new(); t0 = tcg_temp_local_new(); label1 = gen_new_label(); tcg_gen_mov_tl(a0, cpu_A0); tcg_gen_mov_tl(t0, cpu_T0); gen_set_label(label1); t1 = tcg_temp_new(); t2 = tcg_temp_new(); tcg_gen_mov_tl(t2, t0); tcg_gen_neg_tl(t1, t0); tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1, s->mem_index, ot | MO_LE); tcg_temp_free(t1); tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1); tcg_temp_free(t2); tcg_temp_free(a0); tcg_gen_mov_tl(cpu_T0, t0); tcg_temp_free(t0); } else { tcg_gen_neg_tl(cpu_T0, cpu_T0); if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); gen_op_update_neg_cc(); set_cc_op(s, CC_OP_SUBB + ot); break; case 4: /* mul */ switch(ot) { case MO_8: gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX); tcg_gen_ext8u_tl(cpu_T0, cpu_T0); tcg_gen_ext8u_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00); set_cc_op(s, CC_OP_MULB); break; case MO_16: gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX); tcg_gen_ext16u_tl(cpu_T0, cpu_T0); tcg_gen_ext16u_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_shri_tl(cpu_T0, cpu_T0, 16); gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0); tcg_gen_mov_tl(cpu_cc_src, cpu_T0); set_cc_op(s, CC_OP_MULW); break; default: case MO_32: tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]); tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULL); break; #ifdef TARGET_X86_64 case MO_64: tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_T0, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULQ); break; #endif break; case 5: /* imul */ switch(ot) { case MO_8: gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX); tcg_gen_ext8s_tl(cpu_T0, cpu_T0); tcg_gen_ext8s_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0); tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0); set_cc_op(s, CC_OP_MULB); break; case MO_16: gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX); tcg_gen_ext16s_tl(cpu_T0, cpu_T0); tcg_gen_ext16s_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); 
gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0); tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0); tcg_gen_shri_tl(cpu_T0, cpu_T0, 16); gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0); set_cc_op(s, CC_OP_MULW); break; default: case MO_32: tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]); tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32); tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32); set_cc_op(s, CC_OP_MULL); break; #ifdef TARGET_X86_64 case MO_64: tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_T0, cpu_regs[R_EAX]); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]); tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63); tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]); set_cc_op(s, CC_OP_MULQ); break; #endif break; case 6: /* div */ switch(ot) { case MO_8: gen_helper_divb_AL(cpu_env, cpu_T0); break; case MO_16: gen_helper_divw_AX(cpu_env, cpu_T0); break; default: case MO_32: gen_helper_divl_EAX(cpu_env, cpu_T0); break; #ifdef TARGET_X86_64 case MO_64: gen_helper_divq_EAX(cpu_env, cpu_T0); break; #endif break; case 7: /* idiv */ switch(ot) { case MO_8: gen_helper_idivb_AL(cpu_env, cpu_T0); break; case MO_16: gen_helper_idivw_AX(cpu_env, cpu_T0); break; default: case MO_32: gen_helper_idivl_EAX(cpu_env, cpu_T0); break; #ifdef TARGET_X86_64 case MO_64: gen_helper_idivq_EAX(cpu_env, cpu_T0); break; #endif break; default: goto unknown_op; break; case 0xfe: /* GRP4 */ case 0xff: /* GRP5 */ ot = mo_b_d(b, dflag); modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (op >= 2 && b == 0xfe) { goto unknown_op; if (CODE64(s)) { if (op == 2 || op == 4) { /* operand size for jumps is 64 bit */ ot = MO_64; } else if (op == 3 || op == 5) { ot = dflag != MO_16 ? 
MO_32 + (rex_w == 1) : MO_16; } else if (op == 6) { /* default push size is 64 bit */ ot = mo_pushpop(s, dflag); if (mod != 3) { gen_lea_modrm(env, s, modrm); if (op >= 2 && op != 3 && op != 5) gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_v_reg(ot, cpu_T0, rm); switch(op) { case 0: /* inc Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, 1); break; case 1: /* dec Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, -1); break; case 2: /* call Ev */ /* XXX: optimize if memory (no 'and' is necessary) */ if (dflag == MO_16) { tcg_gen_ext16u_tl(cpu_T0, cpu_T0); next_eip = s->pc - s->cs_base; tcg_gen_movi_tl(cpu_T1, next_eip); gen_push_v(s, cpu_T1); gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 3: /* lcall Ev */ gen_op_ld_v(s, ot, cpu_T1, cpu_A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); do_lcall: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1, tcg_const_i32(dflag - 1), tcg_const_tl(s->pc - s->cs_base)); } else { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1, tcg_const_i32(dflag - 1), tcg_const_i32(s->pc - s->cs_base)); tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip)); gen_jr(s, cpu_tmp4); break; case 4: /* jmp Ev */ if (dflag == MO_16) { tcg_gen_ext16u_tl(cpu_T0, cpu_T0); gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 5: /* ljmp Ev */ gen_op_ld_v(s, ot, cpu_T1, cpu_A0); gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); do_ljmp: if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1, tcg_const_tl(s->pc - s->cs_base)); } else { gen_op_movl_seg_T0_vm(R_CS); gen_op_jmp_v(cpu_T1); tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip)); gen_jr(s, cpu_tmp4); break; case 6: /* push Ev */ gen_push_v(s, cpu_T0); break; default: goto unknown_op; break; case 0x84: /* test Ev, Gv */ case 0x85: ot = mo_b_d(b, dflag); modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_v_reg(ot, cpu_T1, reg); gen_op_testl_T0_T1_cc(); set_cc_op(s, CC_OP_LOGICB + ot); break; case 0xa8: /* test eAX, Iv */ case 0xa9: ot = mo_b_d(b, dflag); val = insn_get(env, s, ot); gen_op_mov_v_reg(ot, cpu_T0, OR_EAX); tcg_gen_movi_tl(cpu_T1, val); gen_op_testl_T0_T1_cc(); set_cc_op(s, CC_OP_LOGICB + ot); break; case 0x98: /* CWDE/CBW */ switch (dflag) { #ifdef TARGET_X86_64 case MO_64: gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX); tcg_gen_ext32s_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0); break; #endif case MO_32: gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX); tcg_gen_ext16s_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0); break; case MO_16: gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX); tcg_gen_ext8s_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); break; default: tcg_abort(); break; case 0x99: /* CDQ/CWD */ switch (dflag) { #ifdef TARGET_X86_64 case MO_64: gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX); tcg_gen_sari_tl(cpu_T0, cpu_T0, 63); gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0); break; #endif case MO_32: gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX); tcg_gen_ext32s_tl(cpu_T0, cpu_T0); tcg_gen_sari_tl(cpu_T0, cpu_T0, 31); gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0); break; case MO_16: gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX); tcg_gen_ext16s_tl(cpu_T0, cpu_T0); tcg_gen_sari_tl(cpu_T0, cpu_T0, 15); 
gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0); break; default: tcg_abort(); break; case 0x1af: /* imul Gv, Ev */ case 0x69: /* imul Gv, Ev, I */ case 0x6b: ot = dflag; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; if (b == 0x69) s->rip_offset = insn_const_size(ot); else if (b == 0x6b) s->rip_offset = 1; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); if (b == 0x69) { val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T1, val); } else if (b == 0x6b) { val = (int8_t)insn_get(env, s, MO_8); tcg_gen_movi_tl(cpu_T1, val); } else { gen_op_mov_v_reg(ot, cpu_T1, reg); switch (ot) { #ifdef TARGET_X86_64 case MO_64: tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63); tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1); break; #endif case MO_32: tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31); tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]); tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32); break; default: tcg_gen_ext16s_tl(cpu_T0, cpu_T0); tcg_gen_ext16s_tl(cpu_T1, cpu_T1); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1); tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0); tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0); gen_op_mov_reg_v(ot, reg, cpu_T0); break; set_cc_op(s, CC_OP_MULB + ot); break; case 0x1c0: case 0x1c1: /* xadd Ev, Gv */ ot = mo_b_d(b, dflag); modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; gen_op_mov_v_reg(ot, cpu_T0, reg); if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(ot, cpu_T1, rm); tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_mov_reg_v(ot, reg, cpu_T1); gen_op_mov_reg_v(ot, rm, cpu_T0); } else { gen_lea_modrm(env, s, modrm); if (s->prefix & PREFIX_LOCK) { tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0, s->mem_index, ot | MO_LE); tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); } else { gen_op_ld_v(s, ot, cpu_T1, cpu_A0); tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1); gen_op_st_v(s, ot, cpu_T0, cpu_A0); gen_op_mov_reg_v(ot, reg, cpu_T1); gen_op_update2_cc(); set_cc_op(s, CC_OP_ADDB + ot); break; case 0x1b0: case 0x1b1: /* cmpxchg Ev, Gv */ { TCGv oldv, newv, cmpv; ot = mo_b_d(b, dflag); modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; oldv = tcg_temp_new(); newv = tcg_temp_new(); cmpv = tcg_temp_new(); gen_op_mov_v_reg(ot, newv, reg); tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]); if (s->prefix & PREFIX_LOCK) { if (mod == 3) { goto illegal_op; gen_lea_modrm(env, s, modrm); tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv, s->mem_index, ot | MO_LE); gen_op_mov_reg_v(ot, R_EAX, oldv); } else { if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(ot, oldv, rm); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, oldv, cpu_A0); rm = 0; /* avoid warning */ gen_extu(ot, oldv); gen_extu(ot, cmpv); /* store value = (old == cmp ? 
new : old); */ tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv); if (mod == 3) { gen_op_mov_reg_v(ot, R_EAX, oldv); gen_op_mov_reg_v(ot, rm, newv); } else { /* Perform an unconditional store cycle like physical cpu; must be before changing accumulator to ensure idempotency if the store faults and the instruction is restarted */ gen_op_st_v(s, ot, newv, cpu_A0); gen_op_mov_reg_v(ot, R_EAX, oldv); tcg_gen_mov_tl(cpu_cc_src, oldv); tcg_gen_mov_tl(cpu_cc_srcT, cmpv); tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv); set_cc_op(s, CC_OP_SUBB + ot); tcg_temp_free(oldv); tcg_temp_free(newv); tcg_temp_free(cmpv); break; case 0x1c7: /* cmpxchg8b */ modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; if ((mod == 3) || ((modrm & 0x38) != 0x8)) goto illegal_op; #ifdef TARGET_X86_64 if (dflag == MO_64) { if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) goto illegal_op; gen_lea_modrm(env, s, modrm); if ((s->prefix & PREFIX_LOCK) && parallel_cpus) { gen_helper_cmpxchg16b(cpu_env, cpu_A0); } else { gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0); } else #endif { if (!(s->cpuid_features & CPUID_CX8)) goto illegal_op; gen_lea_modrm(env, s, modrm); if ((s->prefix & PREFIX_LOCK) && parallel_cpus) { gen_helper_cmpxchg8b(cpu_env, cpu_A0); } else { gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0); set_cc_op(s, CC_OP_EFLAGS); break; /**************************/ /* push/pop */ case 0x50 ... 0x57: /* push */ gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s)); gen_push_v(s, cpu_T0); break; case 0x58 ... 0x5f: /* pop */ ot = gen_pop_T0(s); /* NOTE: order is important for pop %sp */ gen_pop_update(s, ot); gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0); break; case 0x60: /* pusha */ if (CODE64(s)) goto illegal_op; gen_pusha(s); break; case 0x61: /* popa */ if (CODE64(s)) goto illegal_op; gen_popa(s); break; case 0x68: /* push Iv */ case 0x6a: ot = mo_pushpop(s, dflag); if (b == 0x68) val = insn_get(env, s, ot); else val = (int8_t)insn_get(env, s, MO_8); tcg_gen_movi_tl(cpu_T0, val); gen_push_v(s, cpu_T0); break; case 0x8f: /* pop Ev */ modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; ot = gen_pop_T0(s); if (mod == 3) { /* NOTE: order is important for pop %sp */ gen_pop_update(s, ot); rm = (modrm & 7) | REX_B(s); gen_op_mov_reg_v(ot, rm, cpu_T0); } else { /* NOTE: order is important too for MMU exceptions */ s->popl_esp_hack = 1 << ot; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); s->popl_esp_hack = 0; gen_pop_update(s, ot); break; case 0xc8: /* enter */ { int level; val = cpu_lduw_code(env, s->pc); s->pc += 2; level = cpu_ldub_code(env, s->pc++); gen_enter(s, val, level); break; case 0xc9: /* leave */ gen_leave(s); break; case 0x06: /* push es */ case 0x0e: /* push cs */ case 0x16: /* push ss */ case 0x1e: /* push ds */ if (CODE64(s)) goto illegal_op; gen_op_movl_T0_seg(b >> 3); gen_push_v(s, cpu_T0); break; case 0x1a0: /* push fs */ case 0x1a8: /* push gs */ gen_op_movl_T0_seg((b >> 3) & 7); gen_push_v(s, cpu_T0); break; case 0x07: /* pop es */ case 0x17: /* pop ss */ case 0x1f: /* pop ds */ if (CODE64(s)) goto illegal_op; reg = b >> 3; ot = gen_pop_T0(s); gen_movl_seg_T0(s, reg); gen_pop_update(s, ot); /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. 
*/ if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); if (reg == R_SS) { s->tf = 0; gen_eob_inhibit_irq(s, true); } else { gen_eob(s); break; case 0x1a1: /* pop fs */ case 0x1a9: /* pop gs */ ot = gen_pop_T0(s); gen_movl_seg_T0(s, (b >> 3) & 7); gen_pop_update(s, ot); if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; /**************************/ /* mov */ case 0x88: case 0x89: /* mov Gv, Ev */ ot = mo_b_d(b, dflag); modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); break; case 0xc6: case 0xc7: /* mov Ev, Iv */ ot = mo_b_d(b, dflag); modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; if (mod != 3) { s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm); val = insn_get(env, s, ot); tcg_gen_movi_tl(cpu_T0, val); if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0); break; case 0x8a: case 0x8b: /* mov Ev, Gv */ ot = mo_b_d(b, dflag); modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_reg_v(ot, reg, cpu_T0); break; case 0x8e: /* mov seg, Gv */ modrm = cpu_ldub_code(env, s->pc++); reg = (modrm >> 3) & 7; if (reg >= 6 || reg == R_CS) goto illegal_op; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_movl_seg_T0(s, reg); /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */ if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); if (reg == R_SS) { s->tf = 0; gen_eob_inhibit_irq(s, true); } else { gen_eob(s); break; case 0x8c: /* mov Gv, seg */ modrm = cpu_ldub_code(env, s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (reg >= 6) goto illegal_op; gen_op_movl_T0_seg(reg); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 0x1b6: /* movzbS Gv, Eb */ case 0x1b7: /* movzwS Gv, Eb */ case 0x1be: /* movsbS Gv, Eb */ case 0x1bf: /* movswS Gv, Eb */ { TCGMemOp d_ot; TCGMemOp s_ot; /* d_ot is the size of destination */ d_ot = dflag; /* ot is the size of source */ ot = (b & 1) + MO_8; /* s_ot is the sign+size of source */ s_ot = b & 8 ? 
MO_SIGN | ot : ot; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { if (s_ot == MO_SB && byte_reg_is_xH(rm)) { tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8); } else { gen_op_mov_v_reg(ot, cpu_T0, rm); switch (s_ot) { case MO_UB: tcg_gen_ext8u_tl(cpu_T0, cpu_T0); break; case MO_SB: tcg_gen_ext8s_tl(cpu_T0, cpu_T0); break; case MO_UW: tcg_gen_ext16u_tl(cpu_T0, cpu_T0); break; default: case MO_SW: tcg_gen_ext16s_tl(cpu_T0, cpu_T0); break; gen_op_mov_reg_v(d_ot, reg, cpu_T0); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0); gen_op_mov_reg_v(d_ot, reg, cpu_T0); break; case 0x8d: /* lea */ modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; { AddressParts a = gen_lea_modrm_0(env, s, modrm); TCGv ea = gen_lea_modrm_1(a); gen_lea_v_seg(s, s->aflag, ea, -1, -1); gen_op_mov_reg_v(dflag, reg, cpu_A0); break; case 0xa0: /* mov EAX, Ov */ case 0xa1: case 0xa2: /* mov Ov, EAX */ case 0xa3: { target_ulong offset_addr; ot = mo_b_d(b, dflag); switch (s->aflag) { #ifdef TARGET_X86_64 case MO_64: offset_addr = cpu_ldq_code(env, s->pc); s->pc += 8; break; #endif default: offset_addr = insn_get(env, s, s->aflag); break; tcg_gen_movi_tl(cpu_A0, offset_addr); gen_add_A0_ds_seg(s); if ((b & 2) == 0) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); gen_op_mov_reg_v(ot, R_EAX, cpu_T0); } else { gen_op_mov_v_reg(ot, cpu_T0, R_EAX); gen_op_st_v(s, ot, cpu_T0, cpu_A0); break; case 0xd7: /* xlat */ tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]); tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0); gen_extu(s->aflag, cpu_A0); gen_add_A0_ds_seg(s); gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0); gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0); break; case 0xb0 ... 0xb7: /* mov R, Ib */ val = insn_get(env, s, MO_8); tcg_gen_movi_tl(cpu_T0, val); gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0); break; case 0xb8 ... 0xbf: /* mov R, Iv */ #ifdef TARGET_X86_64 if (dflag == MO_64) { uint64_t tmp; /* 64 bit case */ tmp = cpu_ldq_code(env, s->pc); s->pc += 8; reg = (b & 7) | REX_B(s); tcg_gen_movi_tl(cpu_T0, tmp); gen_op_mov_reg_v(MO_64, reg, cpu_T0); } else #endif { ot = dflag; val = insn_get(env, s, ot); reg = (b & 7) | REX_B(s); tcg_gen_movi_tl(cpu_T0, val); gen_op_mov_reg_v(ot, reg, cpu_T0); break; case 0x91 ... 0x97: /* xchg R, EAX */ do_xchg_reg_eax: ot = dflag; reg = (b & 7) | REX_B(s); rm = R_EAX; goto do_xchg_reg; case 0x86: case 0x87: /* xchg Ev, Gv */ ot = mo_b_d(b, dflag); modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) { rm = (modrm & 7) | REX_B(s); do_xchg_reg: gen_op_mov_v_reg(ot, cpu_T0, reg); gen_op_mov_v_reg(ot, cpu_T1, rm); gen_op_mov_reg_v(ot, rm, cpu_T0); gen_op_mov_reg_v(ot, reg, cpu_T1); } else { gen_lea_modrm(env, s, modrm); gen_op_mov_v_reg(ot, cpu_T0, reg); /* for xchg, lock is implicit */ tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0, s->mem_index, ot | MO_LE); gen_op_mov_reg_v(ot, reg, cpu_T1); break; case 0xc4: /* les Gv */ /* In CODE64 this is VEX3; see above. */ op = R_ES; goto do_lxx; case 0xc5: /* lds Gv */ /* In CODE64 this is VEX2; see above. */ op = R_DS; goto do_lxx; case 0x1b2: /* lss Gv */ op = R_SS; goto do_lxx; case 0x1b4: /* lfs Gv */ op = R_FS; goto do_lxx; case 0x1b5: /* lgs Gv */ op = R_GS; do_lxx: ot = dflag != MO_16 ? 
MO_32 : MO_16; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, cpu_T1, cpu_A0); gen_add_A0_im(s, 1 << ot); /* load the segment first to handle exceptions properly */ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0); gen_movl_seg_T0(s, op); /* then put the data */ gen_op_mov_reg_v(ot, reg, cpu_T1); if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; /************************/ /* shifts */ case 0xc0: case 0xc1: /* shift Ev,Ib */ shift = 2; grp2: { ot = mo_b_d(b, dflag); modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; if (mod != 3) { if (shift == 2) { s->rip_offset = 1; gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = (modrm & 7) | REX_B(s); /* simpler op */ if (shift == 0) { gen_shift(s, op, ot, opreg, OR_ECX); } else { if (shift == 2) { shift = cpu_ldub_code(env, s->pc++); gen_shifti(s, op, ot, opreg, shift); break; case 0xd0: case 0xd1: /* shift Ev,1 */ shift = 1; goto grp2; case 0xd2: case 0xd3: /* shift Ev,cl */ shift = 0; goto grp2; case 0x1a4: /* shld imm */ op = 0; shift = 1; goto do_shiftd; case 0x1a5: /* shld cl */ op = 0; shift = 0; goto do_shiftd; case 0x1ac: /* shrd imm */ op = 1; shift = 1; goto do_shiftd; case 0x1ad: /* shrd cl */ op = 1; shift = 0; do_shiftd: ot = dflag; modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3) { gen_lea_modrm(env, s, modrm); opreg = OR_TMP0; } else { opreg = rm; gen_op_mov_v_reg(ot, cpu_T1, reg); if (shift) { TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++)); gen_shiftd_rm_T1(s, ot, opreg, op, imm); tcg_temp_free(imm); } else { gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]); break; /************************/ /* floats */ case 0xd8 ... 0xdf: if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { /* if CR0.EM or CR0.TS are set, generate an FPU exception */ /* XXX: what to do if illegal op ? */ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; rm = modrm & 7; op = ((b & 7) << 3) | ((modrm >> 3) & 7); if (mod != 3) { /* memory op */ gen_lea_modrm(env, s, modrm); switch(op) { case 0x00 ... 0x07: /* fxxxs */ case 0x10 ... 0x17: /* fixxxl */ case 0x20 ... 0x27: /* fxxxl */ case 0x30 ... 0x37: /* fixxx */ { int op1; op1 = op & 7; switch(op >> 4) { case 0: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32); break; case 1: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); break; case 2: tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64); break; case 3: default: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LESW); gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); break; gen_helper_fp_arith_ST0_FT0(op1); if (op1 == 3) { /* fcomp needs pop */ gen_helper_fpop(cpu_env); break; case 0x08: /* flds */ case 0x0a: /* fsts */ case 0x0b: /* fstps */ case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */ case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */ case 0x38 ... 
0x3b: /* filds, fisttps, fists, fistps */ switch(op & 7) { case 0: switch(op >> 4) { case 0: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32); break; case 1: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); break; case 2: tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64); break; case 3: default: tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LESW); gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); break; break; case 1: /* XXX: the corresponding CPUID bit must be tested ! */ switch(op >> 4) { case 1: gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); break; case 2: gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); break; case 3: default: gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; gen_helper_fpop(cpu_env); break; default: switch(op >> 4) { case 0: gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); break; case 1: gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); break; case 2: gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); break; case 3: default: gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; if ((op & 7) == 3) gen_helper_fpop(cpu_env); break; break; case 0x0c: /* fldenv mem */ gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x0d: /* fldcw mem */ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); gen_helper_fldcw(cpu_env, cpu_tmp2_i32); break; case 0x0e: /* fnstenv mem */ gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x0f: /* fnstcw mem */ gen_helper_fnstcw(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; case 0x1d: /* fldt mem */ gen_helper_fldt_ST0(cpu_env, cpu_A0); break; case 0x1f: /* fstpt mem */ gen_helper_fstt_ST0(cpu_env, cpu_A0); gen_helper_fpop(cpu_env); break; case 0x2c: /* frstor mem */ gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x2e: /* fnsave mem */ gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1)); break; case 0x2f: /* fnstsw mem */ gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUW); break; case 0x3c: /* fbld */ gen_helper_fbld_ST0(cpu_env, cpu_A0); break; case 0x3e: /* fbstp */ gen_helper_fbst_ST0(cpu_env, cpu_A0); gen_helper_fpop(cpu_env); break; case 0x3d: /* fildll */ tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64); break; case 0x3f: /* fistpll */ gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); gen_helper_fpop(cpu_env); break; default: goto unknown_op; } else { /* register float ops */ opreg = rm; switch(op) { case 0x08: /* fld sti */ gen_helper_fpush(cpu_env); gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32((opreg + 1) & 7)); break; case 0x09: /* fxchg sti */ case 0x29: /* fxchg4 sti, undocumented op */ case 0x39: /* fxchg7 sti, undocumented op */ gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg)); break; case 
0x0a: /* grp d9/2 */ switch(rm) { case 0: /* fnop */ /* check exceptions (FreeBSD FPU probe) */ gen_helper_fwait(cpu_env); break; default: goto unknown_op; break; case 0x0c: /* grp d9/4 */ switch(rm) { case 0: /* fchs */ gen_helper_fchs_ST0(cpu_env); break; case 1: /* fabs */ gen_helper_fabs_ST0(cpu_env); break; case 4: /* ftst */ gen_helper_fldz_FT0(cpu_env); gen_helper_fcom_ST0_FT0(cpu_env); break; case 5: /* fxam */ gen_helper_fxam_ST0(cpu_env); break; default: goto unknown_op; break; case 0x0d: /* grp d9/5 */ { switch(rm) { case 0: gen_helper_fpush(cpu_env); gen_helper_fld1_ST0(cpu_env); break; case 1: gen_helper_fpush(cpu_env); gen_helper_fldl2t_ST0(cpu_env); break; case 2: gen_helper_fpush(cpu_env); gen_helper_fldl2e_ST0(cpu_env); break; case 3: gen_helper_fpush(cpu_env); gen_helper_fldpi_ST0(cpu_env); break; case 4: gen_helper_fpush(cpu_env); gen_helper_fldlg2_ST0(cpu_env); break; case 5: gen_helper_fpush(cpu_env); gen_helper_fldln2_ST0(cpu_env); break; case 6: gen_helper_fpush(cpu_env); gen_helper_fldz_ST0(cpu_env); break; default: goto unknown_op; break; case 0x0e: /* grp d9/6 */ switch(rm) { case 0: /* f2xm1 */ gen_helper_f2xm1(cpu_env); break; case 1: /* fyl2x */ gen_helper_fyl2x(cpu_env); break; case 2: /* fptan */ gen_helper_fptan(cpu_env); break; case 3: /* fpatan */ gen_helper_fpatan(cpu_env); break; case 4: /* fxtract */ gen_helper_fxtract(cpu_env); break; case 5: /* fprem1 */ gen_helper_fprem1(cpu_env); break; case 6: /* fdecstp */ gen_helper_fdecstp(cpu_env); break; default: case 7: /* fincstp */ gen_helper_fincstp(cpu_env); break; break; case 0x0f: /* grp d9/7 */ switch(rm) { case 0: /* fprem */ gen_helper_fprem(cpu_env); break; case 1: /* fyl2xp1 */ gen_helper_fyl2xp1(cpu_env); break; case 2: /* fsqrt */ gen_helper_fsqrt(cpu_env); break; case 3: /* fsincos */ gen_helper_fsincos(cpu_env); break; case 5: /* fscale */ gen_helper_fscale(cpu_env); break; case 4: /* frndint */ gen_helper_frndint(cpu_env); break; case 6: /* fsin */ gen_helper_fsin(cpu_env); break; default: case 7: /* fcos */ gen_helper_fcos(cpu_env); break; break; case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */ case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */ case 0x30: case 0x31: case 0x34 ... 
0x37: /* fxxxp sti, st */ { int op1; op1 = op & 7; if (op >= 0x20) { gen_helper_fp_arith_STN_ST0(op1, opreg); if (op >= 0x30) gen_helper_fpop(cpu_env); } else { gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fp_arith_ST0_FT0(op1); break; case 0x02: /* fcom */ case 0x22: /* fcom2, undocumented op */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcom_ST0_FT0(cpu_env); break; case 0x03: /* fcomp */ case 0x23: /* fcomp3, undocumented op */ case 0x32: /* fcomp5, undocumented op */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); break; case 0x15: /* da/5 */ switch(rm) { case 1: /* fucompp */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); gen_helper_fucom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); gen_helper_fpop(cpu_env); break; default: goto unknown_op; break; case 0x1c: switch(rm) { case 0: /* feni (287 only, just do nop here) */ break; case 1: /* fdisi (287 only, just do nop here) */ break; case 2: /* fclex */ gen_helper_fclex(cpu_env); break; case 3: /* fninit */ gen_helper_fninit(cpu_env); break; case 4: /* fsetpm (287 only, just do nop here) */ break; default: goto unknown_op; break; case 0x1d: /* fucomi */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucomi_ST0_FT0(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x1e: /* fcomi */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcomi_ST0_FT0(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x28: /* ffree sti */ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); break; case 0x2a: /* fst sti */ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); break; case 0x2b: /* fstp sti */ case 0x0b: /* fstp1 sti, undocumented op */ case 0x3a: /* fstp8 sti, undocumented op */ case 0x3b: /* fstp9 sti, undocumented op */ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); gen_helper_fpop(cpu_env); break; case 0x2c: /* fucom st(i) */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucom_ST0_FT0(cpu_env); break; case 0x2d: /* fucomp st(i) */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); break; case 0x33: /* de/3 */ switch(rm) { case 1: /* fcompp */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); gen_helper_fcom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); gen_helper_fpop(cpu_env); break; default: goto unknown_op; break; case 0x38: /* ffreep sti, undocumented op */ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fpop(cpu_env); break; case 0x3c: /* df/4 */ switch(rm) { case 0: gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32); gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0); break; default: goto unknown_op; break; case 0x3d: /* fucomip */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucomi_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x3e: /* fcomip */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; gen_update_cc_op(s); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcomi_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x10 ... 0x13: /* fcmovxx */ case 0x18 ... 
0x1b: { int op1; TCGLabel *l1; static const uint8_t fcmov_cc[8] = { (JCC_B << 1), (JCC_Z << 1), (JCC_BE << 1), (JCC_P << 1), }; if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); l1 = gen_new_label(); gen_jcc1_noeob(s, op1, l1); gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg)); gen_set_label(l1); break; default: goto unknown_op; break; /************************/ /* string ops */ case 0xa4: /* movsS */ case 0xa5: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_movs(s, ot); break; case 0xaa: /* stosS */ case 0xab: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_stos(s, ot); break; case 0xac: /* lodsS */ case 0xad: ot = mo_b_d(b, dflag); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_lods(s, ot); break; case 0xae: /* scasS */ case 0xaf: ot = mo_b_d(b, dflag); if (prefixes & PREFIX_REPNZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_scas(s, ot); break; case 0xa6: /* cmpsS */ case 0xa7: ot = mo_b_d(b, dflag); if (prefixes & PREFIX_REPNZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_cmps(s, ot); break; case 0x6c: /* insS */ case 0x6d: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_ins(s, ot); gen_jmp(s, s->pc - s->cs_base); break; case 0x6e: /* outsS */ case 0x6f: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_outs(s, ot); gen_jmp(s, s->pc - s->cs_base); break; /************************/ /* port I/O */ case 0xe4: case 0xe5: ot = mo_b_d32(b, dflag); val = cpu_ldub_code(env, s->pc++); tcg_gen_movi_tl(cpu_T0, val); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); tcg_gen_movi_i32(cpu_tmp2_i32, val); gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32); gen_op_mov_reg_v(ot, R_EAX, cpu_T1); gen_bpt_io(s, cpu_tmp2_i32, ot); gen_jmp(s, s->pc - s->cs_base); break; case 0xe6: case 0xe7: ot = mo_b_d32(b, dflag); val = cpu_ldub_code(env, s->pc++); tcg_gen_movi_tl(cpu_T0, val); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_v_reg(ot, cpu_T1, R_EAX); tcg_gen_movi_i32(cpu_tmp2_i32, val); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); gen_bpt_io(s, cpu_tmp2_i32, ot); gen_jmp(s, s->pc - s->cs_base); break; case 0xec: case 0xed: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32); gen_op_mov_reg_v(ot, R_EAX, cpu_T1); gen_bpt_io(s, cpu_tmp2_i32, ot); 
gen_jmp(s, s->pc - s->cs_base); break; case 0xee: case 0xef: ot = mo_b_d32(b, dflag); tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_v_reg(ot, cpu_T1, R_EAX); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); gen_bpt_io(s, cpu_tmp2_i32, ot); gen_jmp(s, s->pc - s->cs_base); break; /************************/ /* control */ case 0xc2: /* ret im */ val = cpu_ldsw_code(env, s->pc); s->pc += 2; ot = gen_pop_T0(s); gen_stack_update(s, val + (1 << ot)); /* Note that gen_pop_T0 uses a zero-extending load. */ gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 0xc3: /* ret */ ot = gen_pop_T0(s); gen_pop_update(s, ot); /* Note that gen_pop_T0 uses a zero-extending load. */ gen_op_jmp_v(cpu_T0); gen_bnd_jmp(s); gen_jr(s, cpu_T0); break; case 0xca: /* lret im */ val = cpu_ldsw_code(env, s->pc); s->pc += 2; do_lret: if (s->pe && !s->vm86) { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1), tcg_const_i32(val)); } else { gen_stack_A0(s); /* pop offset */ gen_op_ld_v(s, dflag, cpu_T0, cpu_A0); /* NOTE: keeping EIP updated is not a problem in case of exception */ gen_op_jmp_v(cpu_T0); /* pop selector */ gen_add_A0_im(s, 1 << dflag); gen_op_ld_v(s, dflag, cpu_T0, cpu_A0); gen_op_movl_seg_T0_vm(R_CS); /* add stack offset */ gen_stack_update(s, val + (2 << dflag)); gen_eob(s); break; case 0xcb: /* lret */ val = 0; goto do_lret; case 0xcf: /* iret */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET); if (!s->pe) { /* real mode */ gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); set_cc_op(s, CC_OP_EFLAGS); } else if (s->vm86) { if (s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1)); set_cc_op(s, CC_OP_EFLAGS); } else { gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1), tcg_const_i32(s->pc - s->cs_base)); set_cc_op(s, CC_OP_EFLAGS); gen_eob(s); break; case 0xe8: /* call im */ { if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; } else if (!CODE64(s)) { tval &= 0xffffffff; tcg_gen_movi_tl(cpu_T0, next_eip); gen_push_v(s, cpu_T0); gen_bnd_jmp(s); gen_jmp(s, tval); break; case 0x9a: /* lcall im */ { unsigned int selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag; offset = insn_get(env, s, ot); selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(cpu_T0, selector); tcg_gen_movi_tl(cpu_T1, offset); goto do_lcall; case 0xe9: /* jmp im */ if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); tval += s->pc - s->cs_base; if (dflag == MO_16) { tval &= 0xffff; } else if (!CODE64(s)) { tval &= 0xffffffff; gen_bnd_jmp(s); gen_jmp(s, tval); break; case 0xea: /* ljmp im */ { unsigned int selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag; offset = insn_get(env, s, ot); selector = insn_get(env, s, MO_16); tcg_gen_movi_tl(cpu_T0, selector); tcg_gen_movi_tl(cpu_T1, offset); goto do_ljmp; case 0xeb: /* jmp Jb */ tval = (int8_t)insn_get(env, s, MO_8); tval += s->pc - s->cs_base; if (dflag == MO_16) { tval &= 0xffff; gen_jmp(s, tval); break; case 0x70 ... 0x7f: /* jcc Jb */ tval = (int8_t)insn_get(env, s, MO_8); goto do_jcc; case 0x180 ... 
0x18f: /* jcc Jv */ if (dflag != MO_16) { tval = (int32_t)insn_get(env, s, MO_32); } else { tval = (int16_t)insn_get(env, s, MO_16); do_jcc: next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; gen_bnd_jmp(s); gen_jcc(s, b, tval, next_eip); break; case 0x190 ... 0x19f: /* setcc Gv */ modrm = cpu_ldub_code(env, s->pc++); gen_setcc1(s, b, cpu_T0); gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1); break; case 0x140 ... 0x14f: /* cmov Gv, Ev */ if (!(s->cpuid_features & CPUID_CMOV)) { goto illegal_op; ot = dflag; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_cmovcc1(env, s, ot, b, modrm, reg); break; /************************/ /* flags */ case 0x9c: /* pushf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_helper_read_eflags(cpu_T0, cpu_env); gen_push_v(s, cpu_T0); break; case 0x9d: /* popf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { ot = gen_pop_T0(s); if (s->cpl == 0) { if (dflag != MO_16) { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff)); } else { if (s->cpl <= s->iopl) { if (dflag != MO_16) { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff)); } else { if (dflag != MO_16) { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T0, tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff)); gen_pop_update(s, ot); set_cc_op(s, CC_OP_EFLAGS); /* abort translation because TF/AC flag may change */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; case 0x9e: /* sahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; gen_op_mov_v_reg(MO_8, cpu_T0, R_AH); gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O); tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0); break; case 0x9f: /* lahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; gen_compute_eflags(s); /* Note: gen_compute_eflags() only gives the condition codes */ tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02); gen_op_mov_reg_v(MO_8, R_AH, cpu_T0); break; case 0xf5: /* cmc */ gen_compute_eflags(s); tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C); break; case 0xf8: /* clc */ gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C); break; case 0xf9: /* stc */ gen_compute_eflags(s); tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C); break; case 0xfc: /* cld */ tcg_gen_movi_i32(cpu_tmp2_i32, 1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); break; case 0xfd: /* std */ tcg_gen_movi_i32(cpu_tmp2_i32, -1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); break; /************************/ /* bit operations */ case 0x1ba: /* bt/bts/btr/btc Gv, im */ ot = dflag; modrm = cpu_ldub_code(env, s->pc++); op = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { s->rip_offset = 
1; gen_lea_modrm(env, s, modrm); if (!(s->prefix & PREFIX_LOCK)) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_v_reg(ot, cpu_T0, rm); /* load shift */ val = cpu_ldub_code(env, s->pc++); tcg_gen_movi_tl(cpu_T1, val); if (op < 4) goto unknown_op; op -= 4; goto bt_op; case 0x1a3: /* bt Gv, Ev */ op = 0; goto do_btx; case 0x1ab: /* bts */ op = 1; goto do_btx; case 0x1b3: /* btr */ op = 2; goto do_btx; case 0x1bb: /* btc */ op = 3; do_btx: ot = dflag; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(MO_32, cpu_T1, reg); if (mod != 3) { AddressParts a = gen_lea_modrm_0(env, s, modrm); /* specific case: we need to add a displacement */ gen_exts(ot, cpu_T1); tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot); tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot); tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0); gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); if (!(s->prefix & PREFIX_LOCK)) { gen_op_ld_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_v_reg(ot, cpu_T0, rm); bt_op: tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1); tcg_gen_movi_tl(cpu_tmp0, 1); tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1); if (s->prefix & PREFIX_LOCK) { switch (op) { case 0: /* bt */ /* Needs no atomic ops; we surpressed the normal memory load for LOCK above so do it now. */ gen_op_ld_v(s, ot, cpu_T0, cpu_A0); break; case 1: /* bts */ tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; case 2: /* btr */ tcg_gen_not_tl(cpu_tmp0, cpu_tmp0); tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; default: case 3: /* btc */ tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0, s->mem_index, ot | MO_LE); break; tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1); } else { tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1); switch (op) { case 0: /* bt */ /* Data already loaded; nothing to do. */ break; case 1: /* bts */ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0); break; case 2: /* btr */ tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0); break; default: case 3: /* btc */ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0); break; if (op != 0) { if (mod != 3) { gen_op_st_v(s, ot, cpu_T0, cpu_A0); } else { gen_op_mov_reg_v(ot, rm, cpu_T0); /* Delay all CC updates until after the store above. Note that C is the result of the test, Z is unchanged, and the others are all undefined. */ switch (s->cc_op) { case CC_OP_MULB ... CC_OP_MULQ: case CC_OP_ADDB ... CC_OP_ADDQ: case CC_OP_ADCB ... CC_OP_ADCQ: case CC_OP_SUBB ... CC_OP_SUBQ: case CC_OP_SBBB ... CC_OP_SBBQ: case CC_OP_LOGICB ... CC_OP_LOGICQ: case CC_OP_INCB ... CC_OP_INCQ: case CC_OP_DECB ... CC_OP_DECQ: case CC_OP_SHLB ... CC_OP_SHLQ: case CC_OP_SARB ... CC_OP_SARQ: case CC_OP_BMILGB ... CC_OP_BMILGQ: /* Z was going to be computed from the non-zero status of CC_DST. We can get that same Z value (and the new C value) by leaving CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the same width. */ tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4); set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB); break; default: /* Otherwise, generate EFLAGS and replace the C bit. */ gen_compute_eflags(s); tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4, ctz32(CC_C), 1); break; break; case 0x1bc: /* bsf / tzcnt */ case 0x1bd: /* bsr / lzcnt */ ot = dflag; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_extu(ot, cpu_T0); /* Note that lzcnt and tzcnt are in different extensions. 
*/ if ((prefixes & PREFIX_REPZ) && (b & 1 ? s->cpuid_ext3_features & CPUID_EXT3_ABM : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) { int size = 8 << ot; /* For lzcnt/tzcnt, C bit is defined related to the input. */ tcg_gen_mov_tl(cpu_cc_src, cpu_T0); if (b & 1) { /* For lzcnt, reduce the target_ulong result by the number of zeros that we expect to find at the top. */ tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS); tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size); } else { /* For tzcnt, a zero input must return the operand size. */ tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size); /* For lzcnt/tzcnt, Z bit is defined related to the result. */ gen_op_update1_cc(); set_cc_op(s, CC_OP_BMILGB + ot); } else { /* For bsr/bsf, only the Z bit is defined and it is related to the input and not the result. */ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0); set_cc_op(s, CC_OP_LOGICB + ot); /* ??? The manual says that the output is undefined when the input is zero, but real hardware leaves it unchanged, and real programs appear to depend on that. Accomplish this by passing the output as the value to return upon zero. */ if (b & 1) { /* For bsr, return the bit index of the first 1 bit, not the count of leading zeros. */ tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1); tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1); tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1); } else { tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]); gen_op_mov_reg_v(ot, reg, cpu_T0); break; /************************/ /* bcd */ case 0x27: /* daa */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_daa(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x2f: /* das */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_das(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x37: /* aaa */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_aaa(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0x3f: /* aas */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_helper_aas(cpu_env); set_cc_op(s, CC_OP_EFLAGS); break; case 0xd4: /* aam */ if (CODE64(s)) goto illegal_op; val = cpu_ldub_code(env, s->pc++); if (val == 0) { gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base); } else { gen_helper_aam(cpu_env, tcg_const_i32(val)); set_cc_op(s, CC_OP_LOGICB); break; case 0xd5: /* aad */ if (CODE64(s)) goto illegal_op; val = cpu_ldub_code(env, s->pc++); gen_helper_aad(cpu_env, tcg_const_i32(val)); set_cc_op(s, CC_OP_LOGICB); break; /************************/ /* misc */ case 0x90: /* nop */ /* XXX: correct lock test for all insn */ if (prefixes & PREFIX_LOCK) { goto illegal_op; /* If REX_B is set, then this is xchg eax, r8d, not a nop. 
*/ if (REX_B(s)) { goto do_xchg_reg_eax; if (prefixes & PREFIX_REPZ) { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start)); s->is_jmp = DISAS_TB_JUMP; break; case 0x9b: /* fwait */ if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == (HF_MP_MASK | HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); } else { gen_helper_fwait(cpu_env); break; case 0xcc: /* int3 */ gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); break; case 0xcd: /* int N */ val = cpu_ldub_code(env, s->pc++); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); break; case 0xce: /* into */ if (CODE64(s)) goto illegal_op; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start)); break; #ifdef WANT_ICEBP case 0xf1: /* icebp (undocumented, exits to external debugger) */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP); #if 1 gen_debug(s, pc_start - s->cs_base); #else /* start debug */ tb_flush(CPU(x86_env_get_cpu(env))); qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM); #endif break; #endif case 0xfa: /* cli */ if (!s->vm86) { if (s->cpl <= s->iopl) { gen_helper_cli(cpu_env); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (s->iopl == 3) { gen_helper_cli(cpu_env); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; case 0xfb: /* sti */ if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) { gen_helper_sti(cpu_env); /* interruptions are enabled only the first insn after sti */ gen_jmp_im(s->pc - s->cs_base); gen_eob_inhibit_irq(s, true); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; case 0x62: /* bound */ if (CODE64(s)) goto illegal_op; ot = dflag; modrm = cpu_ldub_code(env, s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_op_mov_v_reg(ot, cpu_T0, reg); gen_lea_modrm(env, s, modrm); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); if (ot == MO_16) { gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32); } else { gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32); break; case 0x1c8 ... 
0x1cf: /* bswap reg */ reg = (b & 7) | REX_B(s); #ifdef TARGET_X86_64 if (dflag == MO_64) { gen_op_mov_v_reg(MO_64, cpu_T0, reg); tcg_gen_bswap64_i64(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_64, reg, cpu_T0); } else #endif { gen_op_mov_v_reg(MO_32, cpu_T0, reg); tcg_gen_ext32u_tl(cpu_T0, cpu_T0); tcg_gen_bswap32_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_32, reg, cpu_T0); break; case 0xd6: /* salc */ if (CODE64(s)) goto illegal_op; gen_compute_eflags_c(s, cpu_T0); tcg_gen_neg_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0); break; case 0xe0: /* loopnz */ case 0xe1: /* loopz */ case 0xe2: /* loop */ case 0xe3: /* jecxz */ { TCGLabel *l1, *l2, *l3; tval = (int8_t)insn_get(env, s, MO_8); next_eip = s->pc - s->cs_base; tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; l1 = gen_new_label(); l2 = gen_new_label(); l3 = gen_new_label(); b &= 3; switch(b) { case 0: /* loopnz */ case 1: /* loopz */ gen_op_add_reg_im(s->aflag, R_ECX, -1); gen_op_jz_ecx(s->aflag, l3); gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); break; case 2: /* loop */ gen_op_add_reg_im(s->aflag, R_ECX, -1); gen_op_jnz_ecx(s->aflag, l1); break; default: case 3: /* jcxz */ gen_op_jz_ecx(s->aflag, l1); break; gen_set_label(l3); gen_jmp_im(next_eip); tcg_gen_br(l2); gen_set_label(l1); gen_jmp_im(tval); gen_set_label(l2); gen_eob(s); break; case 0x130: /* wrmsr */ case 0x132: /* rdmsr */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); if (b & 2) { gen_helper_rdmsr(cpu_env); } else { gen_helper_wrmsr(cpu_env); break; case 0x131: /* rdtsc */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_rdtsc(cpu_env); gen_jmp(s, s->pc - s->cs_base); break; case 0x133: /* rdpmc */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_rdpmc(cpu_env); break; case 0x134: /* sysenter */ /* For Intel SYSENTER is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_sysenter(cpu_env); gen_eob(s); break; case 0x135: /* sysexit */ /* For Intel SYSEXIT is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1)); gen_eob(s); break; #ifdef TARGET_X86_64 case 0x105: /* syscall */ /* XXX: is it usable in real mode ? */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start)); /* TF handling for the syscall insn is different. The TF bit is checked after the syscall insn completes. This allows #DB to not be generated after one has entered CPL0 if TF is set in FMASK. */ gen_eob_worker(s, false, true); break; case 0x107: /* sysret */ if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1)); /* condition codes are modified only in long mode */ if (s->lma) { set_cc_op(s, CC_OP_EFLAGS); /* TF handling for the sysret insn is different. The TF bit is checked after the sysret insn completes. This allows #DB to be generated \"as if\" the syscall insn in userspace has just completed. 
*/ gen_eob_worker(s, false, true); break; #endif case 0x1a2: /* cpuid */ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_cpuid(cpu_env); break; case 0xf4: /* hlt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start)); s->is_jmp = DISAS_TB_JUMP; break; case 0x100: modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* sldt */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, ldt.selector)); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 2: /* lldt */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_lldt(cpu_env, cpu_tmp2_i32); break; case 1: /* str */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, tr.selector)); ot = mod == 3 ? dflag : MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 3: /* ltr */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_ltr(cpu_env, cpu_tmp2_i32); break; case 4: /* verr */ case 5: /* verw */ if (!s->pe || s->vm86) goto illegal_op; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_update_cc_op(s); if (op == 4) { gen_helper_verr(cpu_env, cpu_T0); } else { gen_helper_verw(cpu_env, cpu_T0); set_cc_op(s, CC_OP_EFLAGS); break; default: goto unknown_op; break; case 0x101: modrm = cpu_ldub_code(env, s->pc++); switch (modrm) { CASE_MODRM_MEM_OP(0): /* sgdt */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.limit)); gen_op_st_v(s, MO_16, cpu_T0, cpu_A0); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base)); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); break; case 0xc8: /* monitor */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { goto illegal_op; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]); gen_extu(s->aflag, cpu_A0); gen_add_A0_ds_seg(s); gen_helper_monitor(cpu_env, cpu_A0); break; case 0xc9: /* mwait */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) { goto illegal_op; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start)); gen_eob(s); break; case 0xca: /* clac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || s->cpl != 0) { goto illegal_op; gen_helper_clac(cpu_env); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; case 0xcb: /* stac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || s->cpl != 0) { goto illegal_op; gen_helper_stac(cpu_env); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(1): /* sidt */ gen_svm_check_intercept(s, pc_start, 
SVM_EXIT_IDTR_READ); gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit)); gen_op_st_v(s, MO_16, cpu_T0, cpu_A0); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base)); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); break; case 0xd0: /* xgetbv */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32); tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64); break; case 0xd1: /* xsetbv */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64); /* End TB because translation flags may change. */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; case 0xd8: /* VMRUN */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1), tcg_const_i32(s->pc - pc_start)); tcg_gen_exit_tb(0); s->is_jmp = DISAS_TB_JUMP; break; case 0xd9: /* VMMCALL */ if (!(s->flags & HF_SVME_MASK)) { goto illegal_op; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmmcall(cpu_env); break; case 0xda: /* VMLOAD */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1)); break; case 0xdb: /* VMSAVE */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1)); break; case 0xdc: /* STGI */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) { goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_stgi(cpu_env); break; case 0xdd: /* CLGI */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_clgi(cpu_env); break; case 0xde: /* SKINIT */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) { goto illegal_op; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_skinit(cpu_env); break; case 0xdf: /* INVLPGA */ if (!(s->flags & HF_SVME_MASK) || !s->pe) { goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1)); break; CASE_MODRM_MEM_OP(2): /* lgdt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; gen_svm_check_intercept(s, 
pc_start, SVM_EXIT_GDTR_WRITE); gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base)); tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit)); break; CASE_MODRM_MEM_OP(3): /* lidt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE); gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0); gen_add_A0_im(s, 2); gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0); if (dflag == MO_16) { tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff); tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base)); tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit)); break; CASE_MODRM_OP(4): /* smsw */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0])); if (CODE64(s)) { mod = (modrm >> 6) & 3; ot = (mod != 3 ? MO_16 : s->dflag); } else { ot = MO_16; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 0xee: /* rdpkru */ if (prefixes & PREFIX_LOCK) { goto illegal_op; tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32); tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64); break; case 0xef: /* wrpkru */ if (prefixes & PREFIX_LOCK) { goto illegal_op; tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]); gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64); break; CASE_MODRM_OP(6): /* lmsw */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); gen_helper_lmsw(cpu_env, cpu_T0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(7): /* invlpg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_lea_modrm(env, s, modrm); gen_helper_invlpg(cpu_env, cpu_A0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; case 0xf8: /* swapgs */ #ifdef TARGET_X86_64 if (CODE64(s)) { if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]); tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env, offsetof(CPUX86State, kernelgsbase)); tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, kernelgsbase)); break; #endif goto illegal_op; case 0xf9: /* rdtscp */ if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) { goto illegal_op; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_rdtscp(cpu_env); gen_jmp(s, s->pc - s->cs_base); break; default: goto unknown_op; break; case 0x108: /* invd */ case 0x109: /* wbinvd */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, (b & 2) ? 
SVM_EXIT_INVD : SVM_EXIT_WBINVD); /* nothing to do */ break; case 0x63: /* arpl or movslS (x86_64) */ #ifdef TARGET_X86_64 if (CODE64(s)) { int d_ot; /* d_ot is the size of destination */ d_ot = dflag; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { gen_op_mov_v_reg(MO_32, cpu_T0, rm); /* sign extend */ if (d_ot == MO_64) { tcg_gen_ext32s_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(d_ot, reg, cpu_T0); } else { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0); gen_op_mov_reg_v(d_ot, reg, cpu_T0); } else #endif { TCGLabel *label1; TCGv t0, t1, t2, a0; if (!s->pe || s->vm86) goto illegal_op; t0 = tcg_temp_local_new(); t1 = tcg_temp_local_new(); t2 = tcg_temp_local_new(); ot = MO_16; modrm = cpu_ldub_code(env, s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = modrm & 7; if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, ot, t0, cpu_A0); a0 = tcg_temp_local_new(); tcg_gen_mov_tl(a0, cpu_A0); } else { gen_op_mov_v_reg(ot, t0, rm); TCGV_UNUSED(a0); gen_op_mov_v_reg(ot, t1, reg); tcg_gen_andi_tl(cpu_tmp0, t0, 3); tcg_gen_andi_tl(t1, t1, 3); tcg_gen_movi_tl(t2, 0); label1 = gen_new_label(); tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1); tcg_gen_andi_tl(t0, t0, ~3); tcg_gen_or_tl(t0, t0, t1); tcg_gen_movi_tl(t2, CC_Z); gen_set_label(label1); if (mod != 3) { gen_op_st_v(s, ot, t0, a0); tcg_temp_free(a0); } else { gen_op_mov_reg_v(ot, rm, t0); gen_compute_eflags(s); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2); tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(t2); break; case 0x102: /* lar */ case 0x103: /* lsl */ { TCGLabel *label1; TCGv t0; if (!s->pe || s->vm86) goto illegal_op; ot = dflag != MO_16 ? 
MO_32 : MO_16; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); t0 = tcg_temp_local_new(); gen_update_cc_op(s); if (b == 0x102) { gen_helper_lar(t0, cpu_env, cpu_T0); } else { gen_helper_lsl(t0, cpu_env, cpu_T0); tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z); label1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); gen_op_mov_reg_v(ot, reg, t0); gen_set_label(label1); set_cc_op(s, CC_OP_EFLAGS); tcg_temp_free(t0); break; case 0x118: modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* prefetchnta */ case 1: /* prefetchnt0 */ case 2: /* prefetchnt0 */ case 3: /* prefetchnt0 */ if (mod == 3) goto illegal_op; gen_nop_modrm(env, s, modrm); /* nothing more to do */ break; default: /* nop (multi byte) */ gen_nop_modrm(env, s, modrm); break; break; case 0x11a: modrm = cpu_ldub_code(env, s->pc++); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; if (prefixes & PREFIX_REPZ) { /* bndcl */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]); } else if (prefixes & PREFIX_REPNZ) { /* bndcu */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; TCGv_i64 notu = tcg_temp_new_i64(); tcg_gen_not_i64(notu, cpu_bndu[reg]); gen_bndck(env, s, modrm, TCG_COND_GTU, notu); tcg_temp_free_i64(notu); } else if (prefixes & PREFIX_DATA) { /* bndmov -- from reg/mem */ if (reg >= 4 || s->aflag == MO_16) { goto illegal_op; if (mod == 3) { int reg2 = (modrm & 7) | REX_B(s); if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { goto illegal_op; if (s->flags & HF_MPX_IU_MASK) { tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]); tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]); } else { gen_lea_modrm(env, s, modrm); if (CODE64(s)) { tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEQ); tcg_gen_addi_tl(cpu_A0, cpu_A0, 8); tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEQ); } else { tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEUL); tcg_gen_addi_tl(cpu_A0, cpu_A0, 4); tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEUL); /* bnd registers are now in-use */ gen_set_hflag(s, HF_MPX_IU_MASK); } else if (mod != 3) { /* bndldx */ AddressParts a = gen_lea_modrm_0(env, s, modrm); if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16 || a.base < -1) { goto illegal_op; if (a.base >= 0) { tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp); } else { tcg_gen_movi_tl(cpu_A0, 0); gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); if (a.index >= 0) { tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]); } else { tcg_gen_movi_tl(cpu_T0, 0); if (CODE64(s)) { gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0); tcg_gen_ld_i64(cpu_bndu[reg], cpu_env, offsetof(CPUX86State, mmx_t0.MMX_Q(0))); } else { gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0); tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]); tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32); gen_set_hflag(s, HF_MPX_IU_MASK); gen_nop_modrm(env, s, modrm); break; case 0x11b: modrm = cpu_ldub_code(env, s->pc++); if (s->flags & HF_MPX_EN_MASK) { mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3 && (prefixes & PREFIX_REPZ)) { /* bndmk */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; AddressParts a = gen_lea_modrm_0(env, s, modrm); if (a.base >= 0) { 
tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]); if (!CODE64(s)) { tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]); } else if (a.base == -1) { /* no base register has lower bound of 0 */ tcg_gen_movi_i64(cpu_bndl[reg], 0); } else { /* rip-relative generates #ud */ goto illegal_op; tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a)); if (!CODE64(s)) { tcg_gen_ext32u_tl(cpu_A0, cpu_A0); tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0); /* bnd registers are now in-use */ gen_set_hflag(s, HF_MPX_IU_MASK); break; } else if (prefixes & PREFIX_REPNZ) { /* bndcn */ if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16) { goto illegal_op; gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]); } else if (prefixes & PREFIX_DATA) { /* bndmov -- to reg/mem */ if (reg >= 4 || s->aflag == MO_16) { goto illegal_op; if (mod == 3) { int reg2 = (modrm & 7) | REX_B(s); if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) { goto illegal_op; if (s->flags & HF_MPX_IU_MASK) { tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]); tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]); } else { gen_lea_modrm(env, s, modrm); if (CODE64(s)) { tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEQ); tcg_gen_addi_tl(cpu_A0, cpu_A0, 8); tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEQ); } else { tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0, s->mem_index, MO_LEUL); tcg_gen_addi_tl(cpu_A0, cpu_A0, 4); tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0, s->mem_index, MO_LEUL); } else if (mod != 3) { /* bndstx */ AddressParts a = gen_lea_modrm_0(env, s, modrm); if (reg >= 4 || (prefixes & PREFIX_LOCK) || s->aflag == MO_16 || a.base < -1) { goto illegal_op; if (a.base >= 0) { tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp); } else { tcg_gen_movi_tl(cpu_A0, 0); gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override); if (a.index >= 0) { tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]); } else { tcg_gen_movi_tl(cpu_T0, 0); if (CODE64(s)) { gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0, cpu_bndl[reg], cpu_bndu[reg]); } else { gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0, cpu_bndl[reg], cpu_bndu[reg]); gen_nop_modrm(env, s, modrm); break; case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */ modrm = cpu_ldub_code(env, s->pc++); gen_nop_modrm(env, s, modrm); break; case 0x120: /* mov reg, crN */ case 0x122: /* mov crN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = cpu_ldub_code(env, s->pc++); /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). * AMD documentation (24594.pdf) and testing of * intel 386 and 486 processors all show that the mod bits * are assumed to be 1's, regardless of actual values. */ rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = MO_64; else ot = MO_32; if ((prefixes & PREFIX_LOCK) && (reg == 0) && (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) { reg = 8; switch(reg) { case 0: case 2: case 3: case 4: case 8: gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); if (b & 2) { gen_op_mov_v_reg(ot, cpu_T0, rm); gen_helper_write_crN(cpu_env, tcg_const_i32(reg), cpu_T0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg)); gen_op_mov_reg_v(ot, rm, cpu_T0); break; default: goto unknown_op; break; case 0x121: /* mov reg, drN */ case 0x123: /* mov drN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = cpu_ldub_code(env, s->pc++); /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). 
* AMD documentation (24594.pdf) and testing of * intel 386 and 486 processors all show that the mod bits * are assumed to be 1's, regardless of actual values. */ rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = MO_64; else ot = MO_32; if (reg >= 8) { goto illegal_op; if (b & 2) { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); gen_op_mov_v_reg(ot, cpu_T0, rm); tcg_gen_movi_i32(cpu_tmp2_i32, reg); gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); tcg_gen_movi_i32(cpu_tmp2_i32, reg); gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32); gen_op_mov_reg_v(ot, rm, cpu_T0); break; case 0x106: /* clts */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_helper_clts(cpu_env); /* abort block because static cpu state changed */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ case 0x1c3: /* MOVNTI reg, mem */ if (!(s->cpuid_features & CPUID_SSE2)) goto illegal_op; ot = mo_64_32(dflag); modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); break; case 0x1ae: modrm = cpu_ldub_code(env, s->pc++); switch (modrm) { CASE_MODRM_MEM_OP(0): /* fxsave */ if (!(s->cpuid_features & CPUID_FXSR) || (prefixes & PREFIX_LOCK)) { goto illegal_op; if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; gen_lea_modrm(env, s, modrm); gen_helper_fxsave(cpu_env, cpu_A0); break; CASE_MODRM_MEM_OP(1): /* fxrstor */ if (!(s->cpuid_features & CPUID_FXSR) || (prefixes & PREFIX_LOCK)) { goto illegal_op; if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; gen_lea_modrm(env, s, modrm); gen_helper_fxrstor(cpu_env, cpu_A0); break; CASE_MODRM_MEM_OP(2): /* ldmxcsr */ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) { goto illegal_op; if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; gen_lea_modrm(env, s, modrm); tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL); gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32); break; CASE_MODRM_MEM_OP(3): /* stmxcsr */ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) { goto illegal_op; if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr)); gen_op_st_v(s, MO_32, cpu_T0, cpu_A0); break; CASE_MODRM_MEM_OP(4): /* xsave */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (prefixes & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64); break; CASE_MODRM_MEM_OP(5): /* xrstor */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (prefixes & (PREFIX_LOCK | PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64); /* XRSTOR is how MPX is enabled, which changes how we translate. Thus we need to end the TB. 
*/ gen_update_cc_op(s); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */ if (prefixes & PREFIX_LOCK) { goto illegal_op; if (prefixes & PREFIX_DATA) { /* clwb */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) { goto illegal_op; gen_nop_modrm(env, s, modrm); } else { /* xsaveopt */ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0 || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0 || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) { goto illegal_op; gen_lea_modrm(env, s, modrm); tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX], cpu_regs[R_EDX]); gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64); break; CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */ if (prefixes & PREFIX_LOCK) { goto illegal_op; if (prefixes & PREFIX_DATA) { /* clflushopt */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) { goto illegal_op; } else { /* clflush */ if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) || !(s->cpuid_features & CPUID_CLFLUSH)) { goto illegal_op; gen_nop_modrm(env, s, modrm); break; case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */ case 0xc8 ... 0xc8: /* rdgsbase (f3 0f ae /1) */ case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */ case 0xd8 ... 0xd8: /* wrgsbase (f3 0f ae /3) */ if (CODE64(s) && (prefixes & PREFIX_REPZ) && !(prefixes & PREFIX_LOCK) && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) { TCGv base, treg, src, dst; /* Preserve hflags bits by testing CR4 at runtime. */ tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK); gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32); base = cpu_seg_base[modrm & 8 ? R_GS : R_FS]; treg = cpu_regs[(modrm & 7) | REX_B(s)]; if (modrm & 0x10) { /* wr*base */ dst = base, src = treg; } else { /* rd*base */ dst = treg, src = base; if (s->dflag == MO_32) { tcg_gen_ext32u_tl(dst, src); } else { tcg_gen_mov_tl(dst, src); break; goto unknown_op; case 0xf8: /* sfence / pcommit */ if (prefixes & PREFIX_DATA) { /* pcommit */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT) || (prefixes & PREFIX_LOCK)) { goto illegal_op; break; /* fallthru */ case 0xf9 ... 0xff: /* sfence */ if (!(s->cpuid_features & CPUID_SSE) || (prefixes & PREFIX_LOCK)) { goto illegal_op; tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); break; case 0xe8 ... 0xef: /* lfence */ if (!(s->cpuid_features & CPUID_SSE) || (prefixes & PREFIX_LOCK)) { goto illegal_op; tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC); break; case 0xf0 ... 0xf7: /* mfence */ if (!(s->cpuid_features & CPUID_SSE2) || (prefixes & PREFIX_LOCK)) { goto illegal_op; tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); break; default: goto unknown_op; break; case 0x10d: /* 3DNow! prefetch(w) */ modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_nop_modrm(env, s, modrm); break; case 0x1aa: /* rsm */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM); if (!(s->flags & HF_SMM_MASK)) goto illegal_op; gen_update_cc_op(s); gen_jmp_im(s->pc - s->cs_base); gen_helper_rsm(cpu_env); gen_eob(s); break; case 0x1b8: /* SSE4.2 popcnt */ if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) != PREFIX_REPZ) goto illegal_op; if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) goto illegal_op; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; if (s->prefix & PREFIX_DATA) { ot = MO_16; } else { ot = mo_64_32(dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_extu(ot, cpu_T0); tcg_gen_mov_tl(cpu_cc_src, cpu_T0); tcg_gen_ctpop_tl(cpu_T0, cpu_T0); gen_op_mov_reg_v(ot, reg, cpu_T0); set_cc_op(s, CC_OP_POPCNT); break; case 0x10e ... 
0x10f: /* 3DNow! instructions, ignore prefixes */ s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); case 0x110 ... 0x117: case 0x128 ... 0x12f: case 0x138 ... 0x13a: case 0x150 ... 0x179: case 0x17c ... 0x17f: case 0x1c2: case 0x1c4 ... 0x1c6: case 0x1d0 ... 0x1fe: gen_sse(env, s, b, pc_start, rex_r); break; default: goto unknown_op; return s->pc; illegal_op: gen_illegal_opcode(s); return s->pc; unknown_op: gen_unknown_opcode(env, s); return s->pc;"} {"target": 0, "idx": 1914, "func": "START_TEST(escaped_string) { int i; struct { const char *encoded; const char *decoded; } test_cases[] = { { \"\\\"\\\\\\\"\\\"\", \"\\\"\" }, { \"\\\"hello world \\\\\\\"embedded string\\\\\\\"\\\"\", \"hello world \\\"embedded string\\\"\" }, { \"\\\"hello world\\\\nwith new line\\\"\", \"hello world\\nwith new line\" }, { \"\\\"single byte utf-8 \\\\u0020\\\"\", \"single byte utf-8 \" }, { \"\\\"double byte utf-8 \\\\u00A2\\\"\", \"double byte utf-8 \\xc2\\xa2\" }, { \"\\\"triple byte utf-8 \\\\u20AC\\\"\", \"triple byte utf-8 \\xe2\\x82\\xac\" }, {} }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QString *str; obj = qobject_from_json(test_cases[i].encoded); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QSTRING); str = qobject_to_qstring(obj); fail_unless(strcmp(qstring_get_str(str), test_cases[i].decoded) == 0); QDECREF(str); } }"} {"target": 0, "idx": 1915, "func": "static void handle_mrs(DisasContext *s, uint32_t insn, unsigned int op0, unsigned int op1, unsigned int op2, unsigned int crn, unsigned int crm, unsigned int rt) { unsupported_encoding(s, insn); }"} {"target": 1, "idx": 1956, "func": "int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg) { struct kvm_irq_routing_entry kroute; int virq; if (!kvm_gsi_routing_enabled()) { return -ENOSYS; } virq = kvm_irqchip_get_virq(s); if (virq < 0) { return virq; } kroute.gsi = virq; kroute.type = KVM_IRQ_ROUTING_MSI; kroute.flags = 0; kroute.u.msi.address_lo = (uint32_t)msg.address; kroute.u.msi.address_hi = msg.address >> 32; kroute.u.msi.data = le32_to_cpu(msg.data); kvm_add_routing_entry(s, &kroute); kvm_irqchip_commit_routes(s); return virq; }"} {"target": 1, "idx": 1960, "func": "static int oma_read_header(AVFormatContext *s) { int ret, framesize, jsflag, samplerate; uint32_t codec_params, channel_id; int16_t eid; uint8_t buf[EA3_HEADER_SIZE]; uint8_t *edata; AVStream *st; ID3v2ExtraMeta *extra_meta = NULL; OMAContext *oc = s->priv_data; ff_id3v2_read(s, ID3v2_EA3_MAGIC, &extra_meta, 0); ret = avio_read(s->pb, buf, EA3_HEADER_SIZE); if (ret < EA3_HEADER_SIZE) return -1; if (memcmp(buf, ((const uint8_t[]){'E', 'A', '3'}), 3) || buf[4] != 0 || buf[5] != EA3_HEADER_SIZE) { av_log(s, AV_LOG_ERROR, \"Couldn't find the EA3 header !\\n\"); return AVERROR_INVALIDDATA; } oc->content_start = avio_tell(s->pb); /* encrypted file */ eid = AV_RB16(&buf[6]); if (eid != -1 && eid != -128 && decrypt_init(s, extra_meta, buf) < 0) { ff_id3v2_free_extra_meta(&extra_meta); return -1; } ff_id3v2_free_extra_meta(&extra_meta); codec_params = AV_RB24(&buf[33]); st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->start_time = 0; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_tag = buf[32]; st->codecpar->codec_id = ff_codec_get_id(ff_oma_codec_tags, st->codecpar->codec_tag); switch (buf[32]) { case OMA_CODECID_ATRAC3: samplerate = ff_oma_srate_tab[(codec_params >> 13) & 7] * 100; if (!samplerate) { av_log(s, AV_LOG_ERROR, \"Unsupported sample rate\\n\"); return AVERROR_INVALIDDATA; } if (samplerate 
!= 44100) avpriv_request_sample(s, \"Sample rate %d\", samplerate); framesize = (codec_params & 0x3FF) * 8; /* get stereo coding mode, 1 for joint-stereo */ jsflag = (codec_params >> 17) & 1; st->codecpar->channels = 2; st->codecpar->channel_layout = AV_CH_LAYOUT_STEREO; st->codecpar->sample_rate = samplerate; st->codecpar->bit_rate = st->codecpar->sample_rate * framesize * 8 / 1024; /* fake the ATRAC3 extradata * (wav format, makes stream copy to wav work) */ if (ff_alloc_extradata(st->codecpar, 14)) return AVERROR(ENOMEM); edata = st->codecpar->extradata; AV_WL16(&edata[0], 1); // always 1 AV_WL32(&edata[2], samplerate); // samples rate AV_WL16(&edata[6], jsflag); // coding mode AV_WL16(&edata[8], jsflag); // coding mode AV_WL16(&edata[10], 1); // always 1 // AV_WL16(&edata[12], 0); // always 0 avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate); break; case OMA_CODECID_ATRAC3P: channel_id = (codec_params >> 10) & 7; if (!channel_id) { av_log(s, AV_LOG_ERROR, \"Invalid ATRAC-X channel id: %\"PRIu32\"\\n\", channel_id); return AVERROR_INVALIDDATA; } st->codecpar->channel_layout = ff_oma_chid_to_native_layout[channel_id - 1]; st->codecpar->channels = ff_oma_chid_to_num_channels[channel_id - 1]; framesize = ((codec_params & 0x3FF) * 8) + 8; samplerate = ff_oma_srate_tab[(codec_params >> 13) & 7] * 100; if (!samplerate) { av_log(s, AV_LOG_ERROR, \"Unsupported sample rate\\n\"); return AVERROR_INVALIDDATA; } st->codecpar->sample_rate = samplerate; st->codecpar->bit_rate = samplerate * framesize * 8 / 2048; avpriv_set_pts_info(st, 64, 1, samplerate); break; case OMA_CODECID_MP3: st->need_parsing = AVSTREAM_PARSE_FULL_RAW; framesize = 1024; break; case OMA_CODECID_LPCM: /* PCM 44.1 kHz 16 bit stereo big-endian */ st->codecpar->channels = 2; st->codecpar->channel_layout = AV_CH_LAYOUT_STEREO; st->codecpar->sample_rate = 44100; framesize = 1024; /* bit rate = sample rate x PCM block align (= 4) x 8 */ st->codecpar->bit_rate = st->codecpar->sample_rate * 32; st->codecpar->bits_per_coded_sample = av_get_bits_per_sample(st->codecpar->codec_id); avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate); break; default: av_log(s, AV_LOG_ERROR, \"Unsupported codec %d!\\n\", buf[32]); return AVERROR(ENOSYS); } st->codecpar->block_align = framesize; return 0; }"} {"target": 0, "idx": 1963, "func": "static int temporal_luma_motion_vector(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int refIdxLx, Mv *mvLXCol, int X) { MvField *tab_mvf; MvField temp_col; int x, y, x_pu, y_pu; int min_pu_width = s->sps->min_pu_width; int availableFlagLXCol = 0; int colPic; HEVCFrame *ref = s->ref->collocated_ref; if (!ref) return 0; tab_mvf = ref->tab_mvf; colPic = ref->poc; //bottom right collocated motion vector x = x0 + nPbW; y = y0 + nPbH; if (tab_mvf && (y0 >> s->sps->log2_ctb_size) == (y >> s->sps->log2_ctb_size) && y < s->sps->height && x < s->sps->width) { x &= ~15; y &= ~15; ff_thread_await_progress(&ref->tf, y, 0); x_pu = x >> s->sps->log2_min_pu_size; y_pu = y >> s->sps->log2_min_pu_size; temp_col = TAB_MVF(x_pu, y_pu); availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS; } // derive center collocated motion vector if (tab_mvf && !availableFlagLXCol) { x = x0 + (nPbW >> 1); y = y0 + (nPbH >> 1); x &= ~15; y &= ~15; ff_thread_await_progress(&ref->tf, y, 0); x_pu = x >> s->sps->log2_min_pu_size; y_pu = y >> s->sps->log2_min_pu_size; temp_col = TAB_MVF(x_pu, y_pu); availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS; } return availableFlagLXCol; }"} {"target": 1, "idx": 1973, "func": "static int 
decode_audio_specific_config(AACContext *ac, AVCodecContext *avctx, MPEG4AudioConfig *m4ac, const uint8_t *data, int bit_size, int sync_extension) { GetBitContext gb; int i, ret; ff_dlog(avctx, \"audio specific config size %d\\n\", bit_size >> 3); for (i = 0; i < bit_size >> 3; i++) ff_dlog(avctx, \"%02x \", data[i]); ff_dlog(avctx, \"\\n\"); if ((ret = init_get_bits(&gb, data, bit_size)) < 0) return ret; if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size, sync_extension)) < 0) return AVERROR_INVALIDDATA; if (m4ac->sampling_index > 12) { av_log(avctx, AV_LOG_ERROR, \"invalid sampling rate index %d\\n\", m4ac->sampling_index); return AVERROR_INVALIDDATA; } if (m4ac->object_type == AOT_ER_AAC_LD && (m4ac->sampling_index < 3 || m4ac->sampling_index > 7)) { av_log(avctx, AV_LOG_ERROR, \"invalid low delay sampling rate index %d\\n\", m4ac->sampling_index); return AVERROR_INVALIDDATA; } skip_bits_long(&gb, i); switch (m4ac->object_type) { case AOT_AAC_MAIN: case AOT_AAC_LC: case AOT_AAC_LTP: case AOT_ER_AAC_LC: case AOT_ER_AAC_LD: if ((ret = decode_ga_specific_config(ac, avctx, &gb, m4ac, m4ac->chan_config)) < 0) return ret; break; case AOT_ER_AAC_ELD: if ((ret = decode_eld_specific_config(ac, avctx, &gb, m4ac, m4ac->chan_config)) < 0) return ret; break; default: avpriv_report_missing_feature(avctx, \"Audio object type %s%d\", m4ac->sbr == 1 ? \"SBR+\" : \"\", m4ac->object_type); return AVERROR(ENOSYS); } ff_dlog(avctx, \"AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\\n\", m4ac->object_type, m4ac->chan_config, m4ac->sampling_index, m4ac->sample_rate, m4ac->sbr, m4ac->ps); return get_bits_count(&gb); }"} {"target": 1, "idx": 2014, "func": "long do_sigreturn(CPUS390XState *env) { sigframe *frame; abi_ulong frame_addr = env->regs[15]; target_sigset_t target_set; sigset_t set; trace_user_do_sigreturn(env, frame_addr); if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { goto badframe; } __get_user(target_set.sig[0], &frame->sc.oldmask[0]); target_to_host_sigset_internal(&set, &target_set); set_sigmask(&set); /* ~_BLOCKABLE? */ if (restore_sigregs(env, &frame->sregs)) { goto badframe; } unlock_user_struct(frame, frame_addr, 0); return -TARGET_QEMU_ESIGRETURN; badframe: force_sig(TARGET_SIGSEGV); return 0; }"} {"target": 0, "idx": 2017, "func": "static inline int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){ if(get_rac(c, state+0)) return 0; else{ int i, e, a; e= 0; while(get_rac(c, state+1 + e) && e<9){ //1..10 e++; } a= 1; for(i=e-1; i>=0; i--){ a += a + get_rac(c, state+22 + i); //22..31 } e= -(is_signed && get_rac(c, state+11 + e)); //11..21 return (a^e)-e; } }"} {"target": 0, "idx": 2034, "func": "START_TEST(qobject_to_qint_test) { QInt *qi; qi = qint_from_int(0); fail_unless(qobject_to_qint(QOBJECT(qi)) == qi); QDECREF(qi); }"} {"target": 0, "idx": 2055, "func": "static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr) { intptr_t value = (intptr_t)ptr; TCGRelocation *r; assert(!l->has_value); for (r = l->u.first_reloc; r != NULL; r = r->next) { patch_reloc(r->ptr, r->type, value, r->addend); } l->has_value = 1; l->u.value_ptr = ptr; }"} {"target": 0, "idx": 2068, "func": "BlockDriverState *bdrv_lookup_bs(const char *device, const char *node_name, Error **errp) { BlockBackend *blk; BlockDriverState *bs; if (device) { blk = blk_by_name(device); if (blk) { return blk_bs(blk); } } if (node_name) { bs = bdrv_find_node(node_name); if (bs) { return bs; } } error_setg(errp, \"Cannot find device=%s nor node_name=%s\", device ? 
device : \"\", node_name ? node_name : \"\"); return NULL; }"} {"target": 0, "idx": 2082, "func": "static int idcin_read_packet(AVFormatContext *s, AVPacket *pkt) { int ret; unsigned int command; unsigned int chunk_size; IdcinDemuxContext *idcin = s->priv_data; AVIOContext *pb = s->pb; int i; int palette_scale; unsigned char r, g, b; unsigned char palette_buffer[768]; uint32_t palette[256]; if (s->pb->eof_reached) return AVERROR(EIO); if (idcin->next_chunk_is_video) { command = avio_rl32(pb); if (command == 2) { return AVERROR(EIO); } else if (command == 1) { /* trigger a palette change */ if (avio_read(pb, palette_buffer, 768) != 768) return AVERROR(EIO); /* scale the palette as necessary */ palette_scale = 2; for (i = 0; i < 768; i++) if (palette_buffer[i] > 63) { palette_scale = 0; break; } for (i = 0; i < 256; i++) { r = palette_buffer[i * 3 ] << palette_scale; g = palette_buffer[i * 3 + 1] << palette_scale; b = palette_buffer[i * 3 + 2] << palette_scale; palette[i] = (r << 16) | (g << 8) | (b); } } chunk_size = avio_rl32(pb); if (chunk_size < 4 || chunk_size > INT_MAX - 4) { av_log(s, AV_LOG_ERROR, \"invalid chunk size: %u\\n\", chunk_size); return AVERROR_INVALIDDATA; } /* skip the number of decoded bytes (always equal to width * height) */ avio_skip(pb, 4); chunk_size -= 4; ret= av_get_packet(pb, pkt, chunk_size); if (ret < 0) return ret; if (command == 1) { uint8_t *pal; pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE); if (ret < 0) return ret; memcpy(pal, palette, AVPALETTE_SIZE); pkt->flags |= AV_PKT_FLAG_KEY; } pkt->stream_index = idcin->video_stream_index; pkt->duration = 1; } else { /* send out the audio chunk */ if (idcin->current_audio_chunk) chunk_size = idcin->audio_chunk_size2; else chunk_size = idcin->audio_chunk_size1; ret= av_get_packet(pb, pkt, chunk_size); if (ret < 0) return ret; pkt->stream_index = idcin->audio_stream_index; pkt->duration = chunk_size / idcin->block_align; idcin->current_audio_chunk ^= 1; } if (idcin->audio_present) idcin->next_chunk_is_video ^= 1; return ret; }"} {"target": 0, "idx": 2091, "func": "void avcodec_register_all(void) { static int initialized; if (initialized) return; initialized = 1; /* hardware accelerators */ REGISTER_HWACCEL(H263_VAAPI, h263_vaapi); REGISTER_HWACCEL(H264_D3D11VA, h264_d3d11va); REGISTER_HWACCEL(H264_DXVA2, h264_dxva2); REGISTER_HWACCEL(H264_MMAL, h264_mmal); REGISTER_HWACCEL(H264_QSV, h264_qsv); REGISTER_HWACCEL(H264_VAAPI, h264_vaapi); REGISTER_HWACCEL(H264_VDA, h264_vda); REGISTER_HWACCEL(H264_VDA_OLD, h264_vda_old); REGISTER_HWACCEL(H264_VDPAU, h264_vdpau); REGISTER_HWACCEL(HEVC_D3D11VA, hevc_d3d11va); REGISTER_HWACCEL(HEVC_DXVA2, hevc_dxva2); REGISTER_HWACCEL(HEVC_QSV, hevc_qsv); REGISTER_HWACCEL(HEVC_VAAPI, hevc_vaapi); REGISTER_HWACCEL(HEVC_VDPAU, hevc_vdpau); REGISTER_HWACCEL(MPEG1_VDPAU, mpeg1_vdpau); REGISTER_HWACCEL(MPEG2_D3D11VA, mpeg2_d3d11va); REGISTER_HWACCEL(MPEG2_DXVA2, mpeg2_dxva2); REGISTER_HWACCEL(MPEG2_MMAL, mpeg2_mmal); REGISTER_HWACCEL(MPEG2_QSV, mpeg2_qsv); REGISTER_HWACCEL(MPEG2_VAAPI, mpeg2_vaapi); REGISTER_HWACCEL(MPEG2_VDPAU, mpeg2_vdpau); REGISTER_HWACCEL(MPEG4_VAAPI, mpeg4_vaapi); REGISTER_HWACCEL(MPEG4_VDPAU, mpeg4_vdpau); REGISTER_HWACCEL(VC1_D3D11VA, vc1_d3d11va); REGISTER_HWACCEL(VC1_DXVA2, vc1_dxva2); REGISTER_HWACCEL(VC1_QSV, vc1_qsv); REGISTER_HWACCEL(VC1_VAAPI, vc1_vaapi); REGISTER_HWACCEL(VC1_VDPAU, vc1_vdpau); REGISTER_HWACCEL(VC1_MMAL, vc1_mmal); REGISTER_HWACCEL(VP8_QSV, vp8_qsv); REGISTER_HWACCEL(VP8_VAAPI, vp8_vaapi); REGISTER_HWACCEL(WMV3_D3D11VA, 
wmv3_d3d11va); REGISTER_HWACCEL(WMV3_DXVA2, wmv3_dxva2); REGISTER_HWACCEL(WMV3_VAAPI, wmv3_vaapi); REGISTER_HWACCEL(WMV3_VDPAU, wmv3_vdpau); /* video codecs */ REGISTER_ENCODER(A64MULTI, a64multi); REGISTER_ENCODER(A64MULTI5, a64multi5); REGISTER_DECODER(AASC, aasc); REGISTER_DECODER(AIC, aic); REGISTER_ENCDEC (ALIAS_PIX, alias_pix); REGISTER_DECODER(AMV, amv); REGISTER_DECODER(ANM, anm); REGISTER_DECODER(ANSI, ansi); REGISTER_ENCDEC (ASV1, asv1); REGISTER_ENCDEC (ASV2, asv2); REGISTER_DECODER(AURA, aura); REGISTER_DECODER(AURA2, aura2); REGISTER_DECODER(AVS, avs); REGISTER_DECODER(BETHSOFTVID, bethsoftvid); REGISTER_DECODER(BFI, bfi); REGISTER_DECODER(BINK, bink); REGISTER_ENCDEC (BMP, bmp); REGISTER_DECODER(BMV_VIDEO, bmv_video); REGISTER_DECODER(BRENDER_PIX, brender_pix); REGISTER_DECODER(C93, c93); REGISTER_DECODER(CAVS, cavs); REGISTER_DECODER(CDGRAPHICS, cdgraphics); REGISTER_DECODER(CDXL, cdxl); REGISTER_DECODER(CFHD, cfhd); REGISTER_DECODER(CINEPAK, cinepak); REGISTER_ENCDEC (CLJR, cljr); REGISTER_DECODER(CLLC, cllc); REGISTER_ENCDEC (COMFORTNOISE, comfortnoise); REGISTER_DECODER(CSCD, cscd); REGISTER_DECODER(CYUV, cyuv); REGISTER_DECODER(DDS, dds); REGISTER_DECODER(DFA, dfa); REGISTER_ENCDEC (DNXHD, dnxhd); REGISTER_ENCDEC (DPX, dpx); REGISTER_DECODER(DSICINVIDEO, dsicinvideo); REGISTER_ENCDEC (DVVIDEO, dvvideo); REGISTER_DECODER(DXA, dxa); REGISTER_DECODER(DXTORY, dxtory); REGISTER_DECODER(DXV, dxv); REGISTER_DECODER(EACMV, eacmv); REGISTER_DECODER(EAMAD, eamad); REGISTER_DECODER(EATGQ, eatgq); REGISTER_DECODER(EATGV, eatgv); REGISTER_DECODER(EATQI, eatqi); REGISTER_DECODER(EIGHTBPS, eightbps); REGISTER_DECODER(EIGHTSVX_EXP, eightsvx_exp); REGISTER_DECODER(EIGHTSVX_FIB, eightsvx_fib); REGISTER_DECODER(ESCAPE124, escape124); REGISTER_DECODER(ESCAPE130, escape130); REGISTER_DECODER(EXR, exr); REGISTER_ENCDEC (FFV1, ffv1); REGISTER_ENCDEC (FFVHUFF, ffvhuff); REGISTER_DECODER(FIC, fic); REGISTER_ENCDEC (FLASHSV, flashsv); REGISTER_DECODER(FLASHSV2, flashsv2); REGISTER_DECODER(FLIC, flic); REGISTER_ENCDEC (FLV, flv); REGISTER_DECODER(FOURXM, fourxm); REGISTER_DECODER(FRAPS, fraps); REGISTER_DECODER(FRWU, frwu); REGISTER_DECODER(G2M, g2m); REGISTER_ENCDEC (GIF, gif); REGISTER_ENCDEC (H261, h261); REGISTER_ENCDEC (H263, h263); REGISTER_DECODER(H263I, h263i); REGISTER_ENCODER(H263P, h263p); REGISTER_DECODER(H264, h264); REGISTER_DECODER(H264_MMAL, h264_mmal); REGISTER_DECODER(H264_QSV, h264_qsv); REGISTER_ENCDEC (HAP, hap); REGISTER_DECODER(HEVC, hevc); REGISTER_DECODER(HEVC_QSV, hevc_qsv); REGISTER_DECODER(HNM4_VIDEO, hnm4_video); REGISTER_DECODER(HQ_HQA, hq_hqa); REGISTER_DECODER(HQX, hqx); REGISTER_ENCDEC (HUFFYUV, huffyuv); REGISTER_DECODER(IDCIN, idcin); REGISTER_DECODER(IFF_BYTERUN1, iff_byterun1); REGISTER_DECODER(IFF_ILBM, iff_ilbm); REGISTER_DECODER(INDEO2, indeo2); REGISTER_DECODER(INDEO3, indeo3); REGISTER_DECODER(INDEO4, indeo4); REGISTER_DECODER(INDEO5, indeo5); REGISTER_DECODER(INTERPLAY_VIDEO, interplay_video); REGISTER_DECODER(JPEG2000, jpeg2000); REGISTER_ENCDEC (JPEGLS, jpegls); REGISTER_DECODER(JV, jv); REGISTER_DECODER(KGV1, kgv1); REGISTER_DECODER(KMVC, kmvc); REGISTER_DECODER(LAGARITH, lagarith); REGISTER_ENCODER(LJPEG, ljpeg); REGISTER_DECODER(LOCO, loco); REGISTER_DECODER(MAGICYUV, magicyuv); REGISTER_DECODER(MDEC, mdec); REGISTER_DECODER(MIMIC, mimic); REGISTER_ENCDEC (MJPEG, mjpeg); REGISTER_DECODER(MJPEGB, mjpegb); REGISTER_DECODER(MMVIDEO, mmvideo); REGISTER_DECODER(MOTIONPIXELS, motionpixels); #if FF_API_XVMC REGISTER_DECODER(MPEG_XVMC, mpeg_xvmc); #endif /* 
FF_API_XVMC */ REGISTER_ENCDEC (MPEG1VIDEO, mpeg1video); REGISTER_ENCDEC (MPEG2VIDEO, mpeg2video); REGISTER_DECODER(MPEG2_MMAL, mpeg2_mmal); REGISTER_DECODER(MPEG2_QSV, mpeg2_qsv); REGISTER_ENCDEC (MPEG4, mpeg4); REGISTER_DECODER(MSA1, msa1); REGISTER_DECODER(MSMPEG4V1, msmpeg4v1); REGISTER_ENCDEC (MSMPEG4V2, msmpeg4v2); REGISTER_ENCDEC (MSMPEG4V3, msmpeg4v3); REGISTER_DECODER(MSRLE, msrle); REGISTER_DECODER(MSS1, mss1); REGISTER_DECODER(MSS2, mss2); REGISTER_DECODER(MSVIDEO1, msvideo1); REGISTER_DECODER(MSZH, mszh); REGISTER_DECODER(MTS2, mts2); REGISTER_DECODER(MVC1, mvc1); REGISTER_DECODER(MVC2, mvc2); REGISTER_DECODER(MXPEG, mxpeg); REGISTER_DECODER(NUV, nuv); REGISTER_DECODER(PAF_VIDEO, paf_video); REGISTER_ENCDEC (PAM, pam); REGISTER_ENCDEC (PBM, pbm); REGISTER_ENCDEC (PCX, pcx); REGISTER_ENCDEC (PGM, pgm); REGISTER_ENCDEC (PGMYUV, pgmyuv); REGISTER_DECODER(PICTOR, pictor); REGISTER_DECODER(PIXLET, pixlet); REGISTER_ENCDEC (PNG, png); REGISTER_ENCDEC (PPM, ppm); REGISTER_ENCDEC (PRORES, prores); REGISTER_DECODER(PTX, ptx); REGISTER_DECODER(QDRAW, qdraw); REGISTER_DECODER(QPEG, qpeg); REGISTER_ENCDEC (QTRLE, qtrle); REGISTER_DECODER(R10K, r10k); REGISTER_DECODER(R210, r210); REGISTER_ENCDEC (RAWVIDEO, rawvideo); REGISTER_DECODER(RL2, rl2); REGISTER_ENCDEC (ROQ, roq); REGISTER_DECODER(RPZA, rpza); REGISTER_DECODER(RSCC, rscc); REGISTER_ENCDEC (RV10, rv10); REGISTER_ENCDEC (RV20, rv20); REGISTER_DECODER(RV30, rv30); REGISTER_DECODER(RV40, rv40); REGISTER_DECODER(S302M, s302m); REGISTER_DECODER(SANM, sanm); REGISTER_DECODER(SCREENPRESSO, screenpresso); REGISTER_ENCDEC (SGI, sgi); REGISTER_DECODER(SGIRLE, sgirle); REGISTER_DECODER(SMACKER, smacker); REGISTER_DECODER(SMC, smc); REGISTER_DECODER(SP5X, sp5x); REGISTER_ENCDEC (SUNRAST, sunrast); REGISTER_ENCDEC (SVQ1, svq1); REGISTER_DECODER(SVQ3, svq3); REGISTER_ENCDEC (TARGA, targa); REGISTER_DECODER(TDSC, tdsc); REGISTER_DECODER(THEORA, theora); REGISTER_DECODER(THP, thp); REGISTER_DECODER(TIERTEXSEQVIDEO, tiertexseqvideo); REGISTER_ENCDEC (TIFF, tiff); REGISTER_DECODER(TMV, tmv); REGISTER_DECODER(TRUEMOTION1, truemotion1); REGISTER_DECODER(TRUEMOTION2, truemotion2); REGISTER_DECODER(TRUEMOTION2RT, truemotion2rt); REGISTER_DECODER(TSCC, tscc); REGISTER_DECODER(TSCC2, tscc2); REGISTER_DECODER(TXD, txd); REGISTER_DECODER(ULTI, ulti); REGISTER_ENCDEC (UTVIDEO, utvideo); REGISTER_ENCDEC (V210, v210); REGISTER_DECODER(V210X, v210x); REGISTER_ENCDEC (V410, v410); REGISTER_DECODER(VB, vb); REGISTER_DECODER(VBLE, vble); REGISTER_DECODER(VC1, vc1); REGISTER_DECODER(VC1IMAGE, vc1image); REGISTER_DECODER(VC1_MMAL, vc1_mmal); REGISTER_DECODER(VC1_QSV, vc1_qsv); REGISTER_DECODER(VCR1, vcr1); REGISTER_DECODER(VMDVIDEO, vmdvideo); REGISTER_DECODER(VMNC, vmnc); REGISTER_DECODER(VP3, vp3); REGISTER_DECODER(VP5, vp5); REGISTER_DECODER(VP6, vp6); REGISTER_DECODER(VP6A, vp6a); REGISTER_DECODER(VP6F, vp6f); REGISTER_DECODER(VP7, vp7); REGISTER_DECODER(VP8, vp8); REGISTER_DECODER(VP8_QSV, vp8_qsv); REGISTER_DECODER(VP9, vp9); REGISTER_DECODER(VQA, vqa); REGISTER_DECODER(WEBP, webp); REGISTER_ENCODER(WRAPPED_AVFRAME, wrapped_avframe); REGISTER_ENCDEC (WMV1, wmv1); REGISTER_ENCDEC (WMV2, wmv2); REGISTER_DECODER(WMV3, wmv3); REGISTER_DECODER(WMV3IMAGE, wmv3image); REGISTER_DECODER(WNV1, wnv1); REGISTER_DECODER(XAN_WC3, xan_wc3); REGISTER_DECODER(XAN_WC4, xan_wc4); REGISTER_ENCDEC (XBM, xbm); REGISTER_DECODER(XL, xl); REGISTER_ENCDEC (XWD, xwd); REGISTER_DECODER(YOP, yop); REGISTER_DECODER(ZEROCODEC, zerocodec); REGISTER_ENCDEC (ZLIB, zlib); REGISTER_ENCDEC (ZMBV, 
zmbv); /* audio codecs */ REGISTER_ENCDEC (AAC, aac); REGISTER_DECODER(AAC_LATM, aac_latm); REGISTER_ENCDEC (AC3, ac3); REGISTER_ENCODER(AC3_FIXED, ac3_fixed); REGISTER_ENCDEC (ALAC, alac); REGISTER_DECODER(ALS, als); REGISTER_DECODER(AMRNB, amrnb); REGISTER_DECODER(AMRWB, amrwb); REGISTER_DECODER(APE, ape); REGISTER_DECODER(ATRAC1, atrac1); REGISTER_DECODER(ATRAC3, atrac3); REGISTER_DECODER(ATRAC3P, atrac3p); REGISTER_DECODER(BINKAUDIO_DCT, binkaudio_dct); REGISTER_DECODER(BINKAUDIO_RDFT, binkaudio_rdft); REGISTER_DECODER(BMV_AUDIO, bmv_audio); REGISTER_DECODER(COOK, cook); REGISTER_DECODER(DCA, dca); REGISTER_DECODER(DSICINAUDIO, dsicinaudio); REGISTER_DECODER(DSS_SP, dss_sp); REGISTER_ENCDEC (EAC3, eac3); REGISTER_ENCDEC (FLAC, flac); REGISTER_ENCDEC (G723_1, g723_1); REGISTER_DECODER(GSM, gsm); REGISTER_DECODER(GSM_MS, gsm_ms); REGISTER_DECODER(IAC, iac); REGISTER_DECODER(IMC, imc); REGISTER_DECODER(MACE3, mace3); REGISTER_DECODER(MACE6, mace6); REGISTER_DECODER(METASOUND, metasound); REGISTER_DECODER(MLP, mlp); REGISTER_DECODER(MP1, mp1); REGISTER_DECODER(MP1FLOAT, mp1float); REGISTER_ENCDEC (MP2, mp2); REGISTER_DECODER(MP2FLOAT, mp2float); REGISTER_DECODER(MP3, mp3); REGISTER_DECODER(MP3FLOAT, mp3float); REGISTER_DECODER(MP3ADU, mp3adu); REGISTER_DECODER(MP3ADUFLOAT, mp3adufloat); REGISTER_DECODER(MP3ON4, mp3on4); REGISTER_DECODER(MP3ON4FLOAT, mp3on4float); REGISTER_DECODER(MPC7, mpc7); REGISTER_DECODER(MPC8, mpc8); REGISTER_ENCDEC (NELLYMOSER, nellymoser); REGISTER_DECODER(ON2AVC, on2avc); REGISTER_DECODER(OPUS, opus); REGISTER_DECODER(PAF_AUDIO, paf_audio); REGISTER_DECODER(QCELP, qcelp); REGISTER_DECODER(QDM2, qdm2); REGISTER_ENCDEC (RA_144, ra_144); REGISTER_DECODER(RA_288, ra_288); REGISTER_DECODER(RALF, ralf); REGISTER_DECODER(SHORTEN, shorten); REGISTER_DECODER(SIPR, sipr); REGISTER_DECODER(SMACKAUD, smackaud); REGISTER_DECODER(TAK, tak); REGISTER_DECODER(TRUEHD, truehd); REGISTER_DECODER(TRUESPEECH, truespeech); REGISTER_DECODER(TTA, tta); REGISTER_DECODER(TWINVQ, twinvq); REGISTER_DECODER(VMDAUDIO, vmdaudio); REGISTER_ENCDEC (VORBIS, vorbis); REGISTER_DECODER(WAVPACK, wavpack); REGISTER_DECODER(WMALOSSLESS, wmalossless); REGISTER_DECODER(WMAPRO, wmapro); REGISTER_ENCDEC (WMAV1, wmav1); REGISTER_ENCDEC (WMAV2, wmav2); REGISTER_DECODER(WMAVOICE, wmavoice); REGISTER_DECODER(WS_SND1, ws_snd1); /* PCM codecs */ REGISTER_ENCDEC (PCM_ALAW, pcm_alaw); REGISTER_DECODER(PCM_BLURAY, pcm_bluray); REGISTER_DECODER(PCM_DVD, pcm_dvd); REGISTER_ENCDEC (PCM_F32BE, pcm_f32be); REGISTER_ENCDEC (PCM_F32LE, pcm_f32le); REGISTER_ENCDEC (PCM_F64BE, pcm_f64be); REGISTER_ENCDEC (PCM_F64LE, pcm_f64le); REGISTER_DECODER(PCM_LXF, pcm_lxf); REGISTER_ENCDEC (PCM_MULAW, pcm_mulaw); REGISTER_ENCDEC (PCM_S8, pcm_s8); REGISTER_DECODER(PCM_S8_PLANAR, pcm_s8_planar); REGISTER_ENCDEC (PCM_S16BE, pcm_s16be); REGISTER_DECODER(PCM_S16BE_PLANAR, pcm_s16be_planar); REGISTER_ENCDEC (PCM_S16LE, pcm_s16le); REGISTER_DECODER(PCM_S16LE_PLANAR, pcm_s16le_planar); REGISTER_ENCDEC (PCM_S24BE, pcm_s24be); REGISTER_ENCDEC (PCM_S24DAUD, pcm_s24daud); REGISTER_ENCDEC (PCM_S24LE, pcm_s24le); REGISTER_DECODER(PCM_S24LE_PLANAR, pcm_s24le_planar); REGISTER_ENCDEC (PCM_S32BE, pcm_s32be); REGISTER_ENCDEC (PCM_S32LE, pcm_s32le); REGISTER_DECODER(PCM_S32LE_PLANAR, pcm_s32le_planar); REGISTER_ENCDEC (PCM_U8, pcm_u8); REGISTER_ENCDEC (PCM_U16BE, pcm_u16be); REGISTER_ENCDEC (PCM_U16LE, pcm_u16le); REGISTER_ENCDEC (PCM_U24BE, pcm_u24be); REGISTER_ENCDEC (PCM_U24LE, pcm_u24le); REGISTER_ENCDEC (PCM_U32BE, pcm_u32be); REGISTER_ENCDEC 
(PCM_U32LE, pcm_u32le); REGISTER_DECODER(PCM_ZORK , pcm_zork); /* DPCM codecs */ REGISTER_DECODER(INTERPLAY_DPCM, interplay_dpcm); REGISTER_ENCDEC (ROQ_DPCM, roq_dpcm); REGISTER_DECODER(SOL_DPCM, sol_dpcm); REGISTER_DECODER(XAN_DPCM, xan_dpcm); /* ADPCM codecs */ REGISTER_DECODER(ADPCM_4XM, adpcm_4xm); REGISTER_ENCDEC (ADPCM_ADX, adpcm_adx); REGISTER_DECODER(ADPCM_CT, adpcm_ct); REGISTER_DECODER(ADPCM_EA, adpcm_ea); REGISTER_DECODER(ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa); REGISTER_DECODER(ADPCM_EA_R1, adpcm_ea_r1); REGISTER_DECODER(ADPCM_EA_R2, adpcm_ea_r2); REGISTER_DECODER(ADPCM_EA_R3, adpcm_ea_r3); REGISTER_DECODER(ADPCM_EA_XAS, adpcm_ea_xas); REGISTER_ENCDEC (ADPCM_G722, adpcm_g722); REGISTER_ENCDEC (ADPCM_G726, adpcm_g726); REGISTER_DECODER(ADPCM_IMA_AMV, adpcm_ima_amv); REGISTER_DECODER(ADPCM_IMA_APC, adpcm_ima_apc); REGISTER_DECODER(ADPCM_IMA_DK3, adpcm_ima_dk3); REGISTER_DECODER(ADPCM_IMA_DK4, adpcm_ima_dk4); REGISTER_DECODER(ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs); REGISTER_DECODER(ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead); REGISTER_DECODER(ADPCM_IMA_ISS, adpcm_ima_iss); REGISTER_ENCDEC (ADPCM_IMA_QT, adpcm_ima_qt); REGISTER_DECODER(ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg); REGISTER_ENCDEC (ADPCM_IMA_WAV, adpcm_ima_wav); REGISTER_DECODER(ADPCM_IMA_WS, adpcm_ima_ws); REGISTER_ENCDEC (ADPCM_MS, adpcm_ms); REGISTER_DECODER(ADPCM_SBPRO_2, adpcm_sbpro_2); REGISTER_DECODER(ADPCM_SBPRO_3, adpcm_sbpro_3); REGISTER_DECODER(ADPCM_SBPRO_4, adpcm_sbpro_4); REGISTER_ENCDEC (ADPCM_SWF, adpcm_swf); REGISTER_DECODER(ADPCM_THP, adpcm_thp); REGISTER_DECODER(ADPCM_VIMA, adpcm_vima); REGISTER_DECODER(ADPCM_XA, adpcm_xa); REGISTER_ENCDEC (ADPCM_YAMAHA, adpcm_yamaha); /* subtitles */ REGISTER_ENCDEC (ASS, ass); REGISTER_ENCDEC (DVBSUB, dvbsub); REGISTER_ENCDEC (DVDSUB, dvdsub); REGISTER_DECODER(PGSSUB, pgssub); REGISTER_DECODER(SRT, srt); REGISTER_ENCDEC (XSUB, xsub); /* external libraries */ REGISTER_DECODER(LIBDCADEC, libdcadec) REGISTER_ENCODER(LIBFAAC, libfaac); REGISTER_ENCDEC (LIBFDK_AAC, libfdk_aac); REGISTER_ENCDEC (LIBGSM, libgsm); REGISTER_ENCDEC (LIBGSM_MS, libgsm_ms); REGISTER_ENCDEC (LIBILBC, libilbc); REGISTER_ENCODER(LIBMP3LAME, libmp3lame); REGISTER_ENCDEC (LIBOPENCORE_AMRNB, libopencore_amrnb); REGISTER_DECODER(LIBOPENCORE_AMRWB, libopencore_amrwb); REGISTER_ENCDEC (LIBOPENJPEG, libopenjpeg); REGISTER_ENCDEC (LIBOPUS, libopus); REGISTER_ENCDEC (LIBSCHROEDINGER, libschroedinger); REGISTER_ENCDEC (LIBSPEEX, libspeex); REGISTER_ENCODER(LIBTHEORA, libtheora); REGISTER_ENCODER(LIBTWOLAME, libtwolame); REGISTER_ENCODER(LIBVO_AACENC, libvo_aacenc); REGISTER_ENCODER(LIBVO_AMRWBENC, libvo_amrwbenc); REGISTER_ENCODER(LIBVORBIS, libvorbis); REGISTER_ENCDEC (LIBVPX_VP8, libvpx_vp8); REGISTER_ENCDEC (LIBVPX_VP9, libvpx_vp9); REGISTER_ENCODER(LIBWAVPACK, libwavpack); REGISTER_ENCODER(LIBWEBP, libwebp); REGISTER_ENCODER(LIBX262, libx262); REGISTER_ENCODER(LIBX264, libx264); REGISTER_ENCODER(LIBX265, libx265); REGISTER_ENCODER(LIBXAVS, libxavs); REGISTER_ENCODER(LIBXVID, libxvid); /* external libraries, that shouldn't be used by default if one of the * above is available */ REGISTER_ENCDEC (LIBOPENH264, libopenh264); REGISTER_ENCODER(H264_NVENC, h264_nvenc); REGISTER_ENCODER(H264_OMX, h264_omx); REGISTER_ENCODER(H264_QSV, h264_qsv); REGISTER_ENCODER(H264_VAAPI, h264_vaapi); REGISTER_ENCODER(LIBKVAZAAR, libkvazaar); REGISTER_ENCODER(HEVC_NVENC, hevc_nvenc); REGISTER_ENCODER(HEVC_QSV, hevc_qsv); REGISTER_ENCODER(HEVC_VAAPI, hevc_vaapi); REGISTER_ENCODER(MJPEG_VAAPI, mjpeg_vaapi); REGISTER_ENCODER(MPEG2_QSV, 
mpeg2_qsv); REGISTER_ENCODER(MPEG2_VAAPI, mpeg2_vaapi); REGISTER_ENCODER(MPEG4_OMX, mpeg4_omx); #if FF_API_NVENC_OLD_NAME REGISTER_ENCODER(NVENC_H264, nvenc_h264); REGISTER_ENCODER(NVENC_HEVC, nvenc_hevc); #endif REGISTER_ENCODER(VP8_VAAPI, vp8_vaapi); /* parsers */ REGISTER_PARSER(AAC, aac); REGISTER_PARSER(AAC_LATM, aac_latm); REGISTER_PARSER(AC3, ac3); REGISTER_PARSER(ADX, adx); REGISTER_PARSER(BMP, bmp); REGISTER_PARSER(CAVSVIDEO, cavsvideo); REGISTER_PARSER(COOK, cook); REGISTER_PARSER(DCA, dca); REGISTER_PARSER(DIRAC, dirac); REGISTER_PARSER(DNXHD, dnxhd); REGISTER_PARSER(DPX, dpx); REGISTER_PARSER(DVBSUB, dvbsub); REGISTER_PARSER(DVDSUB, dvdsub); REGISTER_PARSER(FLAC, flac); REGISTER_PARSER(GSM, gsm); REGISTER_PARSER(H261, h261); REGISTER_PARSER(H263, h263); REGISTER_PARSER(H264, h264); REGISTER_PARSER(HEVC, hevc); REGISTER_PARSER(MJPEG, mjpeg); REGISTER_PARSER(MLP, mlp); REGISTER_PARSER(MPEG4VIDEO, mpeg4video); REGISTER_PARSER(MPEGAUDIO, mpegaudio); REGISTER_PARSER(MPEGVIDEO, mpegvideo); REGISTER_PARSER(OPUS, opus); REGISTER_PARSER(PNG, png); REGISTER_PARSER(PNM, pnm); REGISTER_PARSER(RV30, rv30); REGISTER_PARSER(RV40, rv40); REGISTER_PARSER(TAK, tak); REGISTER_PARSER(VC1, vc1); REGISTER_PARSER(VORBIS, vorbis); REGISTER_PARSER(VP3, vp3); REGISTER_PARSER(VP8, vp8); }"} {"target": 0, "idx": 2120, "func": "int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic = 1; int len = in->nb_samples; int p; if (ac->dc) { /* dithered conversion */ av_dlog(ac->avr, \"%d samples - audio_convert: %s to %s (dithered)\\n\", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt)); return ff_convert_dither(ac->dc, out, in); } /* determine whether to use the optimized function based on pointer and samples alignment in both the input and output */ if (ac->has_optimized_func) { int ptr_align = FFMIN(in->ptr_align, out->ptr_align); int samples_align = FFMIN(in->samples_align, out->samples_align); int aligned_len = FFALIGN(len, ac->samples_align); if (!(ptr_align % ac->ptr_align) && samples_align >= aligned_len) { len = aligned_len; use_generic = 0; } } av_dlog(ac->avr, \"%d samples - audio_convert: %s to %s (%s)\\n\", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic ? ac->func_descr_generic : ac->func_descr); if (ac->apply_map) { ChannelMapInfo *map = &ac->avr->ch_map_info; if (!av_sample_fmt_is_planar(ac->out_fmt)) { av_log(ac->avr, AV_LOG_ERROR, \"cannot remap packed format during conversion\\n\"); return AVERROR(EINVAL); } if (map->do_remap) { if (av_sample_fmt_is_planar(ac->in_fmt)) { conv_func_flat *convert = use_generic ? ac->conv_flat_generic : ac->conv_flat; for (p = 0; p < ac->planes; p++) if (map->channel_map[p] >= 0) convert(out->data[p], in->data[map->channel_map[p]], len); } else { uint8_t *data[AVRESAMPLE_MAX_CHANNELS]; conv_func_deinterleave *convert = use_generic ? 
ac->conv_deinterleave_generic : ac->conv_deinterleave; for (p = 0; p < ac->channels; p++) data[map->input_map[p]] = out->data[p]; convert(data, in->data[0], len, ac->channels); } } if (map->do_copy || map->do_zero) { for (p = 0; p < ac->planes; p++) { if (map->channel_copy[p]) memcpy(out->data[p], out->data[map->channel_copy[p]], len * out->stride); else if (map->channel_zero[p]) av_samples_set_silence(&out->data[p], 0, len, 1, ac->out_fmt); } } } else { switch (ac->func_type) { case CONV_FUNC_TYPE_FLAT: { if (!in->is_planar) len *= in->channels; if (use_generic) { for (p = 0; p < ac->planes; p++) ac->conv_flat_generic(out->data[p], in->data[p], len); } else { for (p = 0; p < ac->planes; p++) ac->conv_flat(out->data[p], in->data[p], len); } break; } case CONV_FUNC_TYPE_INTERLEAVE: if (use_generic) ac->conv_interleave_generic(out->data[0], in->data, len, ac->channels); else ac->conv_interleave(out->data[0], in->data, len, ac->channels); break; case CONV_FUNC_TYPE_DEINTERLEAVE: if (use_generic) ac->conv_deinterleave_generic(out->data, in->data[0], len, ac->channels); else ac->conv_deinterleave(out->data, in->data[0], len, ac->channels); break; } } out->nb_samples = in->nb_samples; return 0; }"} {"target": 1, "idx": 2143, "func": "static int qcow2_set_up_encryption(BlockDriverState *bs, const char *encryptfmt, QemuOpts *opts, Error **errp) { BDRVQcow2State *s = bs->opaque; QCryptoBlockCreateOptions *cryptoopts = NULL; QCryptoBlock *crypto = NULL; int ret = -EINVAL; QDict *options, *encryptopts; options = qemu_opts_to_qdict(opts, NULL); qdict_extract_subqdict(options, &encryptopts, \"encrypt.\"); QDECREF(options); if (!g_str_equal(encryptfmt, \"aes\")) { error_setg(errp, \"Unknown encryption format '%s', expected 'aes'\", encryptfmt); ret = -EINVAL; goto out; } cryptoopts = block_crypto_create_opts_init( Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp); if (!cryptoopts) { ret = -EINVAL; goto out; } s->crypt_method_header = QCOW_CRYPT_AES; crypto = qcrypto_block_create(cryptoopts, NULL, NULL, bs, errp); if (!crypto) { ret = -EINVAL; goto out; } ret = qcow2_update_header(bs); if (ret < 0) { error_setg_errno(errp, -ret, \"Could not write encryption header\"); goto out; } out: QDECREF(encryptopts); qcrypto_block_free(crypto); qapi_free_QCryptoBlockCreateOptions(cryptoopts); return ret; }"} {"target": 1, "idx": 2147, "func": "static void tilegx_cpu_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); TileGXCPUClass *tcc = TILEGX_CPU_CLASS(oc); tcc->parent_realize = dc->realize; dc->realize = tilegx_cpu_realizefn; tcc->parent_reset = cc->reset; cc->reset = tilegx_cpu_reset; cc->has_work = tilegx_cpu_has_work; cc->do_interrupt = tilegx_cpu_do_interrupt; cc->cpu_exec_interrupt = tilegx_cpu_exec_interrupt; cc->dump_state = tilegx_cpu_dump_state; cc->set_pc = tilegx_cpu_set_pc; cc->handle_mmu_fault = tilegx_cpu_handle_mmu_fault; cc->gdb_num_core_regs = 0; }"} {"target": 0, "idx": 2158, "func": "static inline void downmix_2f_1r_to_stereo(float *samples) { int i; for (i = 0; i < 256; i++) { samples[i] += samples[i + 512]; samples[i + 256] += samples[i + 512]; samples[i + 512] = 0; } }"} {"target": 0, "idx": 2160, "func": "static PayloadContext *h264_new_extradata(void) { PayloadContext *data = av_mallocz(sizeof(PayloadContext) + FF_INPUT_BUFFER_PADDING_SIZE); if (data) { data->cookie = MAGIC_COOKIE; } return data; }"} {"target": 0, "idx": 2183, "func": "static void dma_complete(DMAAIOCB *dbs, int ret) { trace_dma_complete(dbs, ret, dbs->common.cb); 
dma_bdrv_unmap(dbs); if (dbs->common.cb) { dbs->common.cb(dbs->common.opaque, ret); } qemu_iovec_destroy(&dbs->iov); if (dbs->bh) { qemu_bh_delete(dbs->bh); dbs->bh = NULL; } qemu_aio_unref(dbs); }"} {"target": 1, "idx": 2209, "func": "static int proxy_mkdir(FsContext *fs_ctx, V9fsPath *dir_path, const char *name, FsCred *credp) { int retval; V9fsString fullname; v9fs_string_init(&fullname); v9fs_string_sprintf(&fullname, \"%s/%s\", dir_path->data, name); retval = v9fs_request(fs_ctx->private, T_MKDIR, NULL, &fullname, credp->fc_mode, credp->fc_uid, credp->fc_gid); v9fs_string_free(&fullname); if (retval < 0) { errno = -retval; retval = -1; } v9fs_string_free(&fullname); return retval; }"} {"target": 1, "idx": 2235, "func": "static int decode_init_thread_copy(AVCodecContext *avctx) { H264Context *h = avctx->priv_data; if (!avctx->internal->is_copy) return 0; memset(h->sps_buffers, 0, sizeof(h->sps_buffers)); memset(h->pps_buffers, 0, sizeof(h->pps_buffers)); h->rbsp_buffer[0] = NULL; h->rbsp_buffer[1] = NULL; h->rbsp_buffer_size[0] = 0; h->rbsp_buffer_size[1] = 0; h->context_initialized = 0; return 0; }"} {"target": 1, "idx": 2238, "func": "static int dvvideo_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { DVVideoContext *s = avctx->priv_data; s->sys = dv_frame_profile(buf); if (!s->sys || buf_size < s->sys->frame_size) return -1; /* NOTE: we only accept several full frames */ if(s->picture.data[0]) avctx->release_buffer(avctx, &s->picture); s->picture.reference = 0; s->picture.key_frame = 1; s->picture.pict_type = FF_I_TYPE; avctx->pix_fmt = s->sys->pix_fmt; avcodec_set_dimensions(avctx, s->sys->width, s->sys->height); if(avctx->get_buffer(avctx, &s->picture) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return -1; } s->picture.interlaced_frame = 1; s->picture.top_field_first = 0; s->buf = buf; avctx->execute(avctx, dv_decode_mt, (void**)&dv_anchor[0], NULL, s->sys->difseg_size * 27); emms_c(); /* return image */ *data_size = sizeof(AVFrame); *(AVFrame*)data= s->picture; return s->sys->frame_size; }"} {"target": 1, "idx": 2253, "func": "static uint32_t eepro100_read4(EEPRO100State * s, uint32_t addr) { uint32_t val; if (addr <= sizeof(s->mem) - sizeof(val)) { memcpy(&val, &s->mem[addr], sizeof(val)); } switch (addr) { case SCBStatus: TRACE(OTHER, logout(\"addr=%s val=0x%08x\\n\", regname(addr), val)); break; case SCBPointer: #if 0 val = eepro100_read_pointer(s); #endif TRACE(OTHER, logout(\"addr=%s val=0x%08x\\n\", regname(addr), val)); break; case SCBPort: val = eepro100_read_port(s); TRACE(OTHER, logout(\"addr=%s val=0x%08x\\n\", regname(addr), val)); break; case SCBCtrlMDI: val = eepro100_read_mdi(s); break; default: logout(\"addr=%s val=0x%08x\\n\", regname(addr), val); missing(\"unknown longword read\"); } return val; }"} {"target": 1, "idx": 2260, "func": "static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int (*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped) { MpegEncContext *s = opaque; s->mv_dir = mv_dir; s->mv_type = mv_type; s->mb_intra = mb_intra; s->mb_skipped = mb_skipped; s->mb_x = mb_x; s->mb_y = mb_y; memcpy(s->mv, mv, sizeof(*mv)); ff_init_block_index(s); ff_update_block_index(s); s->dsp.clear_blocks(s->block[0]); s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16; s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift); s->dest[2] = 
s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift); assert(ref == 0); ff_MPV_decode_mb(s, s->block); }"} {"target": 0, "idx": 2285, "func": "static abi_long do_setsockopt(int sockfd, int level, int optname, abi_ulong optval_addr, socklen_t optlen) { abi_long ret; int val; struct ip_mreqn *ip_mreq; struct ip_mreq_source *ip_mreq_source; switch(level) { case SOL_TCP: /* TCP options all take an 'int' value. */ if (optlen < sizeof(uint32_t)) return -TARGET_EINVAL; if (get_user_u32(val, optval_addr)) return -TARGET_EFAULT; ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); break; case SOL_IP: switch(optname) { case IP_TOS: case IP_TTL: case IP_HDRINCL: case IP_ROUTER_ALERT: case IP_RECVOPTS: case IP_RETOPTS: case IP_PKTINFO: case IP_MTU_DISCOVER: case IP_RECVERR: case IP_RECVTOS: #ifdef IP_FREEBIND case IP_FREEBIND: #endif case IP_MULTICAST_TTL: case IP_MULTICAST_LOOP: val = 0; if (optlen >= sizeof(uint32_t)) { if (get_user_u32(val, optval_addr)) return -TARGET_EFAULT; } else if (optlen >= 1) { if (get_user_u8(val, optval_addr)) return -TARGET_EFAULT; } ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val))); break; case IP_ADD_MEMBERSHIP: case IP_DROP_MEMBERSHIP: if (optlen < sizeof (struct target_ip_mreq) || optlen > sizeof (struct target_ip_mreqn)) return -TARGET_EINVAL; ip_mreq = (struct ip_mreqn *) alloca(optlen); target_to_host_ip_mreq(ip_mreq, optval_addr, optlen); ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen)); break; case IP_BLOCK_SOURCE: case IP_UNBLOCK_SOURCE: case IP_ADD_SOURCE_MEMBERSHIP: case IP_DROP_SOURCE_MEMBERSHIP: if (optlen != sizeof (struct target_ip_mreq_source)) return -TARGET_EINVAL; ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1); ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen)); unlock_user (ip_mreq_source, optval_addr, 0); break; default: goto unimplemented; } break; case TARGET_SOL_SOCKET: switch (optname) { /* Options with 'int' argument. 
*/ case TARGET_SO_DEBUG: optname = SO_DEBUG; break; case TARGET_SO_REUSEADDR: optname = SO_REUSEADDR; break; case TARGET_SO_TYPE: optname = SO_TYPE; break; case TARGET_SO_ERROR: optname = SO_ERROR; break; case TARGET_SO_DONTROUTE: optname = SO_DONTROUTE; break; case TARGET_SO_BROADCAST: optname = SO_BROADCAST; break; case TARGET_SO_SNDBUF: optname = SO_SNDBUF; break; case TARGET_SO_RCVBUF: optname = SO_RCVBUF; break; case TARGET_SO_KEEPALIVE: optname = SO_KEEPALIVE; break; case TARGET_SO_OOBINLINE: optname = SO_OOBINLINE; break; case TARGET_SO_NO_CHECK: optname = SO_NO_CHECK; break; case TARGET_SO_PRIORITY: optname = SO_PRIORITY; break; #ifdef SO_BSDCOMPAT case TARGET_SO_BSDCOMPAT: optname = SO_BSDCOMPAT; break; #endif case TARGET_SO_PASSCRED: optname = SO_PASSCRED; break; case TARGET_SO_TIMESTAMP: optname = SO_TIMESTAMP; break; case TARGET_SO_RCVLOWAT: optname = SO_RCVLOWAT; break; case TARGET_SO_RCVTIMEO: optname = SO_RCVTIMEO; break; case TARGET_SO_SNDTIMEO: optname = SO_SNDTIMEO; break; break; default: goto unimplemented; } if (optlen < sizeof(uint32_t)) return -TARGET_EINVAL; if (get_user_u32(val, optval_addr)) return -TARGET_EFAULT; ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val))); break; default: unimplemented: gemu_log(\"Unsupported setsockopt level=%d optname=%d \\n\", level, optname); ret = -TARGET_ENOPROTOOPT; } return ret; }"} {"target": 1, "idx": 2326, "func": "void vty_putchars(VIOsPAPRDevice *sdev, uint8_t *buf, int len) { VIOsPAPRVTYDevice *dev = VIO_SPAPR_VTY_DEVICE(sdev); /* FIXME: should check the qemu_chr_fe_write() return value */ qemu_chr_fe_write(dev->chardev, buf, len); }"} {"target": 1, "idx": 2330, "func": "static inline int cris_addc_m(int a, const int *b) { asm volatile (\"addc [%1], %0\\n\" : \"+r\" (a) : \"r\" (b)); return a; }"} {"target": 1, "idx": 2335, "func": "int av_find_stream_info(AVFormatContext *ic) { int i, count, ret, read_size, j; AVStream *st; AVPacket pkt1, *pkt; AVPacketList *pktl=NULL, **ppktl; int64_t last_dts[MAX_STREAMS]; int duration_count[MAX_STREAMS]={0}; double duration_error[MAX_STREAMS][MAX_STD_TIMEBASES]={{0}}; //FIXME malloc()? 
offset_t old_offset = url_ftell(&ic->pb); for(i=0;i<ic->nb_streams;i++) { st = ic->streams[i]; if(st->codec->codec_type == CODEC_TYPE_VIDEO){ /* if(!st->time_base.num) st->time_base= */ if(!st->codec->time_base.num) st->codec->time_base= st->time_base; } //only for the split stuff if (!st->parser) { st->parser = av_parser_init(st->codec->codec_id); if(st->need_parsing == 2 && st->parser){ st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; } } } for(i=0;i<MAX_STREAMS;i++){ last_dts[i]= AV_NOPTS_VALUE; } count = 0; read_size = 0; ppktl = &ic->packet_buffer; for(;;) { /* check if one codec still needs to be handled */ for(i=0;i<ic->nb_streams;i++) { st = ic->streams[i]; if (!has_codec_parameters(st->codec)) break; /* variable fps and no guess at the real fps */ if( st->codec->time_base.den >= 101LL*st->codec->time_base.num && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO) break; if(st->parser && st->parser->parser->split && !st->codec->extradata) break; } if (i == ic->nb_streams) { /* NOTE: if the format has no header, then we need to read some packets to get most of the streams, so we cannot stop here */ if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) { /* if we found the info for all the codecs, we can stop */ ret = count; break; } } /* we did not get all the codec info, but we read too much data */ if (read_size >= MAX_READ_SIZE) { ret = count; break; } /* NOTE: a new stream can be added there if no header in file (AVFMTCTX_NOHEADER) */ ret = av_read_frame_internal(ic, &pkt1); if (ret < 0) { /* EOF or error */ ret = -1; /* we could not have all the codec parameters before EOF */ for(i=0;i<ic->nb_streams;i++) { st = ic->streams[i]; if (!has_codec_parameters(st->codec)){ char buf[256]; avcodec_string(buf, sizeof(buf), st->codec, 0); av_log(ic, AV_LOG_INFO, \"Could not find codec parameters (%s)\\n\", buf); } else { ret = 0; } } break; } pktl = av_mallocz(sizeof(AVPacketList)); if (!pktl) { ret = AVERROR_NOMEM; break; } /* add the packet in the buffered packet list */ *ppktl = pktl; ppktl = &pktl->next; pkt = &pktl->pkt; *pkt = pkt1; /* duplicate the packet */ if (av_dup_packet(pkt) < 0) { ret = AVERROR_NOMEM; break; } read_size += pkt->size; st = ic->streams[pkt->stream_index]; if(st->codec_info_nb_frames>1) //FIXME move codec_info_nb_frames and codec_info_duration from AVStream into this func st->codec_info_duration += pkt->duration; if (pkt->duration != 0) st->codec_info_nb_frames++; { int index= pkt->stream_index; int64_t last= last_dts[index]; int64_t duration= pkt->dts - last; if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){ double dur= duration * av_q2d(st->time_base); // if(st->codec->codec_type == CODEC_TYPE_VIDEO) // av_log(NULL, AV_LOG_ERROR, \"%f\\n\", dur); if(duration_count[index] < 2) memset(duration_error, 0, sizeof(duration_error)); for(i=1; i<MAX_STD_TIMEBASES; i++){ int framerate= get_std_framerate(i); int ticks= lrintf(dur*framerate/(1001*12)); double error= dur - ticks*1001*12/(double)framerate; duration_error[index][i] += error*error; } duration_count[index]++; if(st->codec_info_nb_frames == 0 && 0) st->codec_info_duration += duration; } if(last == AV_NOPTS_VALUE || duration_count[index]<=1) last_dts[pkt->stream_index]= pkt->dts; } if(st->parser && st->parser->parser->split && !st->codec->extradata){ int i= st->parser->parser->split(st->codec, pkt->data, pkt->size); if(i){ st->codec->extradata_size= i; st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size); memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE); } } /* if still no information, we try to open the codec and to decompress the frame. We try to avoid that in most cases as it takes longer and uses more memory. For MPEG4, we need to decompress for Quicktime. 
*/ if (!has_codec_parameters(st->codec) /*&& (st->codec->codec_id == CODEC_ID_FLV1 || st->codec->codec_id == CODEC_ID_H264 || st->codec->codec_id == CODEC_ID_H263 || st->codec->codec_id == CODEC_ID_H261 || st->codec->codec_id == CODEC_ID_VORBIS || st->codec->codec_id == CODEC_ID_MJPEG || st->codec->codec_id == CODEC_ID_PNG || st->codec->codec_id == CODEC_ID_PAM || st->codec->codec_id == CODEC_ID_PGM || st->codec->codec_id == CODEC_ID_PGMYUV || st->codec->codec_id == CODEC_ID_PBM || st->codec->codec_id == CODEC_ID_PPM || st->codec->codec_id == CODEC_ID_SHORTEN || (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/) try_decode_frame(st, pkt->data, pkt->size); if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) { break; } count++; } // close codecs which where opened in try_decode_frame() for(i=0;i<ic->nb_streams;i++) { st = ic->streams[i]; if(st->codec->codec) avcodec_close(st->codec); } for(i=0;i<ic->nb_streams;i++) { st = ic->streams[i]; if (st->codec->codec_type == CODEC_TYPE_VIDEO) { if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample) st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt); if(duration_count[i] && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) /*&& //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ... st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){ double best_error= 2*av_q2d(st->time_base); best_error= best_error*best_error*duration_count[i]*1000*12*30; for(j=1; j<MAX_STD_TIMEBASES; j++){ double error= duration_error[i][j] * get_std_framerate(j); // if(st->codec->codec_type == CODEC_TYPE_VIDEO) // av_log(NULL, AV_LOG_ERROR, \"%f %f\\n\", get_std_framerate(j) / 12.0/1001, error); if(error < best_error){ best_error= error; av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX); } } } if (!st->r_frame_rate.num){ if( st->codec->time_base.den * (int64_t)st->time_base.num <= st->codec->time_base.num * (int64_t)st->time_base.den){ st->r_frame_rate.num = st->codec->time_base.den; st->r_frame_rate.den = st->codec->time_base.num; }else{ st->r_frame_rate.num = st->time_base.den; st->r_frame_rate.den = st->time_base.num; } } }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) { if(!st->codec->bits_per_sample) st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id); } } av_estimate_timings(ic, old_offset); #if 0 /* correct DTS for b frame streams with no timestamps */ for(i=0;i<ic->nb_streams;i++) { st = ic->streams[i]; if (st->codec->codec_type == CODEC_TYPE_VIDEO) { if(b-frames){ ppktl = &ic->packet_buffer; while(ppkt1){ if(ppkt1->stream_index != i) continue; if(ppkt1->pkt->dts < 0) break; if(ppkt1->pkt->pts != AV_NOPTS_VALUE) break; ppkt1->pkt->dts -= delta; ppkt1= ppkt1->next; } if(ppkt1) continue; st->cur_dts -= delta; } } } #endif return ret; }"} {"target": 1, "idx": 2336, "func": "static void create_vorbis_context(vorbis_enc_context *venc, AVCodecContext *avccontext) { vorbis_enc_floor *fc; vorbis_enc_residue *rc; vorbis_enc_mapping *mc; int i, book; venc->channels = avccontext->channels; venc->sample_rate = avccontext->sample_rate; venc->log2_blocksize[0] = venc->log2_blocksize[1] = 11; venc->ncodebooks = FF_ARRAY_ELEMS(cvectors); venc->codebooks = av_malloc(sizeof(vorbis_enc_codebook) * venc->ncodebooks); // codebook 0..14 - floor1 book, values 0..255 // codebook 15 residue masterbook // codebook 16..29 residue for (book = 0; book < venc->ncodebooks; book++) { vorbis_enc_codebook 
*cb = &venc->codebooks[book]; int vals; cb->ndimentions = cvectors[book].dim; cb->nentries = cvectors[book].real_len; cb->min = cvectors[book].min; cb->delta = cvectors[book].delta; cb->lookup = cvectors[book].lookup; cb->seq_p = 0; cb->lens = av_malloc(sizeof(uint8_t) * cb->nentries); cb->codewords = av_malloc(sizeof(uint32_t) * cb->nentries); memcpy(cb->lens, cvectors[book].clens, cvectors[book].len); memset(cb->lens + cvectors[book].len, 0, cb->nentries - cvectors[book].len); if (cb->lookup) { vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries); cb->quantlist = av_malloc(sizeof(int) * vals); for (i = 0; i < vals; i++) cb->quantlist[i] = cvectors[book].quant[i]; } else { cb->quantlist = NULL; } ready_codebook(cb); } venc->nfloors = 1; venc->floors = av_malloc(sizeof(vorbis_enc_floor) * venc->nfloors); // just 1 floor fc = &venc->floors[0]; fc->partitions = NUM_FLOOR_PARTITIONS; fc->partition_to_class = av_malloc(sizeof(int) * fc->partitions); fc->nclasses = 0; for (i = 0; i < fc->partitions; i++) { static const int a[] = {0, 1, 2, 2, 3, 3, 4, 4}; fc->partition_to_class[i] = a[i]; fc->nclasses = FFMAX(fc->nclasses, fc->partition_to_class[i]); } fc->nclasses++; fc->classes = av_malloc(sizeof(vorbis_enc_floor_class) * fc->nclasses); for (i = 0; i < fc->nclasses; i++) { vorbis_enc_floor_class * c = &fc->classes[i]; int j, books; c->dim = floor_classes[i].dim; c->subclass = floor_classes[i].subclass; c->masterbook = floor_classes[i].masterbook; books = (1 << c->subclass); c->books = av_malloc(sizeof(int) * books); for (j = 0; j < books; j++) c->books[j] = floor_classes[i].nbooks[j]; } fc->multiplier = 2; fc->rangebits = venc->log2_blocksize[0] - 1; fc->values = 2; for (i = 0; i < fc->partitions; i++) fc->values += fc->classes[fc->partition_to_class[i]].dim; fc->list = av_malloc(sizeof(vorbis_floor1_entry) * fc->values); fc->list[0].x = 0; fc->list[1].x = 1 << fc->rangebits; for (i = 2; i < fc->values; i++) { static const int a[] = { 93, 23,372, 6, 46,186,750, 14, 33, 65, 130,260,556, 3, 10, 18, 28, 39, 55, 79, 111,158,220,312,464,650,850 }; fc->list[i].x = a[i - 2]; } ff_vorbis_ready_floor1_list(fc->list, fc->values); venc->nresidues = 1; venc->residues = av_malloc(sizeof(vorbis_enc_residue) * venc->nresidues); // single residue rc = &venc->residues[0]; rc->type = 2; rc->begin = 0; rc->end = 1600; rc->partition_size = 32; rc->classifications = 10; rc->classbook = 15; rc->books = av_malloc(sizeof(*rc->books) * rc->classifications); { static const int8_t a[10][8] = { { -1, -1, -1, -1, -1, -1, -1, -1, }, { -1, -1, 16, -1, -1, -1, -1, -1, }, { -1, -1, 17, -1, -1, -1, -1, -1, }, { -1, -1, 18, -1, -1, -1, -1, -1, }, { -1, -1, 19, -1, -1, -1, -1, -1, }, { -1, -1, 20, -1, -1, -1, -1, -1, }, { -1, -1, 21, -1, -1, -1, -1, -1, }, { 22, 23, -1, -1, -1, -1, -1, -1, }, { 24, 25, -1, -1, -1, -1, -1, -1, }, { 26, 27, 28, -1, -1, -1, -1, -1, }, }; memcpy(rc->books, a, sizeof a); } ready_residue(rc, venc); venc->nmappings = 1; venc->mappings = av_malloc(sizeof(vorbis_enc_mapping) * venc->nmappings); // single mapping mc = &venc->mappings[0]; mc->submaps = 1; mc->mux = av_malloc(sizeof(int) * venc->channels); for (i = 0; i < venc->channels; i++) mc->mux[i] = 0; mc->floor = av_malloc(sizeof(int) * mc->submaps); mc->residue = av_malloc(sizeof(int) * mc->submaps); for (i = 0; i < mc->submaps; i++) { mc->floor[i] = 0; mc->residue[i] = 0; } mc->coupling_steps = venc->channels == 2 ? 
1 : 0; mc->magnitude = av_malloc(sizeof(int) * mc->coupling_steps); mc->angle = av_malloc(sizeof(int) * mc->coupling_steps); if (mc->coupling_steps) { mc->magnitude[0] = 0; mc->angle[0] = 1; } venc->nmodes = 1; venc->modes = av_malloc(sizeof(vorbis_enc_mode) * venc->nmodes); // single mode venc->modes[0].blockflag = 0; venc->modes[0].mapping = 0; venc->have_saved = 0; venc->saved = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2); venc->samples = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1])); venc->floor = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2); venc->coeffs = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2); venc->win[0] = ff_vorbis_vwin[venc->log2_blocksize[0] - 6]; venc->win[1] = ff_vorbis_vwin[venc->log2_blocksize[1] - 6]; ff_mdct_init(&venc->mdct[0], venc->log2_blocksize[0], 0, 1.0); ff_mdct_init(&venc->mdct[1], venc->log2_blocksize[1], 0, 1.0); }"} {"target": 1, "idx": 2350, "func": "QEMUOptionParameter *append_option_parameters(QEMUOptionParameter *dest, QEMUOptionParameter *list) { size_t num_options, num_dest_options; num_options = count_option_parameters(dest); num_dest_options = num_options; num_options += count_option_parameters(list); dest = qemu_realloc(dest, (num_options + 1) * sizeof(QEMUOptionParameter)); while (list && list->name) { if (get_option_parameter(dest, list->name) == NULL) { dest[num_dest_options++] = *list; } list++; } return dest; }"} {"target": 1, "idx": 2352, "func": "void stream_start(const char *job_id, BlockDriverState *bs, BlockDriverState *base, const char *backing_file_str, int64_t speed, BlockdevOnError on_error, Error **errp) { StreamBlockJob *s; BlockDriverState *iter; int orig_bs_flags; /* Make sure that the image is opened in read-write mode */ orig_bs_flags = bdrv_get_flags(bs); if (!(orig_bs_flags & BDRV_O_RDWR)) { if (bdrv_reopen(bs, orig_bs_flags | BDRV_O_RDWR, errp) != 0) { return; } } /* Prevent concurrent jobs trying to modify the graph structure here, we * already have our own plans. Also don't allow resize as the image size is * queried only at the job start and then cached. */ s = block_job_create(job_id, &stream_job_driver, bs, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE, speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp); if (!s) { goto fail; } /* Block all intermediate nodes between bs and base, because they will * disappear from the chain after this operation. The streaming job reads * every block only once, assuming that it doesn't change, so block writes * and resizes. 
*/ for (iter = backing_bs(bs); iter && iter != base; iter = backing_bs(iter)) { block_job_add_bdrv(&s->common, \"intermediate node\", iter, 0, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED, &error_abort); } s->base = base; s->backing_file_str = g_strdup(backing_file_str); s->bs_flags = orig_bs_flags; s->on_error = on_error; trace_stream_start(bs, base, s); block_job_start(&s->common); return; fail: if (orig_bs_flags != bdrv_get_flags(bs)) { bdrv_reopen(bs, s->bs_flags, NULL); } }"} {"target": 1, "idx": 2367, "func": "static void tcg_target_qemu_prologue(TCGContext *s) { int i, frame_size; /* reserve some stack space */ frame_size = ARRAY_SIZE(tcg_target_callee_save_regs) * 4 + TCG_STATIC_CALL_ARGS_SIZE; frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) & ~(TCG_TARGET_STACK_ALIGN - 1); /* TB prologue */ tcg_out_addi(s, TCG_REG_SP, -frame_size); for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) { tcg_out_st(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i], TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4); } /* Call generated code */ tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1]), 0); tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); tb_ret_addr = s->code_ptr; /* TB epilogue */ for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) { tcg_out_ld(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i], TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4); } tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); tcg_out_addi(s, TCG_REG_SP, frame_size); }"} {"target": 1, "idx": 2376, "func": "static void virtio_scsi_vring_teardown(VirtIOSCSI *s) { VirtIODevice *vdev = VIRTIO_DEVICE(s); VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); int i; if (s->ctrl_vring) { vring_teardown(&s->ctrl_vring->vring, vdev, 0); } if (s->event_vring) { vring_teardown(&s->event_vring->vring, vdev, 1); } if (s->cmd_vrings) { for (i = 0; i < vs->conf.num_queues && s->cmd_vrings[i]; i++) { vring_teardown(&s->cmd_vrings[i]->vring, vdev, 2 + i); } free(s->cmd_vrings); s->cmd_vrings = NULL; } }"} {"target": 1, "idx": 2385, "func": "static void opt_frame_pix_fmt(const char *arg) { if (strcmp(arg, \"list\")) frame_pix_fmt = avcodec_get_pix_fmt(arg); else { list_fmts(avcodec_pix_fmt_string, PIX_FMT_NB); av_exit(0); } }"} {"target": 0, "idx": 2399, "func": "static int scan_mmco_reset(AVCodecParserContext *s) { H264ParseContext *p = s->priv_data; H264Context *h = &p->h; H264SliceContext *sl = &h->slice_ctx[0]; sl->slice_type_nos = s->pict_type & 3; if (h->pps.redundant_pic_cnt_present) get_ue_golomb(&sl->gb); // redundant_pic_count if (ff_set_ref_count(h, sl) < 0) return AVERROR_INVALIDDATA; if (sl->slice_type_nos != AV_PICTURE_TYPE_I) { int list; for (list = 0; list < sl->list_count; list++) { if (get_bits1(&sl->gb)) { int index; for (index = 0; ; index++) { unsigned int reordering_of_pic_nums_idc = get_ue_golomb_31(&sl->gb); if (reordering_of_pic_nums_idc < 3) get_ue_golomb(&sl->gb); else if (reordering_of_pic_nums_idc > 3) { av_log(h->avctx, AV_LOG_ERROR, \"illegal reordering_of_pic_nums_idc %d\\n\", reordering_of_pic_nums_idc); return AVERROR_INVALIDDATA; } else break; if (index >= sl->ref_count[list]) { av_log(h->avctx, AV_LOG_ERROR, \"reference count %d overflow\\n\", index); return AVERROR_INVALIDDATA; } } } } } if ((h->pps.weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) || (h->pps.weighted_bipred_idc == 1 && sl->slice_type_nos == AV_PICTURE_TYPE_B)) ff_pred_weight_table(h, sl); if (get_bits1(&sl->gb)) { // adaptive_ref_pic_marking_mode_flag int i; for (i = 0; i < 
MAX_MMCO_COUNT; i++) { MMCOOpcode opcode = get_ue_golomb_31(&sl->gb); if (opcode > (unsigned) MMCO_LONG) { av_log(h->avctx, AV_LOG_ERROR, \"illegal memory management control operation %d\\n\", opcode); return AVERROR_INVALIDDATA; } if (opcode == MMCO_END) return 0; else if (opcode == MMCO_RESET) return 1; if (opcode == MMCO_SHORT2UNUSED || opcode == MMCO_SHORT2LONG) get_ue_golomb(&sl->gb); if (opcode == MMCO_SHORT2LONG || opcode == MMCO_LONG2UNUSED || opcode == MMCO_LONG || opcode == MMCO_SET_MAX_LONG) get_ue_golomb_31(&sl->gb); } } return 0; }"} {"target": 1, "idx": 2411, "func": "static int qdm2_decode_init(AVCodecContext *avctx) { QDM2Context *s = avctx->priv_data; uint8_t *extradata; int extradata_size; int tmp_val, tmp, size; int i; float alpha; /* extradata parsing Structure: wave { frma (QDM2) QDCA QDCP } 32 size (including this field) 32 tag (=frma) 32 type (=QDM2 or QDMC) 32 size (including this field, in bytes) 32 tag (=QDCA) // maybe mandatory parameters 32 unknown (=1) 32 channels (=2) 32 samplerate (=44100) 32 bitrate (=96000) 32 block size (=4096) 32 frame size (=256) (for one channel) 32 packet size (=1300) 32 size (including this field, in bytes) 32 tag (=QDCP) // maybe some tuneable parameters 32 float1 (=1.0) 32 zero ? 32 float2 (=1.0) 32 float3 (=1.0) 32 unknown (27) 32 unknown (8) 32 zero ? */ if (!avctx->extradata || (avctx->extradata_size < 48)) { av_log(avctx, AV_LOG_ERROR, \"extradata missing or truncated\\n\"); return -1; } extradata = avctx->extradata; extradata_size = avctx->extradata_size; while (extradata_size > 7) { if (!memcmp(extradata, \"frmaQDM\", 7)) break; extradata++; extradata_size--; } if (extradata_size < 12) { av_log(avctx, AV_LOG_ERROR, \"not enough extradata (%i)\\n\", extradata_size); return -1; } if (memcmp(extradata, \"frmaQDM\", 7)) { av_log(avctx, AV_LOG_ERROR, \"invalid headers, QDM? 
not found\\n\"); return -1; } if (extradata[7] == 'C') { // s->is_qdmc = 1; av_log(avctx, AV_LOG_ERROR, \"stream is QDMC version 1, which is not supported\\n\"); return -1; } extradata += 8; extradata_size -= 8; size = BE_32(extradata); if(size > extradata_size){ av_log(avctx, AV_LOG_ERROR, \"extradata size too small, %i < %i\\n\", extradata_size, size); return -1; } extradata += 4; av_log(avctx, AV_LOG_DEBUG, \"size: %d\\n\", size); if (BE_32(extradata) != MKBETAG('Q','D','C','A')) { av_log(avctx, AV_LOG_ERROR, \"invalid extradata, expecting QDCA\\n\"); return -1; } extradata += 8; avctx->channels = s->nb_channels = s->channels = BE_32(extradata); extradata += 4; avctx->sample_rate = BE_32(extradata); extradata += 4; avctx->bit_rate = BE_32(extradata); extradata += 4; s->group_size = BE_32(extradata); extradata += 4; s->fft_size = BE_32(extradata); extradata += 4; s->checksum_size = BE_32(extradata); extradata += 4; s->fft_order = av_log2(s->fft_size) + 1; s->fft_frame_size = 2 * s->fft_size; // complex has two floats // something like max decodable tones s->group_order = av_log2(s->group_size) + 1; s->frame_size = s->group_size / 16; // 16 iterations per super block if (s->fft_order == 8) s->sub_sampling = 1; else s->sub_sampling = 2; s->frequency_range = 255 / (1 << (2 - s->sub_sampling)); switch ((s->sub_sampling * 2 + s->channels - 1)) { case 0: tmp = 40; break; case 1: tmp = 48; break; case 2: tmp = 56; break; case 3: tmp = 72; break; case 4: tmp = 80; break; case 5: tmp = 100;break; default: tmp=s->sub_sampling; break; } tmp_val = 0; if ((tmp * 1000) < avctx->bit_rate) tmp_val = 1; if ((tmp * 1440) < avctx->bit_rate) tmp_val = 2; if ((tmp * 1760) < avctx->bit_rate) tmp_val = 3; if ((tmp * 2240) < avctx->bit_rate) tmp_val = 4; s->cm_table_select = tmp_val; if (s->sub_sampling == 0) tmp = 16000; else tmp = ((-(s->sub_sampling -1)) & 8000) + 20000; /* 0: 16000 -> 1 1: 20000 -> 2 2: 28000 -> 2 */ if (tmp < 8000) s->coeff_per_sb_select = 0; else if (tmp <= 16000) s->coeff_per_sb_select = 1; else s->coeff_per_sb_select = 2; if (s->fft_order != 8 && s->fft_order != 9) av_log(avctx, AV_LOG_ERROR, \"Unknown FFT order (%d), contact the developers!\\n\", s->fft_order); ff_fft_init(&s->fft_ctx, s->fft_order - 1, 1); for (i = 1; i < (1 << (s->fft_order - 2)); i++) { alpha = 2 * M_PI * (float)i / (float)(1 << (s->fft_order - 1)); s->exptab[i].re = cos(alpha); s->exptab[i].im = sin(alpha); } qdm2_init(s); // dump_context(s); return 0; }"} {"target": 1, "idx": 2414, "func": "static void new_audio_stream(AVFormatContext *oc) { AVStream *st; AVCodec *codec= NULL; AVCodecContext *audio_enc; enum CodecID codec_id; st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? 
streamid_map[oc->nb_streams] : 0); if (!st) { fprintf(stderr, \"Could not alloc stream\\n\"); ffmpeg_exit(1); } output_codecs = grow_array(output_codecs, sizeof(*output_codecs), &nb_output_codecs, nb_output_codecs + 1); if(!audio_stream_copy){ if (audio_codec_name) { codec_id = find_codec_or_die(audio_codec_name, AVMEDIA_TYPE_AUDIO, 1, avcodec_opts[AVMEDIA_TYPE_AUDIO]->strict_std_compliance); codec = avcodec_find_encoder_by_name(audio_codec_name); output_codecs[nb_output_codecs-1] = codec; } else { codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_AUDIO); codec = avcodec_find_encoder(codec_id); } } avcodec_get_context_defaults3(st->codec, codec); bitstream_filters[nb_output_files] = grow_array(bitstream_filters[nb_output_files], sizeof(*bitstream_filters[nb_output_files]), &nb_bitstream_filters[nb_output_files], oc->nb_streams); bitstream_filters[nb_output_files][oc->nb_streams - 1]= audio_bitstream_filters; audio_bitstream_filters= NULL; avcodec_thread_init(st->codec, thread_count); audio_enc = st->codec; audio_enc->codec_type = AVMEDIA_TYPE_AUDIO; if(audio_codec_tag) audio_enc->codec_tag= audio_codec_tag; if (oc->oformat->flags & AVFMT_GLOBALHEADER) { audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER; avcodec_opts[AVMEDIA_TYPE_AUDIO]->flags|= CODEC_FLAG_GLOBAL_HEADER; } if (audio_stream_copy) { st->stream_copy = 1; audio_enc->channels = audio_channels; audio_enc->sample_rate = audio_sample_rate; } else { audio_enc->codec_id = codec_id; set_context_opts(audio_enc, avcodec_opts[AVMEDIA_TYPE_AUDIO], AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, codec); if (audio_qscale > QSCALE_NONE) { audio_enc->flags |= CODEC_FLAG_QSCALE; audio_enc->global_quality = st->quality = FF_QP2LAMBDA * audio_qscale; } audio_enc->channels = audio_channels; audio_enc->sample_fmt = audio_sample_fmt; audio_enc->sample_rate = audio_sample_rate; audio_enc->channel_layout = channel_layout; if (avcodec_channel_layout_num_channels(channel_layout) != audio_channels) audio_enc->channel_layout = 0; choose_sample_fmt(st, codec); choose_sample_rate(st, codec); } audio_enc->time_base= (AVRational){1, audio_sample_rate}; if (audio_language) { av_metadata_set2(&st->metadata, \"language\", audio_language, 0); av_freep(&audio_language); } /* reset some key parameters */ audio_disable = 0; av_freep(&audio_codec_name); audio_stream_copy = 0; }"} {"target": 0, "idx": 2431, "func": "static void frame_erasure(EVRCContext *e, float *samples) { float ilspf[FILTER_ORDER], ilpc[FILTER_ORDER], idelay[NB_SUBFRAMES], tmp[SUBFRAME_SIZE + 6], f; int i, j; for (i = 0; i < FILTER_ORDER; i++) { if (e->bitrate != RATE_QUANT) e->lspf[i] = e->prev_lspf[i] * 0.875 + 0.125 * (i + 1) * 0.048; else e->lspf[i] = e->prev_lspf[i]; } if (e->prev_error_flag) e->avg_acb_gain *= 0.75; if (e->bitrate == RATE_FULL) memcpy(e->pitch_back, e->pitch, ACB_SIZE * sizeof(float)); if (e->last_valid_bitrate == RATE_QUANT) e->bitrate = RATE_QUANT; else e->bitrate = RATE_FULL; if (e->bitrate == RATE_FULL || e->bitrate == RATE_HALF) { e->pitch_delay = e->prev_pitch_delay; } else { float sum = 0; idelay[0] = idelay[1] = idelay[2] = MIN_DELAY; for (i = 0; i < NB_SUBFRAMES; i++) sum += evrc_energy_quant[e->prev_energy_gain][i]; sum /= (float) NB_SUBFRAMES; sum = pow(10, sum); for (i = 0; i < NB_SUBFRAMES; i++) e->energy_vector[i] = sum; } if (fabs(e->pitch_delay - e->prev_pitch_delay) > 15) e->prev_pitch_delay = e->pitch_delay; for (i = 0; i < NB_SUBFRAMES; i++) { int subframe_size = subframe_sizes[i]; int pitch_lag; interpolate_lsp(ilspf, e->lspf, 
e->prev_lspf, i); if (e->bitrate != RATE_QUANT) { if (e->avg_acb_gain < 0.3) { idelay[0] = estimation_delay[i]; idelay[1] = estimation_delay[i + 1]; idelay[2] = estimation_delay[i + 2]; } else { interpolate_delay(idelay, e->pitch_delay, e->prev_pitch_delay, i); } } pitch_lag = lrintf((idelay[1] + idelay[0]) / 2.0); decode_predictor_coeffs(ilspf, ilpc); if (e->bitrate != RATE_QUANT) { acb_excitation(e, e->pitch + ACB_SIZE, e->avg_acb_gain, idelay, subframe_size); for (j = 0; j < subframe_size; j++) e->pitch[ACB_SIZE + j] *= e->fade_scale; e->fade_scale = FFMAX(e->fade_scale - 0.05, 0.0); } else { for (j = 0; j < subframe_size; j++) e->pitch[ACB_SIZE + j] = e->energy_vector[i]; } memcpy(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float)); if (e->bitrate != RATE_QUANT && e->avg_acb_gain < 0.4) { f = 0.1 * e->avg_fcb_gain; for (j = 0; j < subframe_size; j++) e->pitch[ACB_SIZE + j] += f; } else if (e->bitrate == RATE_QUANT) { for (j = 0; j < subframe_size; j++) e->pitch[ACB_SIZE + j] = e->energy_vector[i]; } synthesis_filter(e->pitch + ACB_SIZE, ilpc, e->synthesis, subframe_size, tmp); postfilter(e, tmp, ilpc, samples, pitch_lag, &postfilter_coeffs[e->bitrate], subframe_size); samples += subframe_size; } }"} {"target": 1, "idx": 2450, "func": "void ff_mdct_calcw_c(FFTContext *s, FFTDouble *out, const FFTSample *input) { int i, j, n, n8, n4, n2, n3; FFTDouble re, im; const uint16_t *revtab = s->revtab; const FFTSample *tcos = s->tcos; const FFTSample *tsin = s->tsin; FFTComplex *x = s->tmp_buf; FFTDComplex *o = (FFTDComplex *)out; n = 1 << s->mdct_bits; n2 = n >> 1; n4 = n >> 2; n8 = n >> 3; n3 = 3 * n4; /* pre rotation */ for(i=0;ifft_calc(s, x); /* post rotation */ for(i=0;icheck_rxov = 0; s->mac_reg[index] = val & 0xffff; if (e1000_has_rxbufs(s, 1)) { qemu_flush_queued_packets(&s->nic->nc); } }"} {"target": 1, "idx": 2493, "func": "unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu) { trace_cpu_set_state(CPU(cpu)->cpu_index, cpu_state); switch (cpu_state) { case CPU_STATE_STOPPED: case CPU_STATE_CHECK_STOP: /* halt the cpu for common infrastructure */ s390_cpu_halt(cpu); break; case CPU_STATE_OPERATING: case CPU_STATE_LOAD: /* unhalt the cpu for common infrastructure */ s390_cpu_unhalt(cpu); break; default: error_report(\"Requested CPU state is not a valid S390 CPU state: %u\", cpu_state); exit(1); cpu->env.cpu_state = cpu_state; return s390_count_running_cpus();"} {"target": 1, "idx": 2496, "func": "static int mmap_read_frame(struct video_data *s, void *frame, int64_t *ts) { struct v4l2_buffer buf; int res; memset(&buf, 0, sizeof(struct v4l2_buffer)); buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; /* FIXME: Some special treatment might be needed in case of loss of signal... 
*/ while ((res = ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 && ((errno == EAGAIN) || (errno == EINTR))); if (res < 0) { av_log(NULL, AV_LOG_ERROR, \"ioctl(VIDIOC_DQBUF): %s\\n\", strerror(errno)); return -1; } assert (buf.index < s->buffers); assert(buf.bytesused == s->frame_size); /* Image is at s->buff_start[buf.index] */ memcpy(frame, s->buf_start[buf.index], buf.bytesused); *ts = buf.timestamp.tv_sec * int64_t_C(1000000) + buf.timestamp.tv_usec; res = ioctl (s->fd, VIDIOC_QBUF, &buf); if (res < 0) { av_log(NULL, AV_LOG_ERROR, \"ioctl(VIDIOC_QBUF)\\n\"); return -1; } return s->buf_len[buf.index]; }"} {"target": 0, "idx": 2503, "func": "static void avc_luma_midh_qrt_4w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height, uint8_t horiz_offset) { uint32_t row; v16i8 src0, src1, src2, src3, src4, src5, src6; v8i16 vt_res0, vt_res1, vt_res2, vt_res3; v4i32 hz_res0, hz_res1; v8i16 dst0, dst1; v8i16 shf_vec0, shf_vec1, shf_vec2, shf_vec3, shf_vec4, shf_vec5; v8i16 mask0 = { 0, 5, 1, 6, 2, 7, 3, 8 }; v8i16 mask1 = { 1, 4, 2, 5, 3, 6, 4, 7 }; v8i16 mask2 = { 2, 3, 3, 4, 4, 5, 5, 6 }; v8i16 minus5h = __msa_ldi_h(-5); v8i16 plus20h = __msa_ldi_h(20); v8i16 zeros = { 0 }; v16u8 out; LD_SB5(src, src_stride, src0, src1, src2, src3, src4); src += (5 * src_stride); XORI_B5_128_SB(src0, src1, src2, src3, src4); for (row = (height >> 1); row--;) { LD_SB2(src, src_stride, src5, src6); src += (2 * src_stride); XORI_B2_128_SB(src5, src6); AVC_CALC_DPADD_B_6PIX_2COEFF_SH(src0, src1, src2, src3, src4, src5, vt_res0, vt_res1); AVC_CALC_DPADD_B_6PIX_2COEFF_SH(src1, src2, src3, src4, src5, src6, vt_res2, vt_res3); VSHF_H3_SH(vt_res0, vt_res1, vt_res0, vt_res1, vt_res0, vt_res1, mask0, mask1, mask2, shf_vec0, shf_vec1, shf_vec2); VSHF_H3_SH(vt_res2, vt_res3, vt_res2, vt_res3, vt_res2, vt_res3, mask0, mask1, mask2, shf_vec3, shf_vec4, shf_vec5); hz_res0 = __msa_hadd_s_w(shf_vec0, shf_vec0); DPADD_SH2_SW(shf_vec1, shf_vec2, minus5h, plus20h, hz_res0, hz_res0); hz_res1 = __msa_hadd_s_w(shf_vec3, shf_vec3); DPADD_SH2_SW(shf_vec4, shf_vec5, minus5h, plus20h, hz_res1, hz_res1); SRARI_W2_SW(hz_res0, hz_res1, 10); SAT_SW2_SW(hz_res0, hz_res1, 7); dst0 = __msa_srari_h(shf_vec2, 5); dst1 = __msa_srari_h(shf_vec5, 5); SAT_SH2_SH(dst0, dst1, 7); if (horiz_offset) { dst0 = __msa_ilvod_h(zeros, dst0); dst1 = __msa_ilvod_h(zeros, dst1); } else { ILVEV_H2_SH(dst0, zeros, dst1, zeros, dst0, dst1); } hz_res0 = __msa_aver_s_w(hz_res0, (v4i32) dst0); hz_res1 = __msa_aver_s_w(hz_res1, (v4i32) dst1); dst0 = __msa_pckev_h((v8i16) hz_res1, (v8i16) hz_res0); out = PCKEV_XORI128_UB(dst0, dst0); ST4x2_UB(out, dst, dst_stride); dst += (2 * dst_stride); src0 = src2; src1 = src3; src2 = src4; src3 = src5; src4 = src6; } }"} {"target": 0, "idx": 2507, "func": "static void e1000_io_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { E1000State *s = opaque; (void)s; }"} {"target": 0, "idx": 2508, "func": "static void omap_clkm_init(MemoryRegion *memory, target_phys_addr_t mpu_base, target_phys_addr_t dsp_base, struct omap_mpu_state_s *s) { memory_region_init_io(&s->clkm_iomem, &omap_clkm_ops, s, \"omap-clkm\", 0x100); memory_region_init_io(&s->clkdsp_iomem, &omap_clkdsp_ops, s, \"omap-clkdsp\", 0x1000); s->clkm.arm_idlect1 = 0x03ff; s->clkm.arm_idlect2 = 0x0100; s->clkm.dsp_idlect1 = 0x0002; omap_clkm_reset(s); s->clkm.cold_start = 0x3a; memory_region_add_subregion(memory, mpu_base, &s->clkm_iomem); memory_region_add_subregion(memory, dsp_base, &s->clkdsp_iomem); }"} {"target": 0, 
"idx": 2521, "func": "static uint64_t bonito_pciconf_readl(void *opaque, target_phys_addr_t addr, unsigned size) { PCIBonitoState *s = opaque; PCIDevice *d = PCI_DEVICE(s); DPRINTF(\"bonito_pciconf_readl \"TARGET_FMT_plx\"\\n\", addr); return d->config_read(d, addr, 4); }"} {"target": 1, "idx": 2529, "func": "static void vdi_close(BlockDriverState *bs) { BDRVVdiState *s = bs->opaque; g_free(s->bmap); migrate_del_blocker(s->migration_blocker); error_free(s->migration_blocker); }"} {"target": 0, "idx": 2548, "func": "static int handle_instruction(S390CPU *cpu, struct kvm_run *run) { unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00); uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff; int r = -1; DPRINTF(\"handle_instruction 0x%x 0x%x\\n\", run->s390_sieic.ipa, run->s390_sieic.ipb); switch (ipa0) { case IPA0_B2: r = handle_b2(cpu, run, ipa1); break; case IPA0_B9: r = handle_b9(cpu, run, ipa1); break; case IPA0_EB: r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff); break; case IPA0_E3: r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff); break; case IPA0_DIAG: r = handle_diag(cpu, run, run->s390_sieic.ipb); break; case IPA0_SIGP: r = handle_sigp(cpu, ipa1, run->s390_sieic.ipb); break; } if (r < 0) { r = 0; kvm_s390_program_interrupt(cpu, PGM_OPERATION); } return r; }"} {"target": 0, "idx": 2559, "func": "static int v9fs_synth_fstat(FsContext *fs_ctx, int fid_type, V9fsFidOpenState *fs, struct stat *stbuf) { V9fsSynthOpenState *synth_open = fs->private; v9fs_synth_fill_statbuf(synth_open->node, stbuf); return 0; }"} {"target": 0, "idx": 2560, "func": "static uint32_t virtio_balloon_get_features(VirtIODevice *vdev) { return 0; }"} {"target": 0, "idx": 2561, "func": "BlockDriverAIOCB *bdrv_aio_write(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { BlockDriver *drv = bs->drv; BlockDriverAIOCB *ret; if (!drv) return NULL; if (bs->read_only) return NULL; if (sector_num == 0 && bs->boot_sector_enabled && nb_sectors > 0) { memcpy(bs->boot_sector_data, buf, 512); } ret = drv->bdrv_aio_write(bs, sector_num, buf, nb_sectors, cb, opaque); if (ret) { /* Update stats even though technically transfer has not happened. 
*/ bs->wr_bytes += (unsigned) nb_sectors * SECTOR_SIZE; bs->wr_ops ++; } return ret; }"} {"target": 0, "idx": 2582, "func": "static uint64_t icp_pic_read(void *opaque, target_phys_addr_t offset, unsigned size) { icp_pic_state *s = (icp_pic_state *)opaque; switch (offset >> 2) { case 0: /* IRQ_STATUS */ return s->level & s->irq_enabled; case 1: /* IRQ_RAWSTAT */ return s->level; case 2: /* IRQ_ENABLESET */ return s->irq_enabled; case 4: /* INT_SOFTSET */ return s->level & 1; case 8: /* FRQ_STATUS */ return s->level & s->fiq_enabled; case 9: /* FRQ_RAWSTAT */ return s->level; case 10: /* FRQ_ENABLESET */ return s->fiq_enabled; case 3: /* IRQ_ENABLECLR */ case 5: /* INT_SOFTCLR */ case 11: /* FRQ_ENABLECLR */ default: printf (\"icp_pic_read: Bad register offset 0x%x\\n\", (int)offset); return 0; } }"} {"target": 0, "idx": 2584, "func": "static int vmsvga_post_load(void *opaque, int version_id) { struct vmsvga_state_s *s = opaque; s->invalidated = 1; if (s->config) s->fifo = (uint32_t *) s->fifo_ptr; return 0; }"} {"target": 0, "idx": 2596, "func": "static inline void check_alignment(CPUS390XState *env, uint64_t v, int wordsize, uintptr_t ra) { if (v % wordsize) { CPUState *cs = CPU(s390_env_get_cpu(env)); cpu_restore_state(cs, ra); program_interrupt(env, PGM_SPECIFICATION, 6); } }"} {"target": 0, "idx": 2609, "func": "static void print_codec(const AVCodec *c) { int encoder = av_codec_is_encoder(c); printf(\"%s %s [%s]:\\n\", encoder ? \"Encoder\" : \"Decoder\", c->name, c->long_name ? c->long_name : \"\"); printf(\" General capabilities: \"); if (c->capabilities & AV_CODEC_CAP_DRAW_HORIZ_BAND) printf(\"horizband \"); if (c->capabilities & AV_CODEC_CAP_DR1) printf(\"dr1 \"); if (c->capabilities & AV_CODEC_CAP_TRUNCATED) printf(\"trunc \"); if (c->capabilities & AV_CODEC_CAP_DELAY) printf(\"delay \"); if (c->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) printf(\"small \"); if (c->capabilities & AV_CODEC_CAP_SUBFRAMES) printf(\"subframes \"); if (c->capabilities & AV_CODEC_CAP_EXPERIMENTAL) printf(\"exp \"); if (c->capabilities & AV_CODEC_CAP_CHANNEL_CONF) printf(\"chconf \"); if (c->capabilities & AV_CODEC_CAP_PARAM_CHANGE) printf(\"small \"); if (c->capabilities & AV_CODEC_CAP_PARAM_CHANGE) printf(\"variable \"); if (c->capabilities & (AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_AUTO_THREADS)) printf(\"threads \"); if (!c->capabilities) printf(\"none\"); printf(\"\\n\"); if (c->type == AVMEDIA_TYPE_VIDEO) { printf(\" Threading capabilities: \"); switch (c->capabilities & (AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_AUTO_THREADS)) { case AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS: printf(\"frame and slice\"); break; case AV_CODEC_CAP_FRAME_THREADS: printf(\"frame\"); break; case AV_CODEC_CAP_SLICE_THREADS: printf(\"slice\"); break; case AV_CODEC_CAP_AUTO_THREADS : printf(\"auto\"); break; default: printf(\"none\"); break; } printf(\"\\n\"); } if (c->supported_framerates) { const AVRational *fps = c->supported_framerates; printf(\" Supported framerates:\"); while (fps->num) { printf(\" %d/%d\", fps->num, fps->den); fps++; } printf(\"\\n\"); } PRINT_CODEC_SUPPORTED(c, pix_fmts, enum AVPixelFormat, \"pixel formats\", AV_PIX_FMT_NONE, GET_PIX_FMT_NAME); PRINT_CODEC_SUPPORTED(c, supported_samplerates, int, \"sample rates\", 0, GET_SAMPLE_RATE_NAME); PRINT_CODEC_SUPPORTED(c, sample_fmts, enum AVSampleFormat, \"sample formats\", AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME); PRINT_CODEC_SUPPORTED(c, channel_layouts, uint64_t, \"channel 
layouts\", 0, GET_CH_LAYOUT_DESC); if (c->priv_class) { show_help_children(c->priv_class, AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_DECODING_PARAM); } }"} {"target": 1, "idx": 2651, "func": "static int mm_start_timer(struct qemu_alarm_timer *t) { TIMECAPS tc; memset(&tc, 0, sizeof(tc)); timeGetDevCaps(&tc, sizeof(tc)); mm_period = tc.wPeriodMin; timeBeginPeriod(mm_period); mm_timer = timeSetEvent(1, /* interval (ms) */ mm_period, /* resolution */ mm_alarm_handler, /* function */ (DWORD_PTR)t, /* parameter */ TIME_ONESHOT | TIME_CALLBACK_FUNCTION); if (!mm_timer) { fprintf(stderr, \"Failed to initialize win32 alarm timer: %ld\\n\", GetLastError()); timeEndPeriod(mm_period); return -1; } return 0; }"} {"target": 1, "idx": 2669, "func": "static void test_validate_fail_union_native_list(TestInputVisitorData *data, const void *unused) { UserDefNativeListUnion *tmp = NULL; Error *err = NULL; Visitor *v; v = validate_test_init(data, \"{ 'type': 'integer', 'data' : [ 'string' ] }\"); visit_type_UserDefNativeListUnion(v, &tmp, NULL, &err); g_assert(err); qapi_free_UserDefNativeListUnion(tmp); }"} {"target": 1, "idx": 2671, "func": "static int init_processing_chain(AVFilterContext *ctx, int in_width, int in_height, int out_width, int out_height) { NPPScaleContext *s = ctx->priv; AVHWFramesContext *in_frames_ctx; enum AVPixelFormat in_format; enum AVPixelFormat out_format; enum AVPixelFormat in_deinterleaved_format; enum AVPixelFormat out_deinterleaved_format; int i, ret, last_stage = -1; /* check that we have a hw context */ if (!ctx->inputs[0]->hw_frames_ctx) { av_log(ctx, AV_LOG_ERROR, \"No hw context provided on input\\n\"); return AVERROR(EINVAL); } in_frames_ctx = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx->data; in_format = in_frames_ctx->sw_format; out_format = (s->format == AV_PIX_FMT_NONE) ? 
in_format : s->format; if (!format_is_supported(in_format)) { av_log(ctx, AV_LOG_ERROR, \"Unsupported input format: %s\\n\", av_get_pix_fmt_name(in_format)); return AVERROR(ENOSYS); } if (!format_is_supported(out_format)) { av_log(ctx, AV_LOG_ERROR, \"Unsupported output format: %s\\n\", av_get_pix_fmt_name(out_format)); return AVERROR(ENOSYS); } in_deinterleaved_format = get_deinterleaved_format(in_format); out_deinterleaved_format = get_deinterleaved_format(out_format); if (in_deinterleaved_format == AV_PIX_FMT_NONE || out_deinterleaved_format == AV_PIX_FMT_NONE) return AVERROR_BUG; /* figure out which stages need to be done */ if (in_width != out_width || in_height != out_height || in_deinterleaved_format != out_deinterleaved_format) { s->stages[STAGE_RESIZE].stage_needed = 1; if (s->interp_algo == NPPI_INTER_SUPER && (out_width > in_width && out_height > in_height)) { s->interp_algo = NPPI_INTER_LANCZOS; av_log(ctx, AV_LOG_WARNING, \"super-sampling not supported for output dimensions, using lanczos instead.\\n\"); } if (s->interp_algo == NPPI_INTER_SUPER && !(out_width < in_width && out_height < in_height)) { s->interp_algo = NPPI_INTER_CUBIC; av_log(ctx, AV_LOG_WARNING, \"super-sampling not supported for output dimensions, using cubic instead.\\n\"); } } if (!s->stages[STAGE_RESIZE].stage_needed && in_format == out_format) s->passthrough = 1; if (!s->passthrough) { if (in_format != in_deinterleaved_format) s->stages[STAGE_DEINTERLEAVE].stage_needed = 1; if (out_format != out_deinterleaved_format) s->stages[STAGE_INTERLEAVE].stage_needed = 1; } s->stages[STAGE_DEINTERLEAVE].in_fmt = in_format; s->stages[STAGE_DEINTERLEAVE].out_fmt = in_deinterleaved_format; s->stages[STAGE_DEINTERLEAVE].planes_in[0].width = in_width; s->stages[STAGE_DEINTERLEAVE].planes_in[0].height = in_height; s->stages[STAGE_RESIZE].in_fmt = in_deinterleaved_format; s->stages[STAGE_RESIZE].out_fmt = out_deinterleaved_format; s->stages[STAGE_RESIZE].planes_in[0].width = in_width; s->stages[STAGE_RESIZE].planes_in[0].height = in_height; s->stages[STAGE_RESIZE].planes_out[0].width = out_width; s->stages[STAGE_RESIZE].planes_out[0].height = out_height; s->stages[STAGE_INTERLEAVE].in_fmt = out_deinterleaved_format; s->stages[STAGE_INTERLEAVE].out_fmt = out_format; s->stages[STAGE_INTERLEAVE].planes_in[0].width = out_width; s->stages[STAGE_INTERLEAVE].planes_in[0].height = out_height; /* init the hardware contexts */ for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) { if (!s->stages[i].stage_needed) continue; ret = init_stage(&s->stages[i], in_frames_ctx->device_ref); if (ret < 0) return ret; last_stage = i; } if (last_stage < 0) { ctx->outputs[0]->hw_frames_ctx = av_buffer_ref(ctx->inputs[0]->hw_frames_ctx); return 0; } ctx->outputs[0]->hw_frames_ctx = av_buffer_ref(s->stages[last_stage].frames_ctx); if (!ctx->outputs[0]->hw_frames_ctx) return AVERROR(ENOMEM); return 0; }"} {"target": 1, "idx": 2676, "func": "static int read_uncompressed_sgi(unsigned char* out_buf, uint8_t* out_end, const uint8_t *in_buf, const uint8_t *in_end, SgiState* s) { int x, y, z; const uint8_t *ptr; unsigned int offset = s->height * s->width * s->bytes_per_channel; /* Test buffer size. 
*/ if (offset * s->depth > in_end - in_buf) { return -1; } for (y = s->height - 1; y >= 0; y--) { out_end = out_buf + (y * s->linesize); for (x = s->width; x > 0; x--) { ptr = in_buf += s->bytes_per_channel; for(z = 0; z < s->depth; z ++) { memcpy(out_end, ptr, s->bytes_per_channel); out_end += s->bytes_per_channel; ptr += offset; } } } return 0; }"} {"target": 1, "idx": 2681, "func": "int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt) { MOVMuxContext *mov = s->priv_data; AVIOContext *pb = s->pb; MOVTrack *trk = &mov->tracks[pkt->stream_index]; AVCodecContext *enc = trk->enc; unsigned int samples_in_chunk = 0; int size = pkt->size; uint8_t *reformatted_data = NULL; if (mov->flags & FF_MOV_FLAG_FRAGMENT) { int ret; if (mov->fragments > 0) { if (!trk->mdat_buf) { if ((ret = avio_open_dyn_buf(&trk->mdat_buf)) < 0) return ret; } pb = trk->mdat_buf; } else { if (!mov->mdat_buf) { if ((ret = avio_open_dyn_buf(&mov->mdat_buf)) < 0) return ret; } pb = mov->mdat_buf; } } if (enc->codec_id == AV_CODEC_ID_AMR_NB) { /* We must find out how many AMR blocks there are in one packet */ static uint16_t packed_size[16] = {13, 14, 16, 18, 20, 21, 27, 32, 6, 0, 0, 0, 0, 0, 0, 1}; int len = 0; while (len < size && samples_in_chunk < 100) { len += packed_size[(pkt->data[len] >> 3) & 0x0F]; samples_in_chunk++; } if (samples_in_chunk > 1) { av_log(s, AV_LOG_ERROR, \"fatal error, input is not a single packet, implement a AVParser for it\\n\"); return -1; } } else if (trk->sample_size) samples_in_chunk = size / trk->sample_size; else samples_in_chunk = 1; /* copy extradata if it exists */ if (trk->vos_len == 0 && enc->extradata_size > 0) { trk->vos_len = enc->extradata_size; trk->vos_data = av_malloc(trk->vos_len); memcpy(trk->vos_data, enc->extradata, trk->vos_len); } if (enc->codec_id == AV_CODEC_ID_H264 && trk->vos_len > 0 && *(uint8_t *)trk->vos_data != 1) { /* from x264 or from bytestream h264 */ /* nal reformating needed */ if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) { ff_avc_parse_nal_units_buf(pkt->data, &reformatted_data, &size); avio_write(pb, reformatted_data, size); } else { size = ff_avc_parse_nal_units(pb, pkt->data, pkt->size); } } else if (enc->codec_id == AV_CODEC_ID_HEVC && trk->vos_len > 6 && (AV_RB24(trk->vos_data) == 1 || AV_RB32(trk->vos_data) == 1)) { /* extradata is Annex B, assume the bitstream is too and convert it */ if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) { ff_hevc_annexb2mp4_buf(pkt->data, &reformatted_data, &size, 0, NULL); avio_write(pb, reformatted_data, size); } else { size = ff_hevc_annexb2mp4(pb, pkt->data, pkt->size, 0, NULL); } } else { avio_write(pb, pkt->data, size); } if ((enc->codec_id == AV_CODEC_ID_DNXHD || enc->codec_id == AV_CODEC_ID_AC3) && !trk->vos_len) { /* copy frame to create needed atoms */ trk->vos_len = size; trk->vos_data = av_malloc(size); if (!trk->vos_data) return AVERROR(ENOMEM); memcpy(trk->vos_data, pkt->data, size); } if (trk->entry >= trk->cluster_capacity) { unsigned new_capacity = 2 * (trk->entry + MOV_INDEX_CLUSTER_SIZE); if (av_reallocp_array(&trk->cluster, new_capacity, sizeof(*trk->cluster))) return AVERROR(ENOMEM); trk->cluster_capacity = new_capacity; } trk->cluster[trk->entry].pos = avio_tell(pb) - size; trk->cluster[trk->entry].samples_in_chunk = samples_in_chunk; trk->cluster[trk->entry].size = size; trk->cluster[trk->entry].entries = samples_in_chunk; trk->cluster[trk->entry].dts = pkt->dts; if (!trk->entry && trk->start_dts != AV_NOPTS_VALUE) { /* First packet of a new fragment. 
We already wrote the duration * of the last packet of the previous fragment based on track_duration, * which might not exactly match our dts. Therefore adjust the dts * of this packet to be what the previous packets duration implies. */ trk->cluster[trk->entry].dts = trk->start_dts + trk->track_duration; } if (!trk->entry && trk->start_dts == AV_NOPTS_VALUE && !mov->use_editlist && s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) { /* Not using edit lists and shifting the first track to start from zero. * If the other streams start from a later timestamp, we won't be able * to signal the difference in starting time without an edit list. * Thus move the timestamp for this first sample to 0, increasing * its duration instead. */ trk->cluster[trk->entry].dts = trk->start_dts = 0; } if (trk->start_dts == AV_NOPTS_VALUE) { trk->start_dts = pkt->dts; if (pkt->dts && mov->flags & FF_MOV_FLAG_EMPTY_MOOV) av_log(s, AV_LOG_WARNING, \"Track %d starts with a nonzero dts %\"PRId64\". This \" \"currently isn't handled correctly in combination with \" \"empty_moov.\\n\", pkt->stream_index, pkt->dts); } trk->track_duration = pkt->dts - trk->start_dts + pkt->duration; if (pkt->pts == AV_NOPTS_VALUE) { av_log(s, AV_LOG_WARNING, \"pts has no value\\n\"); pkt->pts = pkt->dts; } if (pkt->dts != pkt->pts) trk->flags |= MOV_TRACK_CTTS; trk->cluster[trk->entry].cts = pkt->pts - pkt->dts; trk->cluster[trk->entry].flags = 0; if (enc->codec_id == AV_CODEC_ID_VC1) { mov_parse_vc1_frame(pkt, trk, mov->fragments); } else if (pkt->flags & AV_PKT_FLAG_KEY) { if (mov->mode == MODE_MOV && enc->codec_id == AV_CODEC_ID_MPEG2VIDEO && trk->entry > 0) { // force sync sample for the first key frame mov_parse_mpeg2_frame(pkt, &trk->cluster[trk->entry].flags); if (trk->cluster[trk->entry].flags & MOV_PARTIAL_SYNC_SAMPLE) trk->flags |= MOV_TRACK_STPS; } else { trk->cluster[trk->entry].flags = MOV_SYNC_SAMPLE; } if (trk->cluster[trk->entry].flags & MOV_SYNC_SAMPLE) trk->has_keyframes++; } trk->entry++; trk->sample_count += samples_in_chunk; mov->mdat_size += size; if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) ff_mov_add_hinted_packet(s, pkt, trk->hint_track, trk->entry, reformatted_data, size); av_free(reformatted_data); return 0; }"} {"target": 1, "idx": 2685, "func": "static CURLState *curl_init_state(BDRVCURLState *s) { CURLState *state = NULL; int i, j; do { for (i=0; i<CURL_NUM_STATES; i++) { for (j=0; j<CURL_NUM_ACB; j++) if (s->states[i].acb[j]) continue; if (s->states[i].in_use) continue; state = &s->states[i]; state->in_use = 1; break; } if (!state) { g_usleep(100); curl_multi_do(s); } } while(!state); if (state->curl) goto has_curl; state->curl = curl_easy_init(); if (!state->curl) return NULL; curl_easy_setopt(state->curl, CURLOPT_URL, s->url); curl_easy_setopt(state->curl, CURLOPT_TIMEOUT, 5); curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, (void *)curl_read_cb); curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, (void *)state); curl_easy_setopt(state->curl, CURLOPT_PRIVATE, (void *)state); curl_easy_setopt(state->curl, CURLOPT_AUTOREFERER, 1); curl_easy_setopt(state->curl, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(state->curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(state->curl, CURLOPT_ERRORBUFFER, state->errmsg); curl_easy_setopt(state->curl, CURLOPT_FAILONERROR, 1); /* Restrict supported protocols to avoid security issues in the more * obscure protocols. For example, do not allow POP3/SMTP/IMAP see * CVE-2013-0249. 
*/ curl_easy_setopt(state->curl, CURLOPT_PROTOCOLS, PROTOCOLS); curl_easy_setopt(state->curl, CURLOPT_REDIR_PROTOCOLS, PROTOCOLS); #ifdef DEBUG_VERBOSE curl_easy_setopt(state->curl, CURLOPT_VERBOSE, 1); has_curl: state->s = s; return state; }"} {"target": 0, "idx": 2713, "func": "static int send_mono_rect(VncState *vs, int w, int h, uint32_t bg, uint32_t fg) { size_t bytes; int stream = 1; int level = tight_conf[vs->tight_compression].mono_zlib_level; bytes = ((w + 7) / 8) * h; vnc_write_u8(vs, (stream | VNC_TIGHT_EXPLICIT_FILTER) << 4); vnc_write_u8(vs, VNC_TIGHT_FILTER_PALETTE); vnc_write_u8(vs, 1); switch(vs->clientds.pf.bytes_per_pixel) { case 4: { uint32_t buf[2] = {bg, fg}; size_t ret = sizeof (buf); if (vs->tight_pixel24) { tight_pack24(vs, (unsigned char*)buf, 2, &ret); } vnc_write(vs, buf, ret); tight_encode_mono_rect32(vs->tight.buffer, w, h, bg, fg); break; } case 2: vnc_write(vs, &bg, 2); vnc_write(vs, &fg, 2); tight_encode_mono_rect16(vs->tight.buffer, w, h, bg, fg); break; default: vnc_write_u8(vs, bg); vnc_write_u8(vs, fg); tight_encode_mono_rect8(vs->tight.buffer, w, h, bg, fg); break; } vs->tight.offset = bytes; bytes = tight_compress_data(vs, stream, bytes, level, Z_DEFAULT_STRATEGY); return (bytes >= 0); }"} {"target": 0, "idx": 2753, "func": "static void do_video_out(AVFormatContext *s, OutputStream *ost, AVFrame *in_picture, int *frame_size) { int ret, format_video_sync; AVPacket pkt; AVCodecContext *enc = ost->enc_ctx; *frame_size = 0; format_video_sync = video_sync_method; if (format_video_sync == VSYNC_AUTO) format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR; if (format_video_sync != VSYNC_PASSTHROUGH && ost->frame_number && in_picture->pts != AV_NOPTS_VALUE && in_picture->pts < ost->sync_opts) { nb_frames_drop++; av_log(NULL, AV_LOG_WARNING, \"*** dropping frame %d from stream %d at ts %\"PRId64\"\\n\", ost->frame_number, ost->st->index, in_picture->pts); return; } if (in_picture->pts == AV_NOPTS_VALUE) in_picture->pts = ost->sync_opts; ost->sync_opts = in_picture->pts; if (!ost->frame_number) ost->first_pts = in_picture->pts; av_init_packet(&pkt); pkt.data = NULL; pkt.size = 0; if (ost->frame_number >= ost->max_frames) return; if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) && ost->top_field_first >= 0) in_picture->top_field_first = !!ost->top_field_first; in_picture->quality = enc->global_quality; in_picture->pict_type = 0; if (ost->forced_kf_index < ost->forced_kf_count && in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) { in_picture->pict_type = AV_PICTURE_TYPE_I; ost->forced_kf_index++; } ost->frames_encoded++; ret = avcodec_send_frame(enc, in_picture); if (ret < 0) goto error; /* * For video, there may be reordering, so we can't throw away frames on * encoder flush, we need to limit them here, before they go into encoder. 
*/ ost->frame_number++; while (1) { ret = avcodec_receive_packet(enc, &pkt); if (ret == AVERROR(EAGAIN)) break; if (ret < 0) goto error; av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base); output_packet(s, &pkt, ost); *frame_size = pkt.size; /* if two pass, output log */ if (ost->logfile && enc->stats_out) { fprintf(ost->logfile, \"%s\", enc->stats_out); } ost->sync_opts++; } return; error: av_assert0(ret != AVERROR(EAGAIN) && ret != AVERROR_EOF); av_log(NULL, AV_LOG_FATAL, \"Video encoding failed\\n\"); exit_program(1); }"} {"target": 0, "idx": 2763, "func": "void qmp_screendump(const char *filename, Error **errp) { QemuConsole *previous_active_console; bool cswitch; previous_active_console = active_console; cswitch = previous_active_console && previous_active_console->index != 0; /* There is currently no way of specifying which screen we want to dump, so always dump the first one. */ if (cswitch) { console_select(0); } if (consoles[0] && consoles[0]->hw_screen_dump) { consoles[0]->hw_screen_dump(consoles[0]->hw, filename, cswitch, errp); } else { error_setg(errp, \"device doesn't support screendump\"); } if (cswitch) { console_select(previous_active_console->index); } }"} {"target": 0, "idx": 2789, "func": "static int decode_frame_mp3on4(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MP3On4DecodeContext *s = avctx->priv_data; MPADecodeContext *m; int fsize, len = buf_size, out_size = 0; uint32_t header; OUT_INT *out_samples = data; OUT_INT *outptr, *bp; int fr, j, n; if(*data_size < MPA_FRAME_SIZE * MPA_MAX_CHANNELS * s->frames * sizeof(OUT_INT)) return -1; *data_size = 0; // Discard too short frames if (buf_size < HEADER_SIZE) return -1; // If only one decoder interleave is not needed outptr = s->frames == 1 ? 
out_samples : s->decoded_buf; avctx->bit_rate = 0; for (fr = 0; fr < s->frames; fr++) { fsize = AV_RB16(buf) >> 4; fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE); m = s->mp3decctx[fr]; assert (m != NULL); header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header if (ff_mpa_check_header(header) < 0) // Bad header, discard block break; avpriv_mpegaudio_decode_header((MPADecodeHeader *)m, header); out_size += mp_decode_frame(m, outptr, buf, fsize); buf += fsize; len -= fsize; if(s->frames > 1) { n = m->avctx->frame_size*m->nb_channels; /* interleave output data */ bp = out_samples + s->coff[fr]; if(m->nb_channels == 1) { for(j = 0; j < n; j++) { *bp = s->decoded_buf[j]; bp += avctx->channels; } } else { for(j = 0; j < n; j++) { bp[0] = s->decoded_buf[j++]; bp[1] = s->decoded_buf[j]; bp += avctx->channels; } } } avctx->bit_rate += m->bit_rate; } /* update codec info */ avctx->sample_rate = s->mp3decctx[0]->sample_rate; *data_size = out_size; return buf_size; }"} {"target": 0, "idx": 2795, "func": "DISAS_INSN(cas2l) { uint16_t ext1, ext2; TCGv addr1, addr2, regs; /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */ ext1 = read_im16(env, s); if (ext1 & 0x8000) { /* Address Register */ addr1 = AREG(ext1, 12); } else { /* Data Register */ addr1 = DREG(ext1, 12); } ext2 = read_im16(env, s); if (ext2 & 0x8000) { /* Address Register */ addr2 = AREG(ext2, 12); } else { /* Data Register */ addr2 = DREG(ext2, 12); } /* if (R1) == Dc1 && (R2) == Dc2 then * (R1) = Du1 * (R2) = Du2 * else * Dc1 = (R1) * Dc2 = (R2) */ regs = tcg_const_i32(REG(ext2, 6) | (REG(ext1, 6) << 3) | (REG(ext2, 0) << 6) | (REG(ext1, 0) << 9)); gen_helper_cas2l(cpu_env, regs, addr1, addr2); tcg_temp_free(regs); /* Note that cas2l also assigned to env->cc_op. */ s->cc_op = CC_OP_CMPL; s->cc_op_synced = 1; }"} {"target": 0, "idx": 2797, "func": "static void cirrus_bitblt_rop_nop(CirrusVGAState *s, uint8_t *dst,const uint8_t *src, int dstpitch,int srcpitch, int bltwidth,int bltheight) { }"} {"target": 0, "idx": 2800, "func": "static void page_flush_tb(void) { int i; for (i = 0; i < V_L1_SIZE; i++) { page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i); } }"} {"target": 0, "idx": 2805, "func": "static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc, tcg_insn_unit **label_ptr, int mem_index, bool is_read) { int tlb_offset = is_read ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write); int s_mask = (1 << (opc & MO_SIZE)) - 1; TCGReg base = TCG_AREG0, x3; uint64_t tlb_mask; /* For aligned accesses, we check the first byte and include the alignment bits within the address. For unaligned access, we check that we don't cross pages using the address of the last byte of the access. */ if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) { tlb_mask = TARGET_PAGE_MASK | s_mask; x3 = addr_reg; } else { tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64, TCG_REG_X3, addr_reg, s_mask); tlb_mask = TARGET_PAGE_MASK; x3 = TCG_REG_X3; } /* Extract the TLB index from the address into X0. X0 = addr_reg */ tcg_out_ubfm(s, TARGET_LONG_BITS == 64, TCG_REG_X0, addr_reg, TARGET_PAGE_BITS, TARGET_PAGE_BITS + CPU_TLB_BITS); /* Store the page mask part of the address into X3. */ tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, TCG_REG_X3, x3, tlb_mask); /* Add any \"high bits\" from the tlb offset to the env address into X2, to take advantage of the LSL12 form of the ADDI instruction. 
X2 = env + (tlb_offset & 0xfff000) */ if (tlb_offset & 0xfff000) { tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_X2, base, tlb_offset & 0xfff000); base = TCG_REG_X2; } /* Merge the tlb index contribution into X2. X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */ tcg_out_insn(s, 3502S, ADD_LSL, TCG_TYPE_I64, TCG_REG_X2, base, TCG_REG_X0, CPU_TLB_ENTRY_BITS); /* Merge \"low bits\" from tlb offset, load the tlb comparator into X0. X0 = load [X2 + (tlb_offset & 0x000fff)] */ tcg_out_ldst(s, TARGET_LONG_BITS == 32 ? I3312_LDRW : I3312_LDRX, TCG_REG_X0, TCG_REG_X2, tlb_offset & 0xfff); /* Load the tlb addend. Do that early to avoid stalling. X1 = load [X2 + (tlb_offset & 0xfff) + offsetof(addend)] */ tcg_out_ldst(s, I3312_LDRX, TCG_REG_X1, TCG_REG_X2, (tlb_offset & 0xfff) + (offsetof(CPUTLBEntry, addend)) - (is_read ? offsetof(CPUTLBEntry, addr_read) : offsetof(CPUTLBEntry, addr_write))); /* Perform the address comparison. */ tcg_out_cmp(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, TCG_REG_X3, 0); /* If not equal, we jump to the slow path. */ *label_ptr = s->code_ptr; tcg_out_goto_cond_noaddr(s, TCG_COND_NE); }"} {"target": 0, "idx": 2808, "func": "static int usb_host_scan_sys(void *opaque, USBScanFunc *func) { DIR *dir = 0; char line[1024]; int bus_num, addr, speed, class_id, product_id, vendor_id; int ret = 0; char product_name[512]; struct dirent *de; dir = opendir(USBSYSBUS_PATH \"/devices\"); if (!dir) { perror(\"husb: cannot open devices directory\"); goto the_end; } while ((de = readdir(dir))) { if (de->d_name[0] != '.' && !strchr(de->d_name, ':')) { char *tmpstr = de->d_name; if (!strncmp(de->d_name, \"usb\", 3)) tmpstr += 3; bus_num = atoi(tmpstr); if (!usb_host_read_file(line, sizeof(line), USBSYSBUS_PATH \"/devices/%s/devnum\", de->d_name)) goto the_end; if (sscanf(line, \"%d\", &addr) != 1) goto the_end; if (!usb_host_read_file(line, sizeof(line), USBSYSBUS_PATH \"/devices/%s/bDeviceClass\", de->d_name)) goto the_end; if (sscanf(line, \"%x\", &class_id) != 1) goto the_end; if (!usb_host_read_file(line, sizeof(line), USBSYSBUS_PATH \"/devices/%s/idVendor\", de->d_name)) goto the_end; if (sscanf(line, \"%x\", &vendor_id) != 1) goto the_end; if (!usb_host_read_file(line, sizeof(line), USBSYSBUS_PATH \"/devices/%s/idProduct\", de->d_name)) goto the_end; if (sscanf(line, \"%x\", &product_id) != 1) goto the_end; if (!usb_host_read_file(line, sizeof(line), USBSYSBUS_PATH \"/devices/%s/product\", de->d_name)) { *product_name = 0; } else { if (strlen(line) > 0) line[strlen(line) - 1] = '\\0'; pstrcpy(product_name, sizeof(product_name), line); } if (!usb_host_read_file(line, sizeof(line), USBSYSBUS_PATH \"/devices/%s/speed\", de->d_name)) goto the_end; if (!strcmp(line, \"480\\n\")) speed = USB_SPEED_HIGH; else if (!strcmp(line, \"1.5\\n\")) speed = USB_SPEED_LOW; else speed = USB_SPEED_FULL; ret = func(opaque, bus_num, addr, class_id, vendor_id, product_id, product_name, speed); if (ret) goto the_end; } } the_end: if (dir) closedir(dir); return ret; }"} {"target": 0, "idx": 2811, "func": "int svm_check_intercept_param(uint32_t type, uint64_t param) { switch(type) { case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8: if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) { vmexit(type, param); return 1; } break; case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8: if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) { vmexit(type, param); return 1; } break; case SVM_EXIT_WRITE_CR0 ... 
SVM_EXIT_WRITE_CR0 + 8: if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) { vmexit(type, param); return 1; } break; case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8: if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) { vmexit(type, param); return 1; } break; case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16: if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) { vmexit(type, param); return 1; } break; case SVM_EXIT_IOIO: if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) { /* FIXME: this should be read in at vmrun (faster this way?) */ uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa)); uint16_t port = (uint16_t) (param >> 16); if(ldub_phys(addr + port / 8) & (1 << (port % 8))) vmexit(type, param); } break; case SVM_EXIT_MSR: if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) { /* FIXME: this should be read in at vmrun (faster this way?) */ uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa)); switch((uint32_t)ECX) { case 0 ... 0x1fff: T0 = (ECX * 2) % 8; T1 = ECX / 8; break; case 0xc0000000 ... 0xc0001fff: T0 = (8192 + ECX - 0xc0000000) * 2; T1 = (T0 / 8); T0 %= 8; break; case 0xc0010000 ... 0xc0011fff: T0 = (16384 + ECX - 0xc0010000) * 2; T1 = (T0 / 8); T0 %= 8; break; default: vmexit(type, param); return 1; } if (ldub_phys(addr + T1) & ((1 << param) << T0)) vmexit(type, param); return 1; } break; default: if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) { vmexit(type, param); return 1; } break; } return 0; }"} {"target": 0, "idx": 2815, "func": "int kvm_arch_get_registers(CPUState *env) { struct kvm_regs regs; struct kvm_sregs sregs; int i, ret; ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, ®s); if (ret < 0) return ret; ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs); if (ret < 0) return ret; env->ctr = regs.ctr; env->lr = regs.lr; env->xer = regs.xer; env->msr = regs.msr; env->nip = regs.pc; env->spr[SPR_SRR0] = regs.srr0; env->spr[SPR_SRR1] = regs.srr1; env->spr[SPR_SPRG0] = regs.sprg0; env->spr[SPR_SPRG1] = regs.sprg1; env->spr[SPR_SPRG2] = regs.sprg2; env->spr[SPR_SPRG3] = regs.sprg3; env->spr[SPR_SPRG4] = regs.sprg4; env->spr[SPR_SPRG5] = regs.sprg5; env->spr[SPR_SPRG6] = regs.sprg6; env->spr[SPR_SPRG7] = regs.sprg7; for (i = 0;i < 32; i++) env->gpr[i] = regs.gpr[i]; #ifdef KVM_CAP_PPC_SEGSTATE if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_SEGSTATE)) { env->sdr1 = sregs.u.s.sdr1; /* Sync SLB */ #ifdef TARGET_PPC64 for (i = 0; i < 64; i++) { ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe, sregs.u.s.ppc64.slb[i].slbv); } #endif /* Sync SRs */ for (i = 0; i < 16; i++) { env->sr[i] = sregs.u.s.ppc32.sr[i]; } /* Sync BATs */ for (i = 0; i < 8; i++) { env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff; env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32; env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff; env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32; } } #endif return 0; }"} {"target": 1, "idx": 2844, "func": "void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) { /* * This access is protected under the mutex. */ cond->waiters++; /* * Unlock external mutex and wait for signal. * NOTE: we've held mutex locked long enough to increment * waiters count above, so there's no problem with * leaving mutex unlocked before we wait on semaphore. */ qemu_mutex_unlock(mutex); WaitForSingleObject(cond->sema, INFINITE); /* Now waiters must rendez-vous with the signaling thread and * let it continue. 
For cond_broadcast this has heavy contention * and triggers thundering herd. So goes life. * * Decrease waiters count. The mutex is not taken, so we have * to do this atomically. * * All waiters contend for the mutex at the end of this function * until the signaling thread relinquishes it. To ensure * each waiter consumes exactly one slice of the semaphore, * the signaling thread stops until it is told by the last * waiter that it can go on. */ if (InterlockedDecrement(&cond->waiters) == cond->target) { SetEvent(cond->continue_event); } qemu_mutex_lock(mutex); }"} {"target": 1, "idx": 2862, "func": "static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s) { static void (* const wsr_handler[256])(DisasContext *dc, uint32_t sr, TCGv_i32 v) = { [LBEG] = gen_wsr_lbeg, [LEND] = gen_wsr_lend, [SAR] = gen_wsr_sar, [BR] = gen_wsr_br, [LITBASE] = gen_wsr_litbase, [ACCHI] = gen_wsr_acchi, [WINDOW_BASE] = gen_wsr_windowbase, [WINDOW_START] = gen_wsr_windowstart, [PTEVADDR] = gen_wsr_ptevaddr, [RASID] = gen_wsr_rasid, [ITLBCFG] = gen_wsr_tlbcfg, [DTLBCFG] = gen_wsr_tlbcfg, [IBREAKENABLE] = gen_wsr_ibreakenable, [ATOMCTL] = gen_wsr_atomctl, [IBREAKA] = gen_wsr_ibreaka, [IBREAKA + 1] = gen_wsr_ibreaka, [DBREAKA] = gen_wsr_dbreaka, [DBREAKA + 1] = gen_wsr_dbreaka, [DBREAKC] = gen_wsr_dbreakc, [DBREAKC + 1] = gen_wsr_dbreakc, [CPENABLE] = gen_wsr_cpenable, [INTSET] = gen_wsr_intset, [INTCLEAR] = gen_wsr_intclear, [INTENABLE] = gen_wsr_intenable, [PS] = gen_wsr_ps, [DEBUGCAUSE] = gen_wsr_debugcause, [PRID] = gen_wsr_prid, [ICOUNT] = gen_wsr_icount, [ICOUNTLEVEL] = gen_wsr_icountlevel, [CCOMPARE] = gen_wsr_ccompare, [CCOMPARE + 1] = gen_wsr_ccompare, [CCOMPARE + 2] = gen_wsr_ccompare, }; if (sregnames[sr]) { if (wsr_handler[sr]) { wsr_handler[sr](dc, sr, s); } else { tcg_gen_mov_i32(cpu_SR[sr], s); } } else { qemu_log(\"WSR %d not implemented, \", sr); } }"} {"target": 0, "idx": 2871, "func": "static int connect_namedsocket(const char *path) { int sockfd, size; struct sockaddr_un helper; if (strlen(path) >= sizeof(helper.sun_path)) { fprintf(stderr, \"Socket name too large\\n\"); return -1; } sockfd = socket(AF_UNIX, SOCK_STREAM, 0); if (sockfd < 0) { fprintf(stderr, \"failed to create socket: %s\\n\", strerror(errno)); return -1; } strcpy(helper.sun_path, path); helper.sun_family = AF_UNIX; size = strlen(helper.sun_path) + sizeof(helper.sun_family); if (connect(sockfd, (struct sockaddr *)&helper, size) < 0) { fprintf(stderr, \"failed to connect to %s: %s\\n\", path, strerror(errno)); close(sockfd); return -1; } /* remove the socket for security reasons */ unlink(path); return sockfd; }"} {"target": 0, "idx": 2892, "func": "static void guest_file_init(void) { QTAILQ_INIT(&guest_file_state.filehandles); }"} {"target": 0, "idx": 2920, "func": "static void pchip_write(void *opaque, target_phys_addr_t addr, uint64_t v32, unsigned size) { TyphoonState *s = opaque; uint64_t val, oldval; if (addr & 4) { val = v32 << 32 | s->latch_tmp; addr ^= 4; } else { s->latch_tmp = v32; return; } switch (addr) { case 0x0000: /* WSBA0: Window Space Base Address Register. */ s->pchip.win[0].base_addr = val; break; case 0x0040: /* WSBA1 */ s->pchip.win[1].base_addr = val; break; case 0x0080: /* WSBA2 */ s->pchip.win[2].base_addr = val; break; case 0x00c0: /* WSBA3 */ s->pchip.win[3].base_addr = val; break; case 0x0100: /* WSM0: Window Space Mask Register. 
*/ s->pchip.win[0].mask = val; break; case 0x0140: /* WSM1 */ s->pchip.win[1].mask = val; break; case 0x0180: /* WSM2 */ s->pchip.win[2].mask = val; break; case 0x01c0: /* WSM3 */ s->pchip.win[3].mask = val; break; case 0x0200: /* TBA0: Translated Base Address Register. */ s->pchip.win[0].translated_base_pfn = val >> 10; break; case 0x0240: /* TBA1 */ s->pchip.win[1].translated_base_pfn = val >> 10; break; case 0x0280: /* TBA2 */ s->pchip.win[2].translated_base_pfn = val >> 10; break; case 0x02c0: /* TBA3 */ s->pchip.win[3].translated_base_pfn = val >> 10; break; case 0x0300: /* PCTL: Pchip Control Register. */ oldval = s->pchip.ctl; oldval &= ~0x00001cff0fc7ffull; /* RW fields */ oldval |= val & 0x00001cff0fc7ffull; s->pchip.ctl = oldval; break; case 0x0340: /* PLAT: Pchip Master Latency Register. */ break; case 0x03c0: /* PERROR: Pchip Error Register. */ break; case 0x0400: /* PERRMASK: Pchip Error Mask Register. */ break; case 0x0440: /* PERRSET: Pchip Error Set Register. */ break; case 0x0480: /* TLBIV: Translation Buffer Invalidate Virtual Register. */ break; case 0x04c0: /* TLBIA: Translation Buffer Invalidate All Register (WO). */ break; case 0x0500: /* PMONCTL */ case 0x0540: /* PMONCNT */ case 0x0800: /* SPRST */ break; default: cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size); return; } }"} {"target": 1, "idx": 2936, "func": "static int usb_host_handle_data(USBDevice *dev, USBPacket *p) { USBHostDevice *s = (USBHostDevice *)dev; int ret, fd, mode; int one = 1, shortpacket = 0, timeout = 50; sigset_t new_mask, old_mask; uint8_t devep = p->devep; /* protect data transfers from SIGALRM signal */ sigemptyset(&new_mask); sigaddset(&new_mask, SIGALRM); sigprocmask(SIG_BLOCK, &new_mask, &old_mask); if (p->pid == USB_TOKEN_IN) { devep |= 0x80; mode = O_RDONLY; shortpacket = 1; } else { mode = O_WRONLY; } fd = ensure_ep_open(s, devep, mode); if (fd < 0) { sigprocmask(SIG_SETMASK, &old_mask, NULL); return USB_RET_NODEV; } if (ioctl(fd, USB_SET_TIMEOUT, &timeout) < 0) { #ifdef DEBUG printf(\"handle_data: failed to set timeout - %s\\n\", strerror(errno)); #endif } if (shortpacket) { if (ioctl(fd, USB_SET_SHORT_XFER, &one) < 0) { #ifdef DEBUG printf(\"handle_data: failed to set short xfer mode - %s\\n\", strerror(errno)); #endif sigprocmask(SIG_SETMASK, &old_mask, NULL); } } if (p->pid == USB_TOKEN_IN) ret = read(fd, p->data, p->len); else ret = write(fd, p->data, p->len); sigprocmask(SIG_SETMASK, &old_mask, NULL); if (ret < 0) { #ifdef DEBUG printf(\"handle_data: error after %s data - %s\\n\", pid == USB_TOKEN_IN ? 
\"reading\" : \"writing\", strerror(errno)); #endif switch(errno) { case ETIMEDOUT: case EINTR: return USB_RET_NAK; default: return USB_RET_STALL; } } else { return ret; } }"} {"target": 1, "idx": 2943, "func": "int ff_mpv_common_frame_size_change(MpegEncContext *s) { int i, err = 0; if (!s->context_initialized) return AVERROR(EINVAL); if (s->slice_context_count > 1) { for (i = 0; i < s->slice_context_count; i++) { free_duplicate_context(s->thread_context[i]); } for (i = 1; i < s->slice_context_count; i++) { av_freep(&s->thread_context[i]); } } else free_duplicate_context(s); free_context_frame(s); if (s->picture) for (i = 0; i < MAX_PICTURE_COUNT; i++) { s->picture[i].needs_realloc = 1; } s->last_picture_ptr = s->next_picture_ptr = s->current_picture_ptr = NULL; // init if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence) s->mb_height = (s->height + 31) / 32 * 2; else s->mb_height = (s->height + 15) / 16; if ((s->width || s->height) && (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0) goto fail; if ((err = init_context_frame(s))) goto fail; memset(s->thread_context, 0, sizeof(s->thread_context)); s->thread_context[0] = s; if (s->width && s->height) { int nb_slices = s->slice_context_count; if (nb_slices > 1) { for (i = 0; i < nb_slices; i++) { if (i) { s->thread_context[i] = av_malloc(sizeof(MpegEncContext)); memcpy(s->thread_context[i], s, sizeof(MpegEncContext)); } if ((err = init_duplicate_context(s->thread_context[i])) < 0) goto fail; s->thread_context[i]->start_mb_y = (s->mb_height * (i) + nb_slices / 2) / nb_slices; s->thread_context[i]->end_mb_y = (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices; } } else { err = init_duplicate_context(s); if (err < 0) goto fail; s->start_mb_y = 0; s->end_mb_y = s->mb_height; } s->slice_context_count = nb_slices; } return 0; fail: ff_mpv_common_end(s); return err; }"} {"target": 1, "idx": 2948, "func": "static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr) { VirtIODevice *vdev = proxy->vdev; uint32_t ret = 0xFFFFFFFF; switch (addr) { case VIRTIO_PCI_HOST_FEATURES: ret = vdev->get_features(vdev); ret |= (1 << VIRTIO_F_NOTIFY_ON_EMPTY); ret |= (1 << VIRTIO_RING_F_INDIRECT_DESC); ret |= (1 << VIRTIO_F_BAD_FEATURE); break; case VIRTIO_PCI_GUEST_FEATURES: ret = vdev->features; break; case VIRTIO_PCI_QUEUE_PFN: ret = virtio_queue_get_addr(vdev, vdev->queue_sel) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; break; case VIRTIO_PCI_QUEUE_NUM: ret = virtio_queue_get_num(vdev, vdev->queue_sel); break; case VIRTIO_PCI_QUEUE_SEL: ret = vdev->queue_sel; break; case VIRTIO_PCI_STATUS: ret = vdev->status; break; case VIRTIO_PCI_ISR: /* reading from the ISR also clears it. 
*/ ret = vdev->isr; vdev->isr = 0; qemu_set_irq(proxy->pci_dev.irq[0], 0); break; case VIRTIO_MSI_CONFIG_VECTOR: ret = vdev->config_vector; break; case VIRTIO_MSI_QUEUE_VECTOR: ret = virtio_queue_vector(vdev, vdev->queue_sel); break; default: break; } return ret; }"} {"target": 0, "idx": 2964, "func": "static int dvbsub_parse_display_definition_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size) { DVBSubContext *ctx = avctx->priv_data; DVBSubDisplayDefinition *display_def = ctx->display_definition; int dds_version, info_byte; if (buf_size < 5) return AVERROR_INVALIDDATA; info_byte = bytestream_get_byte(&buf); dds_version = info_byte >> 4; if (display_def && display_def->version == dds_version) return 0; // already have this display definition version if (!display_def) { display_def = av_mallocz(sizeof(*display_def)); if (!display_def) return AVERROR(ENOMEM); ctx->display_definition = display_def; } display_def->version = dds_version; display_def->x = 0; display_def->y = 0; display_def->width = bytestream_get_be16(&buf) + 1; display_def->height = bytestream_get_be16(&buf) + 1; if (!avctx->width || !avctx->height) { avctx->width = display_def->width; avctx->height = display_def->height; } if (buf_size < 13) return AVERROR_INVALIDDATA; if (info_byte & 1<<3) { // display_window_flag display_def->x = bytestream_get_be16(&buf); display_def->width = bytestream_get_be16(&buf) - display_def->x + 1; display_def->y = bytestream_get_be16(&buf); display_def->height = bytestream_get_be16(&buf) - display_def->y + 1; } return 0; }"} {"target": 1, "idx": 2965, "func": "static void gen_st (DisasContext *ctx, uint32_t opc, int rt, int base, int16_t offset) { TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); int mem_idx = ctx->mem_idx; gen_base_offset_addr(ctx, t0, base, offset); gen_load_gpr(t1, rt); switch (opc) { #if defined(TARGET_MIPS64) case OPC_SD: tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEQ | ctx->default_tcg_memop_mask); break; case OPC_SDL: gen_helper_0e2i(sdl, t1, t0, mem_idx); break; case OPC_SDR: gen_helper_0e2i(sdr, t1, t0, mem_idx); break; #endif case OPC_SW: tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUL | ctx->default_tcg_memop_mask); break; case OPC_SH: tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUW | ctx->default_tcg_memop_mask); break; case OPC_SB: tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_8); break; case OPC_SWL: gen_helper_0e2i(swl, t1, t0, mem_idx); break; case OPC_SWR: gen_helper_0e2i(swr, t1, t0, mem_idx); break; } tcg_temp_free(t0); tcg_temp_free(t1); }"} {"target": 1, "idx": 2973, "func": "static void *qpa_audio_init (void) { return &conf; }"} {"target": 0, "idx": 2978, "func": "void timer_deinit(QEMUTimer *ts) { assert(ts->expire_time == -1); ts->timer_list = NULL; }"} {"target": 0, "idx": 2981, "func": "int ide_init_drive(IDEState *s, BlockDriverState *bs, IDEDriveKind kind, const char *version, const char *serial, const char *model, uint64_t wwn) { int cylinders, heads, secs; uint64_t nb_sectors; s->bs = bs; s->drive_kind = kind; bdrv_get_geometry(bs, &nb_sectors); bdrv_guess_geometry(bs, &cylinders, &heads, &secs); if (cylinders < 1 || cylinders > 16383) { error_report(\"cyls must be between 1 and 16383\"); return -1; } if (heads < 1 || heads > 16) { error_report(\"heads must be between 1 and 16\"); return -1; } if (secs < 1 || secs > 63) { error_report(\"secs must be between 1 and 63\"); return -1; } s->cylinders = cylinders; s->heads = heads; s->sectors = secs; s->nb_sectors = nb_sectors; s->wwn = wwn; /* The SMART values should be preserved across power cycles but they 
aren't. */ s->smart_enabled = 1; s->smart_autosave = 1; s->smart_errors = 0; s->smart_selftest_count = 0; if (kind == IDE_CD) { bdrv_set_dev_ops(bs, &ide_cd_block_ops, s); bdrv_set_buffer_alignment(bs, 2048); } else { if (!bdrv_is_inserted(s->bs)) { error_report(\"Device needs media, but drive is empty\"); return -1; } if (bdrv_is_read_only(bs)) { error_report(\"Can't use a read-only drive\"); return -1; } } if (serial) { pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial); } else { snprintf(s->drive_serial_str, sizeof(s->drive_serial_str), \"QM%05d\", s->drive_serial); } if (model) { pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model); } else { switch (kind) { case IDE_CD: strcpy(s->drive_model_str, \"QEMU DVD-ROM\"); break; case IDE_CFATA: strcpy(s->drive_model_str, \"QEMU MICRODRIVE\"); break; default: strcpy(s->drive_model_str, \"QEMU HARDDISK\"); break; } } if (version) { pstrcpy(s->version, sizeof(s->version), version); } else { pstrcpy(s->version, sizeof(s->version), qemu_get_version()); } ide_reset(s); bdrv_iostatus_enable(bs); return 0; }"} {"target": 0, "idx": 2982, "func": "static QDict *qmp_check_input_obj(QObject *input_obj, Error **errp) { const QDictEntry *ent; int has_exec_key = 0; QDict *input_dict; if (qobject_type(input_obj) != QTYPE_QDICT) { error_setg(errp, QERR_QMP_BAD_INPUT_OBJECT, \"object\"); return NULL; } input_dict = qobject_to_qdict(input_obj); for (ent = qdict_first(input_dict); ent; ent = qdict_next(input_dict, ent)){ const char *arg_name = qdict_entry_key(ent); const QObject *arg_obj = qdict_entry_value(ent); if (!strcmp(arg_name, \"execute\")) { if (qobject_type(arg_obj) != QTYPE_QSTRING) { error_setg(errp, QERR_QMP_BAD_INPUT_OBJECT_MEMBER, \"execute\", \"string\"); return NULL; } has_exec_key = 1; } else if (!strcmp(arg_name, \"arguments\")) { if (qobject_type(arg_obj) != QTYPE_QDICT) { error_setg(errp, QERR_QMP_BAD_INPUT_OBJECT_MEMBER, \"arguments\", \"object\"); return NULL; } } else if (!strcmp(arg_name, \"id\")) { /* Any string is acceptable as \"id\", so nothing to check */ } else { error_setg(errp, QERR_QMP_EXTRA_MEMBER, arg_name); return NULL; } } if (!has_exec_key) { error_setg(errp, QERR_QMP_BAD_INPUT_OBJECT, \"execute\"); return NULL; } return input_dict; }"} {"target": 0, "idx": 2985, "func": "static void term_read(void *opaque, const uint8_t *buf, int size) { int i; for(i = 0; i < size; i++) term_handle_byte(buf[i]); }"} {"target": 0, "idx": 3013, "func": "static void test_validate_fail_struct_missing(TestInputVisitorData *data, const void *unused) { Error *err = NULL; Visitor *v; QObject *any; GenericAlternate *alt; bool present; int en; int64_t i64; uint32_t u32; int8_t i8; char *str; double dbl; v = validate_test_init(data, \"{}\"); visit_start_struct(v, NULL, NULL, 0, &error_abort); visit_start_struct(v, \"struct\", NULL, 0, &err); error_free_or_abort(&err); visit_start_list(v, \"list\", NULL, 0, &err); error_free_or_abort(&err); visit_start_alternate(v, \"alternate\", &alt, sizeof(*alt), false, &err); error_free_or_abort(&err); visit_optional(v, \"optional\", &present); g_assert(!present); visit_type_enum(v, \"enum\", &en, EnumOne_lookup, &err); error_free_or_abort(&err); visit_type_int(v, \"i64\", &i64, &err); error_free_or_abort(&err); visit_type_uint32(v, \"u32\", &u32, &err); error_free_or_abort(&err); visit_type_int8(v, \"i8\", &i8, &err); error_free_or_abort(&err); visit_type_str(v, \"i8\", &str, &err); error_free_or_abort(&err); visit_type_number(v, \"dbl\", &dbl, &err); error_free_or_abort(&err); 
visit_type_any(v, \"any\", &any, &err); error_free_or_abort(&err); visit_type_null(v, \"null\", &err); error_free_or_abort(&err); visit_end_struct(v, NULL); }"} {"target": 0, "idx": 3019, "func": "void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, QEMUSGList *sg, enum BlockAcctType type) { block_acct_start(bdrv_get_stats(bs), cookie, sg->size, type); }"} {"target": 0, "idx": 3039, "func": "xilinx_axienet_data_stream_push(StreamSlave *obj, uint8_t *buf, size_t size, uint32_t *hdr) { XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(obj); XilinxAXIEnet *s = ds->enet; /* TX enable ? */ if (!(s->tc & TC_TX)) { return size; } /* Jumbo or vlan sizes ? */ if (!(s->tc & TC_JUM)) { if (size > 1518 && size <= 1522 && !(s->tc & TC_VLAN)) { return size; } } if (hdr[0] & 1) { unsigned int start_off = hdr[1] >> 16; unsigned int write_off = hdr[1] & 0xffff; uint32_t tmp_csum; uint16_t csum; tmp_csum = net_checksum_add(size - start_off, (uint8_t *)buf + start_off); /* Accumulate the seed. */ tmp_csum += hdr[2] & 0xffff; /* Fold the 32bit partial checksum. */ csum = net_checksum_finish(tmp_csum); /* Writeback. */ buf[write_off] = csum >> 8; buf[write_off + 1] = csum & 0xff; } qemu_send_packet(qemu_get_queue(s->nic), buf, size); s->stats.tx_bytes += size; s->regs[R_IS] |= IS_TX_COMPLETE; enet_update_irq(s); return size; }"} {"target": 0, "idx": 3047, "func": "void cpu_x86_set_a20(CPUX86State *env, int a20_state) { a20_state = (a20_state != 0); if (a20_state != a20_enabled) { #if defined(DEBUG_MMU) printf(\"A20 update: a20=%d\\n\", a20_state); #endif /* if the cpu is currently executing code, we must unlink it and all the potentially executing TB */ cpu_interrupt(env, 0); /* when a20 is changed, all the MMU mappings are invalid, so we must flush everything */ tlb_flush(env); a20_enabled = a20_state; if (a20_enabled) a20_mask = 0xffffffff; else a20_mask = 0xffefffff; } }"} {"target": 0, "idx": 3059, "func": "uint64_t HELPER(cvd)(int32_t bin) { /* positive 0 */ uint64_t dec = 0x0c; int shift = 4; if (bin < 0) { bin = -bin; dec = 0x0d; } for (shift = 4; (shift < 64) && bin; shift += 4) { int current_number = bin % 10; dec |= (current_number) << shift; bin /= 10; } return dec; }"} {"target": 0, "idx": 3061, "func": "static void tcg_target_qemu_prologue(TCGContext *s) { int frame_size, i; /* Allocate space for the fixed frame marker. */ frame_size = -TCG_TARGET_CALL_STACK_OFFSET; frame_size += TCG_TARGET_STATIC_CALL_ARGS_SIZE; /* Allocate space for the saved registers. */ frame_size += ARRAY_SIZE(tcg_target_callee_save_regs) * 4; /* Align the allocated space. */ frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1) & -TCG_TARGET_STACK_ALIGN); /* The return address is stored in the caller's frame. */ tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -20); /* Allocate stack frame, saving the first register at the same time. */ tcg_out_ldst(s, tcg_target_callee_save_regs[0], TCG_REG_SP, frame_size, INSN_STWM); /* Save all callee saved registers. */ for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i], TCG_REG_SP, -frame_size + i * 4); } #ifdef CONFIG_USE_GUEST_BASE if (GUEST_BASE != 0) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE); tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); } #endif /* Jump to TB, and adjust R18 to be the return address. */ tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R26)); tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31); /* Restore callee saved registers. 
*/ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -frame_size - 20); for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i], TCG_REG_SP, -frame_size + i * 4); } /* Deallocate stack frame and return. */ tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP)); tcg_out_ldst(s, tcg_target_callee_save_regs[0], TCG_REG_SP, -frame_size, INSN_LDWM); }"} {"target": 1, "idx": 3071, "func": "bool io_mem_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size) { return memory_region_dispatch_read(mr, addr, pval, size); }"} {"target": 0, "idx": 3105, "func": "static void init_scan_tables(H264Context *h) { int i; for (i = 0; i < 16; i++) { #define TRANSPOSE(x) (x >> 2) | ((x << 2) & 0xF) h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]); h->field_scan[i] = TRANSPOSE(field_scan[i]); #undef TRANSPOSE } for (i = 0; i < 64; i++) { #define TRANSPOSE(x) (x >> 3) | ((x & 7) << 3) h->zigzag_scan8x8[i] = TRANSPOSE(ff_zigzag_direct[i]); h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]); h->field_scan8x8[i] = TRANSPOSE(field_scan8x8[i]); h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]); #undef TRANSPOSE } if (h->sps.transform_bypass) { // FIXME same ugly h->zigzag_scan_q0 = ff_zigzag_scan; h->zigzag_scan8x8_q0 = ff_zigzag_direct; h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc; h->field_scan_q0 = field_scan; h->field_scan8x8_q0 = field_scan8x8; h->field_scan8x8_cavlc_q0 = field_scan8x8_cavlc; } else { h->zigzag_scan_q0 = h->zigzag_scan; h->zigzag_scan8x8_q0 = h->zigzag_scan8x8; h->zigzag_scan8x8_cavlc_q0 = h->zigzag_scan8x8_cavlc; h->field_scan_q0 = h->field_scan; h->field_scan8x8_q0 = h->field_scan8x8; h->field_scan8x8_cavlc_q0 = h->field_scan8x8_cavlc; } }"} {"target": 0, "idx": 3106, "func": "void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h) { POWERPC_TBL_DECLARE(altivec_avg_pixels8_num, 1); #ifdef ALTIVEC_USE_REFERENCE_C_CODE int i; POWERPC_TBL_START_COUNT(altivec_avg_pixels8_num, 1); for (i = 0; i < h; i++) { *((uint32_t *) (block)) = (((*((uint32_t *) (block))) | ((((const struct unaligned_32 *) (pixels))->l))) - ((((*((uint32_t *) (block))) ^ ((((const struct unaligned_32 *) (pixels))-> l))) & 0xFEFEFEFEUL) >> 1)); *((uint32_t *) (block + 4)) = (((*((uint32_t *) (block + 4))) | ((((const struct unaligned_32 *) (pixels + 4))->l))) - ((((*((uint32_t *) (block + 4))) ^ ((((const struct unaligned_32 *) (pixels + 4))-> l))) & 0xFEFEFEFEUL) >> 1)); pixels += line_size; block += line_size; } POWERPC_TBL_STOP_COUNT(altivec_avg_pixels8_num, 1); #else /* ALTIVEC_USE_REFERENCE_C_CODE */ register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv; int i; POWERPC_TBL_START_COUNT(altivec_avg_pixels8_num, 1); for (i = 0; i < h; i++) { /* block is 8 bytes-aligned, so we're either in the left block (16 bytes-aligned) or in the right block (not) */ int rightside = ((unsigned long)block & 0x0000000F); blockv = vec_ld(0, block); pixelsv1 = vec_ld(0, (unsigned char*)pixels); pixelsv2 = vec_ld(16, (unsigned char*)pixels); pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels)); if (rightside) { pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1)); } else { pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3)); } blockv = vec_avg(blockv, pixelsv); vec_st(blockv, 0, block); pixels += line_size; block += line_size; } POWERPC_TBL_STOP_COUNT(altivec_avg_pixels8_num, 1); #endif /* ALTIVEC_USE_REFERENCE_C_CODE */ }"} {"target": 0, "idx": 3124, "func": "int 
avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options) { AVFormatContext *s = *ps; int ret = 0; AVDictionary *tmp = NULL; ID3v2ExtraMeta *id3v2_extra_meta = NULL; if (!s && !(s = avformat_alloc_context())) return AVERROR(ENOMEM); if (!s->av_class) { av_log(NULL, AV_LOG_ERROR, \"Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\\n\"); return AVERROR(EINVAL); } if (fmt) s->iformat = fmt; if (options) av_dict_copy(&tmp, *options, 0); if ((ret = av_opt_set_dict(s, &tmp)) < 0) goto fail; if ((ret = init_input(s, filename, &tmp)) < 0) goto fail; s->probe_score = ret; avio_skip(s->pb, s->skip_initial_bytes); /* Check filename in case an image number is expected. */ if (s->iformat->flags & AVFMT_NEEDNUMBER) { if (!av_filename_number_test(filename)) { ret = AVERROR(EINVAL); goto fail; } } s->duration = s->start_time = AV_NOPTS_VALUE; av_strlcpy(s->filename, filename ? filename : \"\", sizeof(s->filename)); /* Allocate private data. */ if (s->iformat->priv_data_size > 0) { if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) { ret = AVERROR(ENOMEM); goto fail; } if (s->iformat->priv_class) { *(const AVClass **) s->priv_data = s->iformat->priv_class; av_opt_set_defaults(s->priv_data); if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0) goto fail; } } /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */ if (s->pb) ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta); if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header) if ((ret = s->iformat->read_header(s)) < 0) goto fail; if (id3v2_extra_meta) { if (!strcmp(s->iformat->name, \"mp3\") || !strcmp(s->iformat->name, \"aac\") || !strcmp(s->iformat->name, \"tta\")) { if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0) goto fail; } else av_log(s, AV_LOG_DEBUG, \"demuxer does not support additional id3 data, skipping\\n\"); } ff_id3v2_free_extra_meta(&id3v2_extra_meta); if ((ret = avformat_queue_attached_pictures(s)) < 0) goto fail; if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset) s->data_offset = avio_tell(s->pb); s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE; if (options) { av_dict_free(options); *options = tmp; } *ps = s; return 0; fail: ff_id3v2_free_extra_meta(&id3v2_extra_meta); av_dict_free(&tmp); if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO)) avio_close(s->pb); avformat_free_context(s); *ps = NULL; return ret; }"} {"target": 0, "idx": 3187, "func": "static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h, H264SliceContext *sl, int mb_type, int simple, int transform_bypass, int pixel_shift, const int *block_offset, int linesize, uint8_t *dest_y, int p) { void (*idct_add)(uint8_t *dst, int16_t *block, int stride); void (*idct_dc_add)(uint8_t *dst, int16_t *block, int stride); int i; int qscale = p == 0 ? 
sl->qscale : sl->chroma_qp[p - 1]; block_offset += 16 * p; if (IS_INTRA4x4(mb_type)) { if (IS_8x8DCT(mb_type)) { if (transform_bypass) { idct_dc_add = idct_add = h->h264dsp.h264_add_pixels8_clear; } else { idct_dc_add = h->h264dsp.h264_idct8_dc_add; idct_add = h->h264dsp.h264_idct8_add; } for (i = 0; i < 16; i += 4) { uint8_t *const ptr = dest_y + block_offset[i]; const int dir = sl->intra4x4_pred_mode_cache[scan8[i]]; if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) { h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize); } else { const int nnz = sl->non_zero_count_cache[scan8[i + p * 16]]; h->hpc.pred8x8l[dir](ptr, (sl->topleft_samples_available << i) & 0x8000, (sl->topright_samples_available << i) & 0x4000, linesize); if (nnz) { if (nnz == 1 && dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256)) idct_dc_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize); else idct_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize); } } } } else { if (transform_bypass) { idct_dc_add = idct_add = h->h264dsp.h264_add_pixels4_clear; } else { idct_dc_add = h->h264dsp.h264_idct_dc_add; idct_add = h->h264dsp.h264_idct_add; } for (i = 0; i < 16; i++) { uint8_t *const ptr = dest_y + block_offset[i]; const int dir = sl->intra4x4_pred_mode_cache[scan8[i]]; if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) { h->hpc.pred4x4_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize); } else { uint8_t *topright; int nnz, tr; uint64_t tr_high; if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) { const int topright_avail = (sl->topright_samples_available << i) & 0x8000; assert(sl->mb_y || linesize <= block_offset[i]); if (!topright_avail) { if (pixel_shift) { tr_high = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL; topright = (uint8_t *)&tr_high; } else { tr = ptr[3 - linesize] * 0x01010101u; topright = (uint8_t *)&tr; } } else topright = ptr + (4 << pixel_shift) - linesize; } else topright = NULL; h->hpc.pred4x4[dir](ptr, topright, linesize); nnz = sl->non_zero_count_cache[scan8[i + p * 16]]; if (nnz) { if (nnz == 1 && dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256)) idct_dc_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize); else idct_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize); } } } } } else { h->hpc.pred16x16[sl->intra16x16_pred_mode](dest_y, linesize); if (sl->non_zero_count_cache[scan8[LUMA_DC_BLOCK_INDEX + p]]) { if (!transform_bypass) h->h264dsp.h264_luma_dc_dequant_idct(sl->mb + (p * 256 << pixel_shift), sl->mb_luma_dc[p], h->dequant4_coeff[p][qscale][0]); else { static const uint8_t dc_mapping[16] = { 0 * 16, 1 * 16, 4 * 16, 5 * 16, 2 * 16, 3 * 16, 6 * 16, 7 * 16, 8 * 16, 9 * 16, 12 * 16, 13 * 16, 10 * 16, 11 * 16, 14 * 16, 15 * 16 }; for (i = 0; i < 16; i++) dctcoef_set(sl->mb + (p * 256 << pixel_shift), pixel_shift, dc_mapping[i], dctcoef_get(sl->mb_luma_dc[p], pixel_shift, i)); } } } }"} {"target": 0, "idx": 3189, "func": "static void peak_write_chunk(AVFormatContext *s) { WAVMuxContext *wav = s->priv_data; AVIOContext *pb = s->pb; AVCodecContext *enc = s->streams[0]->codec; int64_t peak = ff_start_tag(s->pb, \"levl\"); int64_t now0; time_t now_secs; char timestamp[28]; /* Peak frame of incomplete block at end */ if (wav->peak_block_pos) peak_write_frame(s); memset(timestamp, 0, sizeof(timestamp)); if (!(s->flags & AVFMT_FLAG_BITEXACT)) { struct tm tmpbuf; av_log(s, AV_LOG_INFO, \"Writing local time and date to Peak Envelope Chunk\\n\"); now0 = av_gettime(); 
now_secs = now0 / 1000000; strftime(timestamp, sizeof(timestamp), \"%Y:%m:%d:%H:%M:%S:\", localtime_r(&now_secs, &tmpbuf)); av_strlcatf(timestamp, sizeof(timestamp), \"%03d\", (int)((now0 / 1000) % 1000)); } avio_wl32(pb, 1); /* version */ avio_wl32(pb, wav->peak_format); /* 8 or 16 bit */ avio_wl32(pb, wav->peak_ppv); /* positive and negative */ avio_wl32(pb, wav->peak_block_size); /* frames per value */ avio_wl32(pb, enc->channels); /* number of channels */ avio_wl32(pb, wav->peak_num_frames); /* number of peak frames */ avio_wl32(pb, wav->peak_pos_pop); /* audio sample frame index */ avio_wl32(pb, 128); /* equal to size of header */ avio_write(pb, timestamp, 28); /* ASCII time stamp */ ffio_fill(pb, 0, 60); avio_write(pb, wav->peak_output, wav->peak_outbuf_bytes); ff_end_tag(pb, peak); if (!wav->data) wav->data = peak; }"} {"target": 1, "idx": 3192, "func": "static void qmp_input_end_list(Visitor *v) { QmpInputVisitor *qiv = to_qiv(v); qmp_input_pop(qiv, &error_abort); }"} {"target": 1, "idx": 3195, "func": "static int get_uint32(QEMUFile *f, void *pv, size_t size) { uint32_t *v = pv; qemu_get_be32s(f, v); return 0; }"} {"target": 1, "idx": 3207, "func": "static void *data_plane_thread(void *opaque) { VirtIOBlockDataPlane *s = opaque; do { event_poll(&s->event_poll); } while (s->started || s->num_reqs > 0); return NULL; }"} {"target": 1, "idx": 3215, "func": "void bdrv_close_all(void) { BlockDriverState *bs; QTAILQ_FOREACH(bs, &bdrv_states, device_list) { AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); bdrv_close(bs); aio_context_release(aio_context); } }"} {"target": 1, "idx": 3219, "func": "void monitor_init(CharDriverState *chr, int flags) { static int is_first_init = 1; Monitor *mon; if (is_first_init) { monitor_qapi_event_init(); sortcmdlist(); is_first_init = 0; } mon = g_malloc(sizeof(*mon)); monitor_data_init(mon); mon->chr = chr; mon->flags = flags; if (flags & MONITOR_USE_READLINE) { mon->rs = readline_init(monitor_readline_printf, monitor_readline_flush, mon, monitor_find_completion); monitor_read_command(mon, 0); } if (monitor_is_qmp(mon)) { qemu_chr_add_handlers(chr, monitor_can_read, monitor_qmp_read, monitor_qmp_event, mon); qemu_chr_fe_set_echo(chr, true); json_message_parser_init(&mon->qmp.parser, handle_qmp_command); } else { qemu_chr_add_handlers(chr, monitor_can_read, monitor_read, monitor_event, mon); } qemu_mutex_lock(&monitor_lock); QLIST_INSERT_HEAD(&mon_list, mon, entry); qemu_mutex_unlock(&monitor_lock); if (!default_mon || (flags & MONITOR_IS_DEFAULT)) default_mon = mon; }"} {"target": 1, "idx": 3224, "func": "static sPAPRDRConnector *spapr_phb_get_pci_drc(sPAPRPHBState *phb, PCIDevice *pdev) { uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev)))); return spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_PCI, (phb->index << 16) | (busnr << 8) | pdev->devfn); }"} {"target": 0, "idx": 3232, "func": "int ff_socket_nonblock(int socket, int enable) { #ifdef __MINGW32__ return ioctlsocket(socket, FIONBIO, &enable); #else if (enable) return fcntl(socket, F_SETFL, fcntl(socket, F_GETFL) | O_NONBLOCK); else return fcntl(socket, F_SETFL, fcntl(socket, F_GETFL) & ~O_NONBLOCK); #endif }"} {"target": 1, "idx": 3234, "func": "static void adb_kbd_put_keycode(void *opaque, int keycode) { KBDState *s = opaque; if (s->count < sizeof(s->data)) { s->data[s->wptr] = keycode; if (++s->wptr == sizeof(s->data)) s->wptr = 0; s->count++; } }"} {"target": 1, "idx": 3249, "func": "static void nvdimm_build_nfit(GSList *device_list, 
GArray *table_offsets, GArray *table_data, GArray *linker) { GArray *structures = nvdimm_build_device_structure(device_list); void *header; acpi_add_table(table_offsets, table_data); /* NFIT header. */ header = acpi_data_push(table_data, sizeof(NvdimmNfitHeader)); /* NVDIMM device structures. */ g_array_append_vals(table_data, structures->data, structures->len); build_header(linker, table_data, header, \"NFIT\", sizeof(NvdimmNfitHeader) + structures->len, 1, NULL); g_array_free(structures, true); }"} {"target": 0, "idx": 3255, "func": "static int v9fs_do_mksock(V9fsState *s, V9fsString *path) { return s->ops->mksock(&s->ctx, path->data); }"} {"target": 0, "idx": 3270, "func": "static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp) { RAMBlock *block; ram_addr_t old_ram_size, new_ram_size; old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS; /* This assumes the iothread lock is taken here too. */ qemu_mutex_lock_ramlist(); new_block->offset = find_ram_offset(new_block->max_length); if (!new_block->host) { if (xen_enabled()) { xen_ram_alloc(new_block->offset, new_block->max_length, new_block->mr); } else { new_block->host = phys_mem_alloc(new_block->max_length, &new_block->mr->align); if (!new_block->host) { error_setg_errno(errp, errno, \"cannot set up guest memory '%s'\", memory_region_name(new_block->mr)); qemu_mutex_unlock_ramlist(); return -1; } memory_try_enable_merging(new_block->host, new_block->max_length); } } /* Keep the list sorted from biggest to smallest block. */ QTAILQ_FOREACH(block, &ram_list.blocks, next) { if (block->max_length < new_block->max_length) { break; } } if (block) { QTAILQ_INSERT_BEFORE(block, new_block, next); } else { QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next); } ram_list.mru_block = NULL; ram_list.version++; qemu_mutex_unlock_ramlist(); new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS; if (new_ram_size > old_ram_size) { int i; for (i = 0; i < DIRTY_MEMORY_NUM; i++) { ram_list.dirty_memory[i] = bitmap_zero_extend(ram_list.dirty_memory[i], old_ram_size, new_ram_size); } } cpu_physical_memory_set_dirty_range(new_block->offset, new_block->used_length); qemu_ram_setup_dump(new_block->host, new_block->max_length); qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE); qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK); if (kvm_enabled()) { kvm_setup_guest_memory(new_block->host, new_block->max_length); } return new_block->offset; }"} {"target": 1, "idx": 3275, "func": "static void filter(FSPPContext *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, uint8_t *qp_store, int qp_stride, int is_luma) { int x, x0, y, es, qy, t; const int stride = is_luma ? 
p->temp_stride : (width + 16); const int step = 6 - p->log2_count; const int qpsh = 4 - p->hsub * !is_luma; const int qpsv = 4 - p->vsub * !is_luma; DECLARE_ALIGNED(32, int32_t, block_align)[4 * 8 * BLOCKSZ + 4 * 8 * BLOCKSZ]; int16_t *block = (int16_t *)block_align; int16_t *block3 = (int16_t *)(block_align + 4 * 8 * BLOCKSZ); memset(block3, 0, 4 * 8 * BLOCKSZ); if (!src || !dst) return; for (y = 0; y < height; y++) { int index = 8 + 8 * stride + y * stride; memcpy(p->src + index, src + y * src_stride, width); for (x = 0; x < 8; x++) { p->src[index - x - 1] = p->src[index + x ]; p->src[index + width + x ] = p->src[index + width - x - 1]; } } for (y = 0; y < 8; y++) { memcpy(p->src + ( 7 - y ) * stride, p->src + ( y + 8 ) * stride, stride); memcpy(p->src + (height + 8 + y) * stride, p->src + (height - y + 7) * stride, stride); } //FIXME (try edge emu) for (y = 8; y < 24; y++) memset(p->temp + 8 + y * stride, 0, width * sizeof(int16_t)); for (y = step; y < height + 8; y += step) { //step= 1,2 const int y1 = y - 8 + step; //l5-7 l4-6; qy = y - 4; if (qy > height - 1) qy = height - 1; if (qy < 0) qy = 0; qy = (qy >> qpsv) * qp_stride; p->row_fdct(block, p->src + y * stride + 2 - (y&1), stride, 2); for (x0 = 0; x0 < width + 8 - 8 * (BLOCKSZ - 1); x0 += 8 * (BLOCKSZ - 1)) { p->row_fdct(block + 8 * 8, p->src + y * stride + 8 + x0 + 2 - (y&1), stride, 2 * (BLOCKSZ - 1)); if (p->qp) p->column_fidct((int16_t *)(&p->threshold_mtx[0]), block + 0 * 8, block3 + 0 * 8, 8 * (BLOCKSZ - 1)); //yes, this is a HOTSPOT else for (x = 0; x < 8 * (BLOCKSZ - 1); x += 8) { t = x + x0 - 2; //correct t=x+x0-2-(y&1), but its the same if (t < 0) t = 0; //t always < width-2 t = qp_store[qy + (t >> qpsh)]; t = ff_norm_qscale(t, p->qscale_type); if (t != p->prev_q) p->prev_q = t, p->mul_thrmat((int16_t *)(&p->threshold_mtx_noq[0]), (int16_t *)(&p->threshold_mtx[0]), t); p->column_fidct((int16_t *)(&p->threshold_mtx[0]), block + x * 8, block3 + x * 8, 8); //yes, this is a HOTSPOT } p->row_idct(block3 + 0 * 8, p->temp + (y & 15) * stride + x0 + 2 - (y & 1), stride, 2 * (BLOCKSZ - 1)); memmove(block, block + (BLOCKSZ - 1) * 64, 8 * 8 * sizeof(int16_t)); //cycling memmove(block3, block3 + (BLOCKSZ - 1) * 64, 6 * 8 * sizeof(int16_t)); } es = width + 8 - x0; // 8, ... 
if (es > 8) p->row_fdct(block + 8 * 8, p->src + y * stride + 8 + x0 + 2 - (y & 1), stride, (es - 4) >> 2); p->column_fidct((int16_t *)(&p->threshold_mtx[0]), block, block3, es&(~1)); p->row_idct(block3 + 0 * 8, p->temp + (y & 15) * stride + x0 + 2 - (y & 1), stride, es >> 2); if (!(y1 & 7) && y1) { if (y1 & 8) p->store_slice(dst + (y1 - 8) * dst_stride, p->temp + 8 + 8 * stride, dst_stride, stride, width, 8, 5 - p->log2_count); else p->store_slice2(dst + (y1 - 8) * dst_stride, p->temp + 8 + 0 * stride, dst_stride, stride, width, 8, 5 - p->log2_count); } } if (y & 7) { // height % 8 != 0 if (y & 8) p->store_slice(dst + ((y - 8) & ~7) * dst_stride, p->temp + 8 + 8 * stride, dst_stride, stride, width, y&7, 5 - p->log2_count); else p->store_slice2(dst + ((y - 8) & ~7) * dst_stride, p->temp + 8 + 0 * stride, dst_stride, stride, width, y&7, 5 - p->log2_count); } }"} {"target": 1, "idx": 3284, "func": "static void make_dirty(uint8_t device) { QPCIDevice *dev; QPCIBar bmdma_bar, ide_bar; uint8_t status; size_t len = 512; uintptr_t guest_buf; void* buf; dev = get_pci_device(&bmdma_bar, &ide_bar); guest_buf = guest_alloc(guest_malloc, len); buf = g_malloc(len); memset(buf, rand() % 255 + 1, len); g_assert(guest_buf); g_assert(buf); memwrite(guest_buf, buf, len); PrdtEntry prdt[] = { { .addr = cpu_to_le32(guest_buf), .size = cpu_to_le32(len | PRDT_EOT), }, }; status = send_dma_request(CMD_WRITE_DMA, 1, 1, prdt, ARRAY_SIZE(prdt), NULL); g_assert_cmphex(status, ==, BM_STS_INTR); assert_bit_clear(qpci_io_readb(dev, ide_bar, reg_status), DF | ERR); g_free(buf); }"} {"target": 0, "idx": 3302, "func": "unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { const unsigned long *p = addr + BITOP_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG-1); unsigned long tmp; if (offset >= size) { return size; } size -= result; offset %= BITS_PER_LONG; if (offset) { tmp = *(p++); tmp &= (~0UL << offset); if (size < BITS_PER_LONG) { goto found_first; } if (tmp) { goto found_middle; } size -= BITS_PER_LONG; result += BITS_PER_LONG; } while (size & ~(BITS_PER_LONG-1)) { if ((tmp = *(p++))) { goto found_middle; } result += BITS_PER_LONG; size -= BITS_PER_LONG; } if (!size) { return result; } tmp = *p; found_first: tmp &= (~0UL >> (BITS_PER_LONG - size)); if (tmp == 0UL) { /* Are any bits set? */ return result + size; /* Nope. 
*/ } found_middle: return result + ctzl(tmp); }"} {"target": 0, "idx": 3309, "func": "uint64_t helper_cmpbge(uint64_t op1, uint64_t op2) { uint8_t opa, opb, res; int i; res = 0; for (i = 0; i < 8; i++) { opa = op1 >> (i * 8); opb = op2 >> (i * 8); if (opa >= opb) { res |= 1 << i; } } return res; }"} {"target": 0, "idx": 3312, "func": "void helper_check_iol(CPUX86State *env, uint32_t t0) { check_io(env, t0, 4); }"} {"target": 1, "idx": 3328, "func": "static void ccid_card_vscard_handle_message(PassthruState *card, VSCMsgHeader *scr_msg_header) { uint8_t *data = (uint8_t *)&scr_msg_header[1]; switch (scr_msg_header->type) { case VSC_ATR: DPRINTF(card, D_INFO, \"VSC_ATR %d\\n\", scr_msg_header->length); if (scr_msg_header->length > MAX_ATR_SIZE) { error_report(\"ATR size exceeds spec, ignoring\"); ccid_card_vscard_send_error(card, scr_msg_header->reader_id, VSC_GENERAL_ERROR); } memcpy(card->atr, data, scr_msg_header->length); card->atr_length = scr_msg_header->length; ccid_card_card_inserted(&card->base); ccid_card_vscard_send_error(card, scr_msg_header->reader_id, VSC_SUCCESS); case VSC_APDU: ccid_card_send_apdu_to_guest( &card->base, data, scr_msg_header->length); case VSC_CardRemove: DPRINTF(card, D_INFO, \"VSC_CardRemove\\n\"); ccid_card_card_removed(&card->base); ccid_card_vscard_send_error(card, scr_msg_header->reader_id, VSC_SUCCESS); case VSC_Init: ccid_card_vscard_handle_init( card, scr_msg_header, (VSCMsgInit *)data); case VSC_Error: ccid_card_card_error(&card->base, *(uint32_t *)data); case VSC_ReaderAdd: if (ccid_card_ccid_attach(&card->base) < 0) { ccid_card_vscard_send_error(card, VSCARD_UNDEFINED_READER_ID, VSC_CANNOT_ADD_MORE_READERS); } else { ccid_card_vscard_send_error(card, VSCARD_MINIMAL_READER_ID, VSC_SUCCESS); } case VSC_ReaderRemove: ccid_card_ccid_detach(&card->base); ccid_card_vscard_send_error(card, scr_msg_header->reader_id, VSC_SUCCESS); default: printf(\"usb-ccid: chardev: unexpected message of type %X\\n\", scr_msg_header->type); ccid_card_vscard_send_error(card, scr_msg_header->reader_id, VSC_GENERAL_ERROR); } }"} {"target": 0, "idx": 3339, "func": "static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg, int rounding_mode) { CPU_DoubleU farg; farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN round */ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI); } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) { /* qNan / infinity round */ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); } else { set_float_rounding_mode(rounding_mode, &env->fp_status); farg.ll = float64_round_to_int(farg.d, &env->fp_status); /* Restore rounding mode from FPSCR */ fpscr_set_rounding_mode(env); } return farg.ll; }"} {"target": 0, "idx": 3344, "func": "int DMA_write_memory (int nchan, void *buf, int pos, int len) { struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3]; target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR]; if (r->mode & 0x20) { int i; uint8_t *p = buf; cpu_physical_memory_write (addr - pos - len, buf, len); /* What about 16bit transfers? 
*/ for (i = 0; i < len; i++) { uint8_t b = p[len - i - 1]; p[i] = b; } } else cpu_physical_memory_write (addr + pos, buf, len); return len; }"} {"target": 0, "idx": 3356, "func": "static void tcx_realizefn(DeviceState *dev, Error **errp) { SysBusDevice *sbd = SYS_BUS_DEVICE(dev); TCXState *s = TCX(dev); ram_addr_t vram_offset = 0; int size, ret; uint8_t *vram_base; char *fcode_filename; memory_region_init_ram(&s->vram_mem, OBJECT(s), \"tcx.vram\", s->vram_size * (1 + 4 + 4), &error_fatal); vmstate_register_ram_global(&s->vram_mem); memory_region_set_log(&s->vram_mem, true, DIRTY_MEMORY_VGA); vram_base = memory_region_get_ram_ptr(&s->vram_mem); /* 10/ROM : FCode ROM */ vmstate_register_ram_global(&s->rom); fcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, TCX_ROM_FILE); if (fcode_filename) { ret = load_image_targphys(fcode_filename, s->prom_addr, FCODE_MAX_ROM_SIZE); g_free(fcode_filename); if (ret < 0 || ret > FCODE_MAX_ROM_SIZE) { error_report(\"tcx: could not load prom '%s'\", TCX_ROM_FILE); } } /* 0/DFB8 : 8-bit plane */ s->vram = vram_base; size = s->vram_size; memory_region_init_alias(&s->vram_8bit, OBJECT(s), \"tcx.vram.8bit\", &s->vram_mem, vram_offset, size); sysbus_init_mmio(sbd, &s->vram_8bit); vram_offset += size; vram_base += size; /* 1/DFB24 : 24bit plane */ size = s->vram_size * 4; s->vram24 = (uint32_t *)vram_base; s->vram24_offset = vram_offset; memory_region_init_alias(&s->vram_24bit, OBJECT(s), \"tcx.vram.24bit\", &s->vram_mem, vram_offset, size); sysbus_init_mmio(sbd, &s->vram_24bit); vram_offset += size; vram_base += size; /* 4/RDFB32 : Raw Framebuffer */ size = s->vram_size * 4; s->cplane = (uint32_t *)vram_base; s->cplane_offset = vram_offset; memory_region_init_alias(&s->vram_cplane, OBJECT(s), \"tcx.vram.cplane\", &s->vram_mem, vram_offset, size); sysbus_init_mmio(sbd, &s->vram_cplane); /* 9/THC24bits : NetBSD writes here even with 8-bit display: dummy */ if (s->depth == 8) { memory_region_init_io(&s->thc24, OBJECT(s), &tcx_dummy_ops, s, \"tcx.thc24\", TCX_THC_NREGS); sysbus_init_mmio(sbd, &s->thc24); } sysbus_init_irq(sbd, &s->irq); if (s->depth == 8) { s->con = graphic_console_init(DEVICE(dev), 0, &tcx_ops, s); } else { s->con = graphic_console_init(DEVICE(dev), 0, &tcx24_ops, s); } s->thcmisc = 0; qemu_console_resize(s->con, s->width, s->height); }"} {"target": 0, "idx": 3365, "func": "LinuxAioState *aio_get_linux_aio(AioContext *ctx) { if (!ctx->linux_aio) { ctx->linux_aio = laio_init(); laio_attach_aio_context(ctx->linux_aio, ctx); } return ctx->linux_aio; }"} {"target": 0, "idx": 3368, "func": "static int replaygain_export(AVStream *st, const uint8_t *track_gain, const uint8_t *track_peak, const uint8_t *album_gain, const uint8_t *album_peak) { AVPacketSideData *sd, *tmp; AVReplayGain *replaygain; int32_t tg, ag; uint32_t tp, ap; tg = parse_value(track_gain, INT32_MIN); ag = parse_value(album_gain, INT32_MIN); tp = parse_value(track_peak, 0); ap = parse_value(album_peak, 0); if (tg == INT32_MIN && ag == INT32_MIN) return 0; replaygain = av_mallocz(sizeof(*replaygain)); if (!replaygain) return AVERROR(ENOMEM); tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp)); if (!tmp) { av_freep(&replaygain); return AVERROR(ENOMEM); } st->side_data = tmp; st->nb_side_data++; sd = &st->side_data[st->nb_side_data - 1]; sd->type = AV_PKT_DATA_REPLAYGAIN; sd->data = (uint8_t*)replaygain; sd->size = sizeof(*replaygain); replaygain->track_gain = tg; replaygain->track_peak = tp; replaygain->album_gain = ag; replaygain->album_peak = ap; return 0; }"} 
{"target": 0, "idx": 3383, "func": "static int gif_read_header1(GifState *s) { uint8_t sig[6]; int v, n; int background_color_index; if (bytestream2_get_bytes_left(&s->gb) < 13) return AVERROR_INVALIDDATA; /* read gif signature */ bytestream2_get_bufferu(&s->gb, sig, 6); if (memcmp(sig, gif87a_sig, 6) != 0 && memcmp(sig, gif89a_sig, 6) != 0) return AVERROR_INVALIDDATA; /* read screen header */ s->transparent_color_index = -1; s->screen_width = bytestream2_get_le16u(&s->gb); s->screen_height = bytestream2_get_le16u(&s->gb); if( (unsigned)s->screen_width > 32767 || (unsigned)s->screen_height > 32767){ av_log(s->avctx, AV_LOG_ERROR, \"picture size too large\\n\"); return AVERROR_INVALIDDATA; } av_fast_malloc(&s->idx_line, &s->idx_line_size, s->screen_width); if (!s->idx_line) return AVERROR(ENOMEM); v = bytestream2_get_byteu(&s->gb); s->color_resolution = ((v & 0x70) >> 4) + 1; s->has_global_palette = (v & 0x80); s->bits_per_pixel = (v & 0x07) + 1; background_color_index = bytestream2_get_byteu(&s->gb); n = bytestream2_get_byteu(&s->gb); if (n) { s->avctx->sample_aspect_ratio.num = n + 15; s->avctx->sample_aspect_ratio.den = 64; } av_dlog(s->avctx, \"screen_w=%d screen_h=%d bpp=%d global_palette=%d\\n\", s->screen_width, s->screen_height, s->bits_per_pixel, s->has_global_palette); if (s->has_global_palette) { s->background_color_index = background_color_index; n = 1 << s->bits_per_pixel; if (bytestream2_get_bytes_left(&s->gb) < n * 3) return AVERROR_INVALIDDATA; gif_read_palette(s, s->global_palette, n); s->bg_color = s->global_palette[s->background_color_index]; } else s->background_color_index = -1; return 0; }"} {"target": 0, "idx": 3436, "func": "static ssize_t qio_channel_command_readv(QIOChannel *ioc, const struct iovec *iov, size_t niov, int **fds, size_t *nfds, Error **errp) { QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc); ssize_t ret; retry: ret = readv(cioc->readfd, iov, niov); if (ret < 0) { if (errno == EAGAIN || errno == EWOULDBLOCK) { return QIO_CHANNEL_ERR_BLOCK; } if (errno == EINTR) { goto retry; } error_setg_errno(errp, errno, \"Unable to read from command\"); return -1; } return ret; }"} {"target": 0, "idx": 3437, "func": "vnc_display_setup_auth(VncDisplay *vs, bool password, bool sasl, bool websocket, Error **errp) { /* * We have a choice of 3 authentication options * * 1. none * 2. vnc * 3. sasl * * The channel can be run in 2 modes * * 1. clear * 2. tls * * And TLS can use 2 types of credentials * * 1. anon * 2. x509 * * We thus have 9 possible logical combinations * * 1. clear + none * 2. clear + vnc * 3. clear + sasl * 4. tls + anon + none * 5. tls + anon + vnc * 6. tls + anon + sasl * 7. tls + x509 + none * 8. tls + x509 + vnc * 9. tls + x509 + sasl * * These need to be mapped into the VNC auth schemes * in an appropriate manner. In regular VNC, all the * TLS options get mapped into VNC_AUTH_VENCRYPT * sub-auth types. * * In websockets, the https:// protocol already provides * TLS support, so there is no need to make use of the * VeNCrypt extension. Furthermore, websockets browser * clients could not use VeNCrypt even if they wanted to, * as they cannot control when the TLS handshake takes * place. Thus there is no option but to rely on https://, * meaning combinations 4->6 and 7->9 will be mapped to * VNC auth schemes in the same way as combos 1->3. * * Regardless of fact that we have a different mapping to * VNC auth mechs for plain VNC vs websockets VNC, the end * result has the same security characteristics. 
*/ if (password) { if (vs->tlscreds) { vs->auth = VNC_AUTH_VENCRYPT; if (websocket) { vs->ws_tls = true; } if (object_dynamic_cast(OBJECT(vs->tlscreds), TYPE_QCRYPTO_TLS_CREDS_X509)) { VNC_DEBUG(\"Initializing VNC server with x509 password auth\\n\"); vs->subauth = VNC_AUTH_VENCRYPT_X509VNC; } else if (object_dynamic_cast(OBJECT(vs->tlscreds), TYPE_QCRYPTO_TLS_CREDS_ANON)) { VNC_DEBUG(\"Initializing VNC server with TLS password auth\\n\"); vs->subauth = VNC_AUTH_VENCRYPT_TLSVNC; } else { error_setg(errp, \"Unsupported TLS cred type %s\", object_get_typename(OBJECT(vs->tlscreds))); return -1; } } else { VNC_DEBUG(\"Initializing VNC server with password auth\\n\"); vs->auth = VNC_AUTH_VNC; vs->subauth = VNC_AUTH_INVALID; } if (websocket) { vs->ws_auth = VNC_AUTH_VNC; } else { vs->ws_auth = VNC_AUTH_INVALID; } } else if (sasl) { if (vs->tlscreds) { vs->auth = VNC_AUTH_VENCRYPT; if (websocket) { vs->ws_tls = true; } if (object_dynamic_cast(OBJECT(vs->tlscreds), TYPE_QCRYPTO_TLS_CREDS_X509)) { VNC_DEBUG(\"Initializing VNC server with x509 SASL auth\\n\"); vs->subauth = VNC_AUTH_VENCRYPT_X509SASL; } else if (object_dynamic_cast(OBJECT(vs->tlscreds), TYPE_QCRYPTO_TLS_CREDS_ANON)) { VNC_DEBUG(\"Initializing VNC server with TLS SASL auth\\n\"); vs->subauth = VNC_AUTH_VENCRYPT_TLSSASL; } else { error_setg(errp, \"Unsupported TLS cred type %s\", object_get_typename(OBJECT(vs->tlscreds))); return -1; } } else { VNC_DEBUG(\"Initializing VNC server with SASL auth\\n\"); vs->auth = VNC_AUTH_SASL; vs->subauth = VNC_AUTH_INVALID; } if (websocket) { vs->ws_auth = VNC_AUTH_SASL; } else { vs->ws_auth = VNC_AUTH_INVALID; } } else { if (vs->tlscreds) { vs->auth = VNC_AUTH_VENCRYPT; if (websocket) { vs->ws_tls = true; } if (object_dynamic_cast(OBJECT(vs->tlscreds), TYPE_QCRYPTO_TLS_CREDS_X509)) { VNC_DEBUG(\"Initializing VNC server with x509 no auth\\n\"); vs->subauth = VNC_AUTH_VENCRYPT_X509NONE; } else if (object_dynamic_cast(OBJECT(vs->tlscreds), TYPE_QCRYPTO_TLS_CREDS_ANON)) { VNC_DEBUG(\"Initializing VNC server with TLS no auth\\n\"); vs->subauth = VNC_AUTH_VENCRYPT_TLSNONE; } else { error_setg(errp, \"Unsupported TLS cred type %s\", object_get_typename(OBJECT(vs->tlscreds))); return -1; } } else { VNC_DEBUG(\"Initializing VNC server with no auth\\n\"); vs->auth = VNC_AUTH_NONE; vs->subauth = VNC_AUTH_INVALID; } if (websocket) { vs->ws_auth = VNC_AUTH_NONE; } else { vs->ws_auth = VNC_AUTH_INVALID; } } return 0; }"} {"target": 1, "idx": 3461, "func": "void bdrv_flush(BlockDriverState *bs) { if (bs->drv && bs->drv->bdrv_flush) bs->drv->bdrv_flush(bs);"} {"target": 1, "idx": 3464, "func": "void block_job_cancel_sync(BlockJob *job) { BlockDriverState *bs = job->bs; assert(bs->job == job); block_job_cancel(job); while (bs->job != NULL && bs->job->busy) { qemu_aio_wait(); } }"} {"target": 0, "idx": 3471, "func": "static int read_header(ShortenContext *s) { int i, ret; int maxnlpc = 0; /* shorten signature */ if (get_bits_long(&s->gb, 32) != AV_RB32(\"ajkg\")) { av_log(s->avctx, AV_LOG_ERROR, \"missing shorten magic 'ajkg'\\n\"); return -1; } s->lpcqoffset = 0; s->blocksize = DEFAULT_BLOCK_SIZE; s->nmean = -1; s->version = get_bits(&s->gb, 8); s->internal_ftype = get_uint(s, TYPESIZE); s->channels = get_uint(s, CHANSIZE); if (s->channels > MAX_CHANNELS) { av_log(s->avctx, AV_LOG_ERROR, \"too many channels: %d\\n\", s->channels); return -1; } s->avctx->channels = s->channels; /* get blocksize if version > 0 */ if (s->version > 0) { int skip_bytes, blocksize; blocksize = get_uint(s, av_log2(DEFAULT_BLOCK_SIZE)); if 
(!blocksize || blocksize > MAX_BLOCKSIZE) { av_log(s->avctx, AV_LOG_ERROR, \"invalid or unsupported block size: %d\\n\", blocksize); return AVERROR(EINVAL); } s->blocksize = blocksize; maxnlpc = get_uint(s, LPCQSIZE); s->nmean = get_uint(s, 0); skip_bytes = get_uint(s, NSKIPSIZE); for (i=0; igb, 8); } } s->nwrap = FFMAX(NWRAP, maxnlpc); if ((ret = allocate_buffers(s)) < 0) return ret; if ((ret = init_offset(s)) < 0) return ret; if (s->version > 1) s->lpcqoffset = V2LPCQOFFSET; if (get_ur_golomb_shorten(&s->gb, FNSIZE) != FN_VERBATIM) { av_log(s->avctx, AV_LOG_ERROR, \"missing verbatim section at beginning of stream\\n\"); return -1; } s->header_size = get_ur_golomb_shorten(&s->gb, VERBATIM_CKSIZE_SIZE); if (s->header_size >= OUT_BUFFER_SIZE || s->header_size < CANONICAL_HEADER_SIZE) { av_log(s->avctx, AV_LOG_ERROR, \"header is wrong size: %d\\n\", s->header_size); return -1; } for (i=0; iheader_size; i++) s->header[i] = (char)get_ur_golomb_shorten(&s->gb, VERBATIM_BYTE_SIZE); if (decode_wave_header(s->avctx, s->header, s->header_size) < 0) return -1; s->cur_chan = 0; s->bitshift = 0; s->got_header = 1; return 0; }"} {"target": 1, "idx": 3472, "func": "void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void (*parse_arg_function)(void *, const char*)) { const char *opt; int optindex, handleoptions = 1, ret; /* perform system-dependent conversions for arguments list */ prepare_app_arguments(&argc, &argv); /* parse options */ optindex = 1; while (optindex < argc) { opt = argv[optindex++]; if (handleoptions && opt[0] == '-' && opt[1] != '\\0') { if (opt[1] == '-' && opt[2] == '\\0') { handleoptions = 0; continue; } opt++; if ((ret = parse_option(optctx, opt, argv[optindex], options)) < 0) exit(1); optindex += ret; } else { if (parse_arg_function) parse_arg_function(optctx, opt); } } }"} {"target": 1, "idx": 3477, "func": "static void vtd_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass); dc->reset = vtd_reset; dc->vmsd = &vtd_vmstate; dc->props = vtd_properties; dc->hotpluggable = false; x86_class->realize = vtd_realize; x86_class->int_remap = vtd_int_remap; }"} {"target": 1, "idx": 3494, "func": "static void audio_capture(void *opaque, void *buf, int size) { VncState *vs = opaque; vnc_lock_output(vs); vnc_write_u8(vs, VNC_MSG_SERVER_QEMU); vnc_write_u8(vs, VNC_MSG_SERVER_QEMU_AUDIO); vnc_write_u16(vs, VNC_MSG_SERVER_QEMU_AUDIO_DATA); vnc_write_u32(vs, size); vnc_write(vs, buf, size); vnc_unlock_output(vs); vnc_flush(vs); }"} {"target": 0, "idx": 3526, "func": "void ff_aac_search_for_ltp(AACEncContext *s, SingleChannelElement *sce, int common_window) { int w, g, w2, i, start = 0, count = 0; int saved_bits = -(15 + FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB)); float *C34 = &s->scoefs[128*0], *PCD = &s->scoefs[128*1]; float *PCD34 = &s->scoefs[128*2]; const int max_ltp = FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); if (sce->ics.window_sequence[0] == EIGHT_SHORT_SEQUENCE || !sce->ics.ltp.lag) return; for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = 0; for (g = 0; g < sce->ics.num_swb; g++) { int bits1 = 0, bits2 = 0; float dist1 = 0.0f, dist2 = 0.0f; if (w*16+g > max_ltp) { start += sce->ics.swb_sizes[g]; continue; } for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { int bits_tmp1, bits_tmp2; FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g]; for (i = 0; i < sce->ics.swb_sizes[g]; i++) PCD[i] = sce->coeffs[start+(w+w2)*128+i] - 
sce->lcoeffs[start+(w+w2)*128+i]; abs_pow34_v(C34, &sce->coeffs[start+(w+w2)*128], sce->ics.swb_sizes[g]); abs_pow34_v(PCD34, PCD, sce->ics.swb_sizes[g]); dist1 += quantize_band_cost(s, &sce->coeffs[start+(w+w2)*128], C34, sce->ics.swb_sizes[g], sce->sf_idx[(w+w2)*16+g], sce->band_type[(w+w2)*16+g], s->lambda/band->threshold, INFINITY, &bits_tmp1, NULL, 0); dist2 += quantize_band_cost(s, PCD, PCD34, sce->ics.swb_sizes[g], sce->sf_idx[(w+w2)*16+g], sce->band_type[(w+w2)*16+g], s->lambda/band->threshold, INFINITY, &bits_tmp2, NULL, 0); bits1 += bits_tmp1; bits2 += bits_tmp2; } if (dist2 < dist1 && bits2 < bits1) { for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) for (i = 0; i < sce->ics.swb_sizes[g]; i++) sce->coeffs[start+(w+w2)*128+i] -= sce->lcoeffs[start+(w+w2)*128+i]; sce->ics.ltp.used[w*16+g] = 1; saved_bits += bits1 - bits2; count++; } start += sce->ics.swb_sizes[g]; } } sce->ics.ltp.present = !!count && (saved_bits >= 0); sce->ics.predictor_present = !!sce->ics.ltp.present; /* Reset any marked sfbs */ if (!sce->ics.ltp.present && !!count) { for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = 0; for (g = 0; g < sce->ics.num_swb; g++) { if (sce->ics.ltp.used[w*16+g]) { for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { for (i = 0; i < sce->ics.swb_sizes[g]; i++) { sce->coeffs[start+(w+w2)*128+i] += sce->lcoeffs[start+(w+w2)*128+i]; } } } start += sce->ics.swb_sizes[g]; } } } }"} {"target": 0, "idx": 3530, "func": "void av_opencl_buffer_release(cl_mem *cl_buf) { cl_int status = 0; if (!cl_buf) return; status = clReleaseMemObject(*cl_buf); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, \"Could not release OpenCL buffer: %s\\n\", opencl_errstr(status)); } memset(cl_buf, 0, sizeof(*cl_buf)); }"} {"target": 1, "idx": 3546, "func": "int inet_connect_opts(QemuOpts *opts, bool *in_progress, Error **errp) { struct addrinfo ai,*res,*e; const char *addr; const char *port; char uaddr[INET6_ADDRSTRLEN+1]; char uport[33]; int sock,rc; bool block; memset(&ai,0, sizeof(ai)); ai.ai_flags = AI_CANONNAME | AI_ADDRCONFIG; ai.ai_family = PF_UNSPEC; ai.ai_socktype = SOCK_STREAM; if (in_progress) { *in_progress = false; } addr = qemu_opt_get(opts, \"host\"); port = qemu_opt_get(opts, \"port\"); block = qemu_opt_get_bool(opts, \"block\", 0); if (addr == NULL || port == NULL) { fprintf(stderr, \"inet_connect: host and/or port not specified\\n\"); error_set(errp, QERR_SOCKET_CREATE_FAILED); return -1; } if (qemu_opt_get_bool(opts, \"ipv4\", 0)) ai.ai_family = PF_INET; if (qemu_opt_get_bool(opts, \"ipv6\", 0)) ai.ai_family = PF_INET6; /* lookup */ if (0 != (rc = getaddrinfo(addr, port, &ai, &res))) { fprintf(stderr,\"getaddrinfo(%s,%s): %s\\n\", addr, port, gai_strerror(rc)); error_set(errp, QERR_SOCKET_CREATE_FAILED); return -1; } for (e = res; e != NULL; e = e->ai_next) { if (getnameinfo((struct sockaddr*)e->ai_addr,e->ai_addrlen, uaddr,INET6_ADDRSTRLEN,uport,32, NI_NUMERICHOST | NI_NUMERICSERV) != 0) { fprintf(stderr,\"%s: getnameinfo: oops\\n\", __FUNCTION__); continue; } sock = qemu_socket(e->ai_family, e->ai_socktype, e->ai_protocol); if (sock < 0) { fprintf(stderr,\"%s: socket(%s): %s\\n\", __FUNCTION__, inet_strfamily(e->ai_family), strerror(errno)); continue; } setsockopt(sock,SOL_SOCKET,SO_REUSEADDR,(void*)&on,sizeof(on)); if (!block) { socket_set_nonblock(sock); } /* connect to peer */ do { rc = 0; if (connect(sock, e->ai_addr, e->ai_addrlen) < 0) { rc = -socket_error(); } } while (rc == -EINTR); #ifdef _WIN32 if (!block && (rc == -EINPROGRESS || rc == -EWOULDBLOCK 
|| rc == -WSAEALREADY)) { #else if (!block && (rc == -EINPROGRESS)) { #endif if (in_progress) { *in_progress = true; } error_set(errp, QERR_SOCKET_CONNECT_IN_PROGRESS); } else if (rc < 0) { if (NULL == e->ai_next) fprintf(stderr, \"%s: connect(%s,%s,%s,%s): %s\\n\", __FUNCTION__, inet_strfamily(e->ai_family), e->ai_canonname, uaddr, uport, strerror(errno)); closesocket(sock); continue; } freeaddrinfo(res); return sock; } error_set(errp, QERR_SOCKET_CONNECT_FAILED); freeaddrinfo(res); return -1; }"} {"target": 1, "idx": 3560, "func": "int ff_rv34_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; RV34DecContext *r = avctx->priv_data; MpegEncContext *s = &r->s; AVFrame *pict = data; SliceInfo si; int i, ret; int slice_count; const uint8_t *slices_hdr = NULL; int last = 0; /* no supplementary picture */ if (buf_size == 0) { /* special case for last picture */ if (s->low_delay==0 && s->next_picture_ptr) { if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0) return ret; s->next_picture_ptr = NULL; *got_picture_ptr = 1; } return 0; } if(!avctx->slice_count){ slice_count = (*buf++) + 1; slices_hdr = buf + 4; buf += 8 * slice_count; buf_size -= 1 + 8 * slice_count; }else slice_count = avctx->slice_count; //parse first slice header to check whether this frame can be decoded if(get_slice_offset(avctx, slices_hdr, 0) < 0 || get_slice_offset(avctx, slices_hdr, 0) > buf_size){ av_log(avctx, AV_LOG_ERROR, \"Slice offset is invalid\\n\"); return AVERROR_INVALIDDATA; } init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, 0), (buf_size-get_slice_offset(avctx, slices_hdr, 0))*8); if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){ av_log(avctx, AV_LOG_ERROR, \"First slice header is incorrect\\n\"); return AVERROR_INVALIDDATA; } if ((!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) && si.type == AV_PICTURE_TYPE_B) { av_log(avctx, AV_LOG_ERROR, \"Invalid decoder state: B-frame without \" \"reference data.\\n\"); return AVERROR_INVALIDDATA; } if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B) || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) return avpkt->size; /* first slice */ if (si.start == 0) { if (s->mb_num_left > 0) { av_log(avctx, AV_LOG_ERROR, \"New frame but still %d MB left.\", s->mb_num_left); ff_er_frame_end(&s->er); ff_MPV_frame_end(s); } if (s->width != si.width || s->height != si.height) { int err; av_log(s->avctx, AV_LOG_WARNING, \"Changing dimensions to %dx%d\\n\", si.width, si.height); s->width = si.width; s->height = si.height; err = ff_set_dimensions(s->avctx, s->width, s->height); if (err < 0) return err; if ((err = ff_MPV_common_frame_size_change(s)) < 0) return err; if ((err = rv34_decoder_realloc(r)) < 0) return err; } s->pict_type = si.type ? 
si.type : AV_PICTURE_TYPE_I; if (ff_MPV_frame_start(s, s->avctx) < 0) return -1; ff_mpeg_er_frame_start(s); if (!r->tmp_b_block_base) { int i; r->tmp_b_block_base = av_malloc(s->linesize * 48); for (i = 0; i < 2; i++) r->tmp_b_block_y[i] = r->tmp_b_block_base + i * 16 * s->linesize; for (i = 0; i < 4; i++) r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize + (i >> 1) * 8 * s->uvlinesize + (i & 1) * 16; } r->cur_pts = si.pts; if (s->pict_type != AV_PICTURE_TYPE_B) { r->last_pts = r->next_pts; r->next_pts = r->cur_pts; } else { int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts); int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts); int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts); if(!refdist){ r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192; r->scaled_weight = 0; }else{ r->mv_weight1 = (dist0 << 14) / refdist; r->mv_weight2 = (dist1 << 14) / refdist; if((r->mv_weight1|r->mv_weight2) & 511){ r->weight1 = r->mv_weight1; r->weight2 = r->mv_weight2; r->scaled_weight = 0; }else{ r->weight1 = r->mv_weight1 >> 9; r->weight2 = r->mv_weight2 >> 9; r->scaled_weight = 1; } } } s->mb_x = s->mb_y = 0; ff_thread_finish_setup(s->avctx); } else if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) { av_log(s->avctx, AV_LOG_ERROR, \"Decoder needs full frames in frame \" \"multithreading mode (start MB is %d).\\n\", si.start); return AVERROR_INVALIDDATA; } for(i = 0; i < slice_count; i++){ int offset = get_slice_offset(avctx, slices_hdr, i); int size; if(i+1 == slice_count) size = buf_size - offset; else size = get_slice_offset(avctx, slices_hdr, i+1) - offset; if(offset < 0 || offset > buf_size){ av_log(avctx, AV_LOG_ERROR, \"Slice offset is invalid\\n\"); break; } r->si.end = s->mb_width * s->mb_height; s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start; if(i+1 < slice_count){ if (get_slice_offset(avctx, slices_hdr, i+1) < 0 || get_slice_offset(avctx, slices_hdr, i+1) > buf_size) { av_log(avctx, AV_LOG_ERROR, \"Slice offset is invalid\\n\"); break; } init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, i+1), (buf_size-get_slice_offset(avctx, slices_hdr, i+1))*8); if(r->parse_slice_header(r, &r->s.gb, &si) < 0){ if(i+2 < slice_count) size = get_slice_offset(avctx, slices_hdr, i+2) - offset; else size = buf_size - offset; }else r->si.end = si.start; } if (size < 0 || size > buf_size - offset) { av_log(avctx, AV_LOG_ERROR, \"Slice size is invalid\\n\"); break; } last = rv34_decode_slice(r, r->si.end, buf + offset, size); if(last) break; } if (s->current_picture_ptr) { if (last) { if(r->loop_filter) r->loop_filter(r, s->mb_height - 1); ret = finish_frame(avctx, pict); if (ret < 0) return ret; *got_picture_ptr = ret; } else if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) { av_log(avctx, AV_LOG_INFO, \"marking unfished frame as finished\\n\"); /* always mark the current frame as finished, frame-mt supports * only complete frames */ ff_er_frame_end(&s->er); ff_MPV_frame_end(s); s->mb_num_left = 0; ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0); return AVERROR_INVALIDDATA; } } return avpkt->size; }"} {"target": 0, "idx": 3566, "func": "static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s) { int i, ret = 1; char filename[1000]; const char *base[3] = { getenv(\"AVCONV_DATADIR\"), getenv(\"HOME\"), AVCONV_DATADIR, }; for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) { if (!base[i]) continue; if (codec_name) { snprintf(filename, sizeof(filename), \"%s%s/%s-%s.avpreset\", 
base[i], i != 1 ? \"\" : \"/.avconv\", codec_name, preset_name); ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL); } if (ret) { snprintf(filename, sizeof(filename), \"%s%s/%s.avpreset\", base[i], i != 1 ? \"\" : \"/.avconv\", preset_name); ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL); } } return ret; }"} {"target": 0, "idx": 3568, "func": "static int mov_write_mvhd_tag(ByteIOContext *pb, MOVContext *mov) { int maxTrackID = 1, i; int64_t maxTrackLenTemp, maxTrackLen = 0; int version; for (i=0; itracks[i].entry > 0) { maxTrackLenTemp = av_rescale_rnd(mov->tracks[i].trackDuration, globalTimescale, mov->tracks[i].timescale, AV_ROUND_UP); if(maxTrackLen < maxTrackLenTemp) maxTrackLen = maxTrackLenTemp; if(maxTrackID < mov->tracks[i].trackID) maxTrackID = mov->tracks[i].trackID; } } version = maxTrackLen < UINT32_MAX ? 0 : 1; (version == 1) ? put_be32(pb, 120) : put_be32(pb, 108); /* size */ put_tag(pb, \"mvhd\"); put_byte(pb, version); put_be24(pb, 0); /* flags */ if (version == 1) { put_be64(pb, mov->time); put_be64(pb, mov->time); } else { put_be32(pb, mov->time); /* creation time */ put_be32(pb, mov->time); /* modification time */ } put_be32(pb, mov->timescale); /* timescale */ (version == 1) ? put_be64(pb, maxTrackLen) : put_be32(pb, maxTrackLen); /* duration of longest track */ put_be32(pb, 0x00010000); /* reserved (preferred rate) 1.0 = normal */ put_be16(pb, 0x0100); /* reserved (preferred volume) 1.0 = normal */ put_be16(pb, 0); /* reserved */ put_be32(pb, 0); /* reserved */ put_be32(pb, 0); /* reserved */ /* Matrix structure */ put_be32(pb, 0x00010000); /* reserved */ put_be32(pb, 0x0); /* reserved */ put_be32(pb, 0x0); /* reserved */ put_be32(pb, 0x0); /* reserved */ put_be32(pb, 0x00010000); /* reserved */ put_be32(pb, 0x0); /* reserved */ put_be32(pb, 0x0); /* reserved */ put_be32(pb, 0x0); /* reserved */ put_be32(pb, 0x40000000); /* reserved */ put_be32(pb, 0); /* reserved (preview time) */ put_be32(pb, 0); /* reserved (preview duration) */ put_be32(pb, 0); /* reserved (poster time) */ put_be32(pb, 0); /* reserved (selection time) */ put_be32(pb, 0); /* reserved (selection duration) */ put_be32(pb, 0); /* reserved (current time) */ put_be32(pb, maxTrackID+1); /* Next track id */ return 0x6c; }"} {"target": 0, "idx": 3569, "func": "static void apply_tns(float coef[1024], TemporalNoiseShaping *tns, IndividualChannelStream *ics, int decode) { const int mmm = FFMIN(ics->tns_max_bands, ics->max_sfb); int w, filt, m, i; int bottom, top, order, start, end, size, inc; float lpc[TNS_MAX_ORDER]; float tmp[TNS_MAX_ORDER]; for (w = 0; w < ics->num_windows; w++) { bottom = ics->num_swb; for (filt = 0; filt < tns->n_filt[w]; filt++) { top = bottom; bottom = FFMAX(0, top - tns->length[w][filt]); order = tns->order[w][filt]; if (order == 0) continue; // tns_decode_coef compute_lpc_coefs(tns->coef[w][filt], order, lpc, 0, 0, 0); start = ics->swb_offset[FFMIN(bottom, mmm)]; end = ics->swb_offset[FFMIN( top, mmm)]; if ((size = end - start) <= 0) continue; if (tns->direction[w][filt]) { inc = -1; start = end - 1; } else { inc = 1; } start += w * 128; if (decode) { // ar filter for (m = 0; m < size; m++, start += inc) for (i = 1; i <= FFMIN(m, order); i++) coef[start] -= coef[start - i * inc] * lpc[i - 1]; } else { // ma filter for (m = 0; m < size; m++, start += inc) { tmp[0] = coef[start]; for (i = 1; i <= FFMIN(m, order); i++) coef[start] += tmp[i] * lpc[i - 1]; for (i = order; i > 0; i--) tmp[i] = tmp[i - 1]; } } } } }"} {"target": 0, "idx": 3585, "func": "static int 
g2m_init_buffers(G2MContext *c) { int aligned_height; if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) { c->framebuf_stride = FFALIGN(c->width * 3, 16); aligned_height = FFALIGN(c->height, 16); av_free(c->framebuf); c->framebuf = av_mallocz(c->framebuf_stride * aligned_height); if (!c->framebuf) return AVERROR(ENOMEM); } if (!c->synth_tile || !c->jpeg_tile || c->old_tile_w < c->tile_width || c->old_tile_h < c->tile_height) { c->tile_stride = FFALIGN(c->tile_width * 3, 16); aligned_height = FFALIGN(c->tile_height, 16); av_free(c->synth_tile); av_free(c->jpeg_tile); av_free(c->kempf_buf); av_free(c->kempf_flags); c->synth_tile = av_mallocz(c->tile_stride * aligned_height); c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height); c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height + FF_INPUT_BUFFER_PADDING_SIZE); c->kempf_flags = av_mallocz( c->tile_width * aligned_height); if (!c->synth_tile || !c->jpeg_tile || !c->kempf_buf || !c->kempf_flags) return AVERROR(ENOMEM); } return 0; }"} {"target": 1, "idx": 3600, "func": "static uint32_t rtl8139_io_readb(void *opaque, uint8_t addr) { RTL8139State *s = opaque; int ret; addr &= 0xff; switch (addr) { case MAC0 ... MAC0+5: ret = s->phys[addr - MAC0]; break; case MAC0+6 ... MAC0+7: ret = 0; break; case MAR0 ... MAR0+7: ret = s->mult[addr - MAR0]; break; case ChipCmd: ret = rtl8139_ChipCmd_read(s); break; case Cfg9346: ret = rtl8139_Cfg9346_read(s); break; case Config0: ret = rtl8139_Config0_read(s); break; case Config1: ret = rtl8139_Config1_read(s); break; case Config3: ret = rtl8139_Config3_read(s); break; case Config4: ret = rtl8139_Config4_read(s); break; case Config5: ret = rtl8139_Config5_read(s); break; case MediaStatus: ret = 0xd0; DPRINTF(\"MediaStatus read 0x%x\\n\", ret); break; case HltClk: ret = s->clock_enabled; DPRINTF(\"HltClk read 0x%x\\n\", ret); break; case PCIRevisionID: ret = RTL8139_PCI_REVID; DPRINTF(\"PCI Revision ID read 0x%x\\n\", ret); break; case TxThresh: ret = s->TxThresh; DPRINTF(\"C+ TxThresh read(b) val=0x%02x\\n\", ret); break; case 0x43: /* Part of TxConfig register. 
Windows driver tries to read it */ ret = s->TxConfig >> 24; DPRINTF(\"RTL8139C TxConfig at 0x43 read(b) val=0x%02x\\n\", ret); break; default: DPRINTF(\"not implemented read(b) addr=0x%x\\n\", addr); ret = 0; break; } return ret; }"} {"target": 0, "idx": 3616, "func": "int ff_mpv_common_frame_size_change(MpegEncContext *s) { int i, err = 0; if (!s->context_initialized) return AVERROR(EINVAL); if (s->slice_context_count > 1) { for (i = 0; i < s->slice_context_count; i++) { free_duplicate_context(s->thread_context[i]); } for (i = 1; i < s->slice_context_count; i++) { av_freep(&s->thread_context[i]); } } else free_duplicate_context(s); free_context_frame(s); if (s->picture) for (i = 0; i < MAX_PICTURE_COUNT; i++) { s->picture[i].needs_realloc = 1; } s->last_picture_ptr = s->next_picture_ptr = s->current_picture_ptr = NULL; // init if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence) s->mb_height = (s->height + 31) / 32 * 2; else s->mb_height = (s->height + 15) / 16; if ((s->width || s->height) && (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0) goto fail; if ((err = init_context_frame(s))) goto fail; s->thread_context[0] = s; if (s->width && s->height) { int nb_slices = s->slice_context_count; if (nb_slices > 1) { for (i = 1; i < nb_slices; i++) { s->thread_context[i] = av_malloc(sizeof(MpegEncContext)); memcpy(s->thread_context[i], s, sizeof(MpegEncContext)); } for (i = 0; i < nb_slices; i++) { if ((err = init_duplicate_context(s->thread_context[i])) < 0) goto fail; s->thread_context[i]->start_mb_y = (s->mb_height * (i) + nb_slices / 2) / nb_slices; s->thread_context[i]->end_mb_y = (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices; } } else { err = init_duplicate_context(s); if (err < 0) goto fail; s->start_mb_y = 0; s->end_mb_y = s->mb_height; } s->slice_context_count = nb_slices; } return 0; fail: ff_mpv_common_end(s); return err; }"} {"target": 1, "idx": 3619, "func": "void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width){ const int w2= (width+1)>>1; DWTELEM temp[width >> 1]; const int w_l= (width>>1); const int w_r= w2 - 1; int i; { // Lift 0 DWTELEM * const ref = b + w2 - 1; i = 1; b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS); asm volatile( \"pcmpeqd %%mm7, %%mm7 \\n\\t\" \"pslld $31, %%mm7 \\n\\t\" \"psrld $29, %%mm7 \\n\\t\" ::); for(; i> W_BS); asm volatile( \"pslld $1, %%mm7 \\n\\t\" ::); for(; i>1]; b[i] = b[i>>1]; } for (i-=14; i>=0; i-=16){ asm volatile( \"movq (%1), %%mm0 \\n\\t\" \"movq 8(%1), %%mm2 \\n\\t\" \"movq 16(%1), %%mm4 \\n\\t\" \"movq 24(%1), %%mm6 \\n\\t\" \"movq (%1), %%mm1 \\n\\t\" \"movq 8(%1), %%mm3 \\n\\t\" \"movq 16(%1), %%mm5 \\n\\t\" \"movq 24(%1), %%mm7 \\n\\t\" \"punpckldq (%2), %%mm0 \\n\\t\" \"punpckldq 8(%2), %%mm2 \\n\\t\" \"punpckldq 16(%2), %%mm4 \\n\\t\" \"punpckldq 24(%2), %%mm6 \\n\\t\" \"movq %%mm0, (%0) \\n\\t\" \"movq %%mm2, 16(%0) \\n\\t\" \"movq %%mm4, 32(%0) \\n\\t\" \"movq %%mm6, 48(%0) \\n\\t\" \"punpckhdq (%2), %%mm1 \\n\\t\" \"punpckhdq 8(%2), %%mm3 \\n\\t\" \"punpckhdq 16(%2), %%mm5 \\n\\t\" \"punpckhdq 24(%2), %%mm7 \\n\\t\" \"movq %%mm1, 8(%0) \\n\\t\" \"movq %%mm3, 24(%0) \\n\\t\" \"movq %%mm5, 40(%0) \\n\\t\" \"movq %%mm7, 56(%0) \\n\\t\" :: \"r\"(&b[i]), \"r\"(&b[i>>1]), \"r\"(&temp[i>>1]) : \"memory\" ); } } }"} {"target": 0, "idx": 3620, "func": "static int oma_read_packet(AVFormatContext *s, AVPacket *pkt) { OMAContext *oc = s->priv_data; int packet_size = s->streams[0]->codec->block_align; int ret = av_get_packet(s->pb, pkt, packet_size); if (ret <= 0) return AVERROR(EIO); 
pkt->stream_index = 0; if (oc->encrypted) { /* previous unencrypted block saved in IV for * the next packet (CBC mode) */ av_des_crypt(&oc->av_des, pkt->data, pkt->data, (packet_size >> 3), oc->iv, 1); } return ret; }"} {"target": 0, "idx": 3637, "func": "static int rtsp_fetch_packet(AVFormatContext *s, AVPacket *pkt) { RTSPState *rt = s->priv_data; int ret, len; uint8_t buf[10 * RTP_MAX_PACKET_LENGTH]; RTSPStream *rtsp_st; /* get next frames from the same RTP packet */ if (rt->cur_transport_priv) { if (rt->transport == RTSP_TRANSPORT_RDT) ret = ff_rdt_parse_packet(rt->cur_transport_priv, pkt, NULL, 0); else ret = rtp_parse_packet(rt->cur_transport_priv, pkt, NULL, 0); if (ret == 0) { rt->cur_transport_priv = NULL; return 0; } else if (ret == 1) { return 0; } else { rt->cur_transport_priv = NULL; } } /* read next RTP packet */ redo: switch(rt->lower_transport) { default: #if CONFIG_RTSP_DEMUXER case RTSP_LOWER_TRANSPORT_TCP: len = tcp_read_packet(s, &rtsp_st, buf, sizeof(buf)); break; #endif case RTSP_LOWER_TRANSPORT_UDP: case RTSP_LOWER_TRANSPORT_UDP_MULTICAST: len = udp_read_packet(s, &rtsp_st, buf, sizeof(buf)); if (len >=0 && rtsp_st->transport_priv && rt->transport == RTSP_TRANSPORT_RTP) rtp_check_and_send_back_rr(rtsp_st->transport_priv, len); break; } if (len < 0) return len; if (len == 0) return AVERROR_EOF; if (rt->transport == RTSP_TRANSPORT_RDT) ret = ff_rdt_parse_packet(rtsp_st->transport_priv, pkt, buf, len); else ret = rtp_parse_packet(rtsp_st->transport_priv, pkt, buf, len); if (ret < 0) goto redo; if (ret == 1) { /* more packets may follow, so we save the RTP context */ rt->cur_transport_priv = rtsp_st->transport_priv; } return ret; }"} {"target": 0, "idx": 3638, "func": "rtsp_open_transport_ctx(AVFormatContext *s, RTSPStream *rtsp_st) { RTSPState *rt = s->priv_data; AVStream *st = NULL; /* open the RTP context */ if (rtsp_st->stream_index >= 0) st = s->streams[rtsp_st->stream_index]; if (!st) s->ctx_flags |= AVFMTCTX_NOHEADER; if (rt->transport == RTSP_TRANSPORT_RDT) rtsp_st->transport_priv = ff_rdt_parse_open(s, st->index, rtsp_st->dynamic_protocol_context, rtsp_st->dynamic_handler); else rtsp_st->transport_priv = rtp_parse_open(s, st, rtsp_st->rtp_handle, rtsp_st->sdp_payload_type, &rtsp_st->rtp_payload_data); if (!rtsp_st->transport_priv) { return AVERROR(ENOMEM); } else if (rt->transport != RTSP_TRANSPORT_RDT) { if(rtsp_st->dynamic_handler) { rtp_parse_set_dynamic_protocol(rtsp_st->transport_priv, rtsp_st->dynamic_protocol_context, rtsp_st->dynamic_handler); } } return 0; }"} {"target": 0, "idx": 3662, "func": "void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt) { uint16_t csum; uint32_t ph_raw_csum; assert(pkt); uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN; struct ip_header *ip_hdr; if (VIRTIO_NET_HDR_GSO_TCPV4 != gso_type && VIRTIO_NET_HDR_GSO_UDP != gso_type) { return; } ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base; if (pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len > ETH_MAX_IP_DGRAM_LEN) { return; } ip_hdr->ip_len = cpu_to_be16(pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len); /* Calculate IP header checksum */ ip_hdr->ip_sum = 0; csum = net_raw_checksum((uint8_t *)ip_hdr, pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len); ip_hdr->ip_sum = cpu_to_be16(csum); /* Calculate IP pseudo header checksum */ ph_raw_csum = eth_calc_pseudo_hdr_csum(ip_hdr, pkt->payload_len); csum = cpu_to_be16(~net_checksum_finish(ph_raw_csum)); iov_from_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags, pkt->virt_hdr.csum_offset, 
&csum, sizeof(csum)); }"} {"target": 0, "idx": 3678, "func": "void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample) { if ((avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) { const int shift = 1 + quarter_sample; const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1; const int mv_stride = (mb_width << mv_sample_log2) + (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1); int mb_x, mb_y, mbcount = 0; /* size is width * height * 2 * 4 where 2 is for directions and 4 is * for the maximum number of MB (4 MB in case of IS_8x8) */ AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector)); if (!mvs) return; for (mb_y = 0; mb_y < mb_height; mb_y++) { for (mb_x = 0; mb_x < mb_width; mb_x++) { int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride]; for (direction = 0; direction < 2; direction++) { if (!USES_LIST(mb_type, direction)) continue; if (IS_8X8(mb_type)) { for (i = 0; i < 4; i++) { int sx = mb_x * 16 + 4 + 8 * (i & 1); int sy = mb_y * 16 + 4 + 8 * (i >> 1); int xy = (mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1); int mx = (motion_val[direction][xy][0] >> shift) + sx; int my = (motion_val[direction][xy][1] >> shift) + sy; mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction); } } else if (IS_16X8(mb_type)) { for (i = 0; i < 2; i++) { int sx = mb_x * 16 + 8; int sy = mb_y * 16 + 4 + 8 * i; int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1); int mx = (motion_val[direction][xy][0] >> shift); int my = (motion_val[direction][xy][1] >> shift); if (IS_INTERLACED(mb_type)) my *= 2; mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction); } } else if (IS_8X16(mb_type)) { for (i = 0; i < 2; i++) { int sx = mb_x * 16 + 4 + 8 * i; int sy = mb_y * 16 + 8; int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1); int mx = motion_val[direction][xy][0] >> shift; int my = motion_val[direction][xy][1] >> shift; if (IS_INTERLACED(mb_type)) my *= 2; mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction); } } else { int sx = mb_x * 16 + 8; int sy = mb_y * 16 + 8; int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2; int mx = (motion_val[direction][xy][0]>>shift) + sx; int my = (motion_val[direction][xy][1]>>shift) + sy; mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction); } } } } if (mbcount) { AVFrameSideData *sd; av_log(avctx, AV_LOG_DEBUG, \"Adding %d MVs info to frame %d\\n\", mbcount, avctx->frame_number); sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector)); if (!sd) { av_freep(&mvs); return; } memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector)); } av_freep(&mvs); } /* TODO: export all the following to make them accessible for users (and filters) */ if (avctx->hwaccel || !mbtype_table #if FF_API_CAP_VDPAU || (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) #endif ) return; if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) { int x,y; av_log(avctx, AV_LOG_DEBUG, \"New frame, type: %c\\n\", av_get_picture_type_char(pict->pict_type)); for (y = 0; y < mb_height; y++) { for (x = 0; x < mb_width; x++) { if (avctx->debug & FF_DEBUG_SKIP) { int count = mbskip_table ? 
mbskip_table[x + y * mb_stride] : 0; if (count > 9) count = 9; av_log(avctx, AV_LOG_DEBUG, \"%1d\", count); } if (avctx->debug & FF_DEBUG_QP) { av_log(avctx, AV_LOG_DEBUG, \"%2d\", qscale_table[x + y * mb_stride]); } if (avctx->debug & FF_DEBUG_MB_TYPE) { int mb_type = mbtype_table[x + y * mb_stride]; // Type & MV direction if (IS_PCM(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"P\"); else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"A\"); else if (IS_INTRA4x4(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"i\"); else if (IS_INTRA16x16(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"I\"); else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"d\"); else if (IS_DIRECT(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"D\"); else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"g\"); else if (IS_GMC(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"G\"); else if (IS_SKIP(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"S\"); else if (!USES_LIST(mb_type, 1)) av_log(avctx, AV_LOG_DEBUG, \">\"); else if (!USES_LIST(mb_type, 0)) av_log(avctx, AV_LOG_DEBUG, \"<\"); else { av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1)); av_log(avctx, AV_LOG_DEBUG, \"X\"); } // segmentation if (IS_8X8(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"+\"); else if (IS_16X8(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"-\"); else if (IS_8X16(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"|\"); else if (IS_INTRA(mb_type) || IS_16X16(mb_type)) av_log(avctx, AV_LOG_DEBUG, \" \"); else av_log(avctx, AV_LOG_DEBUG, \"?\"); if (IS_INTERLACED(mb_type)) av_log(avctx, AV_LOG_DEBUG, \"=\"); else av_log(avctx, AV_LOG_DEBUG, \" \"); } } av_log(avctx, AV_LOG_DEBUG, \"\\n\"); } } if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) || (avctx->debug_mv)) { int mb_y; int i; int h_chroma_shift, v_chroma_shift, block_height; #if FF_API_VISMV const int shift = 1 + quarter_sample; uint8_t *ptr; const int width = avctx->width; const int height = avctx->height; #endif const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1; const int mv_stride = (mb_width << mv_sample_log2) + (avctx->codec->id == AV_CODEC_ID_H264 ? 
0 : 1); *low_delay = 0; // needed to see the vectors without trashing the buffers avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); av_frame_make_writable(pict); pict->opaque = NULL; #if FF_API_VISMV ptr = pict->data[0]; #endif block_height = 16 >> v_chroma_shift; for (mb_y = 0; mb_y < mb_height; mb_y++) { int mb_x; for (mb_x = 0; mb_x < mb_width; mb_x++) { const int mb_index = mb_x + mb_y * mb_stride; #if FF_API_VISMV if ((avctx->debug_mv) && motion_val[0]) { int type; for (type = 0; type < 3; type++) { int direction = 0; switch (type) { case 0: if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!= AV_PICTURE_TYPE_P)) continue; direction = 0; break; case 1: if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!= AV_PICTURE_TYPE_B)) continue; direction = 0; break; case 2: if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!= AV_PICTURE_TYPE_B)) continue; direction = 1; break; } if (!USES_LIST(mbtype_table[mb_index], direction)) continue; if (IS_8X8(mbtype_table[mb_index])) { int i; for (i = 0; i < 4; i++) { int sx = mb_x * 16 + 4 + 8 * (i & 1); int sy = mb_y * 16 + 4 + 8 * (i >> 1); int xy = (mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1); int mx = (motion_val[direction][xy][0] >> shift) + sx; int my = (motion_val[direction][xy][1] >> shift) + sy; draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction); } } else if (IS_16X8(mbtype_table[mb_index])) { int i; for (i = 0; i < 2; i++) { int sx = mb_x * 16 + 8; int sy = mb_y * 16 + 4 + 8 * i; int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1); int mx = (motion_val[direction][xy][0] >> shift); int my = (motion_val[direction][xy][1] >> shift); if (IS_INTERLACED(mbtype_table[mb_index])) my *= 2; draw_arrow(ptr, sx, sy, mx + sx, my + sy, width, height, pict->linesize[0], 100, 0, direction); } } else if (IS_8X16(mbtype_table[mb_index])) { int i; for (i = 0; i < 2; i++) { int sx = mb_x * 16 + 4 + 8 * i; int sy = mb_y * 16 + 8; int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1); int mx = motion_val[direction][xy][0] >> shift; int my = motion_val[direction][xy][1] >> shift; if (IS_INTERLACED(mbtype_table[mb_index])) my *= 2; draw_arrow(ptr, sx, sy, mx + sx, my + sy, width, height, pict->linesize[0], 100, 0, direction); } } else { int sx= mb_x * 16 + 8; int sy= mb_y * 16 + 8; int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2; int mx= (motion_val[direction][xy][0]>>shift) + sx; int my= (motion_val[direction][xy][1]>>shift) + sy; draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction); } } } #endif if ((avctx->debug & FF_DEBUG_VIS_QP)) { uint64_t c = (qscale_table[mb_index] * 128 / 31) * 0x0101010101010101ULL; int y; for (y = 0; y < block_height; y++) { *(uint64_t *)(pict->data[1] + 8 * mb_x + (block_height * mb_y + y) * pict->linesize[1]) = c; *(uint64_t *)(pict->data[2] + 8 * mb_x + (block_height * mb_y + y) * pict->linesize[2]) = c; } } if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) && motion_val[0]) { int mb_type = mbtype_table[mb_index]; uint64_t u,v; int y; #define COLOR(theta, r) \\ u = (int)(128 + r * cos(theta * 3.141592 / 180)); \\ v = (int)(128 + r * sin(theta * 3.141592 / 180)); u = v = 128; if (IS_PCM(mb_type)) { COLOR(120, 48) } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)) { COLOR(30, 48) } else if (IS_INTRA4x4(mb_type)) { COLOR(90, 48) } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) { // COLOR(120, 
48) } else if (IS_DIRECT(mb_type)) { COLOR(150, 48) } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) { COLOR(170, 48) } else if (IS_GMC(mb_type)) { COLOR(190, 48) } else if (IS_SKIP(mb_type)) { // COLOR(180, 48) } else if (!USES_LIST(mb_type, 1)) { COLOR(240, 48) } else if (!USES_LIST(mb_type, 0)) { COLOR(0, 48) } else { av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1)); COLOR(300,48) } u *= 0x0101010101010101ULL; v *= 0x0101010101010101ULL; for (y = 0; y < block_height; y++) { *(uint64_t *)(pict->data[1] + 8 * mb_x + (block_height * mb_y + y) * pict->linesize[1]) = u; *(uint64_t *)(pict->data[2] + 8 * mb_x + (block_height * mb_y + y) * pict->linesize[2]) = v; } // segmentation if (IS_8X8(mb_type) || IS_16X8(mb_type)) { *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 + (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL; *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 + (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL; } if (IS_8X8(mb_type) || IS_8X16(mb_type)) { for (y = 0; y < 16; y++) pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) * pict->linesize[0]] ^= 0x80; } if (IS_8X8(mb_type) && mv_sample_log2 >= 2) { int dm = 1 << (mv_sample_log2 - 2); for (i = 0; i < 4; i++) { int sx = mb_x * 16 + 8 * (i & 1); int sy = mb_y * 16 + 8 * (i >> 1); int xy = (mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1); // FIXME bidir int32_t *mv = (int32_t *) &motion_val[0][xy]; if (mv[0] != mv[dm] || mv[dm * mv_stride] != mv[dm * (mv_stride + 1)]) for (y = 0; y < 8; y++) pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80; if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)]) *(uint64_t *)(pict->data[0] + sx + (sy + 4) * pict->linesize[0]) ^= 0x8080808080808080ULL; } } if (IS_INTERLACED(mb_type) && avctx->codec->id == AV_CODEC_ID_H264) { // hmm } } if (mbskip_table) mbskip_table[mb_index] = 0; } } } }"} {"target": 1, "idx": 3690, "func": "static void alloc_picture(void *opaque) { VideoState *is = opaque; VideoPicture *vp; vp = &is->pictq[is->pictq_windex]; if (vp->bmp) SDL_FreeYUVOverlay(vp->bmp); #if CONFIG_AVFILTER if (vp->picref) avfilter_unref_buffer(vp->picref); vp->picref = NULL; vp->width = is->out_video_filter->inputs[0]->w; vp->height = is->out_video_filter->inputs[0]->h; vp->pix_fmt = is->out_video_filter->inputs[0]->format; #else vp->width = is->video_st->codec->width; vp->height = is->video_st->codec->height; vp->pix_fmt = is->video_st->codec->pix_fmt; #endif vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height, SDL_YV12_OVERLAY, screen); SDL_LockMutex(is->pictq_mutex); vp->allocated = 1; SDL_CondSignal(is->pictq_cond); SDL_UnlockMutex(is->pictq_mutex);"} {"target": 1, "idx": 3698, "func": "int css_create_css_image(uint8_t cssid, bool default_image) { trace_css_new_image(cssid, default_image ? 
\"(default)\" : \"\"); if (cssid > MAX_CSSID) { return -EINVAL; } if (channel_subsys.css[cssid]) { return -EBUSY; } channel_subsys.css[cssid] = g_malloc0(sizeof(CssImage)); if (default_image) { channel_subsys.default_cssid = cssid; } return 0; }"} {"target": 0, "idx": 3738, "func": "static int decode_nal_unit(HEVCContext *s, const HEVCNAL *nal) { HEVCLocalContext *lc = &s->HEVClc; GetBitContext *gb = &lc->gb; int ctb_addr_ts, ret; ret = init_get_bits8(gb, nal->data, nal->size); if (ret < 0) return ret; ret = hls_nal_unit(s); if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid NAL unit %d, skipping.\\n\", s->nal_unit_type); goto fail; } else if (!ret) return 0; switch (s->nal_unit_type) { case NAL_VPS: ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case NAL_SPS: ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps, s->apply_defdispwin); if (ret < 0) goto fail; break; case NAL_PPS: ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case NAL_SEI_PREFIX: case NAL_SEI_SUFFIX: ret = ff_hevc_decode_nal_sei(s); if (ret < 0) goto fail; break; case NAL_TRAIL_R: case NAL_TRAIL_N: case NAL_TSA_N: case NAL_TSA_R: case NAL_STSA_N: case NAL_STSA_R: case NAL_BLA_W_LP: case NAL_BLA_W_RADL: case NAL_BLA_N_LP: case NAL_IDR_W_RADL: case NAL_IDR_N_LP: case NAL_CRA_NUT: case NAL_RADL_N: case NAL_RADL_R: case NAL_RASL_N: case NAL_RASL_R: ret = hls_slice_header(s); if (ret < 0) return ret; if (s->max_ra == INT_MAX) { if (s->nal_unit_type == NAL_CRA_NUT || IS_BLA(s)) { s->max_ra = s->poc; } else { if (IS_IDR(s)) s->max_ra = INT_MIN; } } if ((s->nal_unit_type == NAL_RASL_R || s->nal_unit_type == NAL_RASL_N) && s->poc <= s->max_ra) { s->is_decoded = 0; break; } else { if (s->nal_unit_type == NAL_RASL_R && s->poc > s->max_ra) s->max_ra = INT_MIN; } if (s->sh.first_slice_in_pic_flag) { ret = hevc_frame_start(s); if (ret < 0) return ret; } else if (!s->ref) { av_log(s->avctx, AV_LOG_ERROR, \"First slice in a frame missing.\\n\"); goto fail; } if (s->nal_unit_type != s->first_nal_type) { av_log(s->avctx, AV_LOG_ERROR, \"Non-matching NAL types of the VCL NALUs: %d %d\\n\", s->first_nal_type, s->nal_unit_type); return AVERROR_INVALIDDATA; } if (!s->sh.dependent_slice_segment_flag && s->sh.slice_type != I_SLICE) { ret = ff_hevc_slice_rpl(s); if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, \"Error constructing the reference lists for the current slice.\\n\"); goto fail; } } if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) { ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0); if (ret < 0) goto fail; } if (s->avctx->hwaccel) { ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } else { ctb_addr_ts = hls_slice_data(s); if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) { s->is_decoded = 1; if ((s->ps.pps->transquant_bypass_enable_flag || (s->ps.sps->pcm.loop_filter_disable_flag && s->ps.sps->pcm_enabled_flag)) && s->ps.sps->sao_enabled) restore_tqb_pixels(s); } if (ctb_addr_ts < 0) { ret = ctb_addr_ts; goto fail; } } break; case NAL_EOS_NUT: case NAL_EOB_NUT: s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; break; case NAL_AUD: case NAL_FD_NUT: break; default: av_log(s->avctx, AV_LOG_INFO, \"Skipping NAL unit %d\\n\", s->nal_unit_type); } return 0; fail: if (s->avctx->err_recognition & AV_EF_EXPLODE) return ret; return 0; }"} {"target": 0, "idx": 3747, "func": "static void pxa2xx_pic_save(QEMUFile *f, void *opaque) { PXA2xxPICState *s = (PXA2xxPICState *) opaque; 
int i; for (i = 0; i < 2; i ++) qemu_put_be32s(f, &s->int_enabled[i]); for (i = 0; i < 2; i ++) qemu_put_be32s(f, &s->int_pending[i]); for (i = 0; i < 2; i ++) qemu_put_be32s(f, &s->is_fiq[i]); qemu_put_be32s(f, &s->int_idle); for (i = 0; i < PXA2XX_PIC_SRCS; i ++) qemu_put_be32s(f, &s->priority[i]); }"} {"target": 0, "idx": 3757, "func": "static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request, struct iovec *iov, int offset) { int rc, ret; qemu_co_mutex_lock(&s->send_mutex); s->send_coroutine = qemu_coroutine_self(); qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write, nbd_have_request, NULL, s); rc = nbd_send_request(s->sock, request); if (rc >= 0 && iov) { ret = qemu_co_sendv(s->sock, iov, request->len, offset); if (ret != request->len) { errno = -EIO; rc = -1; } } qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, nbd_have_request, NULL, s); s->send_coroutine = NULL; qemu_co_mutex_unlock(&s->send_mutex); return rc; }"} {"target": 0, "idx": 3761, "func": "void replay_save_input_event(InputEvent *evt) { InputKeyEvent *key; InputBtnEvent *btn; InputMoveEvent *move; replay_put_dword(evt->type); switch (evt->type) { case INPUT_EVENT_KIND_KEY: key = evt->u.key; replay_put_dword(key->key->type); switch (key->key->type) { case KEY_VALUE_KIND_NUMBER: replay_put_qword(key->key->u.number); replay_put_byte(key->down); break; case KEY_VALUE_KIND_QCODE: replay_put_dword(key->key->u.qcode); replay_put_byte(key->down); break; case KEY_VALUE_KIND__MAX: /* keep gcc happy */ break; } break; case INPUT_EVENT_KIND_BTN: btn = evt->u.btn; replay_put_dword(btn->button); replay_put_byte(btn->down); break; case INPUT_EVENT_KIND_REL: move = evt->u.rel; replay_put_dword(move->axis); replay_put_qword(move->value); break; case INPUT_EVENT_KIND_ABS: move = evt->u.abs; replay_put_dword(move->axis); replay_put_qword(move->value); break; case INPUT_EVENT_KIND__MAX: /* keep gcc happy */ break; } }"} {"target": 0, "idx": 3771, "func": "static void internal_snapshot_prepare(BlkActionState *common, Error **errp) { Error *local_err = NULL; const char *device; const char *name; BlockBackend *blk; BlockDriverState *bs; QEMUSnapshotInfo old_sn, *sn; bool ret; qemu_timeval tv; BlockdevSnapshotInternal *internal; InternalSnapshotState *state; int ret1; g_assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC); internal = common->action->u.blockdev_snapshot_internal_sync.data; state = DO_UPCAST(InternalSnapshotState, common, common); /* 1. parse input */ device = internal->device; name = internal->name; /* 2. 
check for validation */ if (action_check_completion_mode(common, errp) < 0) { return; } blk = blk_by_name(device); if (!blk) { error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, \"Device '%s' not found\", device); return; } /* AioContext is released in .clean() */ state->aio_context = blk_get_aio_context(blk); aio_context_acquire(state->aio_context); if (!blk_is_available(blk)) { error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); return; } bs = blk_bs(blk); state->bs = bs; bdrv_drained_begin(bs); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) { return; } if (bdrv_is_read_only(bs)) { error_setg(errp, \"Device '%s' is read only\", device); return; } if (!bdrv_can_snapshot(bs)) { error_setg(errp, \"Block format '%s' used by device '%s' \" \"does not support internal snapshots\", bs->drv->format_name, device); return; } if (!strlen(name)) { error_setg(errp, \"Name is empty\"); return; } /* check whether a snapshot with name exist */ ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn, &local_err); if (local_err) { error_propagate(errp, local_err); return; } else if (ret) { error_setg(errp, \"Snapshot with name '%s' already exists on device '%s'\", name, device); return; } /* 3. take the snapshot */ sn = &state->sn; pstrcpy(sn->name, sizeof(sn->name), name); qemu_gettimeofday(&tv); sn->date_sec = tv.tv_sec; sn->date_nsec = tv.tv_usec * 1000; sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); ret1 = bdrv_snapshot_create(bs, sn); if (ret1 < 0) { error_setg_errno(errp, -ret1, \"Failed to create snapshot '%s' on device '%s'\", name, device); return; } /* 4. succeed, mark a snapshot is created */ state->created = true; }"} {"target": 0, "idx": 3774, "func": "void qdev_set_parent_bus(DeviceState *dev, BusState *bus) { Property *prop; if (qdev_hotplug) { assert(bus->allow_hotplug); } dev->parent_bus = bus; QTAILQ_INSERT_HEAD(&bus->children, dev, sibling); qdev_prop_set_defaults(dev, dev->parent_bus->info->props); for (prop = qdev_get_bus_info(dev)->props; prop && prop->name; prop++) { qdev_property_add_legacy(dev, prop, NULL); qdev_property_add_static(dev, prop, NULL); } }"} {"target": 0, "idx": 3786, "func": "int object_property_get_enum(Object *obj, const char *name, const char *typename, Error **errp) { Error *err = NULL; StringOutputVisitor *sov; Visitor *v; char *str; int ret; ObjectProperty *prop = object_property_find(obj, name, errp); EnumProperty *enumprop; if (prop == NULL) { return 0; } if (!g_str_equal(prop->type, typename)) { error_setg(errp, \"Property %s on %s is not '%s' enum type\", name, object_class_get_name( object_get_class(obj)), typename); return 0; } enumprop = prop->opaque; sov = string_output_visitor_new(false); object_property_get(obj, string_output_get_visitor(sov), name, &err); if (err) { error_propagate(errp, err); string_output_visitor_cleanup(sov); return 0; } str = string_output_get_string(sov); string_output_visitor_cleanup(sov); v = string_input_visitor_new(str); visit_type_enum(v, name, &ret, enumprop->strings, errp); g_free(str); visit_free(v); return ret; }"} {"target": 0, "idx": 3789, "func": "static uint64_t omap_sti_read(void *opaque, target_phys_addr_t addr, unsigned size) { struct omap_sti_s *s = (struct omap_sti_s *) opaque; if (size != 4) { return omap_badwidth_read32(opaque, addr); } switch (addr) { case 0x00: /* STI_REVISION */ return 0x10; case 0x10: /* STI_SYSCONFIG */ return s->sysconfig; case 0x14: /* STI_SYSSTATUS / STI_RX_STATUS / XTI_SYSSTATUS */ return 0x00; case 0x18: /* STI_IRQSTATUS */ return s->irqst; case 
0x1c: /* STI_IRQSETEN / STI_IRQCLREN */ return s->irqen; case 0x24: /* STI_ER / STI_DR / XTI_TRACESELECT */ case 0x28: /* STI_RX_DR / XTI_RXDATA */ /* TODO */ return 0; case 0x2c: /* STI_CLK_CTRL / XTI_SCLKCRTL */ return s->clkcontrol; case 0x30: /* STI_SERIAL_CFG / XTI_SCONFIG */ return s->serial_config; } OMAP_BAD_REG(addr); return 0; }"} {"target": 0, "idx": 3799, "func": "static int pollfds_fill(GArray *pollfds, fd_set *rfds, fd_set *wfds, fd_set *xfds) { int nfds = -1; int i; for (i = 0; i < pollfds->len; i++) { GPollFD *pfd = &g_array_index(pollfds, GPollFD, i); int fd = pfd->fd; int events = pfd->events; if (events & G_IO_IN) { FD_SET(fd, rfds); nfds = MAX(nfds, fd); } if (events & G_IO_OUT) { FD_SET(fd, wfds); nfds = MAX(nfds, fd); } if (events & G_IO_PRI) { FD_SET(fd, xfds); nfds = MAX(nfds, fd); } } return nfds; }"} {"target": 0, "idx": 3800, "func": "void qemu_bh_schedule_idle(QEMUBH *bh) { if (bh->scheduled) return; bh->scheduled = 1; bh->idle = 1; }"} {"target": 0, "idx": 3814, "func": "void stl_phys(target_phys_addr_t addr, uint32_t val) { stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN); }"} {"target": 0, "idx": 3819, "func": "build_madt(GArray *table_data, GArray *linker, PCMachineState *pcms, AcpiCpuInfo *cpu) { int madt_start = table_data->len; AcpiMultipleApicTable *madt; AcpiMadtIoApic *io_apic; AcpiMadtIntsrcovr *intsrcovr; AcpiMadtLocalNmi *local_nmi; int i; madt = acpi_data_push(table_data, sizeof *madt); madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS); madt->flags = cpu_to_le32(1); for (i = 0; i < pcms->apic_id_limit; i++) { AcpiMadtProcessorApic *apic = acpi_data_push(table_data, sizeof *apic); apic->type = ACPI_APIC_PROCESSOR; apic->length = sizeof(*apic); apic->processor_id = i; apic->local_apic_id = i; if (test_bit(i, cpu->found_cpus)) { apic->flags = cpu_to_le32(1); } else { apic->flags = cpu_to_le32(0); } } io_apic = acpi_data_push(table_data, sizeof *io_apic); io_apic->type = ACPI_APIC_IO; io_apic->length = sizeof(*io_apic); #define ACPI_BUILD_IOAPIC_ID 0x0 io_apic->io_apic_id = ACPI_BUILD_IOAPIC_ID; io_apic->address = cpu_to_le32(IO_APIC_DEFAULT_ADDRESS); io_apic->interrupt = cpu_to_le32(0); if (pcms->apic_xrupt_override) { intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr); intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE; intsrcovr->length = sizeof(*intsrcovr); intsrcovr->source = 0; intsrcovr->gsi = cpu_to_le32(2); intsrcovr->flags = cpu_to_le16(0); /* conforms to bus specifications */ } for (i = 1; i < 16; i++) { #define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11)) if (!(ACPI_BUILD_PCI_IRQS & (1 << i))) { /* No need for a INT source override structure. 
*/ continue; } intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr); intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE; intsrcovr->length = sizeof(*intsrcovr); intsrcovr->source = i; intsrcovr->gsi = cpu_to_le32(i); intsrcovr->flags = cpu_to_le16(0xd); /* active high, level triggered */ } local_nmi = acpi_data_push(table_data, sizeof *local_nmi); local_nmi->type = ACPI_APIC_LOCAL_NMI; local_nmi->length = sizeof(*local_nmi); local_nmi->processor_id = 0xff; /* all processors */ local_nmi->flags = cpu_to_le16(0); local_nmi->lint = 1; /* ACPI_LINT1 */ build_header(linker, table_data, (void *)(table_data->data + madt_start), \"APIC\", table_data->len - madt_start, 1, NULL, NULL); }"} {"target": 0, "idx": 3820, "func": "static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int16_t **dc_val_ptr, int *dir_ptr) { int a, b, c, wrap, pred, scale; int16_t *dc_val; static const uint16_t dcpred[32] = { -1, 1024, 512, 341, 256, 205, 171, 146, 128, 114, 102, 93, 85, 79, 73, 68, 64, 60, 57, 54, 51, 49, 47, 45, 43, 41, 39, 38, 37, 35, 34, 33 }; /* find prediction - wmv3_dc_scale always used here in fact */ if (n < 4) scale = s->y_dc_scale; else scale = s->c_dc_scale; wrap = s->block_wrap[n]; dc_val= s->dc_val[0] + s->block_index[n]; /* B A * C X */ c = dc_val[ - 1]; b = dc_val[ - 1 - wrap]; a = dc_val[ - wrap]; if (pq < 9 || !overlap) { /* Set outer values */ if (!s->mb_y && (n!=2 && n!=3)) b=a=dcpred[scale]; if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale]; } else { /* Set outer values */ if (!s->mb_y && (n!=2 && n!=3)) b=a=0; if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0; } if (abs(a - b) <= abs(b - c)) { pred = c; *dir_ptr = 1;//left } else { pred = a; *dir_ptr = 0;//top } /* update predictor */ *dc_val_ptr = &dc_val[0]; return pred; }"} {"target": 0, "idx": 3858, "func": "static void multiwrite_user_cb(MultiwriteCB *mcb) { int i; for (i = 0; i < mcb->num_callbacks; i++) { mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error); if (mcb->callbacks[i].free_qiov) { qemu_iovec_destroy(mcb->callbacks[i].free_qiov); } g_free(mcb->callbacks[i].free_qiov); } }"} {"target": 0, "idx": 3859, "func": "char *get_boot_devices_list(uint32_t *size) { FWBootEntry *i; uint32_t total = 0; char *list = NULL; QTAILQ_FOREACH(i, &fw_boot_order, link) { char *devpath = NULL, *bootpath; int len; if (i->dev) { devpath = qdev_get_fw_dev_path(i->dev); assert(devpath); } if (i->suffix && devpath) { bootpath = qemu_malloc(strlen(devpath) + strlen(i->suffix) + 1); sprintf(bootpath, \"%s%s\", devpath, i->suffix); qemu_free(devpath); } else if (devpath) { bootpath = devpath; } else { bootpath = strdup(i->suffix); assert(bootpath); } if (total) { list[total-1] = '\\n'; } len = strlen(bootpath) + 1; list = qemu_realloc(list, total + len); memcpy(&list[total], bootpath, len); total += len; qemu_free(bootpath); } *size = total; return list; }"} {"target": 1, "idx": 3873, "func": "void rgb24toyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, int width, int height, int lumStride, int chromStride, int srcStride) { int y; const int chromWidth = width >> 1; for (y = 0; y < height; y += 2) { int i; for (i = 0; i < chromWidth; i++) { unsigned int b = src[6 * i + 0]; unsigned int g = src[6 * i + 1]; unsigned int r = src[6 * i + 2]; unsigned int Y = ((RY * r + GY * g + BY * b) >> RGB2YUV_SHIFT) + 16; unsigned int V = ((RV * r + GV * g + BV * b) >> RGB2YUV_SHIFT) + 128; unsigned int U = ((RU * r + GU * g + BU * b) >> RGB2YUV_SHIFT) + 128; udst[i] = U; vdst[i] = V; ydst[2 * i] = Y; b = src[6 * i + 3]; 
g = src[6 * i + 4]; r = src[6 * i + 5]; Y = ((RY * r + GY * g + BY * b) >> RGB2YUV_SHIFT) + 16; ydst[2 * i + 1] = Y; } ydst += lumStride; src += srcStride; if (y+1 == height) break; for (i = 0; i < chromWidth; i++) { unsigned int b = src[6 * i + 0]; unsigned int g = src[6 * i + 1]; unsigned int r = src[6 * i + 2]; unsigned int Y = ((RY * r + GY * g + BY * b) >> RGB2YUV_SHIFT) + 16; ydst[2 * i] = Y; b = src[6 * i + 3]; g = src[6 * i + 4]; r = src[6 * i + 5]; Y = ((RY * r + GY * g + BY * b) >> RGB2YUV_SHIFT) + 16; ydst[2 * i + 1] = Y; } udst += chromStride; vdst += chromStride; ydst += lumStride; src += srcStride; } }"} {"target": 1, "idx": 3907, "func": "static void framebuffer_update_request(VncState *vs, int incremental, int x_position, int y_position, int w, int h) { int i; const size_t width = surface_width(vs->vd->ds) / VNC_DIRTY_PIXELS_PER_BIT; const size_t height = surface_height(vs->vd->ds); if (y_position > height) { y_position = height; } if (y_position + h >= height) { h = height - y_position; } vs->need_update = 1; if (!incremental) { vs->force_update = 1; for (i = 0; i < h; i++) { bitmap_set(vs->dirty[y_position + i], 0, width); bitmap_clear(vs->dirty[y_position + i], width, VNC_DIRTY_BITS - width); } } }"} {"target": 1, "idx": 3909, "func": "static void sdp_parse_fmtp_config(AVCodecContext *codec, char *attr, char *value) { switch (codec->codec_id) { case CODEC_ID_MPEG4: case CODEC_ID_AAC: if (!strcmp(attr, \"config\")) { /* decode the hexa encoded parameter */ int len = hex_to_data(NULL, value); codec->extradata = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE); if (!codec->extradata) return; codec->extradata_size = len; hex_to_data(codec->extradata, value); } break; default: break; } return; }"} {"target": 1, "idx": 3917, "func": "static int tpm_passthrough_unix_write(int fd, const uint8_t *buf, uint32_t len) { int ret, remain; remain = len; while (len > 0) { ret = write(fd, buf, remain); if (ret < 0) { if (errno != EINTR && errno != EAGAIN) { return -1; } } else if (ret == 0) { break; } else { buf += ret; remain -= ret; } } return len - remain; }"} {"target": 0, "idx": 3921, "func": "static int mp3_header_decompress(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe){ uint32_t header; int sample_rate= avctx->sample_rate; int sample_rate_index=0; int lsf, mpeg25, bitrate_index, frame_size; header = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; if(ff_mpa_check_header(header) >= 0){ *poutbuf= (uint8_t *) buf; *poutbuf_size= buf_size; return 0; } header= 0xFFE00000 | ((4-3)<<17) | (1<<16); //FIXME simplify lsf = sample_rate < (24000+32000)/2; mpeg25 = sample_rate < (12000+16000)/2; header |= (!mpeg25)<<20; header |= (!lsf )<<19; if(sample_rate<<(lsf+mpeg25) < (44100+32000)/2) sample_rate_index |= 2; else if(sample_rate<<(lsf+mpeg25) > (44100+48000)/2) sample_rate_index |= 1; header |= sample_rate_index<<10; sample_rate= mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25); //in case sample rate is a little off for(bitrate_index=2; bitrate_index<30; bitrate_index++){ frame_size = mpa_bitrate_tab[lsf][2][bitrate_index>>1]; frame_size = (frame_size * 144000) / (sample_rate << lsf) + (bitrate_index&1); if(frame_size == buf_size + 4) break; } if(bitrate_index == 30){ av_log(avctx, AV_LOG_ERROR, \"couldnt find bitrate_index\\n\"); return -1; } header |= (bitrate_index&1)<<9; header |= (bitrate_index>>1)<<12; header |= (avctx->channels==1 ? 
MPA_MONO : MPA_JSTEREO)<<6; *poutbuf_size= buf_size + 4; *poutbuf= av_malloc(buf_size + 4 + FF_INPUT_BUFFER_PADDING_SIZE); memcpy(*poutbuf + 4, buf, buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if(avctx->channels==2){ if(lsf){ FFSWAP(int, (*poutbuf)[5], (*poutbuf)[6]); header |= ((*poutbuf)[5] & 0xC0)>>2; }else{ header |= (*poutbuf)[5] & 0x30; } } (*poutbuf)[0]= header>>24; (*poutbuf)[1]= header>>16; (*poutbuf)[2]= header>> 8; (*poutbuf)[3]= header ; return 1; }"} {"target": 1, "idx": 3955, "func": "static struct iovec *lock_iovec(int type, abi_ulong target_addr, int count, int copy) { struct target_iovec *target_vec; struct iovec *vec; abi_ulong total_len, max_len; int i; int err = 0; bool bad_address = false; if (count == 0) { errno = 0; return NULL; if (count < 0 || count > IOV_MAX) { errno = EINVAL; return NULL; vec = calloc(count, sizeof(struct iovec)); if (vec == NULL) { errno = ENOMEM; return NULL; target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1); if (target_vec == NULL) { err = EFAULT; goto fail2; /* ??? If host page size > target page size, this will result in a value larger than what we can actually support. */ max_len = 0x7fffffff & TARGET_PAGE_MASK; total_len = 0; for (i = 0; i < count; i++) { abi_ulong base = tswapal(target_vec[i].iov_base); abi_long len = tswapal(target_vec[i].iov_len); if (len < 0) { err = EINVAL; goto fail; } else if (len == 0) { /* Zero length pointer is ignored. */ vec[i].iov_base = 0; } else { vec[i].iov_base = lock_user(type, base, len, copy); /* If the first buffer pointer is bad, this is a fault. But * subsequent bad buffers will result in a partial write; this * is realized by filling the vector with null pointers and * zero lengths. */ if (!vec[i].iov_base) { if (i == 0) { err = EFAULT; goto fail; } else { bad_address = true; if (bad_address) { len = 0; if (len > max_len - total_len) { len = max_len - total_len; vec[i].iov_len = len; total_len += len; unlock_user(target_vec, target_addr, 0); return vec; fail: unlock_user(target_vec, target_addr, 0); fail2: free(vec); errno = err; return NULL;"} {"target": 1, "idx": 3968, "func": "static void cris_cpu_initfn(Object *obj) { CPUState *cs = CPU(obj); CRISCPU *cpu = CRIS_CPU(obj); CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj); CPUCRISState *env = &cpu->env; static bool tcg_initialized; cs->env_ptr = env; cpu_exec_init(cs, &error_abort); env->pregs[PR_VR] = ccc->vr; #ifndef CONFIG_USER_ONLY /* IRQ and NMI lines. 
*/ qdev_init_gpio_in(DEVICE(cpu), cris_cpu_set_irq, 2); #endif if (tcg_enabled() && !tcg_initialized) { tcg_initialized = true; if (env->pregs[PR_VR] < 32) { cris_initialize_crisv10_tcg(); } else { cris_initialize_tcg(); } } }"} {"target": 0, "idx": 3969, "func": "static int get_cod(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c, uint8_t *properties) { Jpeg2000CodingStyle tmp; int compno; if (s->buf_end - s->buf < 5) return AVERROR(EINVAL); tmp.log2_prec_width = tmp.log2_prec_height = 15; tmp.csty = bytestream_get_byte(&s->buf); // get progression order tmp.prog_order = bytestream_get_byte(&s->buf); tmp.nlayers = bytestream_get_be16(&s->buf); tmp.mct = bytestream_get_byte(&s->buf); // multiple component transformation get_cox(s, &tmp); for (compno = 0; compno < s->ncomponents; compno++) if (!(properties[compno] & HAD_COC)) memcpy(c + compno, &tmp, sizeof(tmp)); return 0; }"} {"target": 1, "idx": 3974, "func": "static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul) { int res = 0; while (order--) { res += *v1 * *v2++; *v1++ += mul * *v3++; } return res; }"} {"target": 1, "idx": 3992, "func": "void exec_start_incoming_migration(const char *command, Error **errp) { QEMUFile *f; DPRINTF(\"Attempting to start an incoming migration\\n\"); f = qemu_popen_cmd(command, \"r\"); if(f == NULL) { error_setg_errno(errp, errno, \"failed to popen the migration source\"); return; } qemu_set_fd_handler2(qemu_get_fd(f), NULL, exec_accept_incoming_migration, NULL, f); }"} {"target": 1, "idx": 3994, "func": "static void io_mem_init(void) { cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL); cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL); cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL); io_mem_nb = 5; /* alloc dirty bits array */ phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS); memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS); }"} {"target": 0, "idx": 4026, "func": "static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) { AVIContext *avi = s->priv_data; ByteIOContext *pb = &s->pb; uint32_t tag, tag1, handler; int codec_type, stream_index, frame_period, bit_rate; unsigned int size, nb_frames; int i, n; AVStream *st; AVIStream *ast = NULL; int xan_video = 0; /* hack to support Xan A/V */ char str_track[4]; avi->stream_index= -1; if (get_riff(avi, pb) < 0) return -1; /* first list tag */ stream_index = -1; codec_type = -1; frame_period = 0; for(;;) { if (url_feof(pb)) goto fail; tag = get_le32(pb); size = get_le32(pb); #ifdef DEBUG print_tag(\"tag\", tag, size); #endif switch(tag) { case MKTAG('L', 'I', 'S', 'T'): /* ignored, except when start of video packets */ tag1 = get_le32(pb); #ifdef DEBUG print_tag(\"list\", tag1, 0); #endif if (tag1 == MKTAG('m', 'o', 'v', 'i')) { avi->movi_list = url_ftell(pb) - 4; if(size) avi->movi_end = avi->movi_list + size + (size & 1); else avi->movi_end = url_fsize(pb); #ifdef DEBUG printf(\"movi end=%\"PRIx64\"\\n\", avi->movi_end); #endif goto end_of_header; } break; case MKTAG('d', 'm', 'l', 'h'): avi->is_odml = 1; url_fskip(pb, size + (size & 1)); break; case MKTAG('a', 'v', 'i', 'h'): /* avi header */ /* using frame_period is bad idea */ frame_period = get_le32(pb); bit_rate = get_le32(pb) * 8; get_le32(pb); avi->non_interleaved |= get_le32(pb) & AVIF_MUSTUSEINDEX; url_fskip(pb, 2 * 4); n = get_le32(pb); 
for(i=0;ipriv_data = ast; } url_fskip(pb, size - 7 * 4); break; case MKTAG('s', 't', 'r', 'h'): /* stream header */ stream_index++; tag1 = get_le32(pb); handler = get_le32(pb); /* codec tag */ #ifdef DEBUG print_tag(\"strh\", tag1, -1); #endif if(tag1 == MKTAG('i', 'a', 'v', 's') || tag1 == MKTAG('i', 'v', 'a', 's')){ /* * After some consideration -- I don't think we * have to support anything but DV in a type1 AVIs. */ if (s->nb_streams != 1) goto fail; if (handler != MKTAG('d', 'v', 's', 'd') && handler != MKTAG('d', 'v', 'h', 'd') && handler != MKTAG('d', 'v', 's', 'l')) goto fail; ast = s->streams[0]->priv_data; av_freep(&s->streams[0]->codec->extradata); av_freep(&s->streams[0]); s->nb_streams = 0; if (ENABLE_DV_DEMUXER) { avi->dv_demux = dv_init_demux(s); if (!avi->dv_demux) goto fail; } s->streams[0]->priv_data = ast; url_fskip(pb, 3 * 4); ast->scale = get_le32(pb); ast->rate = get_le32(pb); stream_index = s->nb_streams - 1; url_fskip(pb, size - 7*4); break; } if (stream_index >= s->nb_streams) { url_fskip(pb, size - 8); /* ignore padding stream */ if (tag1 == MKTAG('p', 'a', 'd', 's')) stream_index--; break; } st = s->streams[stream_index]; ast = st->priv_data; st->codec->stream_codec_tag= handler; get_le32(pb); /* flags */ get_le16(pb); /* priority */ get_le16(pb); /* language */ get_le32(pb); /* initial frame */ ast->scale = get_le32(pb); ast->rate = get_le32(pb); if(ast->scale && ast->rate){ }else if(frame_period){ ast->rate = 1000000; ast->scale = frame_period; }else{ ast->rate = 25; ast->scale = 1; } av_set_pts_info(st, 64, ast->scale, ast->rate); ast->cum_len=get_le32(pb); /* start */ nb_frames = get_le32(pb); st->start_time = 0; st->duration = nb_frames; get_le32(pb); /* buffer size */ get_le32(pb); /* quality */ ast->sample_size = get_le32(pb); /* sample ssize */ ast->cum_len *= FFMAX(1, ast->sample_size); // av_log(NULL, AV_LOG_DEBUG, \"%d %d %d %d\\n\", ast->rate, ast->scale, ast->start, ast->sample_size); switch(tag1) { case MKTAG('v', 'i', 'd', 's'): codec_type = CODEC_TYPE_VIDEO; ast->sample_size = 0; break; case MKTAG('a', 'u', 'd', 's'): codec_type = CODEC_TYPE_AUDIO; break; case MKTAG('t', 'x', 't', 's'): //FIXME codec_type = CODEC_TYPE_DATA; //CODEC_TYPE_SUB ? 
FIXME break; case MKTAG('p', 'a', 'd', 's'): codec_type = CODEC_TYPE_UNKNOWN; stream_index--; break; default: av_log(s, AV_LOG_ERROR, \"unknown stream type %X\\n\", tag1); goto fail; } ast->frame_offset= ast->cum_len; url_fskip(pb, size - 12 * 4); break; case MKTAG('s', 't', 'r', 'f'): /* stream header */ if (stream_index >= s->nb_streams || avi->dv_demux) { url_fskip(pb, size); } else { st = s->streams[stream_index]; switch(codec_type) { case CODEC_TYPE_VIDEO: get_le32(pb); /* size */ st->codec->width = get_le32(pb); st->codec->height = get_le32(pb); get_le16(pb); /* panes */ st->codec->bits_per_sample= get_le16(pb); /* depth */ tag1 = get_le32(pb); get_le32(pb); /* ImageSize */ get_le32(pb); /* XPelsPerMeter */ get_le32(pb); /* YPelsPerMeter */ get_le32(pb); /* ClrUsed */ get_le32(pb); /* ClrImportant */ if(size > 10*4 && size<(1<<30)){ st->codec->extradata_size= size - 10*4; st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); get_buffer(pb, st->codec->extradata, st->codec->extradata_size); } if(st->codec->extradata_size & 1) //FIXME check if the encoder really did this correctly get_byte(pb); /* Extract palette from extradata if bpp <= 8 */ /* This code assumes that extradata contains only palette */ /* This is true for all paletted codecs implemented in ffmpeg */ if (st->codec->extradata_size && (st->codec->bits_per_sample <= 8)) { st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl)); #ifdef WORDS_BIGENDIAN for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++) st->codec->palctrl->palette[i] = bswap_32(((uint32_t*)st->codec->extradata)[i]); #else memcpy(st->codec->palctrl->palette, st->codec->extradata, FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)); #endif st->codec->palctrl->palette_changed = 1; } #ifdef DEBUG print_tag(\"video\", tag1, 0); #endif st->codec->codec_type = CODEC_TYPE_VIDEO; st->codec->codec_tag = tag1; st->codec->codec_id = codec_get_id(codec_bmp_tags, tag1); if (st->codec->codec_id == CODEC_ID_XAN_WC4) xan_video = 1; st->need_parsing = 2; //only parse headers dont do slower repacketization, this is needed to get the pict type which is needed for generating correct pts // url_fskip(pb, size - 5 * 4); break; case CODEC_TYPE_AUDIO: get_wav_header(pb, st->codec, size); if(ast->sample_size && st->codec->block_align && ast->sample_size % st->codec->block_align) av_log(s, AV_LOG_DEBUG, \"invalid sample size or block align detected\\n\"); if (size%2) /* 2-aligned (fix for Stargate SG-1 - 3x18 - Shades of Grey.avi) */ url_fskip(pb, 1); /* special case time: To support Xan DPCM, hardcode * the format if Xxan is the video codec */ st->need_parsing = 1; /* ADTS header is in extradata, AAC without header must be stored as exact frames, parser not needed and it will fail */ if (st->codec->codec_id == CODEC_ID_AAC && st->codec->extradata_size) st->need_parsing = 0; /* force parsing as several audio frames can be in one packet */ if (xan_video) st->codec->codec_id = CODEC_ID_XAN_DPCM; break; default: st->codec->codec_type = CODEC_TYPE_DATA; st->codec->codec_id= CODEC_ID_NONE; st->codec->codec_tag= 0; url_fskip(pb, size); break; } } break; case MKTAG('i', 'n', 'd', 'x'): i= url_ftell(pb); if(!url_is_streamed(pb) && !(s->flags & AVFMT_FLAG_IGNIDX)){ read_braindead_odml_indx(s, 0); } url_fseek(pb, i+size, SEEK_SET); break; case MKTAG('I', 'N', 'A', 'M'): avi_read_tag(pb, s->title, sizeof(s->title), size); break; case MKTAG('I', 'A', 'R', 'T'): avi_read_tag(pb, s->author, sizeof(s->author), size); break; case MKTAG('I', 
'C', 'O', 'P'): avi_read_tag(pb, s->copyright, sizeof(s->copyright), size); break; case MKTAG('I', 'C', 'M', 'T'): avi_read_tag(pb, s->comment, sizeof(s->comment), size); break; case MKTAG('I', 'G', 'N', 'R'): avi_read_tag(pb, s->genre, sizeof(s->genre), size); break; case MKTAG('I', 'P', 'R', 'D'): avi_read_tag(pb, s->album, sizeof(s->album), size); break; case MKTAG('I', 'P', 'R', 'T'): avi_read_tag(pb, str_track, sizeof(str_track), size); sscanf(str_track, \"%d\", &s->track); break; default: /* skip tag */ size += (size & 1); url_fskip(pb, size); break; } } end_of_header: /* check stream number */ if (stream_index != s->nb_streams - 1) { fail: for(i=0;inb_streams;i++) { av_freep(&s->streams[i]->codec->extradata); av_freep(&s->streams[i]); } return -1; } if(!avi->index_loaded && !url_is_streamed(pb)) avi_load_index(s); avi->index_loaded = 1; avi->non_interleaved |= guess_ni_flag(s); if(avi->non_interleaved) clean_index(s); return 0; }"} {"target": 1, "idx": 4030, "func": "static int ffserver_parse_config_feed(FFServerConfig *config, const char *cmd, const char **p, FFServerStream **pfeed) { FFServerStream *feed; char arg[1024]; av_assert0(pfeed); feed = *pfeed; if (!av_strcasecmp(cmd, \"filename, sizeof(feed->filename), p); q = strrchr(feed->filename, '>'); if (*q) *q = '\\0'; for (s = config->first_feed; s; s = s->next) { if (!strcmp(feed->filename, s->filename)) ERROR(\"Feed '%s' already registered\\n\", s->filename); } feed->fmt = av_guess_format(\"ffm\", NULL, NULL); /* default feed file */ snprintf(feed->feed_filename, sizeof(feed->feed_filename), \"/tmp/%s.ffm\", feed->filename); feed->feed_max_size = 5 * 1024 * 1024; feed->is_feed = 1; feed->feed = feed; /* self feeding :-) */ *pfeed = feed; return 0; } av_assert0(feed); if (!av_strcasecmp(cmd, \"Launch\")) { int i; feed->child_argv = av_mallocz(64 * sizeof(char *)); if (!feed->child_argv) return AVERROR(ENOMEM); for (i = 0; i < 62; i++) { ffserver_get_arg(arg, sizeof(arg), p); if (!arg[0]) break; feed->child_argv[i] = av_strdup(arg); if (!feed->child_argv[i]) return AVERROR(ENOMEM); } feed->child_argv[i] = av_asprintf(\"http://%s:%d/%s\", (config->http_addr.sin_addr.s_addr == INADDR_ANY) ? 
\"127.0.0.1\" : inet_ntoa(config->http_addr.sin_addr), ntohs(config->http_addr.sin_port), feed->filename); if (!feed->child_argv[i]) return AVERROR(ENOMEM); } else if (!av_strcasecmp(cmd, \"ACL\")) { ffserver_parse_acl_row(NULL, feed, NULL, *p, config->filename, config->line_num); } else if (!av_strcasecmp(cmd, \"File\") || !av_strcasecmp(cmd, \"ReadOnlyFile\")) { ffserver_get_arg(feed->feed_filename, sizeof(feed->feed_filename), p); feed->readonly = !av_strcasecmp(cmd, \"ReadOnlyFile\"); } else if (!av_strcasecmp(cmd, \"Truncate\")) { ffserver_get_arg(arg, sizeof(arg), p); /* assume Truncate is true in case no argument is specified */ if (!arg[0]) { feed->truncate = 1; } else { WARNING(\"Truncate N syntax in configuration file is deprecated, \" \"use Truncate alone with no arguments\\n\"); feed->truncate = strtod(arg, NULL); } } else if (!av_strcasecmp(cmd, \"FileMaxSize\")) { char *p1; double fsize; ffserver_get_arg(arg, sizeof(arg), p); p1 = arg; fsize = strtod(p1, &p1); switch(av_toupper(*p1)) { case 'K': fsize *= 1024; break; case 'M': fsize *= 1024 * 1024; break; case 'G': fsize *= 1024 * 1024 * 1024; break; default: ERROR(\"Invalid file size: %s\\n\", arg); break; } feed->feed_max_size = (int64_t)fsize; if (feed->feed_max_size < FFM_PACKET_SIZE*4) ERROR(\"Feed max file size is too small, must be at least %d\\n\", FFM_PACKET_SIZE*4); } else if (!av_strcasecmp(cmd, \"\")) { *pfeed = NULL; } else { ERROR(\"Invalid entry '%s' inside \\n\", cmd); } return 0; }"} {"target": 1, "idx": 4034, "func": "static void build_feed_streams(void) { FFServerStream *stream, *feed; int i; /* gather all streams */ for(stream = config.first_stream; stream; stream = stream->next) { feed = stream->feed; if (feed) { if (stream->is_feed) { for(i=0;inb_streams;i++) stream->feed_streams[i] = i; } else { /* we handle a stream coming from a feed */ for(i=0;inb_streams;i++) stream->feed_streams[i] = add_av_stream(feed, stream->streams[i]); } } } /* create feed files if needed */ for(feed = config.first_feed; feed; feed = feed->next_feed) { int fd; if (avio_check(feed->feed_filename, AVIO_FLAG_READ) > 0) { /* See if it matches */ AVFormatContext *s = NULL; int matches = 0; if (avformat_open_input(&s, feed->feed_filename, NULL, NULL) >= 0) { /* set buffer size */ ffio_set_buf_size(s->pb, FFM_PACKET_SIZE); /* Now see if it matches */ if (s->nb_streams == feed->nb_streams) { matches = 1; for(i=0;inb_streams;i++) { AVStream *sf, *ss; sf = feed->streams[i]; ss = s->streams[i]; if (sf->index != ss->index || sf->id != ss->id) { http_log(\"Index & Id do not match for stream %d (%s)\\n\", i, feed->feed_filename); matches = 0; } else { AVCodecContext *ccf, *ccs; ccf = sf->codec; ccs = ss->codec; #define CHECK_CODEC(x) (ccf->x != ccs->x) if (CHECK_CODEC(codec_id) || CHECK_CODEC(codec_type)) { http_log(\"Codecs do not match for stream %d\\n\", i); matches = 0; } else if (CHECK_CODEC(bit_rate) || CHECK_CODEC(flags)) { http_log(\"Codec bitrates do not match for stream %d\\n\", i); matches = 0; } else if (ccf->codec_type == AVMEDIA_TYPE_VIDEO) { if (CHECK_CODEC(time_base.den) || CHECK_CODEC(time_base.num) || CHECK_CODEC(width) || CHECK_CODEC(height)) { http_log(\"Codec width, height and framerate do not match for stream %d\\n\", i); matches = 0; } } else if (ccf->codec_type == AVMEDIA_TYPE_AUDIO) { if (CHECK_CODEC(sample_rate) || CHECK_CODEC(channels) || CHECK_CODEC(frame_size)) { http_log(\"Codec sample_rate, channels, frame_size do not match for stream %d\\n\", i); matches = 0; } } else { http_log(\"Unknown codec type\\n\"); 
matches = 0; } } if (!matches) break; } } else http_log(\"Deleting feed file '%s' as stream counts differ (%d != %d)\\n\", feed->feed_filename, s->nb_streams, feed->nb_streams); avformat_close_input(&s); } else http_log(\"Deleting feed file '%s' as it appears to be corrupt\\n\", feed->feed_filename); if (!matches) { if (feed->readonly) { http_log(\"Unable to delete feed file '%s' as it is marked readonly\\n\", feed->feed_filename); exit(1); } unlink(feed->feed_filename); } } if (avio_check(feed->feed_filename, AVIO_FLAG_WRITE) <= 0) { AVFormatContext *s = avformat_alloc_context(); if (!s) { http_log(\"Failed to allocate context\\n\"); exit(1); } if (feed->readonly) { http_log(\"Unable to create feed file '%s' as it is marked readonly\\n\", feed->feed_filename); exit(1); } /* only write the header of the ffm file */ if (avio_open(&s->pb, feed->feed_filename, AVIO_FLAG_WRITE) < 0) { http_log(\"Could not open output feed file '%s'\\n\", feed->feed_filename); exit(1); } s->oformat = feed->fmt; s->nb_streams = feed->nb_streams; s->streams = feed->streams; if (avformat_write_header(s, NULL) < 0) { http_log(\"Container doesn't support the required parameters\\n\"); exit(1); } /* XXX: need better API */ av_freep(&s->priv_data); avio_closep(&s->pb); s->streams = NULL; s->nb_streams = 0; avformat_free_context(s); } /* get feed size and write index */ fd = open(feed->feed_filename, O_RDONLY); if (fd < 0) { http_log(\"Could not open output feed file '%s'\\n\", feed->feed_filename); exit(1); } feed->feed_write_index = FFMAX(ffm_read_write_index(fd), FFM_PACKET_SIZE); feed->feed_size = lseek(fd, 0, SEEK_END); /* ensure that we do not wrap before the end of file */ if (feed->feed_max_size && feed->feed_max_size < feed->feed_size) feed->feed_max_size = feed->feed_size; close(fd); } }"} {"target": 1, "idx": 4061, "func": "static void virtio_balloon_device_realize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIOBalloon *s = VIRTIO_BALLOON(dev); int ret; virtio_init(vdev, \"virtio-balloon\", VIRTIO_ID_BALLOON, sizeof(struct virtio_balloon_config)); ret = qemu_add_balloon_handler(virtio_balloon_to_target, virtio_balloon_stat, s); if (ret < 0) { error_setg(errp, \"Adding balloon handler failed\"); virtio_cleanup(vdev); return; } s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output); s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output); s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats); reset_stats(s); register_savevm(dev, \"virtio-balloon\", -1, 1, virtio_balloon_save, virtio_balloon_load, s); object_property_add(OBJECT(dev), \"guest-stats\", \"guest statistics\", balloon_stats_get_all, NULL, NULL, s, NULL); object_property_add(OBJECT(dev), \"guest-stats-polling-interval\", \"int\", balloon_stats_get_poll_interval, balloon_stats_set_poll_interval, NULL, s, NULL); }"} {"target": 1, "idx": 4071, "func": "static inline void RENAME(LEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, int width, uint32_t *unused) { #if COMPILE_TEMPLATE_MMX __asm__ volatile( \"mov %0, %%\"REG_a\" \\n\\t\" \"1: \\n\\t\" \"movq (%1, %%\"REG_a\",2), %%mm0 \\n\\t\" \"movq 8(%1, %%\"REG_a\",2), %%mm1 \\n\\t\" \"movq (%2, %%\"REG_a\",2), %%mm2 \\n\\t\" \"movq 8(%2, %%\"REG_a\",2), %%mm3 \\n\\t\" \"psrlw $8, %%mm0 \\n\\t\" \"psrlw $8, %%mm1 \\n\\t\" \"psrlw $8, %%mm2 \\n\\t\" \"psrlw $8, %%mm3 \\n\\t\" \"packuswb %%mm1, %%mm0 \\n\\t\" \"packuswb %%mm3, %%mm2 \\n\\t\" \"movq %%mm0, (%3, %%\"REG_a\") \\n\\t\" \"movq %%mm2, (%4, 
%%\"REG_a\") \\n\\t\" \"add $8, %%\"REG_a\" \\n\\t\" \" js 1b \\n\\t\" : : \"g\" ((x86_reg)-width), \"r\" (src1+width*2), \"r\" (src2+width*2), \"r\" (dstU+width), \"r\" (dstV+width) : \"%\"REG_a ); #else int i; for (i=0; iopaque; QCowSnapshot *snapshots1, sn1, *sn = &sn1; int i, ret; uint64_t *l1_table = NULL; memset(sn, 0, sizeof(*sn)); if (sn_info->id_str[0] == '\\0') { /* compute a new id */ find_new_snapshot_id(bs, sn_info->id_str, sizeof(sn_info->id_str)); } /* check that the ID is unique */ if (find_snapshot_by_id(bs, sn_info->id_str) >= 0) return -ENOENT; sn->id_str = qemu_strdup(sn_info->id_str); if (!sn->id_str) goto fail; sn->name = qemu_strdup(sn_info->name); if (!sn->name) goto fail; sn->vm_state_size = sn_info->vm_state_size; sn->date_sec = sn_info->date_sec; sn->date_nsec = sn_info->date_nsec; sn->vm_clock_nsec = sn_info->vm_clock_nsec; ret = update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 1); if (ret < 0) goto fail; /* create the L1 table of the snapshot */ sn->l1_table_offset = alloc_clusters(bs, s->l1_size * sizeof(uint64_t)); sn->l1_size = s->l1_size; l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t)); if (!l1_table) goto fail; for(i = 0; i < s->l1_size; i++) { l1_table[i] = cpu_to_be64(s->l1_table[i]); } if (bdrv_pwrite(s->hd, sn->l1_table_offset, l1_table, s->l1_size * sizeof(uint64_t)) != (s->l1_size * sizeof(uint64_t))) goto fail; qemu_free(l1_table); l1_table = NULL; snapshots1 = qemu_malloc((s->nb_snapshots + 1) * sizeof(QCowSnapshot)); if (!snapshots1) goto fail; memcpy(snapshots1, s->snapshots, s->nb_snapshots * sizeof(QCowSnapshot)); s->snapshots = snapshots1; s->snapshots[s->nb_snapshots++] = *sn; if (qcow_write_snapshots(bs) < 0) goto fail; #ifdef DEBUG_ALLOC check_refcounts(bs); #endif return 0; fail: qemu_free(sn->name); qemu_free(l1_table); return -1; }"} {"target": 1, "idx": 4076, "func": "static int resize_peers(IVShmemState *s, int new_min_size) { int j, old_size; /* limit number of max peers */ if (new_min_size <= 0 || new_min_size > IVSHMEM_MAX_PEERS) { return -1; } if (new_min_size <= s->nb_peers) { return 0; } old_size = s->nb_peers; s->nb_peers = new_min_size; IVSHMEM_DPRINTF(\"bumping storage to %d peers\\n\", s->nb_peers); s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer)); for (j = old_size; j < s->nb_peers; j++) { s->peers[j].eventfds = g_new0(EventNotifier, s->vectors); s->peers[j].nb_eventfds = 0; } return 0; }"} {"target": 1, "idx": 4081, "func": "static int s390_virtio_blk_init(VirtIOS390Device *s390_dev) { VirtIOBlkS390 *dev = VIRTIO_BLK_S390(s390_dev); DeviceState *vdev = DEVICE(&dev->vdev); virtio_blk_set_conf(vdev, &(dev->blk)); qdev_set_parent_bus(vdev, BUS(&s390_dev->bus)); if (qdev_init(vdev) < 0) { return -1; } return s390_virtio_device_init(s390_dev, VIRTIO_DEVICE(vdev)); }"} {"target": 1, "idx": 4090, "func": "static void aarch64_cpu_class_init(ObjectClass *oc, void *data) { CPUClass *cc = CPU_CLASS(oc); cc->dump_state = aarch64_cpu_dump_state; cc->set_pc = aarch64_cpu_set_pc; cc->gdb_read_register = aarch64_cpu_gdb_read_register; cc->gdb_write_register = aarch64_cpu_gdb_write_register; cc->gdb_num_core_regs = 34; cc->gdb_core_xml_file = \"aarch64-core.xml\"; }"} {"target": 0, "idx": 4102, "func": "void ppc_hw_interrupt(CPUPPCState *env) { PowerPCCPU *cpu = ppc_env_get_cpu(env); int hdice; #if 0 CPUState *cs = CPU(cpu); qemu_log_mask(CPU_LOG_INT, \"%s: %p pending %08x req %08x me %d ee %d\\n\", __func__, env, env->pending_interrupts, cs->interrupt_request, (int)msr_me, (int)msr_ee); #endif /* External reset 
*/ if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET); return; } /* Machine check exception */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK); return; } #if 0 /* TODO */ /* External debug exception */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG); return; } #endif if (0) { /* XXX: find a suitable condition to enable the hypervisor mode */ hdice = env->spr[SPR_LPCR] & 1; } else { hdice = 0; } if ((msr_ee != 0 || msr_hv == 0 || msr_pr != 0) && hdice != 0) { /* Hypervisor decrementer exception */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR); return; } } if (msr_ce != 0) { /* External critical interrupt */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) { /* Taking a critical external interrupt does not clear the external * critical interrupt status */ #if 0 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CEXT); #endif powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL); return; } } if (msr_ee != 0) { /* Watchdog timer on embedded PowerPC */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT); return; } if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI); return; } /* Fixed interval timer on embedded PowerPC */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT); return; } /* Programmable interval timer on embedded PowerPC */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT); return; } /* Decrementer exception */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR); return; } /* External interrupt */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) { /* Taking an external interrupt does not clear the external * interrupt status */ #if 0 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EXT); #endif powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL); return; } if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI); return; } if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM); return; } /* Thermal interrupt */ if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) { env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM); powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM); return; } } }"} {"target": 0, "idx": 4111, "func": "static int mjpeg_decode_dht(MJpegDecodeContext *s) { int len, index, i, class, n, v, code_max; uint8_t bits_table[17]; uint8_t val_table[256]; len = 
get_bits(&s->gb, 16) - 2; while (len > 0) { if (len < 17) return -1; class = get_bits(&s->gb, 4); if (class >= 2) return -1; index = get_bits(&s->gb, 4); if (index >= 4) return -1; n = 0; for(i=1;i<=16;i++) { bits_table[i] = get_bits(&s->gb, 8); n += bits_table[i]; } len -= 17; if (len < n || n > 256) return -1; code_max = 0; for(i=0;igb, 8); if (v > code_max) code_max = v; val_table[i] = v; } len -= n; /* build VLC and flush previous vlc if present */ free_vlc(&s->vlcs[class][index]); dprintf(\"class=%d index=%d nb_codes=%d\\n\", class, index, code_max + 1); build_vlc(&s->vlcs[class][index], bits_table, val_table, code_max + 1); } return 0; }"} {"target": 0, "idx": 4112, "func": "static int usb_net_handle_dataout(USBNetState *s, USBPacket *p) { int ret = p->len; int sz = sizeof(s->out_buf) - s->out_ptr; struct rndis_packet_msg_type *msg = (struct rndis_packet_msg_type *) s->out_buf; uint32_t len; #ifdef TRAFFIC_DEBUG fprintf(stderr, \"usbnet: data out len %u\\n\", p->len); { int i; fprintf(stderr, \":\"); for (i = 0; i < p->len; i++) { if (!(i & 15)) fprintf(stderr, \"\\n%04x:\", i); fprintf(stderr, \" %02x\", p->data[i]); } fprintf(stderr, \"\\n\\n\"); } #endif if (sz > ret) sz = ret; memcpy(&s->out_buf[s->out_ptr], p->data, sz); s->out_ptr += sz; if (!s->rndis) { if (ret < 64) { qemu_send_packet(&s->nic->nc, s->out_buf, s->out_ptr); s->out_ptr = 0; } return ret; } len = le32_to_cpu(msg->MessageLength); if (s->out_ptr < 8 || s->out_ptr < len) return ret; if (le32_to_cpu(msg->MessageType) == RNDIS_PACKET_MSG) { uint32_t offs = 8 + le32_to_cpu(msg->DataOffset); uint32_t size = le32_to_cpu(msg->DataLength); if (offs + size <= len) qemu_send_packet(&s->nic->nc, s->out_buf + offs, size); } s->out_ptr -= len; memmove(s->out_buf, &s->out_buf[len], s->out_ptr); return ret; }"} {"target": 0, "idx": 4118, "func": "int bdrv_get_backing_file_depth(BlockDriverState *bs) { if (!bs->drv) { return 0; } if (!bs->backing_hd) { return 0; } return 1 + bdrv_get_backing_file_depth(bs->backing_hd); }"} {"target": 0, "idx": 4121, "func": "static int parallels_create(const char *filename, QemuOpts *opts, Error **errp) { int64_t total_size, cl_size; uint8_t tmp[BDRV_SECTOR_SIZE]; Error *local_err = NULL; BlockDriverState *file; uint32_t cat_entries, cat_sectors; ParallelsHeader header; int ret; total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), BDRV_SECTOR_SIZE); cl_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, DEFAULT_CLUSTER_SIZE), BDRV_SECTOR_SIZE); ret = bdrv_create_file(filename, opts, &local_err); if (ret < 0) { error_propagate(errp, local_err); return ret; } file = NULL; ret = bdrv_open(&file, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, NULL, &local_err); if (ret < 0) { error_propagate(errp, local_err); return ret; } ret = bdrv_truncate(file, 0); if (ret < 0) { goto exit; } cat_entries = DIV_ROUND_UP(total_size, cl_size); cat_sectors = DIV_ROUND_UP(cat_entries * sizeof(uint32_t) + sizeof(ParallelsHeader), cl_size); cat_sectors = (cat_sectors * cl_size) >> BDRV_SECTOR_BITS; memset(&header, 0, sizeof(header)); memcpy(header.magic, HEADER_MAGIC2, sizeof(header.magic)); header.version = cpu_to_le32(HEADER_VERSION); /* don't care much about geometry, it is not used on image level */ header.heads = cpu_to_le32(16); header.cylinders = cpu_to_le32(total_size / BDRV_SECTOR_SIZE / 16 / 32); header.tracks = cpu_to_le32(cl_size >> BDRV_SECTOR_BITS); header.catalog_entries = cpu_to_le32(cat_entries); header.nb_sectors = cpu_to_le64(DIV_ROUND_UP(total_size, 
BDRV_SECTOR_SIZE)); header.data_off = cpu_to_le32(cat_sectors); /* write all the data */ memset(tmp, 0, sizeof(tmp)); memcpy(tmp, &header, sizeof(header)); ret = bdrv_pwrite(file, 0, tmp, BDRV_SECTOR_SIZE); if (ret < 0) { goto exit; } ret = bdrv_write_zeroes(file, 1, cat_sectors - 1, 0); if (ret < 0) { goto exit; } ret = 0; done: bdrv_unref(file); return ret; exit: error_setg_errno(errp, -ret, \"Failed to create Parallels image\"); goto done; }"} {"target": 0, "idx": 4125, "func": "static void pxa2xx_pwrmode_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { PXA2xxState *s = (PXA2xxState *)ri->opaque; static const char *pwrmode[8] = { \"Normal\", \"Idle\", \"Deep-idle\", \"Standby\", \"Sleep\", \"reserved (!)\", \"reserved (!)\", \"Deep-sleep\", }; if (value & 8) { printf(\"%s: CPU voltage change attempt\\n\", __func__); } switch (value & 7) { case 0: /* Do nothing */ break; case 1: /* Idle */ if (!(s->cm_regs[CCCR >> 2] & (1U << 31))) { /* CPDIS */ cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HALT); break; } /* Fall through. */ case 2: /* Deep-Idle */ cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_HALT); s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */ goto message; case 3: s->cpu->env.uncached_cpsr = ARM_CPU_MODE_SVC; s->cpu->env.daif = PSTATE_A | PSTATE_F | PSTATE_I; s->cpu->env.cp15.sctlr_ns = 0; s->cpu->env.cp15.c1_coproc = 0; s->cpu->env.cp15.ttbr0_el[1] = 0; s->cpu->env.cp15.c3 = 0; s->pm_regs[PSSR >> 2] |= 0x8; /* Set STS */ s->pm_regs[RCSR >> 2] |= 0x8; /* Set GPR */ /* * The scratch-pad register is almost universally used * for storing the return address on suspend. For the * lack of a resuming bootloader, perform a jump * directly to that address. */ memset(s->cpu->env.regs, 0, 4 * 15); s->cpu->env.regs[15] = s->pm_regs[PSPR >> 2]; #if 0 buffer = 0xe59ff000; /* ldr pc, [pc, #0] */ cpu_physical_memory_write(0, &buffer, 4); buffer = s->pm_regs[PSPR >> 2]; cpu_physical_memory_write(8, &buffer, 4); #endif /* Suspend */ cpu_interrupt(current_cpu, CPU_INTERRUPT_HALT); goto message; default: message: printf(\"%s: machine entered %s mode\\n\", __func__, pwrmode[value & 7]); } }"} {"target": 0, "idx": 4137, "func": "static void hmp_change_read_arg(Monitor *mon, const char *password, void *opaque) { qmp_change_vnc_password(password, NULL); monitor_read_command(mon, 1); }"} {"target": 1, "idx": 4144, "func": "static int readv_f(int argc, char **argv) { struct timeval t1, t2; int Cflag = 0, qflag = 0, vflag = 0; int c, cnt; char *buf; int64_t offset; /* Some compilers get confused and warn if this is not initialized. 
*/ int total = 0; int nr_iov; QEMUIOVector qiov; int pattern = 0; int Pflag = 0; while ((c = getopt(argc, argv, \"CP:qv\")) != EOF) { switch (c) { case 'C': Cflag = 1; break; case 'P': Pflag = 1; pattern = parse_pattern(optarg); if (pattern < 0) { return 0; } break; case 'q': qflag = 1; break; case 'v': vflag = 1; break; default: return command_usage(&readv_cmd); } } if (optind > argc - 2) { return command_usage(&readv_cmd); } offset = cvtnum(argv[optind]); if (offset < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[optind]); return 0; } optind++; if (offset & 0x1ff) { printf(\"offset %\" PRId64 \" is not sector aligned\\n\", offset); return 0; } nr_iov = argc - optind; buf = create_iovec(&qiov, &argv[optind], nr_iov, 0xab); if (buf == NULL) { return 0; } gettimeofday(&t1, NULL); cnt = do_aio_readv(&qiov, offset, &total); gettimeofday(&t2, NULL); if (cnt < 0) { printf(\"readv failed: %s\\n\", strerror(-cnt)); goto out; } if (Pflag) { void *cmp_buf = g_malloc(qiov.size); memset(cmp_buf, pattern, qiov.size); if (memcmp(buf, cmp_buf, qiov.size)) { printf(\"Pattern verification failed at offset %\" PRId64 \", %zd bytes\\n\", offset, qiov.size); } g_free(cmp_buf); } if (qflag) { goto out; } if (vflag) { dump_buffer(buf, offset, qiov.size); } /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, t1); print_report(\"read\", &t2, offset, qiov.size, total, cnt, Cflag); out: qemu_io_free(buf); return 0; }"} {"target": 1, "idx": 4170, "func": "ssize_t pcnet_receive(NetClientState *nc, const uint8_t *buf, size_t size_) { PCNetState *s = qemu_get_nic_opaque(nc); int is_padr = 0, is_bcast = 0, is_ladr = 0; uint8_t buf1[60]; int remaining; int crc_err = 0; int size = size_; if (CSR_DRX(s) || CSR_STOP(s) || CSR_SPND(s) || !size || (CSR_LOOP(s) && !s->looptest)) { return -1; } #ifdef PCNET_DEBUG printf(\"pcnet_receive size=%d\\n\", size); #endif /* if too small buffer, then expand it */ if (size < MIN_BUF_SIZE) { memcpy(buf1, buf, size); memset(buf1 + size, 0, MIN_BUF_SIZE - size); buf = buf1; size = MIN_BUF_SIZE; } if (CSR_PROM(s) || (is_padr=padr_match(s, buf, size)) || (is_bcast=padr_bcast(s, buf, size)) || (is_ladr=ladr_match(s, buf, size))) { pcnet_rdte_poll(s); if (!(CSR_CRST(s) & 0x8000) && s->rdra) { struct pcnet_RMD rmd; int rcvrc = CSR_RCVRC(s)-1,i; hwaddr nrda; for (i = CSR_RCVRL(s)-1; i > 0; i--, rcvrc--) { if (rcvrc <= 1) rcvrc = CSR_RCVRL(s); nrda = s->rdra + (CSR_RCVRL(s) - rcvrc) * (BCR_SWSTYLE(s) ? 
16 : 8 ); RMDLOAD(&rmd, nrda); if (GET_FIELD(rmd.status, RMDS, OWN)) { #ifdef PCNET_DEBUG_RMD printf(\"pcnet - scan buffer: RCVRC=%d PREV_RCVRC=%d\\n\", rcvrc, CSR_RCVRC(s)); #endif CSR_RCVRC(s) = rcvrc; pcnet_rdte_poll(s); break; } } } if (!(CSR_CRST(s) & 0x8000)) { #ifdef PCNET_DEBUG_RMD printf(\"pcnet - no buffer: RCVRC=%d\\n\", CSR_RCVRC(s)); #endif s->csr[0] |= 0x1000; /* Set MISS flag */ CSR_MISSC(s)++; } else { uint8_t *src = s->buffer; hwaddr crda = CSR_CRDA(s); struct pcnet_RMD rmd; int pktcount = 0; if (!s->looptest) { memcpy(src, buf, size); /* no need to compute the CRC */ src[size] = 0; src[size + 1] = 0; src[size + 2] = 0; src[size + 3] = 0; size += 4; } else if (s->looptest == PCNET_LOOPTEST_CRC || !CSR_DXMTFCS(s) || size < MIN_BUF_SIZE+4) { uint32_t fcs = ~0; uint8_t *p = src; while (p != &src[size]) CRC(fcs, *p++); *(uint32_t *)p = htonl(fcs); size += 4; } else { uint32_t fcs = ~0; uint8_t *p = src; while (p != &src[size-4]) CRC(fcs, *p++); crc_err = (*(uint32_t *)p != htonl(fcs)); } #ifdef PCNET_DEBUG_MATCH PRINT_PKTHDR(buf); #endif RMDLOAD(&rmd, PHYSADDR(s,crda)); /*if (!CSR_LAPPEN(s))*/ SET_FIELD(&rmd.status, RMDS, STP, 1); #define PCNET_RECV_STORE() do { \\ int count = MIN(4096 - GET_FIELD(rmd.buf_length, RMDL, BCNT),remaining); \\ hwaddr rbadr = PHYSADDR(s, rmd.rbadr); \\ s->phys_mem_write(s->dma_opaque, rbadr, src, count, CSR_BSWP(s)); \\ src += count; remaining -= count; \\ SET_FIELD(&rmd.status, RMDS, OWN, 0); \\ RMDSTORE(&rmd, PHYSADDR(s,crda)); \\ pktcount++; \\ } while (0) remaining = size; PCNET_RECV_STORE(); if ((remaining > 0) && CSR_NRDA(s)) { hwaddr nrda = CSR_NRDA(s); #ifdef PCNET_DEBUG_RMD PRINT_RMD(&rmd); #endif RMDLOAD(&rmd, PHYSADDR(s,nrda)); if (GET_FIELD(rmd.status, RMDS, OWN)) { crda = nrda; PCNET_RECV_STORE(); #ifdef PCNET_DEBUG_RMD PRINT_RMD(&rmd); #endif if ((remaining > 0) && (nrda=CSR_NNRD(s))) { RMDLOAD(&rmd, PHYSADDR(s,nrda)); if (GET_FIELD(rmd.status, RMDS, OWN)) { crda = nrda; PCNET_RECV_STORE(); } } } } #undef PCNET_RECV_STORE RMDLOAD(&rmd, PHYSADDR(s,crda)); if (remaining == 0) { SET_FIELD(&rmd.msg_length, RMDM, MCNT, size); SET_FIELD(&rmd.status, RMDS, ENP, 1); SET_FIELD(&rmd.status, RMDS, PAM, !CSR_PROM(s) && is_padr); SET_FIELD(&rmd.status, RMDS, LFAM, !CSR_PROM(s) && is_ladr); SET_FIELD(&rmd.status, RMDS, BAM, !CSR_PROM(s) && is_bcast); if (crc_err) { SET_FIELD(&rmd.status, RMDS, CRC, 1); SET_FIELD(&rmd.status, RMDS, ERR, 1); } } else { SET_FIELD(&rmd.status, RMDS, OFLO, 1); SET_FIELD(&rmd.status, RMDS, BUFF, 1); SET_FIELD(&rmd.status, RMDS, ERR, 1); } RMDSTORE(&rmd, PHYSADDR(s,crda)); s->csr[0] |= 0x0400; #ifdef PCNET_DEBUG printf(\"RCVRC=%d CRDA=0x%08x BLKS=%d\\n\", CSR_RCVRC(s), PHYSADDR(s,CSR_CRDA(s)), pktcount); #endif #ifdef PCNET_DEBUG_RMD PRINT_RMD(&rmd); #endif while (pktcount--) { if (CSR_RCVRC(s) <= 1) CSR_RCVRC(s) = CSR_RCVRL(s); else CSR_RCVRC(s)--; } pcnet_rdte_poll(s); } } pcnet_poll(s); pcnet_update_irq(s); return size_; }"} {"target": 0, "idx": 4194, "func": "static bool pcie_has_upstream_port(PCIDevice *dev) { PCIDevice *parent_dev = pci_bridge_get_device(dev->bus); /* Device associated with an upstream port. * As there are several types of these, it's easier to check the * parent device: upstream ports are always connected to * root or downstream ports. 
*/ return parent_dev && pci_is_express(parent_dev) && parent_dev->exp.exp_cap && (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT || pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM); }"} {"target": 0, "idx": 4202, "func": "int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries) { AVStream *st; MOVStreamContext *sc; int j, pseudo_stream_id; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; for (pseudo_stream_id=0; pseudo_stream_id= 16) { avio_rb32(pb); /* reserved */ avio_rb16(pb); /* reserved */ dref_id = avio_rb16(pb); } if (st->codec->codec_tag && st->codec->codec_tag != format && (c->fc->video_codec_id ? ff_codec_get_id(ff_codec_movvideo_tags, format) != c->fc->video_codec_id : st->codec->codec_tag != MKTAG('j','p','e','g')) ){ /* Multiple fourcc, we skip JPEG. This is not correct, we should * export it as a separate AVStream but this needs a few changes * in the MOV demuxer, patch welcome. */ multiple_stsd: av_log(c->fc, AV_LOG_WARNING, \"multiple fourcc not supported\\n\"); avio_skip(pb, size - (avio_tell(pb) - start_pos)); continue; } /* we cannot demux concatenated h264 streams because of different extradata */ if (st->codec->codec_tag && st->codec->codec_tag == AV_RL32(\"avc1\")) goto multiple_stsd; sc->pseudo_stream_id = st->codec->codec_tag ? -1 : pseudo_stream_id; sc->dref_id= dref_id; st->codec->codec_tag = format; id = ff_codec_get_id(ff_codec_movaudio_tags, format); if (id<=0 && ((format&0xFFFF) == 'm'+('s'<<8) || (format&0xFFFF) == 'T'+('S'<<8))) id = ff_codec_get_id(ff_codec_wav_tags, av_bswap32(format)&0xFFFF); if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO && id > 0) { st->codec->codec_type = AVMEDIA_TYPE_AUDIO; } else if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO && /* do not overwrite codec type */ format && format != MKTAG('m','p','4','s')) { /* skip old asf mpeg4 tag */ id = ff_codec_get_id(ff_codec_movvideo_tags, format); if (id <= 0) id = ff_codec_get_id(ff_codec_bmp_tags, format); if (id > 0) st->codec->codec_type = AVMEDIA_TYPE_VIDEO; else if (st->codec->codec_type == AVMEDIA_TYPE_DATA){ id = ff_codec_get_id(ff_codec_movsubtitle_tags, format); if (id > 0) st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; } } av_dlog(c->fc, \"size=%d 4CC= %c%c%c%c codec_type=%d\\n\", size, (format >> 0) & 0xff, (format >> 8) & 0xff, (format >> 16) & 0xff, (format >> 24) & 0xff, st->codec->codec_type); if (st->codec->codec_type==AVMEDIA_TYPE_VIDEO) { unsigned int color_depth, len; int color_greyscale; int color_table_id; st->codec->codec_id = id; avio_rb16(pb); /* version */ avio_rb16(pb); /* revision level */ avio_rb32(pb); /* vendor */ avio_rb32(pb); /* temporal quality */ avio_rb32(pb); /* spatial quality */ st->codec->width = avio_rb16(pb); /* width */ st->codec->height = avio_rb16(pb); /* height */ avio_rb32(pb); /* horiz resolution */ avio_rb32(pb); /* vert resolution */ avio_rb32(pb); /* data size, always 0 */ avio_rb16(pb); /* frames per samples */ len = avio_r8(pb); /* codec name, pascal string */ if (len > 31) len = 31; mov_read_mac_string(c, pb, len, st->codec->codec_name, 32); if (len < 31) avio_skip(pb, 31 - len); /* codec_tag YV12 triggers an UV swap in rawdec.c */ if (!memcmp(st->codec->codec_name, \"Planar Y'CbCr 8-bit 4:2:0\", 25)) st->codec->codec_tag=MKTAG('I', '4', '2', '0'); st->codec->bits_per_coded_sample = avio_rb16(pb); /* depth */ color_table_id = avio_rb16(pb); /* colortable id */ av_dlog(c->fc, \"depth %d, ctab id %d\\n\", st->codec->bits_per_coded_sample, color_table_id); 
/* figure out the palette situation */ color_depth = st->codec->bits_per_coded_sample & 0x1F; color_greyscale = st->codec->bits_per_coded_sample & 0x20; /* if the depth is 2, 4, or 8 bpp, file is palettized */ if ((color_depth == 2) || (color_depth == 4) || (color_depth == 8)) { /* for palette traversal */ unsigned int color_start, color_count, color_end; unsigned char r, g, b; if (color_greyscale) { int color_index, color_dec; /* compute the greyscale palette */ st->codec->bits_per_coded_sample = color_depth; color_count = 1 << color_depth; color_index = 255; color_dec = 256 / (color_count - 1); for (j = 0; j < color_count; j++) { r = g = b = color_index; sc->palette[j] = (r << 16) | (g << 8) | (b); color_index -= color_dec; if (color_index < 0) color_index = 0; } } else if (color_table_id) { const uint8_t *color_table; /* if flag bit 3 is set, use the default palette */ color_count = 1 << color_depth; if (color_depth == 2) color_table = ff_qt_default_palette_4; else if (color_depth == 4) color_table = ff_qt_default_palette_16; else color_table = ff_qt_default_palette_256; for (j = 0; j < color_count; j++) { r = color_table[j * 3 + 0]; g = color_table[j * 3 + 1]; b = color_table[j * 3 + 2]; sc->palette[j] = (r << 16) | (g << 8) | (b); } } else { /* load the palette from the file */ color_start = avio_rb32(pb); color_count = avio_rb16(pb); color_end = avio_rb16(pb); if ((color_start <= 255) && (color_end <= 255)) { for (j = color_start; j <= color_end; j++) { /* each R, G, or B component is 16 bits; * only use the top 8 bits; skip alpha bytes * up front */ avio_r8(pb); avio_r8(pb); r = avio_r8(pb); avio_r8(pb); g = avio_r8(pb); avio_r8(pb); b = avio_r8(pb); avio_r8(pb); sc->palette[j] = (r << 16) | (g << 8) | (b); } } } sc->has_palette = 1; } } else if (st->codec->codec_type==AVMEDIA_TYPE_AUDIO) { int bits_per_sample, flags; uint16_t version = avio_rb16(pb); st->codec->codec_id = id; avio_rb16(pb); /* revision level */ avio_rb32(pb); /* vendor */ st->codec->channels = avio_rb16(pb); /* channel count */ av_dlog(c->fc, \"audio channels %d\\n\", st->codec->channels); st->codec->bits_per_coded_sample = avio_rb16(pb); /* sample size */ sc->audio_cid = avio_rb16(pb); avio_rb16(pb); /* packet size = 0 */ st->codec->sample_rate = ((avio_rb32(pb) >> 16)); //Read QT version 1 fields. In version 0 these do not exist. 
av_dlog(c->fc, \"version =%d, isom =%d\\n\",version,c->isom); if (!c->isom) { if (version==1) { sc->samples_per_frame = avio_rb32(pb); avio_rb32(pb); /* bytes per packet */ sc->bytes_per_frame = avio_rb32(pb); avio_rb32(pb); /* bytes per sample */ } else if (version==2) { avio_rb32(pb); /* sizeof struct only */ st->codec->sample_rate = av_int2double(avio_rb64(pb)); /* float 64 */ st->codec->channels = avio_rb32(pb); avio_rb32(pb); /* always 0x7F000000 */ st->codec->bits_per_coded_sample = avio_rb32(pb); /* bits per channel if sound is uncompressed */ flags = avio_rb32(pb); /* lpcm format specific flag */ sc->bytes_per_frame = avio_rb32(pb); /* bytes per audio packet if constant */ sc->samples_per_frame = avio_rb32(pb); /* lpcm frames per audio packet if constant */ if (format == MKTAG('l','p','c','m')) st->codec->codec_id = ff_mov_get_lpcm_codec_id(st->codec->bits_per_coded_sample, flags); } } switch (st->codec->codec_id) { case AV_CODEC_ID_PCM_S8: case AV_CODEC_ID_PCM_U8: if (st->codec->bits_per_coded_sample == 16) st->codec->codec_id = AV_CODEC_ID_PCM_S16BE; break; case AV_CODEC_ID_PCM_S16LE: case AV_CODEC_ID_PCM_S16BE: if (st->codec->bits_per_coded_sample == 8) st->codec->codec_id = AV_CODEC_ID_PCM_S8; else if (st->codec->bits_per_coded_sample == 24) st->codec->codec_id = st->codec->codec_id == AV_CODEC_ID_PCM_S16BE ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE; break; /* set values for old format before stsd version 1 appeared */ case AV_CODEC_ID_MACE3: sc->samples_per_frame = 6; sc->bytes_per_frame = 2*st->codec->channels; break; case AV_CODEC_ID_MACE6: sc->samples_per_frame = 6; sc->bytes_per_frame = 1*st->codec->channels; break; case AV_CODEC_ID_ADPCM_IMA_QT: sc->samples_per_frame = 64; sc->bytes_per_frame = 34*st->codec->channels; break; case AV_CODEC_ID_GSM: sc->samples_per_frame = 160; sc->bytes_per_frame = 33; break; default: break; } bits_per_sample = av_get_bits_per_sample(st->codec->codec_id); if (bits_per_sample) { st->codec->bits_per_coded_sample = bits_per_sample; sc->sample_size = (bits_per_sample >> 3) * st->codec->channels; } } else if (st->codec->codec_type==AVMEDIA_TYPE_SUBTITLE){ // ttxt stsd contains display flags, justification, background // color, fonts, and default styles, so fake an atom to read it MOVAtom fake_atom = { .size = size - (avio_tell(pb) - start_pos) }; if (format != AV_RL32(\"mp4s\")) // mp4s contains a regular esds atom mov_read_glbl(c, pb, fake_atom); st->codec->codec_id= id; st->codec->width = sc->width; st->codec->height = sc->height; } else { /* other codec type, just skip (rtp, mp4s, tmcd ...) */ avio_skip(pb, size - (avio_tell(pb) - start_pos)); } /* this will read extra atoms at the end (wave, alac, damr, avcC, SMI ...) 
*/ a.size = size - (avio_tell(pb) - start_pos); if (a.size > 8) { int ret; if ((ret = mov_read_default(c, pb, a)) < 0) return ret; } else if (a.size > 0) avio_skip(pb, a.size); } if (st->codec->codec_type==AVMEDIA_TYPE_AUDIO && st->codec->sample_rate==0 && sc->time_scale>1) st->codec->sample_rate= sc->time_scale; /* special codec parameters handling */ switch (st->codec->codec_id) { #if CONFIG_DV_DEMUXER case AV_CODEC_ID_DVAUDIO: c->dv_fctx = avformat_alloc_context(); c->dv_demux = avpriv_dv_init_demux(c->dv_fctx); if (!c->dv_demux) { av_log(c->fc, AV_LOG_ERROR, \"dv demux context init error\\n\"); return AVERROR(ENOMEM); } sc->dv_audio_container = 1; st->codec->codec_id = AV_CODEC_ID_PCM_S16LE; break; #endif /* no ifdef since parameters are always those */ case AV_CODEC_ID_QCELP: // force sample rate for qcelp when not stored in mov if (st->codec->codec_tag != MKTAG('Q','c','l','p')) st->codec->sample_rate = 8000; st->codec->channels= 1; /* really needed */ break; case AV_CODEC_ID_AMR_NB: st->codec->channels= 1; /* really needed */ /* force sample rate for amr, stsd in 3gp does not store sample rate */ st->codec->sample_rate = 8000; break; case AV_CODEC_ID_AMR_WB: st->codec->channels = 1; st->codec->sample_rate = 16000; break; case AV_CODEC_ID_MP2: case AV_CODEC_ID_MP3: st->codec->codec_type = AVMEDIA_TYPE_AUDIO; /* force type after stsd for m1a hdlr */ st->need_parsing = AVSTREAM_PARSE_FULL; break; case AV_CODEC_ID_GSM: case AV_CODEC_ID_ADPCM_MS: case AV_CODEC_ID_ADPCM_IMA_WAV: case AV_CODEC_ID_ILBC: st->codec->block_align = sc->bytes_per_frame; break; case AV_CODEC_ID_ALAC: if (st->codec->extradata_size == 36) { st->codec->channels = AV_RB8 (st->codec->extradata+21); st->codec->sample_rate = AV_RB32(st->codec->extradata+32); } break; case AV_CODEC_ID_VC1: st->need_parsing = AVSTREAM_PARSE_FULL; break; default: break; } return 0; }"} {"target": 0, "idx": 4219, "func": "build_gtdt(GArray *table_data, GArray *linker) { int gtdt_start = table_data->len; AcpiGenericTimerTable *gtdt; gtdt = acpi_data_push(table_data, sizeof *gtdt); /* The interrupt values are the same with the device tree when adding 16 */ gtdt->secure_el1_interrupt = ARCH_TIMER_S_EL1_IRQ + 16; gtdt->secure_el1_flags = ACPI_EDGE_SENSITIVE; gtdt->non_secure_el1_interrupt = ARCH_TIMER_NS_EL1_IRQ + 16; gtdt->non_secure_el1_flags = ACPI_EDGE_SENSITIVE; gtdt->virtual_timer_interrupt = ARCH_TIMER_VIRT_IRQ + 16; gtdt->virtual_timer_flags = ACPI_EDGE_SENSITIVE; gtdt->non_secure_el2_interrupt = ARCH_TIMER_NS_EL2_IRQ + 16; gtdt->non_secure_el2_flags = ACPI_EDGE_SENSITIVE; build_header(linker, table_data, (void *)(table_data->data + gtdt_start), \"GTDT\", table_data->len - gtdt_start, 2, NULL); }"} {"target": 1, "idx": 4232, "func": "static target_ulong h_random(PowerPCCPU *cpu, sPAPRMachineState *spapr, target_ulong opcode, target_ulong *args) { sPAPRRngState *rngstate; HRandomData hrdata; rngstate = SPAPR_RNG(object_resolve_path_type(\"\", TYPE_SPAPR_RNG, NULL)); if (!rngstate || !rngstate->backend) { return H_HARDWARE; } qemu_sem_init(&hrdata.sem, 0); hrdata.val.v64 = 0; hrdata.received = 0; qemu_mutex_unlock_iothread(); while (hrdata.received < 8) { rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received, random_recv, &hrdata); qemu_sem_wait(&hrdata.sem); } qemu_mutex_lock_iothread(); qemu_sem_destroy(&hrdata.sem); args[0] = hrdata.val.v64; return H_SUCCESS; }"} {"target": 0, "idx": 4239, "func": "static int normalize_bits(int num, int width) { int i = 0; int bits = (width) ? 
31 : 15; int limit = 1 << (bits - 1); if (num) { if (num == -1) return bits; if (num < 0) num = ~num; for (i = 0; num < limit; i++) num <<= 1; } return i; }"} {"target": 1, "idx": 4242, "func": "static void sdhci_sysbus_realize(DeviceState *dev, Error ** errp) { SDHCIState *s = SYSBUS_SDHCI(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); sdhci_common_realize(s, errp); if (errp && *errp) { return; } if (s->dma_mr) { address_space_init(s->dma_as, s->dma_mr, \"sdhci-dma\"); } else { /* use system_memory() if property \"dma\" not set */ s->dma_as = &address_space_memory; } sysbus_init_irq(sbd, &s->irq); sysbus_init_mmio(sbd, &s->iomem); }"} {"target": 1, "idx": 4258, "func": "static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ int time_incr, time_increment; s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0 , P = 1 */ if(s->pict_type==AV_PICTURE_TYPE_B && s->low_delay && s->vol_control_parameters==0 && !(s->flags & CODEC_FLAG_LOW_DELAY)){ av_log(s->avctx, AV_LOG_ERROR, \"low_delay flag incorrectly, clearing it\\n\"); s->low_delay=0; } s->partitioned_frame= s->data_partitioning && s->pict_type!=AV_PICTURE_TYPE_B; if(s->partitioned_frame) s->decode_mb= mpeg4_decode_partitioned_mb; else s->decode_mb= mpeg4_decode_mb; time_incr=0; while (get_bits1(gb) != 0) time_incr++; check_marker(gb, \"before time_increment\"); if(s->time_increment_bits==0 || !(show_bits(gb, s->time_increment_bits+1)&1)){ av_log(s->avctx, AV_LOG_ERROR, \"hmm, seems the headers are not complete, trying to guess time_increment_bits\\n\"); for(s->time_increment_bits=1 ;s->time_increment_bits<16; s->time_increment_bits++){ if ( s->pict_type == AV_PICTURE_TYPE_P || (s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE)) { if((show_bits(gb, s->time_increment_bits+6)&0x37) == 0x30) break; }else if((show_bits(gb, s->time_increment_bits+5)&0x1F) == 0x18) break; } av_log(s->avctx, AV_LOG_ERROR, \"my guess is %d bits ;)\\n\",s->time_increment_bits); } if(IS_3IV1) time_increment= get_bits1(gb); //FIXME investigate further else time_increment= get_bits(gb, s->time_increment_bits); if(s->pict_type!=AV_PICTURE_TYPE_B){ s->last_time_base= s->time_base; s->time_base+= time_incr; s->time= s->time_base*s->avctx->time_base.den + time_increment; if(s->workaround_bugs&FF_BUG_UMP4){ if(s->time < s->last_non_b_time){ /* header is not mpeg-4-compatible, broken encoder, * trying to workaround */ s->time_base++; s->time+= s->avctx->time_base.den; } } s->pp_time= s->time - s->last_non_b_time; s->last_non_b_time= s->time; }else{ s->time= (s->last_time_base + time_incr)*s->avctx->time_base.den + time_increment; s->pb_time= s->pp_time - (s->last_non_b_time - s->time); if(s->pp_time <=s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time<=0){ /* messed up order, maybe after seeking? 
skipping current b-frame */ return FRAME_SKIPPED; } ff_mpeg4_init_direct_mv(s); if(s->t_frame==0) s->t_frame= s->pb_time; if(s->t_frame==0) s->t_frame=1; // 1/0 protection s->pp_field_time= ( ROUNDED_DIV(s->last_non_b_time, s->t_frame) - ROUNDED_DIV(s->last_non_b_time - s->pp_time, s->t_frame))*2; s->pb_field_time= ( ROUNDED_DIV(s->time, s->t_frame) - ROUNDED_DIV(s->last_non_b_time - s->pp_time, s->t_frame))*2; if(!s->progressive_sequence){ if(s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1) return FRAME_SKIPPED; } } if(s->avctx->time_base.num) s->current_picture_ptr->f.pts = (s->time + s->avctx->time_base.num / 2) / s->avctx->time_base.num; else s->current_picture_ptr->f.pts = AV_NOPTS_VALUE; if(s->avctx->debug&FF_DEBUG_PTS) av_log(s->avctx, AV_LOG_DEBUG, \"MPEG4 PTS: %\"PRId64\"\\n\", s->current_picture_ptr->f.pts); check_marker(gb, \"before vop_coded\"); /* vop coded */ if (get_bits1(gb) != 1){ if(s->avctx->debug&FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_ERROR, \"vop not coded\\n\"); return FRAME_SKIPPED; } if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == AV_PICTURE_TYPE_P || (s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE))) { /* rounding type for motion estimation */ s->no_rounding = get_bits1(gb); } else { s->no_rounding = 0; } //FIXME reduced res stuff if (s->shape != RECT_SHAPE) { if (s->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) { skip_bits(gb, 13); /* width */ skip_bits1(gb); /* marker */ skip_bits(gb, 13); /* height */ skip_bits1(gb); /* marker */ skip_bits(gb, 13); /* hor_spat_ref */ skip_bits1(gb); /* marker */ skip_bits(gb, 13); /* ver_spat_ref */ } skip_bits1(gb); /* change_CR_disable */ if (get_bits1(gb) != 0) { skip_bits(gb, 8); /* constant_alpha_value */ } } //FIXME complexity estimation stuff if (s->shape != BIN_ONLY_SHAPE) { skip_bits_long(gb, s->cplx_estimation_trash_i); if(s->pict_type != AV_PICTURE_TYPE_I) skip_bits_long(gb, s->cplx_estimation_trash_p); if(s->pict_type == AV_PICTURE_TYPE_B) skip_bits_long(gb, s->cplx_estimation_trash_b); s->intra_dc_threshold= ff_mpeg4_dc_threshold[ get_bits(gb, 3) ]; if(!s->progressive_sequence){ s->top_field_first= get_bits1(gb); s->alternate_scan= get_bits1(gb); }else s->alternate_scan= 0; } if(s->alternate_scan){ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } else{ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } if(s->pict_type == AV_PICTURE_TYPE_S && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){ mpeg4_decode_sprite_trajectory(s, gb); if(s->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, \"sprite_brightness_change not supported\\n\"); if(s->vol_sprite_usage==STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, \"static sprite not supported\\n\"); } if (s->shape != BIN_ONLY_SHAPE) { s->chroma_qscale= s->qscale = get_bits(gb, s->quant_precision); if(s->qscale==0){ av_log(s->avctx, AV_LOG_ERROR, \"Error, header 
damaged or not MPEG4 header (qscale=0)\\n\"); return -1; // makes no sense to continue, as there is nothing left from the image then } if (s->pict_type != AV_PICTURE_TYPE_I) { s->f_code = get_bits(gb, 3); /* fcode_for */ if(s->f_code==0){ av_log(s->avctx, AV_LOG_ERROR, \"Error, header damaged or not MPEG4 header (f_code=0)\\n\"); return -1; // makes no sense to continue, as the MV decoding will break very quickly } }else s->f_code=1; if (s->pict_type == AV_PICTURE_TYPE_B) { s->b_code = get_bits(gb, 3); }else s->b_code=1; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ av_log(s->avctx, AV_LOG_DEBUG, \"qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d\\n\", s->qscale, s->f_code, s->b_code, s->pict_type == AV_PICTURE_TYPE_I ? \"I\" : (s->pict_type == AV_PICTURE_TYPE_P ? \"P\" : (s->pict_type == AV_PICTURE_TYPE_B ? \"B\" : \"S\")), gb->size_in_bits,s->progressive_sequence, s->alternate_scan, s->top_field_first, s->quarter_sample ? \"q\" : \"h\", s->data_partitioning, s->resync_marker, s->num_sprite_warping_points, s->sprite_warping_accuracy, 1-s->no_rounding, s->vo_type, s->vol_control_parameters ? \" VOLC\" : \" \", s->intra_dc_threshold, s->cplx_estimation_trash_i, s->cplx_estimation_trash_p, s->cplx_estimation_trash_b); } if(!s->scalability){ if (s->shape!=RECT_SHAPE && s->pict_type!=AV_PICTURE_TYPE_I) { skip_bits1(gb); // vop shape coding type } }else{ if(s->enhancement_type){ int load_backward_shape= get_bits1(gb); if(load_backward_shape){ av_log(s->avctx, AV_LOG_ERROR, \"load backward shape isn't supported\\n\"); } } skip_bits(gb, 2); //ref_select_code } } /* detect buggy encoders which don't set the low_delay flag (divx4/xvid/opendivx)*/ // note we cannot detect divx5 without b-frames easily (although it's buggy too) if(s->vo_type==0 && s->vol_control_parameters==0 && s->divx_version==-1 && s->picture_number==0){ av_log(s->avctx, AV_LOG_WARNING, \"looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\\n\"); s->low_delay=1; } s->picture_number++; // better than pic number==0 always ;) s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table; //FIXME add short header support s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table; if(s->workaround_bugs&FF_BUG_EDGE){ s->h_edge_pos= s->width; s->v_edge_pos= s->height; } return 0; }"} {"target": 1, "idx": 4263, "func": "static int handle_cmd(AHCIState *s, int port, int slot) { IDEState *ide_state; uint32_t opts; uint64_t tbl_addr; AHCICmdHdr *cmd; uint8_t *cmd_fis; dma_addr_t cmd_len; if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { /* Engine currently busy, try again later */ DPRINTF(port, \"engine busy\\n\"); return -1; } cmd = &((AHCICmdHdr *)s->dev[port].lst)[slot]; if (!s->dev[port].lst) { DPRINTF(port, \"error: lst not given but cmd handled\"); return -1; } /* remember current slot handle for later */ s->dev[port].cur_cmd = cmd; opts = le32_to_cpu(cmd->opts); tbl_addr = le64_to_cpu(cmd->tbl_addr); cmd_len = 0x80; cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len, DMA_DIRECTION_FROM_DEVICE); if (!cmd_fis) { DPRINTF(port, \"error: guest passed us an invalid cmd fis\\n\"); return -1; } /* The device we are working for */ ide_state = &s->dev[port].port.ifs[0]; if (!ide_state->blk) { DPRINTF(port, \"error: guest accessed unused port\"); goto out; } debug_print_fis(cmd_fis, 0x90); //debug_print_fis(cmd_fis, (opts & AHCI_CMD_HDR_CMD_FIS_LEN) * 4); switch (cmd_fis[0]) { case SATA_FIS_TYPE_REGISTER_H2D: break; default: DPRINTF(port, \"unknown command 
cmd_fis[0]=%02x cmd_fis[1]=%02x \" \"cmd_fis[2]=%02x\\n\", cmd_fis[0], cmd_fis[1], cmd_fis[2]); goto out; break; } switch (cmd_fis[1]) { case SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER: break; case 0: break; default: DPRINTF(port, \"unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x \" \"cmd_fis[2]=%02x\\n\", cmd_fis[0], cmd_fis[1], cmd_fis[2]); goto out; break; } if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) { switch (s->dev[port].port_state) { case STATE_RUN: if (cmd_fis[15] & ATA_SRST) { s->dev[port].port_state = STATE_RESET; } break; case STATE_RESET: if (!(cmd_fis[15] & ATA_SRST)) { ahci_reset_port(s, port); } break; } } else if (cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER) { /* Check for NCQ command */ if (is_ncq(cmd_fis[2])) { process_ncq_command(s, port, cmd_fis, slot); goto out; } /* Decompose the FIS: * AHCI does not interpret FIS packets, it only forwards them. * SATA 1.0 describes how to decode LBA28 and CHS FIS packets. * Later specifications, e.g, SATA 3.2, describe LBA48 FIS packets. * * ATA4 describes sector number for LBA28/CHS commands. * ATA6 describes sector number for LBA48 commands. * ATA8 deprecates CHS fully, describing only LBA28/48. * * We dutifully convert the FIS into IDE registers, and allow the * core layer to interpret them as needed. */ ide_state->feature = cmd_fis[3]; ide_state->sector = cmd_fis[4]; /* LBA 7:0 */ ide_state->lcyl = cmd_fis[5]; /* LBA 15:8 */ ide_state->hcyl = cmd_fis[6]; /* LBA 23:16 */ ide_state->select = cmd_fis[7]; /* LBA 27:24 (LBA28) */ ide_state->hob_sector = cmd_fis[8]; /* LBA 31:24 */ ide_state->hob_lcyl = cmd_fis[9]; /* LBA 39:32 */ ide_state->hob_hcyl = cmd_fis[10]; /* LBA 47:40 */ ide_state->hob_feature = cmd_fis[11]; ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]); /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */ /* 15: Only valid when UPDATE_COMMAND not set. */ /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command * table to ide_state->io_buffer */ if (opts & AHCI_CMD_ATAPI) { memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10); debug_print_fis(ide_state->io_buffer, 0x10); s->dev[port].done_atapi_packet = false; /* XXX send PIO setup FIS */ } ide_state->error = 0; /* Reset transferred byte counter */ cmd->status = 0; /* We're ready to process the command in FIS byte 2. 
*/ ide_exec_cmd(&s->dev[port].port, cmd_fis[2]); } out: dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE, cmd_len); if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { /* async command, complete later */ s->dev[port].busy_slot = slot; return -1; } /* done handling the command */ return 0; }"} {"target": 0, "idx": 4268, "func": "static int decompress_p(AVCodecContext *avctx, uint32_t *dst, int linesize, uint32_t *prev, int plinesize) { SCPRContext *s = avctx->priv_data; GetByteContext *gb = &s->gb; int ret, temp, min, max, x, y, cx = 0, cx1 = 0; int backstep = linesize - avctx->width; const int cxshift = s->cxshift; if (bytestream2_get_byte(gb) == 0) return 0; bytestream2_skip(gb, 1); init_rangecoder(&s->rc, gb); ret = decode_value(s, s->range_model, 256, 1, &min); ret |= decode_value(s, s->range_model, 256, 1, &temp); min += temp << 8; ret |= decode_value(s, s->range_model, 256, 1, &max); ret |= decode_value(s, s->range_model, 256, 1, &temp); if (ret < 0) return ret; max += temp << 8; memset(s->blocks, 0, sizeof(*s->blocks) * s->nbcount); while (min <= max) { int fill, count; ret = decode_value(s, s->fill_model, 5, 10, &fill); ret |= decode_value(s, s->count_model, 256, 20, &count); if (ret < 0) return ret; while (min < s->nbcount && count-- > 0) { s->blocks[min++] = fill; } } for (y = 0; y < s->nby; y++) { for (x = 0; x < s->nbx; x++) { int sy1 = 0, sy2 = 16, sx1 = 0, sx2 = 16; if (s->blocks[y * s->nbx + x] == 0) continue; if (((s->blocks[y * s->nbx + x] - 1) & 1) > 0) { ret = decode_value(s, s->sxy_model[0], 16, 100, &sx1); ret |= decode_value(s, s->sxy_model[1], 16, 100, &sy1); ret |= decode_value(s, s->sxy_model[2], 16, 100, &sx2); ret |= decode_value(s, s->sxy_model[3], 16, 100, &sy2); if (ret < 0) return ret; sx2++; sy2++; } if (((s->blocks[y * s->nbx + x] - 1) & 2) > 0) { int i, j, by = y * 16, bx = x * 16; int mvx, mvy; ret = decode_value(s, s->mv_model[0], 512, 100, &mvx); ret |= decode_value(s, s->mv_model[1], 512, 100, &mvy); if (ret < 0) return ret; mvx -= 256; mvy -= 256; if (by + mvy + sy1 < 0 || bx + mvx + sx1 < 0) return AVERROR_INVALIDDATA; for (i = 0; i < sy2 - sy1 && (by + sy1 + i) < avctx->height; i++) { for (j = 0; j < sx2 - sx1 && (bx + sx1 + j) < avctx->width; j++) { dst[(by + i + sy1) * linesize + bx + sx1 + j] = prev[(by + mvy + sy1 + i) * plinesize + bx + sx1 + mvx + j]; } } } else { int run, r, g, b, z, bx = x * 16 + sx1, by = y * 16 + sy1; unsigned clr, ptype = 0; for (; by < y * 16 + sy2 && by < avctx->height;) { ret = decode_value(s, s->op_model[ptype], 6, 1000, &ptype); if (ptype == 0) { ret = decode_unit(s, &s->pixel_model[0][cx + cx1], 400, &r); if (ret < 0) return ret; cx1 = (cx << 6) & 0xFC0; cx = r >> cxshift; ret = decode_unit(s, &s->pixel_model[1][cx + cx1], 400, &g); if (ret < 0) return ret; cx1 = (cx << 6) & 0xFC0; cx = g >> cxshift; ret = decode_unit(s, &s->pixel_model[2][cx + cx1], 400, &b); if (ret < 0) return ret; cx1 = (cx << 6) & 0xFC0; cx = b >> cxshift; clr = (b << 16) + (g << 8) + r; } if (ptype > 5) return AVERROR_INVALIDDATA; ret = decode_value(s, s->run_model[ptype], 256, 400, &run); if (ret < 0) return ret; switch (ptype) { case 0: while (run-- > 0) { if (by >= avctx->height) return AVERROR_INVALIDDATA; dst[by * linesize + bx] = clr; bx++; if (bx >= x * 16 + sx2 || bx >= avctx->width) { bx = x * 16 + sx1; by++; } } break; case 1: while (run-- > 0) { if (bx == 0) { if (by < 1) return AVERROR_INVALIDDATA; z = backstep; } else { z = 0; } if (by >= avctx->height) return AVERROR_INVALIDDATA; clr = dst[by * 
linesize + bx - 1 - z]; dst[by * linesize + bx] = clr; bx++; if (bx >= x * 16 + sx2 || bx >= avctx->width) { bx = x * 16 + sx1; by++; } } break; case 2: while (run-- > 0) { if (by < 1 || by >= avctx->height) return AVERROR_INVALIDDATA; clr = dst[(by - 1) * linesize + bx]; dst[by * linesize + bx] = clr; bx++; if (bx >= x * 16 + sx2 || bx >= avctx->width) { bx = x * 16 + sx1; by++; } } break; case 3: while (run-- > 0) { if (by >= avctx->height) return AVERROR_INVALIDDATA; clr = prev[by * linesize + bx]; dst[by * linesize + bx] = clr; bx++; if (bx >= x * 16 + sx2 || bx >= avctx->width) { bx = x * 16 + sx1; by++; } } break; case 4: while (run-- > 0) { uint8_t *odst = (uint8_t *)dst; if (by < 1 || by >= avctx->height) return AVERROR_INVALIDDATA; if (bx == 0) { z = backstep; } else { z = 0; } r = odst[((by - 1) * linesize + bx) * 4] + odst[(by * linesize + bx - 1 - z) * 4] - odst[((by - 1) * linesize + bx - 1 - z) * 4]; g = odst[((by - 1) * linesize + bx) * 4 + 1] + odst[(by * linesize + bx - 1 - z) * 4 + 1] - odst[((by - 1) * linesize + bx - 1 - z) * 4 + 1]; b = odst[((by - 1) * linesize + bx) * 4 + 2] + odst[(by * linesize + bx - 1 - z) * 4 + 2] - odst[((by - 1) * linesize + bx - 1 - z) * 4 + 2]; clr = ((b & 0xFF) << 16) + ((g & 0xFF) << 8) + (r & 0xFF); dst[by * linesize + bx] = clr; bx++; if (bx >= x * 16 + sx2 || bx >= avctx->width) { bx = x * 16 + sx1; by++; } } break; case 5: while (run-- > 0) { if (by < 1 || by >= avctx->height) return AVERROR_INVALIDDATA; if (bx == 0) { z = backstep; } else { z = 0; } clr = dst[(by - 1) * linesize + bx - 1 - z]; dst[by * linesize + bx] = clr; bx++; if (bx >= x * 16 + sx2 || bx >= avctx->width) { bx = x * 16 + sx1; by++; } } break; } if (avctx->bits_per_coded_sample == 16) { cx1 = (clr & 0xFF00) >> 2; cx = (clr & 0xFFFFFF) >> 16; } else { cx1 = (clr & 0xFC00) >> 4; cx = (clr & 0xFFFFFF) >> 18; } } } } } return 0; }"} {"target": 0, "idx": 4270, "func": "static void *rndis_queue_response(USBNetState *s, unsigned int length) { struct rndis_response *r = qemu_mallocz(sizeof(struct rndis_response) + length); TAILQ_INSERT_TAIL(&s->rndis_resp, r, entries); r->length = length; return &r->buf[0]; }"} {"target": 0, "idx": 4273, "func": "static char *visitor_get(TestOutputVisitorData *data) { data->str = string_output_get_string(data->sov); g_assert(data->str); return data->str; }"} {"target": 0, "idx": 4294, "func": "static void do_change_vnc(const char *target) { if (strcmp(target, \"passwd\") == 0 || strcmp(target, \"password\") == 0) { char password[9]; monitor_readline(\"Password: \", 1, password, sizeof(password)); if (vnc_display_password(NULL, password) < 0) term_printf(\"could not set VNC server password\\n\"); } else { if (vnc_display_open(NULL, target) < 0) term_printf(\"could not start VNC server on %s\\n\", target); } }"} {"target": 0, "idx": 4318, "func": "void do_interrupt (CPUState *env) { #if defined (CONFIG_USER_ONLY) env->exception_index |= 0x100; #else uint32_t msr; int excp = env->exception_index; msr = _load_msr(env); #if defined (DEBUG_EXCEPTIONS) if ((excp == EXCP_PROGRAM || excp == EXCP_DSI) && msr_pr == 1) { if (loglevel > 0) { fprintf(logfile, \"Raise exception at 0x%08x => 0x%08x (%02x)\\n\", env->nip, excp << 8, env->error_code); } if (loglevel > 0) cpu_ppc_dump_state(env, logfile, 0); } #endif /* Generate informations in save/restore registers */ switch (excp) { case EXCP_OFCALL: #if defined (USE_OPEN_FIRMWARE) env->gpr[3] = OF_client_entry((void *)env->gpr[3]); #endif return; case EXCP_RTASCALL: #if defined (USE_OPEN_FIRMWARE) 
printf(\"RTAS call !\\n\"); env->gpr[3] = RTAS_entry((void *)env->gpr[3]); printf(\"RTAS call done\\n\"); #endif return; case EXCP_NONE: /* Do nothing */ #if defined (DEBUG_EXCEPTIONS) printf(\"%s: escape EXCP_NONE\\n\", __func__); #endif return; case EXCP_RESET: if (msr_ip) excp += 0xFFC00; goto store_next; case EXCP_MACHINE_CHECK: if (msr_me == 0) { cpu_abort(env, \"Machine check exception while not allowed\\n\"); } msr_me = 0; break; case EXCP_DSI: /* Store exception cause */ /* data location address has been stored * when the fault has been detected */ msr &= ~0xFFFF0000; env->spr[DSISR] = 0; if (env->error_code & EXCP_DSI_TRANSLATE) env->spr[DSISR] |= 0x40000000; else if (env->error_code & EXCP_DSI_PROT) env->spr[DSISR] |= 0x08000000; else if (env->error_code & EXCP_DSI_NOTSUP) { env->spr[DSISR] |= 0x80000000; if (env->error_code & EXCP_DSI_DIRECT) env->spr[DSISR] |= 0x04000000; } if (env->error_code & EXCP_DSI_STORE) env->spr[DSISR] |= 0x02000000; if ((env->error_code & 0xF) == EXCP_DSI_DABR) env->spr[DSISR] |= 0x00400000; if (env->error_code & EXCP_DSI_ECXW) env->spr[DSISR] |= 0x00100000; #if defined (DEBUG_EXCEPTIONS) if (loglevel) { fprintf(logfile, \"DSI exception: DSISR=0x%08x, DAR=0x%08x\\n\", env->spr[DSISR], env->spr[DAR]); } else { printf(\"DSI exception: DSISR=0x%08x, DAR=0x%08x nip=0x%08x\\n\", env->spr[DSISR], env->spr[DAR], env->nip); } #endif goto store_next; case EXCP_ISI: /* Store exception cause */ msr &= ~0xFFFF0000; if (env->error_code == EXCP_ISI_TRANSLATE) msr |= 0x40000000; else if (env->error_code == EXCP_ISI_NOEXEC || env->error_code == EXCP_ISI_GUARD || env->error_code == EXCP_ISI_DIRECT) msr |= 0x10000000; else msr |= 0x08000000; #if defined (DEBUG_EXCEPTIONS) if (loglevel) { fprintf(logfile, \"ISI exception: msr=0x%08x, nip=0x%08x\\n\", msr, env->nip); } else { printf(\"ISI exception: msr=0x%08x, nip=0x%08x tbl:0x%08x\\n\", msr, env->nip, env->spr[V_TBL]); } #endif goto store_next; case EXCP_EXTERNAL: if (msr_ee == 0) { #if defined (DEBUG_EXCEPTIONS) if (loglevel > 0) { fprintf(logfile, \"Skipping hardware interrupt\\n\"); } #endif /* Requeue it */ do_raise_exception(EXCP_EXTERNAL); return; } goto store_next; case EXCP_ALIGN: /* Store exception cause */ /* Get rS/rD and rA from faulting opcode */ env->spr[DSISR] |= (ldl_code((void *)(env->nip - 4)) & 0x03FF0000) >> 16; /* data location address has been stored * when the fault has been detected */ goto store_current; case EXCP_PROGRAM: msr &= ~0xFFFF0000; switch (env->error_code & ~0xF) { case EXCP_FP: if (msr_fe0 == 0 && msr_fe1 == 0) { #if defined (DEBUG_EXCEPTIONS) printf(\"Ignore floating point exception\\n\"); #endif return; } msr |= 0x00100000; /* Set FX */ env->fpscr[7] |= 0x8; /* Finally, update FEX */ if ((((env->fpscr[7] & 0x3) << 3) | (env->fpscr[6] >> 1)) & ((env->fpscr[1] << 1) | (env->fpscr[0] >> 3))) env->fpscr[7] |= 0x4; break; case EXCP_INVAL: // printf(\"Invalid instruction at 0x%08x\\n\", env->nip); msr |= 0x00080000; break; case EXCP_PRIV: msr |= 0x00040000; break; case EXCP_TRAP: msr |= 0x00020000; break; default: /* Should never occur */ break; } msr |= 0x00010000; goto store_current; case EXCP_NO_FP: goto store_current; case EXCP_DECR: if (msr_ee == 0) { /* Requeue it */ do_raise_exception(EXCP_DECR); return; } goto store_next; case EXCP_SYSCALL: #if defined (DEBUG_EXCEPTIONS) if (msr_pr) { if (loglevel) { fprintf(logfile, \"syscall %d 0x%08x 0x%08x 0x%08x 0x%08x\\n\", env->gpr[0], env->gpr[3], env->gpr[4], env->gpr[5], env->gpr[6]); } else { printf(\"syscall %d from 0x%08x 0x%08x 
0x%08x 0x%08x 0x%08x\\n\", env->gpr[0], env->nip, env->gpr[3], env->gpr[4], env->gpr[5], env->gpr[6]); } } #endif goto store_next; case EXCP_TRACE: goto store_next; case EXCP_FP_ASSIST: goto store_next; case EXCP_MTMSR: /* Nothing to do */ return; case EXCP_BRANCH: /* Nothing to do */ return; case EXCP_RFI: /* Restore user-mode state */ tb_flush(env); #if defined (DEBUG_EXCEPTIONS) if (msr_pr == 1) printf(\"Return from exception => 0x%08x\\n\", (uint32_t)env->nip); #endif return; store_current: /* SRR0 is set to current instruction */ env->spr[SRR0] = (uint32_t)env->nip - 4; break; store_next: /* SRR0 is set to next instruction */ env->spr[SRR0] = (uint32_t)env->nip; break; } env->spr[SRR1] = msr; /* reload MSR with correct bits */ msr_pow = 0; msr_ee = 0; msr_pr = 0; msr_fp = 0; msr_fe0 = 0; msr_se = 0; msr_be = 0; msr_fe1 = 0; msr_ir = 0; msr_dr = 0; msr_ri = 0; msr_le = msr_ile; /* Jump to handler */ env->nip = excp << 8; env->exception_index = EXCP_NONE; /* Invalidate all TLB as we may have changed translation mode */ tlb_flush(env, 1); /* ensure that no TB jump will be modified as the program flow was changed */ #ifdef __sparc__ tmp_T0 = 0; #else T0 = 0; #endif #endif env->exception_index = -1; }"} {"target": 0, "idx": 4321, "func": "int rom_add_blob(const char *name, const void *blob, size_t len, target_phys_addr_t addr) { Rom *rom; rom = g_malloc0(sizeof(*rom)); rom->name = g_strdup(name); rom->addr = addr; rom->romsize = len; rom->data = g_malloc0(rom->romsize); memcpy(rom->data, blob, len); rom_insert(rom); return 0; }"} {"target": 0, "idx": 4336, "func": "static av_cold int decode_init_mp3on4(AVCodecContext * avctx) { MP3On4DecodeContext *s = avctx->priv_data; MPEG4AudioConfig cfg; int i; if ((avctx->extradata_size < 2) || (avctx->extradata == NULL)) { av_log(avctx, AV_LOG_ERROR, \"Codec extradata missing or too short.\\n\"); return AVERROR_INVALIDDATA; } avpriv_mpeg4audio_get_config(&cfg, avctx->extradata, avctx->extradata_size * 8, 1); if (!cfg.chan_config || cfg.chan_config > 7) { av_log(avctx, AV_LOG_ERROR, \"Invalid channel config number.\\n\"); return AVERROR_INVALIDDATA; } s->frames = mp3Frames[cfg.chan_config]; s->coff = chan_offset[cfg.chan_config]; avctx->channels = ff_mpeg4audio_channels[cfg.chan_config]; avctx->channel_layout = chan_layout[cfg.chan_config]; if (cfg.sample_rate < 16000) s->syncword = 0xffe00000; else s->syncword = 0xfff00000; /* Init the first mp3 decoder in standard way, so that all tables get builded * We replace avctx->priv_data with the context of the first decoder so that * decode_init() does not have to be changed. * Other decoders will be initialized here copying data from the first context */ // Allocate zeroed memory for the first decoder context s->mp3decctx[0] = av_mallocz(sizeof(MPADecodeContext)); if (!s->mp3decctx[0]) goto alloc_fail; // Put decoder context in place to make init_decode() happy avctx->priv_data = s->mp3decctx[0]; decode_init(avctx); // Restore mp3on4 context pointer avctx->priv_data = s; s->mp3decctx[0]->adu_mode = 1; // Set adu mode /* Create a separate codec/context for each frame (first is already ok). 
* Each frame is 1 or 2 channels - up to 5 frames allowed */ for (i = 1; i < s->frames; i++) { s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext)); if (!s->mp3decctx[i]) goto alloc_fail; s->mp3decctx[i]->adu_mode = 1; s->mp3decctx[i]->avctx = avctx; s->mp3decctx[i]->mpadsp = s->mp3decctx[0]->mpadsp; } return 0; alloc_fail: decode_close_mp3on4(avctx); return AVERROR(ENOMEM); }"} {"target": 0, "idx": 4343, "func": "static void avc_luma_midv_qrt_and_aver_dst_8w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height, uint8_t vert_offset) { int32_t loop_cnt; v16i8 src0, src1, src2, src3, src4; v16u8 dst0, dst1, dst2, dst3; v16i8 mask0, mask1, mask2; v8i16 hz_out0, hz_out1, hz_out2, hz_out3; v8i16 hz_out4, hz_out5, hz_out6, hz_out7, hz_out8; v8i16 res0, res1, res2, res3; v8i16 res4, res5, res6, res7; LD_SB3(&luma_mask_arr[0], 16, mask0, mask1, mask2); LD_SB5(src, src_stride, src0, src1, src2, src3, src4); XORI_B5_128_SB(src0, src1, src2, src3, src4); src += (5 * src_stride); hz_out0 = AVC_HORZ_FILTER_SH(src0, mask0, mask1, mask2); hz_out1 = AVC_HORZ_FILTER_SH(src1, mask0, mask1, mask2); hz_out2 = AVC_HORZ_FILTER_SH(src2, mask0, mask1, mask2); hz_out3 = AVC_HORZ_FILTER_SH(src3, mask0, mask1, mask2); hz_out4 = AVC_HORZ_FILTER_SH(src4, mask0, mask1, mask2); for (loop_cnt = (height >> 2); loop_cnt--;) { LD_SB4(src, src_stride, src0, src1, src2, src3); XORI_B4_128_SB(src0, src1, src2, src3); src += (4 * src_stride); LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); hz_out5 = AVC_HORZ_FILTER_SH(src0, mask0, mask1, mask2); hz_out6 = AVC_HORZ_FILTER_SH(src1, mask0, mask1, mask2); hz_out7 = AVC_HORZ_FILTER_SH(src2, mask0, mask1, mask2); hz_out8 = AVC_HORZ_FILTER_SH(src3, mask0, mask1, mask2); res0 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5); res2 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6); res4 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out2, hz_out3, hz_out4, hz_out5, hz_out6, hz_out7); res6 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out3, hz_out4, hz_out5, hz_out6, hz_out7, hz_out8); if (vert_offset) { res1 = __msa_srari_h(hz_out3, 5); res3 = __msa_srari_h(hz_out4, 5); res5 = __msa_srari_h(hz_out5, 5); res7 = __msa_srari_h(hz_out6, 5); } else { res1 = __msa_srari_h(hz_out2, 5); res3 = __msa_srari_h(hz_out3, 5); res5 = __msa_srari_h(hz_out4, 5); res7 = __msa_srari_h(hz_out5, 5); } SAT_SH4_SH(res1, res3, res5, res7, 7); res0 = __msa_aver_s_h(res0, res1); res1 = __msa_aver_s_h(res2, res3); res2 = __msa_aver_s_h(res4, res5); res3 = __msa_aver_s_h(res6, res7); ILVR_D2_UB(dst1, dst0, dst3, dst2, dst0, dst1); CONVERT_UB_AVG_ST8x4_UB(res0, res1, res2, res3, dst0, dst1, dst, dst_stride); dst += (4 * dst_stride); hz_out0 = hz_out4; hz_out1 = hz_out5; hz_out2 = hz_out6; hz_out3 = hz_out7; hz_out4 = hz_out8; } }"} {"target": 0, "idx": 4346, "func": "static const AVClass *filter_child_class_next(const AVClass *prev) { AVFilter **f = NULL; while (prev && *(f = av_filter_next(f))) if ((*f)->priv_class == prev) break; while (*(f = av_filter_next(f))) if ((*f)->priv_class) return (*f)->priv_class; return NULL; }"} {"target": 1, "idx": 4351, "func": "void qmp_output_visitor_cleanup(QmpOutputVisitor *v) { QStackEntry *e, *tmp; QTAILQ_FOREACH_SAFE(e, &v->stack, node, tmp) { QTAILQ_REMOVE(&v->stack, e, node); if (e->value) { qobject_decref(e->value); } g_free(e); } g_free(v); }"} {"target": 1, "idx": 4352, "func": "static int qemu_chr_open_udp(QemuOpts *opts, CharDriverState **_chr) { CharDriverState *chr = 
NULL; NetCharDriver *s = NULL; int fd = -1; int ret; chr = g_malloc0(sizeof(CharDriverState)); s = g_malloc0(sizeof(NetCharDriver)); fd = inet_dgram_opts(opts); if (fd < 0) { fprintf(stderr, \"inet_dgram_opts failed\\n\"); ret = -errno; goto return_err; } s->fd = fd; s->bufcnt = 0; s->bufptr = 0; chr->opaque = s; chr->chr_write = udp_chr_write; chr->chr_update_read_handler = udp_chr_update_read_handler; chr->chr_close = udp_chr_close; *_chr = chr; return 0; return_err: g_free(chr); g_free(s); if (fd >= 0) { closesocket(fd); } return ret; }"} {"target": 1, "idx": 4354, "func": "void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr) { if ((new_psr & PSR_CWP) >= env->nwindows) { cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC()); } else { cpu_put_psr(env, new_psr); } }"} {"target": 1, "idx": 4373, "func": "void helper_wrpstate(CPUSPARCState *env, target_ulong new_state) { cpu_change_pstate(env, new_state & 0xf3f); #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { cpu_check_irqs(env); } #endif }"} {"target": 1, "idx": 4380, "func": "void tb_flush(CPUState *cpu) { #if defined(DEBUG_FLUSH) printf(\"qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\\n\", (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer), tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ? ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) / tcg_ctx.tb_ctx.nb_tbs : 0); #endif if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) > tcg_ctx.code_gen_buffer_size) { cpu_abort(cpu, \"Internal error: code buffer overflow\\n\"); tcg_ctx.tb_ctx.nb_tbs = 0; CPU_FOREACH(cpu) { memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache)); cpu->tb_flushed = true; qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE); page_flush_tb(); tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer; /* XXX: flush processor icache at this point if cache flush is expensive */ tcg_ctx.tb_ctx.tb_flush_count++;"} {"target": 1, "idx": 4381, "func": "int register_savevm(DeviceState *dev, const char *idstr, int instance_id, int version_id, SaveStateHandler *save_state, LoadStateHandler *load_state, void *opaque) { SaveVMHandlers *ops = g_malloc0(sizeof(SaveVMHandlers)); ops->save_state = save_state; ops->load_state = load_state; return register_savevm_live(dev, idstr, instance_id, version_id, ops, opaque); }"} {"target": 0, "idx": 4408, "func": "static void test_validate_union(TestInputVisitorData *data, const void *unused) { UserDefUnion *tmp = NULL; Visitor *v; Error *err = NULL; v = validate_test_init(data, \"{ 'type': 'b', 'integer': 41, 'data' : { 'integer': 42 } }\"); visit_type_UserDefUnion(v, &tmp, NULL, &err); g_assert(!err); qapi_free_UserDefUnion(tmp); }"} {"target": 0, "idx": 4414, "func": "static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h) { uint8_t *ptr_y, *ptr_cb, *ptr_cr; int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy; const int lowres= s->avctx->lowres; const int block_s= 8>>lowres; const int s_mask= (2<h_edge_pos >> lowres; const int v_edge_pos = s->v_edge_pos >> lowres; linesize = s->current_picture.linesize[0] << field_based; uvlinesize = s->current_picture.linesize[1] << field_based; if(s->quarter_sample){ //FIXME obviously not perfect but qpel wont work in lowres anyway motion_x/=2; motion_y/=2; } if(field_based){ motion_y += (bottom_field 
- field_select)*((1<mb_x*2*block_s + (motion_x >> (lowres+1)); src_y =(s->mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1)); if (s->out_format == FMT_H263) { uvsx = ((motion_x>>1) & s_mask) | (sx&1); uvsy = ((motion_y>>1) & s_mask) | (sy&1); uvsrc_x = src_x>>1; uvsrc_y = src_y>>1; }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261 mx = motion_x / 4; my = motion_y / 4; uvsx = (2*mx) & s_mask; uvsy = (2*my) & s_mask; uvsrc_x = s->mb_x*block_s + (mx >> lowres); uvsrc_y = s->mb_y*block_s + (my >> lowres); } else { mx = motion_x / 2; my = motion_y / 2; uvsx = mx & s_mask; uvsy = my & s_mask; uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1)); uvsrc_y =(s->mb_y*block_s>>field_based) + (my >> (lowres+1)); } ptr_y = ref_picture[0] + src_y * linesize + src_x; ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x; ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x; if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){ ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based, src_x, src_y<edge_emu_buffer; if(!(s->flags&CODEC_FLAG_GRAY)){ uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize; ff_emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based, uvsrc_x, uvsrc_y<>1, v_edge_pos>>1); ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based, uvsrc_x, uvsrc_y<>1, v_edge_pos>>1); ptr_cb= uvbuf; ptr_cr= uvbuf+16; } } if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data dest_y += s->linesize; dest_cb+= s->uvlinesize; dest_cr+= s->uvlinesize; } if(field_select){ ptr_y += s->linesize; ptr_cb+= s->uvlinesize; ptr_cr+= s->uvlinesize; } sx <<= 2 - lowres; sy <<= 2 - lowres; pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy); if(!(s->flags&CODEC_FLAG_GRAY)){ uvsx <<= 2 - lowres; uvsy <<= 2 - lowres; pix_op[lowres](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy); pix_op[lowres](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy); } //FIXME h261 lowres loop filter }"} {"target": 1, "idx": 4421, "func": "static int encode_slice_plane(AVCodecContext *avctx, int mb_count, uint8_t *src, int src_stride, uint8_t *buf, unsigned buf_size, int *qmat, int chroma) { ProresContext* ctx = avctx->priv_data; FDCTDSPContext *fdsp = &ctx->fdsp; DECLARE_ALIGNED(16, int16_t, blocks)[DEFAULT_SLICE_MB_WIDTH << 8], *block; int i, blocks_per_slice; PutBitContext pb; block = blocks; for (i = 0; i < mb_count; i++) { fdct_get(fdsp, src, src_stride, block + (0 << 6)); fdct_get(fdsp, src + 8 * src_stride, src_stride, block + ((2 - chroma) << 6)); if (!chroma) { fdct_get(fdsp, src + 16, src_stride, block + (1 << 6)); fdct_get(fdsp, src + 16 + 8 * src_stride, src_stride, block + (3 << 6)); } block += (256 >> chroma); src += (32 >> chroma); } blocks_per_slice = mb_count << (2 - chroma); init_put_bits(&pb, buf, buf_size << 3); encode_dc_coeffs(&pb, blocks, blocks_per_slice, qmat); encode_ac_coeffs(avctx, &pb, blocks, blocks_per_slice, qmat); flush_put_bits(&pb); return put_bits_ptr(&pb) - pb.buf; }"} {"target": 1, "idx": 4424, "func": "static inline void render_line_unrolled(intptr_t x, intptr_t y, int x1, intptr_t sy, int ady, int adx, float *buf) { int err = -adx; x -= x1 - 1; buf += x1 - 1; while (++x < 0) { err += ady; if (err >= 0) { err += ady - adx; y += sy; buf[x++] = ff_vorbis_floor1_inverse_db_table[y]; } buf[x] = ff_vorbis_floor1_inverse_db_table[y]; } if (x <= 0) { if (err + ady >= 0) y += sy; buf[x] = 
ff_vorbis_floor1_inverse_db_table[y]; } }"} {"target": 1, "idx": 4427, "func": "static int webm_dash_manifest_write_trailer(AVFormatContext *s) { WebMDashMuxContext *w = s->priv_data; int i; for (i = 0; i < w->nb_as; i++) { av_freep(&w->as[i].streams); } av_freep(&w->as); return 0; }"} {"target": 0, "idx": 4442, "func": "void test_misc(void) { char table[256]; long res, i; for(i=0;i<256;i++) table[i] = 256 - i; res = 0x12345678; asm (\"xlat\" : \"=a\" (res) : \"b\" (table), \"0\" (res)); printf(\"xlat: EAX=\" FMTLX \"\\n\", res); #if defined(__x86_64__) #if 0 { /* XXX: see if Intel Core2 and AMD64 behavior really differ. Here we implemented the Intel way which is not compatible yet with QEMU. */ static struct __attribute__((packed)) { uint64_t offset; uint16_t seg; } desc; long cs_sel; asm volatile (\"mov %%cs, %0\" : \"=r\" (cs_sel)); asm volatile (\"push %1\\n\" \"call func_lret\\n\" : \"=a\" (res) : \"r\" (cs_sel) : \"memory\", \"cc\"); printf(\"func_lret=\" FMTLX \"\\n\", res); desc.offset = (long)&func_lret; desc.seg = cs_sel; asm volatile (\"xor %%rax, %%rax\\n\" \"rex64 lcall *(%%rcx)\\n\" : \"=a\" (res) : \"c\" (&desc) : \"memory\", \"cc\"); printf(\"func_lret2=\" FMTLX \"\\n\", res); asm volatile (\"push %2\\n\" \"mov $ 1f, %%rax\\n\" \"push %%rax\\n\" \"rex64 ljmp *(%%rcx)\\n\" \"1:\\n\" : \"=a\" (res) : \"c\" (&desc), \"b\" (cs_sel) : \"memory\", \"cc\"); printf(\"func_lret3=\" FMTLX \"\\n\", res); } #endif #else asm volatile (\"push %%cs ; call %1\" : \"=a\" (res) : \"m\" (func_lret): \"memory\", \"cc\"); printf(\"func_lret=\" FMTLX \"\\n\", res); asm volatile (\"pushf ; push %%cs ; call %1\" : \"=a\" (res) : \"m\" (func_iret): \"memory\", \"cc\"); printf(\"func_iret=\" FMTLX \"\\n\", res); #endif #if defined(__x86_64__) /* specific popl test */ asm volatile (\"push $12345432 ; push $0x9abcdef ; pop (%%rsp) ; pop %0\" : \"=g\" (res)); printf(\"popl esp=\" FMTLX \"\\n\", res); #else /* specific popl test */ asm volatile (\"pushl $12345432 ; pushl $0x9abcdef ; popl (%%esp) ; popl %0\" : \"=g\" (res)); printf(\"popl esp=\" FMTLX \"\\n\", res); /* specific popw test */ asm volatile (\"pushl $12345432 ; pushl $0x9abcdef ; popw (%%esp) ; addl $2, %%esp ; popl %0\" : \"=g\" (res)); printf(\"popw esp=\" FMTLX \"\\n\", res); #endif }"} {"target": 0, "idx": 4443, "func": "static int pci_rtl8139_init(PCIDevice *dev) { RTL8139State * s = DO_UPCAST(RTL8139State, dev, dev); uint8_t *pci_conf; pci_conf = s->dev.config; pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REALTEK); pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REALTEK_8139); pci_conf[PCI_REVISION_ID] = RTL8139_PCI_REVID; /* >=0x20 is for 8139C+ */ pci_config_set_class(pci_conf, PCI_CLASS_NETWORK_ETHERNET); pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin 0 */ /* TODO: start of capability list, but no capability * list bit in status register, and offset 0xdc seems unused. 
*/ pci_conf[PCI_CAPABILITY_LIST] = 0xdc; /* I/O handler for memory-mapped I/O */ s->rtl8139_mmio_io_addr = cpu_register_io_memory(rtl8139_mmio_read, rtl8139_mmio_write, s, DEVICE_LITTLE_ENDIAN); pci_register_bar(&s->dev, 0, 0x100, PCI_BASE_ADDRESS_SPACE_IO, rtl8139_ioport_map); pci_register_bar_simple(&s->dev, 1, 0x100, 0, s->rtl8139_mmio_io_addr); qemu_macaddr_default_if_unset(&s->conf.macaddr); /* prepare eeprom */ s->eeprom.contents[0] = 0x8129; #if 1 /* PCI vendor and device ID should be mirrored here */ s->eeprom.contents[1] = PCI_VENDOR_ID_REALTEK; s->eeprom.contents[2] = PCI_DEVICE_ID_REALTEK_8139; #endif s->eeprom.contents[7] = s->conf.macaddr.a[0] | s->conf.macaddr.a[1] << 8; s->eeprom.contents[8] = s->conf.macaddr.a[2] | s->conf.macaddr.a[3] << 8; s->eeprom.contents[9] = s->conf.macaddr.a[4] | s->conf.macaddr.a[5] << 8; s->nic = qemu_new_nic(&net_rtl8139_info, &s->conf, dev->qdev.info->name, dev->qdev.id, s); qemu_format_nic_info_str(&s->nic->nc, s->conf.macaddr.a); s->cplus_txbuffer = NULL; s->cplus_txbuffer_len = 0; s->cplus_txbuffer_offset = 0; s->TimerExpire = 0; s->timer = qemu_new_timer_ns(vm_clock, rtl8139_timer, s); rtl8139_set_next_tctr_time(s, qemu_get_clock_ns(vm_clock)); add_boot_device_path(s->conf.bootindex, &dev->qdev, \"/ethernet-phy@0\"); return 0; }"} {"target": 0, "idx": 4447, "func": "void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext* dsp) { c->put_pixels_tab[0][ 1] = ff_put_rv40_qpel16_mc10_neon; c->put_pixels_tab[0][ 3] = ff_put_rv40_qpel16_mc30_neon; c->put_pixels_tab[0][ 4] = ff_put_rv40_qpel16_mc01_neon; c->put_pixels_tab[0][ 5] = ff_put_rv40_qpel16_mc11_neon; c->put_pixels_tab[0][ 6] = ff_put_rv40_qpel16_mc21_neon; c->put_pixels_tab[0][ 7] = ff_put_rv40_qpel16_mc31_neon; c->put_pixels_tab[0][ 9] = ff_put_rv40_qpel16_mc12_neon; c->put_pixels_tab[0][10] = ff_put_rv40_qpel16_mc22_neon; c->put_pixels_tab[0][11] = ff_put_rv40_qpel16_mc32_neon; c->put_pixels_tab[0][12] = ff_put_rv40_qpel16_mc03_neon; c->put_pixels_tab[0][13] = ff_put_rv40_qpel16_mc13_neon; c->put_pixels_tab[0][14] = ff_put_rv40_qpel16_mc23_neon; c->put_pixels_tab[0][15] = ff_put_rv40_qpel16_mc33_neon; c->avg_pixels_tab[0][ 1] = ff_avg_rv40_qpel16_mc10_neon; c->avg_pixels_tab[0][ 3] = ff_avg_rv40_qpel16_mc30_neon; c->avg_pixels_tab[0][ 4] = ff_avg_rv40_qpel16_mc01_neon; c->avg_pixels_tab[0][ 5] = ff_avg_rv40_qpel16_mc11_neon; c->avg_pixels_tab[0][ 6] = ff_avg_rv40_qpel16_mc21_neon; c->avg_pixels_tab[0][ 7] = ff_avg_rv40_qpel16_mc31_neon; c->avg_pixels_tab[0][ 9] = ff_avg_rv40_qpel16_mc12_neon; c->avg_pixels_tab[0][10] = ff_avg_rv40_qpel16_mc22_neon; c->avg_pixels_tab[0][11] = ff_avg_rv40_qpel16_mc32_neon; c->avg_pixels_tab[0][12] = ff_avg_rv40_qpel16_mc03_neon; c->avg_pixels_tab[0][13] = ff_avg_rv40_qpel16_mc13_neon; c->avg_pixels_tab[0][14] = ff_avg_rv40_qpel16_mc23_neon; c->avg_pixels_tab[0][15] = ff_avg_rv40_qpel16_mc33_neon; c->put_pixels_tab[1][ 1] = ff_put_rv40_qpel8_mc10_neon; c->put_pixels_tab[1][ 3] = ff_put_rv40_qpel8_mc30_neon; c->put_pixels_tab[1][ 4] = ff_put_rv40_qpel8_mc01_neon; c->put_pixels_tab[1][ 5] = ff_put_rv40_qpel8_mc11_neon; c->put_pixels_tab[1][ 6] = ff_put_rv40_qpel8_mc21_neon; c->put_pixels_tab[1][ 7] = ff_put_rv40_qpel8_mc31_neon; c->put_pixels_tab[1][ 9] = ff_put_rv40_qpel8_mc12_neon; c->put_pixels_tab[1][10] = ff_put_rv40_qpel8_mc22_neon; c->put_pixels_tab[1][11] = ff_put_rv40_qpel8_mc32_neon; c->put_pixels_tab[1][12] = ff_put_rv40_qpel8_mc03_neon; c->put_pixels_tab[1][13] = ff_put_rv40_qpel8_mc13_neon; c->put_pixels_tab[1][14] = ff_put_rv40_qpel8_mc23_neon; 
c->put_pixels_tab[1][15] = ff_put_rv40_qpel8_mc33_neon; c->avg_pixels_tab[1][ 1] = ff_avg_rv40_qpel8_mc10_neon; c->avg_pixels_tab[1][ 3] = ff_avg_rv40_qpel8_mc30_neon; c->avg_pixels_tab[1][ 4] = ff_avg_rv40_qpel8_mc01_neon; c->avg_pixels_tab[1][ 5] = ff_avg_rv40_qpel8_mc11_neon; c->avg_pixels_tab[1][ 6] = ff_avg_rv40_qpel8_mc21_neon; c->avg_pixels_tab[1][ 7] = ff_avg_rv40_qpel8_mc31_neon; c->avg_pixels_tab[1][ 9] = ff_avg_rv40_qpel8_mc12_neon; c->avg_pixels_tab[1][10] = ff_avg_rv40_qpel8_mc22_neon; c->avg_pixels_tab[1][11] = ff_avg_rv40_qpel8_mc32_neon; c->avg_pixels_tab[1][12] = ff_avg_rv40_qpel8_mc03_neon; c->avg_pixels_tab[1][13] = ff_avg_rv40_qpel8_mc13_neon; c->avg_pixels_tab[1][14] = ff_avg_rv40_qpel8_mc23_neon; c->avg_pixels_tab[1][15] = ff_avg_rv40_qpel8_mc33_neon; c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_neon; c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_neon; c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_neon; c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_neon; c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_16_neon; c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_8_neon; c->rv40_loop_filter_strength[0] = ff_rv40_h_loop_filter_strength_neon; c->rv40_loop_filter_strength[1] = ff_rv40_v_loop_filter_strength_neon; c->rv40_weak_loop_filter[0] = ff_rv40_h_weak_loop_filter_neon; c->rv40_weak_loop_filter[1] = ff_rv40_v_weak_loop_filter_neon; }"} {"target": 0, "idx": 4461, "func": "VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb, void *opaque) { VMChangeStateEntry *e; e = qemu_mallocz(sizeof (*e)); e->cb = cb; e->opaque = opaque; LIST_INSERT_HEAD(&vm_change_state_head, e, entries); return e; }"} {"target": 0, "idx": 4478, "func": "static int ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile, AVCodecContext *avctx) { int mbn, blk, num_blocks, blk_size, ret, is_intra, mc_type = 0; int mv_x = 0, mv_y = 0; int32_t prev_dc; uint32_t cbp, quant, buf_offs; IVIMbInfo *mb; ivi_mc_func mc_with_delta_func, mc_no_delta_func; const uint8_t *scale_tab; /* init intra prediction for the DC coefficient */ prev_dc = 0; blk_size = band->blk_size; /* number of blocks per mb */ num_blocks = (band->mb_size != blk_size) ? 4 : 1; if (blk_size == 8) { mc_with_delta_func = ff_ivi_mc_8x8_delta; mc_no_delta_func = ff_ivi_mc_8x8_no_delta; } else { mc_with_delta_func = ff_ivi_mc_4x4_delta; mc_no_delta_func = ff_ivi_mc_4x4_no_delta; } for (mbn = 0, mb = tile->mbs; mbn < tile->num_MBs; mb++, mbn++) { is_intra = !mb->type; cbp = mb->cbp; buf_offs = mb->buf_offs; quant = band->glob_quant + mb->q_delta; if (avctx->codec_id == AV_CODEC_ID_INDEO4) quant = av_clip(quant, 0, 31); else quant = av_clip(quant, 0, 23); scale_tab = is_intra ? 
band->intra_scale : band->inter_scale; if (scale_tab) quant = scale_tab[quant]; if (!is_intra) { mv_x = mb->mv_x; mv_y = mb->mv_y; if (band->is_halfpel) { mc_type = ((mv_y & 1) << 1) | (mv_x & 1); mv_x >>= 1; mv_y >>= 1; /* convert halfpel vectors into fullpel ones */ } if (mb->type) { int dmv_x, dmv_y, cx, cy; dmv_x = mb->mv_x >> band->is_halfpel; dmv_y = mb->mv_y >> band->is_halfpel; cx = mb->mv_x & band->is_halfpel; cy = mb->mv_y & band->is_halfpel; if (mb->xpos + dmv_x < 0 || mb->xpos + dmv_x + band->mb_size + cx > band->pitch || mb->ypos + dmv_y < 0 || mb->ypos + dmv_y + band->mb_size + cy > band->aheight) { return AVERROR_INVALIDDATA; } } } for (blk = 0; blk < num_blocks; blk++) { /* adjust block position in the buffer according to its number */ if (blk & 1) { buf_offs += blk_size; } else if (blk == 2) { buf_offs -= blk_size; buf_offs += blk_size * band->pitch; } if (cbp & 1) { /* block coded ? */ ret = ivi_decode_coded_blocks(gb, band, mc_with_delta_func, mv_x, mv_y, &prev_dc, is_intra, mc_type, quant, buf_offs, avctx); if (ret < 0) return ret; } else { /* block not coded */ /* for intra blocks apply the dc slant transform */ /* for inter - perform the motion compensation without delta */ if (is_intra) { if (band->dc_transform) band->dc_transform(&prev_dc, band->buf + buf_offs, band->pitch, blk_size); } else { ret = ivi_mc(mc_no_delta_func, band->buf, band->ref_buf, buf_offs, mv_x, mv_y, band->pitch, mc_type); if (ret < 0) return ret; } } cbp >>= 1; }// for blk }// for mbn align_get_bits(gb); return 0; }"} {"target": 0, "idx": 4494, "func": "static void rtas_read_pci_config(sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { uint32_t val, size, addr; PCIDevice *dev = find_dev(spapr, 0, rtas_ld(args, 0)); if (!dev) { rtas_st(rets, 0, -1); return; } size = rtas_ld(args, 1); addr = rtas_pci_cfgaddr(rtas_ld(args, 0)); val = pci_host_config_read_common(dev, addr, pci_config_size(dev), size); rtas_st(rets, 0, 0); rtas_st(rets, 1, val); }"} {"target": 0, "idx": 4495, "func": "void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin, int64_t begin, bool has_length, int64_t length, Error **errp) { const char *p; int fd = -1; DumpState *s; int ret; if (has_begin && !has_length) { error_set(errp, QERR_MISSING_PARAMETER, \"length\"); return; } if (!has_begin && has_length) { error_set(errp, QERR_MISSING_PARAMETER, \"begin\"); return; } #if !defined(WIN32) if (strstart(file, \"fd:\", &p)) { fd = monitor_get_fd(cur_mon, p, errp); if (fd == -1) { return; } } #endif if (strstart(file, \"file:\", &p)) { fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR); if (fd < 0) { error_setg_file_open(errp, errno, p); return; } } if (fd == -1) { error_set(errp, QERR_INVALID_PARAMETER, \"protocol\"); return; } s = g_malloc(sizeof(DumpState)); ret = dump_init(s, fd, paging, has_begin, begin, length, errp); if (ret < 0) { g_free(s); return; } if (create_vmcore(s) < 0 && !error_is_set(s->errp)) { error_set(errp, QERR_IO_ERROR); } g_free(s); }"} {"target": 0, "idx": 4511, "func": "static bool bdrv_requests_pending(BlockDriverState *bs) { if (!QLIST_EMPTY(&bs->tracked_requests)) { return true; } if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) { return true; } if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) { return true; } if (bs->file && bdrv_requests_pending(bs->file)) { return true; } if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) { return true; } return false; }"} {"target": 0, "idx": 4514, "func": "void 
checkasm_check_fixed_dsp(void) { LOCAL_ALIGNED_32(int32_t, src0, [BUF_SIZE]); LOCAL_ALIGNED_32(int32_t, src1, [BUF_SIZE]); LOCAL_ALIGNED_32(int32_t, src2, [BUF_SIZE]); AVFixedDSPContext *fdsp = avpriv_alloc_fixed_dsp(1); randomize_buffers(); if (check_func(fdsp->vector_fmul, \"vector_fmul\")) check_vector_fmul(src0, src1); if (check_func(fdsp->vector_fmul_add, \"vector_fmul_add\")) check_vector_fmul_add(src0, src1, src2); if (check_func(fdsp->vector_fmul_reverse, \"vector_fmul_reverse\")) check_vector_fmul(src0, src1); if (check_func(fdsp->vector_fmul_window, \"vector_fmul_window\")) check_vector_fmul_window(src0, src1, src2); if (check_func(fdsp->vector_fmul_window_scaled, \"vector_fmul_window_scaled\")) check_vector_fmul_window_scaled(src0, src1, src2); report(\"vector_fmul\"); if (check_func(fdsp->butterflies_fixed, \"butterflies_fixed\")) check_butterflies(src0, src1); report(\"butterflies_fixed\"); if (check_func(fdsp->scalarproduct_fixed, \"scalarproduct_fixed\")) check_scalarproduct_fixed(src0, src1); report(\"scalarproduct_fixed\"); av_freep(&fdsp); }"} {"target": 1, "idx": 4522, "func": "static void etsec_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = etsec_realize; dc->reset = etsec_reset; dc->props = etsec_properties; }"} {"target": 0, "idx": 4525, "func": "int ff_read_riff_info(AVFormatContext *s, int64_t size) { int64_t start, end, cur; AVIOContext *pb = s->pb; start = avio_tell(pb); end = start + size; while ((cur = avio_tell(pb)) >= 0 && cur <= end - 8 /* = tag + size */) { uint32_t chunk_code; int64_t chunk_size; char key[5] = {0}; char *value; chunk_code = avio_rl32(pb); chunk_size = avio_rl32(pb); if (chunk_size > end || end - chunk_size < cur || chunk_size == UINT_MAX) { avio_seek(pb, -9, SEEK_CUR); chunk_code = avio_rl32(pb); chunk_size = avio_rl32(pb); if (chunk_size > end || end - chunk_size < cur || chunk_size == UINT_MAX) { av_log(s, AV_LOG_WARNING, \"too big INFO subchunk\\n\"); return AVERROR_INVALIDDATA; } } chunk_size += (chunk_size & 1); if (!chunk_code) { if (chunk_size) avio_skip(pb, chunk_size); continue; } value = av_mallocz(chunk_size + 1); if (!value) { av_log(s, AV_LOG_ERROR, \"out of memory, unable to read INFO tag\\n\"); return AVERROR(ENOMEM); } AV_WL32(key, chunk_code); if (avio_read(pb, value, chunk_size) != chunk_size) { av_log(s, AV_LOG_WARNING, \"premature end of file while reading INFO tag\\n\"); } av_dict_set(&s->metadata, key, value, AV_DICT_DONT_STRDUP_VAL); } return 0; }"} {"target": 0, "idx": 4530, "func": "void ff_mpeg_flush(AVCodecContext *avctx){ int i; MpegEncContext *s = avctx->priv_data; if(s==NULL || s->picture==NULL) return; for (i = 0; i < MAX_PICTURE_COUNT; i++) ff_mpeg_unref_picture(s, &s->picture[i]); s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL; ff_mpeg_unref_picture(s, &s->current_picture); ff_mpeg_unref_picture(s, &s->last_picture); ff_mpeg_unref_picture(s, &s->next_picture); s->mb_x= s->mb_y= 0; s->parse_context.state= -1; s->parse_context.frame_start_found= 0; s->parse_context.overread= 0; s->parse_context.overread_index= 0; s->parse_context.index= 0; s->parse_context.last_index= 0; s->bitstream_buffer_size=0; s->pp_time=0; }"} {"target": 0, "idx": 4557, "func": "static int bdrv_qed_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum) { BDRVQEDState *s = bs->opaque; uint64_t pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE; size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE; QEDIsAllocatedCB cb = { .is_allocated = -1, 
.pnum = pnum, }; QEDRequest request = { .l2_table = NULL }; async_context_push(); qed_find_cluster(s, &request, pos, len, qed_is_allocated_cb, &cb); while (cb.is_allocated == -1) { qemu_aio_wait(); } async_context_pop(); qed_unref_l2_cache_entry(request.l2_table); return cb.is_allocated; }"} {"target": 0, "idx": 4575, "func": "static int rwpipe_read_ppm_header( rwpipe *rw, int *width, int *height ) { char line[ 3 ]; FILE *in = rwpipe_reader( rw ); int max; fgets( line, 3, in ); if ( !strncmp( line, \"P6\", 2 ) ) { *width = rwpipe_read_number( rw ); *height = rwpipe_read_number( rw ); max = rwpipe_read_number( rw ); return max != 255 || *width <= 0 || *height <= 0; } return 1; }"} {"target": 1, "idx": 4578, "func": "static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64) { VhostUserMsg msg = { .request = request, .flags = VHOST_USER_VERSION, }; if (vhost_user_one_time_request(request) && dev->vq_index != 0) { return 0; } vhost_user_write(dev, &msg, NULL, 0); if (vhost_user_read(dev, &msg) < 0) { return 0; } if (msg.request != request) { error_report(\"Received unexpected msg type. Expected %d received %d\", request, msg.request); return -1; } if (msg.size != sizeof(m.u64)) { error_report(\"Received bad msg size.\"); return -1; } *u64 = msg.u64; return 0; }"} {"target": 0, "idx": 4580, "func": "static int crc_write_header(struct AVFormatContext *s) { CRCState *crc = s->priv_data; /* init CRC */ crc->crcval = adler32(0, NULL, 0); return 0; }"} {"target": 1, "idx": 4583, "func": "ObjectClass *object_class_dynamic_cast_assert(ObjectClass *class, const char *typename, const char *file, int line, const char *func) { ObjectClass *ret = object_class_dynamic_cast(class, typename); if (!ret && class) { fprintf(stderr, \"%s:%d:%s: Object %p is not an instance of type %s\\n\", file, line, func, class, typename); abort(); } return ret; }"} {"target": 1, "idx": 4594, "func": "static void nvme_set_bootindex(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { NvmeCtrl *s = NVME(obj); int32_t boot_index; Error *local_err = NULL; visit_type_int32(v, &boot_index, name, &local_err); if (local_err) { goto out; } /* check whether bootindex is present in fw_boot_order list */ check_boot_index(boot_index, &local_err); if (local_err) { goto out; } /* change bootindex to a new one */ s->conf.bootindex = boot_index; out: if (local_err) { error_propagate(errp, local_err); } }"} {"target": 1, "idx": 4598, "func": "PPC_OP(divwu) { if (T1 == 0) { T0 = 0; } else { T0 /= T1; } RETURN(); }"} {"target": 1, "idx": 4599, "func": "static av_cold int initFilter(int16_t **outFilter, int32_t **filterPos, int *outFilterSize, int xInc, int srcW, int dstW, int filterAlign, int one, int flags, int cpu_flags, SwsVector *srcFilter, SwsVector *dstFilter, double param[2], int is_horizontal) { int i; int filterSize; int filter2Size; int minFilterSize; int64_t *filter = NULL; int64_t *filter2 = NULL; const int64_t fone = 1LL << 54; int ret = -1; emms_c(); // FIXME should not be required but IS (even for non-MMX versions) // NOTE: the +3 is for the MMX(+1) / SSE(+3) scaler which reads over the end FF_ALLOC_OR_GOTO(NULL, *filterPos, (dstW + 3) * sizeof(**filterPos), fail); if (FFABS(xInc - 0x10000) < 10) { // unscaled int i; filterSize = 1; FF_ALLOCZ_OR_GOTO(NULL, filter, dstW * sizeof(*filter) * filterSize, fail); for (i = 0; i < dstW; i++) { filter[i * filterSize] = fone; (*filterPos)[i] = i; } } else if (flags & SWS_POINT) { // lame looking point sampling mode int i; int xDstInSrc; filterSize = 
1; FF_ALLOC_OR_GOTO(NULL, filter, dstW * sizeof(*filter) * filterSize, fail); xDstInSrc = xInc / 2 - 0x8000; for (i = 0; i < dstW; i++) { int xx = (xDstInSrc - ((filterSize - 1) << 15) + (1 << 15)) >> 16; (*filterPos)[i] = xx; filter[i] = fone; xDstInSrc += xInc; } } else if ((xInc <= (1 << 16) && (flags & SWS_AREA)) || (flags & SWS_FAST_BILINEAR)) { // bilinear upscale int i; int xDstInSrc; filterSize = 2; FF_ALLOC_OR_GOTO(NULL, filter, dstW * sizeof(*filter) * filterSize, fail); xDstInSrc = xInc / 2 - 0x8000; for (i = 0; i < dstW; i++) { int xx = (xDstInSrc - ((filterSize - 1) << 15) + (1 << 15)) >> 16; int j; (*filterPos)[i] = xx; // bilinear upscale / linear interpolate / area averaging for (j = 0; j < filterSize; j++) { int64_t coeff = fone - FFABS((xx << 16) - xDstInSrc) * (fone >> 16); if (coeff < 0) coeff = 0; filter[i * filterSize + j] = coeff; xx++; } xDstInSrc += xInc; } } else { int64_t xDstInSrc; int sizeFactor; if (flags & SWS_BICUBIC) sizeFactor = 4; else if (flags & SWS_X) sizeFactor = 8; else if (flags & SWS_AREA) sizeFactor = 1; // downscale only, for upscale it is bilinear else if (flags & SWS_GAUSS) sizeFactor = 8; // infinite ;) else if (flags & SWS_LANCZOS) sizeFactor = param[0] != SWS_PARAM_DEFAULT ? ceil(2 * param[0]) : 6; else if (flags & SWS_SINC) sizeFactor = 20; // infinite ;) else if (flags & SWS_SPLINE) sizeFactor = 20; // infinite ;) else if (flags & SWS_BILINEAR) sizeFactor = 2; else { sizeFactor = 0; // GCC warning killer assert(0); } if (xInc <= 1 << 16) filterSize = 1 + sizeFactor; // upscale else filterSize = 1 + (sizeFactor * srcW + dstW - 1) / dstW; filterSize = FFMIN(filterSize, srcW - 2); filterSize = FFMAX(filterSize, 1); FF_ALLOC_OR_GOTO(NULL, filter, dstW * sizeof(*filter) * filterSize, fail); xDstInSrc = xInc - 0x10000; for (i = 0; i < dstW; i++) { int xx = (xDstInSrc - ((filterSize - 2) << 16)) / (1 << 17); int j; (*filterPos)[i] = xx; for (j = 0; j < filterSize; j++) { int64_t d = (FFABS(((int64_t)xx << 17) - xDstInSrc)) << 13; double floatd; int64_t coeff; if (xInc > 1 << 16) d = d * dstW / srcW; floatd = d * (1.0 / (1 << 30)); if (flags & SWS_BICUBIC) { int64_t B = (param[0] != SWS_PARAM_DEFAULT ? param[0] : 0) * (1 << 24); int64_t C = (param[1] != SWS_PARAM_DEFAULT ? param[1] : 0.6) * (1 << 24); if (d >= 1LL << 31) { coeff = 0.0; } else { int64_t dd = (d * d) >> 30; int64_t ddd = (dd * d) >> 30; if (d < 1LL << 30) coeff = (12 * (1 << 24) - 9 * B - 6 * C) * ddd + (-18 * (1 << 24) + 12 * B + 6 * C) * dd + (6 * (1 << 24) - 2 * B) * (1 << 30); else coeff = (-B - 6 * C) * ddd + (6 * B + 30 * C) * dd + (-12 * B - 48 * C) * d + (8 * B + 24 * C) * (1 << 30); } coeff *= fone >> (30 + 24); } #if 0 else if (flags & SWS_X) { double p = param ? param * 0.01 : 0.3; coeff = d ? sin(d * M_PI) / (d * M_PI) : 1.0; coeff *= pow(2.0, -p * d * d); } #endif else if (flags & SWS_X) { double A = param[0] != SWS_PARAM_DEFAULT ? param[0] : 1.0; double c; if (floatd < 1.0) c = cos(floatd * M_PI); else c = -1.0; if (c < 0.0) c = -pow(-c, A); else c = pow(c, A); coeff = (c * 0.5 + 0.5) * fone; } else if (flags & SWS_AREA) { int64_t d2 = d - (1 << 29); if (d2 * xInc < -(1LL << (29 + 16))) coeff = 1.0 * (1LL << (30 + 16)); else if (d2 * xInc < (1LL << (29 + 16))) coeff = -d2 * xInc + (1LL << (29 + 16)); else coeff = 0.0; coeff *= fone >> (30 + 16); } else if (flags & SWS_GAUSS) { double p = param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0; coeff = (pow(2.0, -p * floatd * floatd)) * fone; } else if (flags & SWS_SINC) { coeff = (d ? 
sin(floatd * M_PI) / (floatd * M_PI) : 1.0) * fone; } else if (flags & SWS_LANCZOS) { double p = param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0; coeff = (d ? sin(floatd * M_PI) * sin(floatd * M_PI / p) / (floatd * floatd * M_PI * M_PI / p) : 1.0) * fone; if (floatd > p) coeff = 0; } else if (flags & SWS_BILINEAR) { coeff = (1 << 30) - d; if (coeff < 0) coeff = 0; coeff *= fone >> 30; } else if (flags & SWS_SPLINE) { double p = -2.196152422706632; coeff = getSplineCoeff(1.0, 0.0, p, -p - 1.0, floatd) * fone; } else { coeff = 0.0; // GCC warning killer assert(0); } filter[i * filterSize + j] = coeff; xx++; } xDstInSrc += 2 * xInc; } } /* apply src & dst Filter to filter -> filter2 * av_free(filter); */ assert(filterSize > 0); filter2Size = filterSize; if (srcFilter) filter2Size += srcFilter->length - 1; if (dstFilter) filter2Size += dstFilter->length - 1; assert(filter2Size > 0); FF_ALLOCZ_OR_GOTO(NULL, filter2, filter2Size * dstW * sizeof(*filter2), fail); for (i = 0; i < dstW; i++) { int j, k; if (srcFilter) { for (k = 0; k < srcFilter->length; k++) { for (j = 0; j < filterSize; j++) filter2[i * filter2Size + k + j] += srcFilter->coeff[k] * filter[i * filterSize + j]; } } else { for (j = 0; j < filterSize; j++) filter2[i * filter2Size + j] = filter[i * filterSize + j]; } // FIXME dstFilter (*filterPos)[i] += (filterSize - 1) / 2 - (filter2Size - 1) / 2; } av_freep(&filter); /* try to reduce the filter-size (step1 find size and shift left) */ // Assume it is near normalized (*0.5 or *2.0 is OK but * 0.001 is not). minFilterSize = 0; for (i = dstW - 1; i >= 0; i--) { int min = filter2Size; int j; int64_t cutOff = 0.0; /* get rid of near zero elements on the left by shifting left */ for (j = 0; j < filter2Size; j++) { int k; cutOff += FFABS(filter2[i * filter2Size]); if (cutOff > SWS_MAX_REDUCE_CUTOFF * fone) break; /* preserve monotonicity because the core can't handle the * filter otherwise */ if (i < dstW - 1 && (*filterPos)[i] >= (*filterPos)[i + 1]) break; // move filter coefficients left for (k = 1; k < filter2Size; k++) filter2[i * filter2Size + k - 1] = filter2[i * filter2Size + k]; filter2[i * filter2Size + k - 1] = 0; (*filterPos)[i]++; } cutOff = 0; /* count near zeros on the right */ for (j = filter2Size - 1; j > 0; j--) { cutOff += FFABS(filter2[i * filter2Size + j]); if (cutOff > SWS_MAX_REDUCE_CUTOFF * fone) break; min--; } if (min > minFilterSize) minFilterSize = min; } if (PPC_ALTIVEC(cpu_flags)) { // we can handle the special case 4, so we don't want to go the full 8 if (minFilterSize < 5) filterAlign = 4; /* We really don't want to waste our time doing useless computation, so * fall back on the scalar C code for very small filters. * Vectorizing is worth it only if you have a decent-sized vector. */ if (minFilterSize < 3) filterAlign = 1; } if (INLINE_MMX(cpu_flags)) { // special case for unscaled vertical filtering if (minFilterSize == 1 && filterAlign == 2) filterAlign = 1; } assert(minFilterSize > 0); filterSize = (minFilterSize + (filterAlign - 1)) & (~(filterAlign - 1)); assert(filterSize > 0); filter = av_malloc(filterSize * dstW * sizeof(*filter)); if (filterSize >= MAX_FILTER_SIZE * 16 / ((flags & SWS_ACCURATE_RND) ? 
APCK_SIZE : 16) || !filter) goto fail; *outFilterSize = filterSize; if (flags & SWS_PRINT_INFO) av_log(NULL, AV_LOG_VERBOSE, \"SwScaler: reducing / aligning filtersize %d -> %d\\n\", filter2Size, filterSize); /* try to reduce the filter-size (step2 reduce it) */ for (i = 0; i < dstW; i++) { int j; for (j = 0; j < filterSize; j++) { if (j >= filter2Size) filter[i * filterSize + j] = 0; else filter[i * filterSize + j] = filter2[i * filter2Size + j]; if ((flags & SWS_BITEXACT) && j >= minFilterSize) filter[i * filterSize + j] = 0; } } // FIXME try to align filterPos if possible // fix borders if (is_horizontal) { for (i = 0; i < dstW; i++) { int j; if ((*filterPos)[i] < 0) { // move filter coefficients left to compensate for filterPos for (j = 1; j < filterSize; j++) { int left = FFMAX(j + (*filterPos)[i], 0); filter[i * filterSize + left] += filter[i * filterSize + j]; filter[i * filterSize + j] = 0; } (*filterPos)[i] = 0; } if ((*filterPos)[i] + filterSize > srcW) { int shift = (*filterPos)[i] + filterSize - srcW; // move filter coefficients right to compensate for filterPos for (j = filterSize - 2; j >= 0; j--) { int right = FFMIN(j + shift, filterSize - 1); filter[i * filterSize + right] += filter[i * filterSize + j]; filter[i * filterSize + j] = 0; } (*filterPos)[i] = srcW - filterSize; } } } // Note the +1 is for the MMX scaler which reads over the end /* align at 16 for AltiVec (needed by hScale_altivec_real) */ FF_ALLOCZ_OR_GOTO(NULL, *outFilter, *outFilterSize * (dstW + 3) * sizeof(int16_t), fail); /* normalize & store in outFilter */ for (i = 0; i < dstW; i++) { int j; int64_t error = 0; int64_t sum = 0; for (j = 0; j < filterSize; j++) { sum += filter[i * filterSize + j]; } sum = (sum + one / 2) / one; for (j = 0; j < *outFilterSize; j++) { int64_t v = filter[i * filterSize + j] + error; int intV = ROUNDED_DIV(v, sum); (*outFilter)[i * (*outFilterSize) + j] = intV; error = v - intV * sum; } } (*filterPos)[dstW + 0] = (*filterPos)[dstW + 1] = (*filterPos)[dstW + 2] = (*filterPos)[dstW - 1]; /* the MMX/SSE scaler will * read over the end */ for (i = 0; i < *outFilterSize; i++) { int k = (dstW - 1) * (*outFilterSize) + i; (*outFilter)[k + 1 * (*outFilterSize)] = (*outFilter)[k + 2 * (*outFilterSize)] = (*outFilter)[k + 3 * (*outFilterSize)] = (*outFilter)[k]; } ret = 0; fail: av_free(filter); av_free(filter2); return ret; }"} {"target": 1, "idx": 4600, "func": "void *rom_add_blob(const char *name, const void *blob, size_t len, hwaddr addr, const char *fw_file_name, FWCfgReadCallback fw_callback, void *callback_opaque) { Rom *rom; void *data = NULL; rom = g_malloc0(sizeof(*rom)); rom->name = g_strdup(name); rom->addr = addr; rom->romsize = len; rom->datasize = len; rom->data = g_malloc0(rom->datasize); memcpy(rom->data, blob, len); rom_insert(rom); if (fw_file_name && fw_cfg) { char devpath[100]; snprintf(devpath, sizeof(devpath), \"/rom@%s\", fw_file_name); if (rom_file_has_mr) { data = rom_set_mr(rom, OBJECT(fw_cfg), devpath); } else { data = rom->data; } fw_cfg_add_file_callback(fw_cfg, fw_file_name, fw_callback, callback_opaque, data, rom->romsize); } return data; }"} {"target": 0, "idx": 4606, "func": "static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table, unsigned int index, unsigned int n, bool flush) { unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1; unsigned int start, end, i; QEDTable *new_table; struct iovec iov; QEMUIOVector qiov; size_t len_bytes; int ret; trace_qed_write_table(s, offset, table, index, n); /* Calculate indices of 
the first and one after last elements */ start = index & ~sector_mask; end = (index + n + sector_mask) & ~sector_mask; len_bytes = (end - start) * sizeof(uint64_t); new_table = qemu_blockalign(s->bs, len_bytes); iov = (struct iovec) { .iov_base = new_table->offsets, .iov_len = len_bytes, }; qemu_iovec_init_external(&qiov, &iov, 1); /* Byteswap table */ for (i = start; i < end; i++) { uint64_t le_offset = cpu_to_le64(table->offsets[i]); new_table->offsets[i - start] = le_offset; } /* Adjust for offset into table */ offset += start * sizeof(uint64_t); ret = bdrv_pwritev(s->bs->file, offset, &qiov); trace_qed_write_table_cb(s, table, flush, ret); if (ret < 0) { goto out; } if (flush) { qed_acquire(s); ret = bdrv_flush(s->bs); qed_release(s); if (ret < 0) { goto out; } } ret = 0; out: qemu_vfree(new_table); return ret; }"} {"target": 0, "idx": 4612, "func": "static void vnc_init_basic_info(SocketAddress *addr, VncBasicInfo *info, Error **errp) { switch (addr->type) { case SOCKET_ADDRESS_KIND_INET: info->host = g_strdup(addr->u.inet->host); info->service = g_strdup(addr->u.inet->port); if (addr->u.inet->ipv6) { info->family = NETWORK_ADDRESS_FAMILY_IPV6; } else { info->family = NETWORK_ADDRESS_FAMILY_IPV4; } break; case SOCKET_ADDRESS_KIND_UNIX: info->host = g_strdup(\"\"); info->service = g_strdup(addr->u.q_unix->path); info->family = NETWORK_ADDRESS_FAMILY_UNIX; break; default: error_setg(errp, \"Unsupported socket kind %d\", addr->type); break; } return; }"} {"target": 0, "idx": 4636, "func": "static void disable_interrupt(EEPRO100State * s) { if (s->int_stat) { logout(\"interrupt disabled\\n\"); qemu_irq_lower(s->pci_dev->irq[0]); s->int_stat = 0; } }"} {"target": 1, "idx": 4640, "func": "void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, int len, int is_write) { int l, io_index; uint8_t *ptr; uint32_t val; target_phys_addr_t page; unsigned long pd; PhysPageDesc *p; while (len > 0) { page = addr & TARGET_PAGE_MASK; l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; p = phys_page_find(page >> TARGET_PAGE_BITS); if (!p) { pd = IO_MEM_UNASSIGNED; } else { pd = p->phys_offset; } if (is_write) { if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); if (p) addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; /* XXX: could force cpu_single_env to NULL to avoid potential bugs */ if (l >= 4 && ((addr & 3) == 0)) { /* 32 bit write access */ val = ldl_p(buf); io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); l = 4; } else if (l >= 2 && ((addr & 1) == 0)) { /* 16 bit write access */ val = lduw_p(buf); io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); l = 2; } else { /* 8 bit write access */ val = ldub_p(buf); io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val); l = 1; } } else { unsigned long addr1; addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); /* RAM case */ ptr = phys_ram_base + addr1; memcpy(ptr, buf, l); if (!cpu_physical_memory_is_dirty(addr1)) { /* invalidate code */ tb_invalidate_phys_page_range(addr1, addr1 + l, 0); /* set dirty bit */ phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= (0xff & ~CODE_DIRTY_FLAG); } } } else { if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { /* I/O case */ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); if (p) addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; if (l >= 4 && ((addr & 3) == 0)) { /* 32 bit read access */ val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); stl_p(buf, val); l = 4; } 
else if (l >= 2 && ((addr & 1) == 0)) { /* 16 bit read access */ val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); stw_p(buf, val); l = 2; } else { /* 8 bit read access */ val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr); stb_p(buf, val); l = 1; } } else { /* RAM case */ ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); memcpy(buf, ptr, l); } } len -= l; buf += l; addr += l; } }"} {"target": 1, "idx": 4647, "func": "static int av_dict_set_fxp(AVDictionary **pm, const char *key, uint64_t value, unsigned int digits, int flags) { char valuestr[44]; snprintf(valuestr, sizeof(valuestr), \"%\"PRId64\".%0*\"PRId64, value / PRECISION, digits, ( value % PRECISION ) / ( PRECISION / uintpow(10,digits) )); return av_dict_set(pm, key, valuestr, flags); }"} {"target": 1, "idx": 4650, "func": "int av_packet_ref(AVPacket *dst, const AVPacket *src) { int ret; ret = av_packet_copy_props(dst, src); if (ret < 0) return ret; if (!src->buf) { ret = packet_alloc(&dst->buf, src->size); if (ret < 0) goto fail; memcpy(dst->buf->data, src->data, src->size); dst->data = dst->buf->data; } else { dst->buf = av_buffer_ref(src->buf); if (!dst->buf) { ret = AVERROR(ENOMEM); goto fail; } dst->data = src->data; } dst->size = src->size; return 0; fail: av_packet_free_side_data(dst); return ret; }"} {"target": 1, "idx": 4654, "func": "int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx) { int list, i, j; int luma_def, chroma_def; pwt->use_weight = 0; pwt->use_weight_chroma = 0; pwt->luma_log2_weight_denom = get_ue_golomb(gb); if (sps->chroma_format_idc) pwt->chroma_log2_weight_denom = get_ue_golomb(gb); if (pwt->luma_log2_weight_denom > 7U) { av_log(logctx, AV_LOG_ERROR, \"luma_log2_weight_denom %d is out of range\\n\", pwt->luma_log2_weight_denom); pwt->luma_log2_weight_denom = 0; } if (pwt->chroma_log2_weight_denom > 7U) { av_log(logctx, AV_LOG_ERROR, \"chroma_log2_weight_denom %d is out of range\\n\", pwt->chroma_log2_weight_denom); pwt->chroma_log2_weight_denom = 0; } luma_def = 1 << pwt->luma_log2_weight_denom; chroma_def = 1 << pwt->chroma_log2_weight_denom; for (list = 0; list < 2; list++) { pwt->luma_weight_flag[list] = 0; pwt->chroma_weight_flag[list] = 0; for (i = 0; i < ref_count[list]; i++) { int luma_weight_flag, chroma_weight_flag; luma_weight_flag = get_bits1(gb); if (luma_weight_flag) { pwt->luma_weight[i][list][0] = get_se_golomb(gb); pwt->luma_weight[i][list][1] = get_se_golomb(gb); if ((int8_t)pwt->luma_weight[i][list][0] != pwt->luma_weight[i][list][0] || (int8_t)pwt->luma_weight[i][list][1] != pwt->luma_weight[i][list][1]) goto out_range_weight; if (pwt->luma_weight[i][list][0] != luma_def || pwt->luma_weight[i][list][1] != 0) { pwt->use_weight = 1; pwt->luma_weight_flag[list] = 1; } } else { pwt->luma_weight[i][list][0] = luma_def; pwt->luma_weight[i][list][1] = 0; } if (sps->chroma_format_idc) { chroma_weight_flag = get_bits1(gb); if (chroma_weight_flag) { int j; for (j = 0; j < 2; j++) { pwt->chroma_weight[i][list][j][0] = get_se_golomb(gb); pwt->chroma_weight[i][list][j][1] = get_se_golomb(gb); if ((int8_t)pwt->chroma_weight[i][list][j][0] != pwt->chroma_weight[i][list][j][0] || (int8_t)pwt->chroma_weight[i][list][j][1] != pwt->chroma_weight[i][list][j][1]) goto out_range_weight; if (pwt->chroma_weight[i][list][j][0] != chroma_def || pwt->chroma_weight[i][list][j][1] != 0) { pwt->use_weight_chroma = 1; pwt->chroma_weight_flag[list] = 
1; } } } else { int j; for (j = 0; j < 2; j++) { pwt->chroma_weight[i][list][j][0] = chroma_def; pwt->chroma_weight[i][list][j][1] = 0; } } } // for MBAFF if (picture_structure == PICT_FRAME) { pwt->luma_weight[16 + 2 * i][list][0] = pwt->luma_weight[16 + 2 * i + 1][list][0] = pwt->luma_weight[i][list][0]; pwt->luma_weight[16 + 2 * i][list][1] = pwt->luma_weight[16 + 2 * i + 1][list][1] = pwt->luma_weight[i][list][1]; for (j = 0; j < 2; j++) { pwt->chroma_weight[16 + 2 * i][list][j][0] = pwt->chroma_weight[16 + 2 * i + 1][list][j][0] = pwt->chroma_weight[i][list][j][0]; pwt->chroma_weight[16 + 2 * i][list][j][1] = pwt->chroma_weight[16 + 2 * i + 1][list][j][1] = pwt->chroma_weight[i][list][j][1]; } } } if (slice_type_nos != AV_PICTURE_TYPE_B) break; } pwt->use_weight = pwt->use_weight || pwt->use_weight_chroma; return 0; out_range_weight: avpriv_request_sample(logctx, \"Out of range weight\\n\"); return AVERROR_INVALIDDATA; }"} {"target": 1, "idx": 4665, "func": "static int libschroedinger_encode_init(AVCodecContext *avccontext) { FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data; SchroVideoFormatEnum preset; /* Initialize the libraries that libschroedinger depends on. */ schro_init(); /* Create an encoder object. */ p_schro_params->encoder = schro_encoder_new(); if (!p_schro_params->encoder) { av_log(avccontext, AV_LOG_ERROR, \"Unrecoverable Error: schro_encoder_new failed. \"); return -1; } /* Initialize the format. */ preset = ff_get_schro_video_format_preset(avccontext); p_schro_params->format = schro_encoder_get_video_format(p_schro_params->encoder); schro_video_format_set_std_video_format (p_schro_params->format, preset); p_schro_params->format->width = avccontext->width; p_schro_params->format->height = avccontext->height; if (SetSchroChromaFormat(avccontext) == -1) return -1; if (ff_get_schro_frame_format(p_schro_params->format->chroma_format, &p_schro_params->frame_format) == -1) { av_log (avccontext, AV_LOG_ERROR, \"This codec currently supports only planar YUV 4:2:0, 4:2:2\" \" and 4:4:4 formats.\\n\"); return -1; } p_schro_params->format->frame_rate_numerator = avccontext->time_base.den; p_schro_params->format->frame_rate_denominator = avccontext->time_base.num; p_schro_params->frame_size = avpicture_get_size(avccontext->pix_fmt, avccontext->width, avccontext->height); avccontext->coded_frame = &p_schro_params->picture; if (avccontext->gop_size == 0){ schro_encoder_setting_set_double (p_schro_params->encoder, \"gop_structure\", SCHRO_ENCODER_GOP_INTRA_ONLY); if (avccontext->coder_type == FF_CODER_TYPE_VLC) { schro_encoder_setting_set_double (p_schro_params->encoder, \"enable_noarith\", 1); } } else { schro_encoder_setting_set_double (p_schro_params->encoder, \"gop_structure\", SCHRO_ENCODER_GOP_BIREF); avccontext->has_b_frames = 1; } /* FIXME - Need to handle SCHRO_ENCODER_RATE_CONTROL_LOW_DELAY. 
*/ if (avccontext->flags & CODEC_FLAG_QSCALE) { if (avccontext->global_quality == 0) { /* lossless coding */ schro_encoder_setting_set_double (p_schro_params->encoder, \"rate_control\", SCHRO_ENCODER_RATE_CONTROL_LOSSLESS); } else { int noise_threshold; schro_encoder_setting_set_double (p_schro_params->encoder, \"rate_control\", SCHRO_ENCODER_RATE_CONTROL_CONSTANT_NOISE_THRESHOLD); noise_threshold = avccontext->global_quality/FF_QP2LAMBDA; if (noise_threshold > 100) noise_threshold = 100; schro_encoder_setting_set_double (p_schro_params->encoder, \"noise_threshold\", noise_threshold); } } else { schro_encoder_setting_set_double ( p_schro_params->encoder, \"rate_control\", SCHRO_ENCODER_RATE_CONTROL_CONSTANT_BITRATE); schro_encoder_setting_set_double (p_schro_params->encoder, \"bitrate\", avccontext->bit_rate); } if (avccontext->flags & CODEC_FLAG_INTERLACED_ME) { /* All material can be coded as interlaced or progressive irrespective of the type of source material. */ schro_encoder_setting_set_double (p_schro_params->encoder, \"interlaced_coding\", 1); } /* FIXME: Signal range hardcoded to 8-bit data until both libschroedinger * and libdirac support other bit-depth data. */ schro_video_format_set_std_signal_range(p_schro_params->format, SCHRO_SIGNAL_RANGE_8BIT_VIDEO); /* Hardcode motion vector precision to quarter pixel. */ schro_encoder_setting_set_double (p_schro_params->encoder, \"mv_precision\", 2); /* Set the encoder format. */ schro_encoder_set_video_format(p_schro_params->encoder, p_schro_params->format); /* Set the debug level. */ schro_debug_set_level (avccontext->debug); schro_encoder_start (p_schro_params->encoder); /* Initialize the encoded frame queue. */ ff_dirac_schro_queue_init (&p_schro_params->enc_frame_queue); return 0 ; }"} {"target": 1, "idx": 4674, "func": "int h263_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) { int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant; unsigned int val; INT16 *mot_val; static INT8 quant_tab[4] = { -1, -2, 1, 2 }; /* Check for GOB Start Code */ val = show_bits(&s->gb, 16); if (val == 0) { /* We have a GBSC probably with GSTUFF */ #ifdef DEBUG unsigned int gn, gfid; #endif //skip_bits(&s->gb, 16); /* Drop the zeros */ while (get_bits1(&s->gb) == 0); /* Seek the '1' bit */ #ifdef DEBUG fprintf(stderr,\"\\nGOB Start Code at MB %d\\n\", (s->mb_y * s->mb_width) + s->mb_x); gn = get_bits(&s->gb, 5); /* GN */ gfid = get_bits(&s->gb, 2); /* GFID */ #else skip_bits(&s->gb, 5); /* GN */ skip_bits(&s->gb, 2); /* GFID */ #endif s->qscale = get_bits(&s->gb, 5); /* GQUANT */ #ifdef DEBUG fprintf(stderr, \"\\nGN: %u GFID: %u Quant: %u\\n\", gn, gfid, s->qscale); #endif } if (s->pict_type == P_TYPE) { if (get_bits1(&s->gb)) { /* skip mb */ s->mb_intra = 0; for(i=0;i<6;i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mb_skiped = 1; return 0; } cbpc = get_vlc(&s->gb, &inter_MCBPC_vlc); //fprintf(stderr, \"\\tCBPC: %d\", cbpc); if (cbpc < 0) return -1; dquant = cbpc & 8; s->mb_intra = ((cbpc & 4) != 0); } else { cbpc = get_vlc(&s->gb, &intra_MCBPC_vlc); if (cbpc < 0) return -1; dquant = cbpc & 4; s->mb_intra = 1; } if (!s->mb_intra) { cbpy = get_vlc(&s->gb, &cbpy_vlc); cbp = (cbpc & 3) | ((cbpy ^ 0xf) << 2); if (dquant) { s->qscale += quant_tab[get_bits(&s->gb, 2)]; if (s->qscale < 1) s->qscale = 1; else if (s->qscale > 31) s->qscale = 31; } s->mv_dir = MV_DIR_FORWARD; if ((cbpc & 16) == 0) { /* 16x16 motion prediction */ s->mv_type = MV_TYPE_16X16; h263_pred_motion(s, 
0, &pred_x, &pred_y); if (s->umvplus_dec) mx = h263p_decode_umotion(s, pred_x); else mx = h263_decode_motion(s, pred_x); if (mx >= 0xffff) return -1; if (s->umvplus_dec) my = h263p_decode_umotion(s, pred_y); else my = h263_decode_motion(s, pred_y); if (my >= 0xffff) return -1; s->mv[0][0][0] = mx; s->mv[0][0][1] = my; if (s->umvplus_dec && (mx - pred_x) == 1 && (my - pred_y) == 1) skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ } else { s->mv_type = MV_TYPE_8X8; for(i=0;i<4;i++) { mot_val = h263_pred_motion(s, i, &pred_x, &pred_y); if (s->umvplus_dec) mx = h263p_decode_umotion(s, pred_x); else mx = h263_decode_motion(s, pred_x); if (mx >= 0xffff) return -1; if (s->umvplus_dec) my = h263p_decode_umotion(s, pred_y); else my = h263_decode_motion(s, pred_y); if (my >= 0xffff) return -1; s->mv[0][i][0] = mx; s->mv[0][i][1] = my; if (s->umvplus_dec && (mx - pred_x) == 1 && (my - pred_y) == 1) skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ mot_val[0] = mx; mot_val[1] = my; } } } else { s->ac_pred = 0; if (s->h263_pred) { s->ac_pred = get_bits1(&s->gb); } cbpy = get_vlc(&s->gb, &cbpy_vlc); cbp = (cbpc & 3) | (cbpy << 2); if (dquant) { s->qscale += quant_tab[get_bits(&s->gb, 2)]; if (s->qscale < 1) s->qscale = 1; else if (s->qscale > 31) s->qscale = 31; } } /* decode each block */ if (s->h263_pred) { for (i = 0; i < 6; i++) { if (mpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1) < 0) return -1; } } else { for (i = 0; i < 6; i++) { if (h263_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1) < 0) return -1; } } return 0; }"} {"target": 1, "idx": 4676, "func": "VirtIODevice *virtio_serial_init(DeviceState *dev, virtio_serial_conf *conf) { VirtIOSerial *vser; VirtIODevice *vdev; uint32_t i, max_supported_ports; if (!conf->max_virtserial_ports) return NULL; /* Each port takes 2 queues, and one pair is for the control queue */ max_supported_ports = VIRTIO_PCI_QUEUE_MAX / 2 - 1; if (conf->max_virtserial_ports > max_supported_ports) { error_report(\"maximum ports supported: %u\", max_supported_ports); return NULL; } vdev = virtio_common_init(\"virtio-serial\", VIRTIO_ID_CONSOLE, sizeof(struct virtio_console_config), sizeof(VirtIOSerial)); vser = DO_UPCAST(VirtIOSerial, vdev, vdev); /* Spawn a new virtio-serial bus on which the ports will ride as devices */ vser->bus = virtser_bus_new(dev); vser->bus->vser = vser; QTAILQ_INIT(&vser->ports); vser->bus->max_nr_ports = conf->max_virtserial_ports; vser->ivqs = qemu_malloc(conf->max_virtserial_ports * sizeof(VirtQueue *)); vser->ovqs = qemu_malloc(conf->max_virtserial_ports * sizeof(VirtQueue *)); /* Add a queue for host to guest transfers for port 0 (backward compat) */ vser->ivqs[0] = virtio_add_queue(vdev, 128, handle_input); /* Add a queue for guest to host transfers for port 0 (backward compat) */ vser->ovqs[0] = virtio_add_queue(vdev, 128, handle_output); /* TODO: host to guest notifications can get dropped * if the queue fills up. Implement queueing in host, * this might also make it possible to reduce the control * queue size: as guest preposts buffers there, * this will save 4Kbyte of guest memory per entry. 
*/ /* control queue: host to guest */ vser->c_ivq = virtio_add_queue(vdev, 32, control_in); /* control queue: guest to host */ vser->c_ovq = virtio_add_queue(vdev, 32, control_out); for (i = 1; i < vser->bus->max_nr_ports; i++) { /* Add a per-port queue for host to guest transfers */ vser->ivqs[i] = virtio_add_queue(vdev, 128, handle_input); /* Add a per-per queue for guest to host transfers */ vser->ovqs[i] = virtio_add_queue(vdev, 128, handle_output); } vser->config.max_nr_ports = tswap32(conf->max_virtserial_ports); vser->ports_map = qemu_mallocz(((conf->max_virtserial_ports + 31) / 32) * sizeof(vser->ports_map[0])); /* * Reserve location 0 for a console port for backward compat * (old kernel, new qemu) */ mark_port_added(vser, 0); vser->vdev.get_features = get_features; vser->vdev.get_config = get_config; vser->vdev.set_config = set_config; vser->qdev = dev; /* * Register for the savevm section with the virtio-console name * to preserve backward compat */ register_savevm(dev, \"virtio-console\", -1, 3, virtio_serial_save, virtio_serial_load, vser); return vdev; }"} {"target": 0, "idx": 4679, "func": "static void kempf_restore_buf(const uint8_t *src, int len, uint8_t *dst, int stride, const uint8_t *jpeg_tile, int tile_stride, int width, int height, const uint8_t *pal, int npal, int tidx) { GetBitContext gb; int i, j, nb, col; init_get_bits8(&gb, src, len); if (npal <= 2) nb = 1; else if (npal <= 4) nb = 2; else if (npal <= 16) nb = 4; else nb = 8; for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) { if (get_bits(&gb, 8)) continue; for (i = 0; i < width; i++) { col = get_bits(&gb, nb); if (col != tidx) memcpy(dst + i * 3, pal + col * 3, 3); else memcpy(dst + i * 3, jpeg_tile + i * 3, 3); } } }"} {"target": 0, "idx": 4686, "func": "static JavaVM *get_java_vm(const char *name, void *log_ctx) { JavaVM *vm = NULL; jsize nb_vm = 0; void *handle = NULL; jint (*get_created_java_vms) (JavaVM ** vmBuf, jsize bufLen, jsize *nVMs) = NULL; handle = dlopen(name, RTLD_LOCAL); if (!handle) { return NULL; } get_created_java_vms = (jint (*)(JavaVM **, jsize, jsize *)) dlsym(handle, \"JNI_GetCreatedJavaVMs\"); if (!get_created_java_vms) { av_log(log_ctx, AV_LOG_ERROR, \"Could not find JNI_GetCreatedJavaVMs symbol in library '%s'\\n\", name); goto done; } if (get_created_java_vms(&vm, 1, &nb_vm) != JNI_OK) { av_log(log_ctx, AV_LOG_ERROR, \"Could not get created Java virtual machines\\n\"); goto done; } done: if (handle) { dlclose(handle); } return vm; }"} {"target": 0, "idx": 4701, "func": "static int mpegts_push_data(MpegTSFilter *filter, const uint8_t *buf, int buf_size, int is_start, int64_t pos) { PESContext *pes = filter->u.pes_filter.opaque; MpegTSContext *ts = pes->ts; const uint8_t *p; int len, code; if(!ts->pkt) return 0; if (is_start) { if (pes->state == MPEGTS_PAYLOAD && pes->data_index > 0) { new_pes_packet(pes, ts->pkt); ts->stop_parse = 1; } pes->state = MPEGTS_HEADER; pes->data_index = 0; pes->ts_packet_pos = pos; } p = buf; while (buf_size > 0) { switch(pes->state) { case MPEGTS_HEADER: len = PES_START_SIZE - pes->data_index; if (len > buf_size) len = buf_size; memcpy(pes->header + pes->data_index, p, len); pes->data_index += len; p += len; buf_size -= len; if (pes->data_index == PES_START_SIZE) { /* we got all the PES or section header. 
We can now decide */ #if 0 av_hex_dump_log(pes->stream, AV_LOG_DEBUG, pes->header, pes->data_index); #endif if (pes->header[0] == 0x00 && pes->header[1] == 0x00 && pes->header[2] == 0x01) { /* it must be an mpeg2 PES stream */ code = pes->header[3] | 0x100; dprintf(pes->stream, \"pid=%x pes_code=%#x\\n\", pes->pid, code); if ((pes->st && pes->st->discard == AVDISCARD_ALL) || code == 0x1be) /* padding_stream */ goto skip; /* stream not present in PMT */ if (!pes->st) pes->st = new_pes_av_stream(pes, 0, code); if (!pes->st) return AVERROR(ENOMEM); pes->total_size = AV_RB16(pes->header + 4); /* NOTE: a zero total size means the PES size is unbounded */ if (!pes->total_size) pes->total_size = MAX_PES_PAYLOAD; /* allocate pes buffer */ pes->buffer = av_malloc(pes->total_size+FF_INPUT_BUFFER_PADDING_SIZE); if (!pes->buffer) return AVERROR(ENOMEM); if (code != 0x1bc && code != 0x1bf && /* program_stream_map, private_stream_2 */ code != 0x1f0 && code != 0x1f1 && /* ECM, EMM */ code != 0x1ff && code != 0x1f2 && /* program_stream_directory, DSMCC_stream */ code != 0x1f8) { /* ITU-T Rec. H.222.1 type E stream */ pes->state = MPEGTS_PESHEADER_FILL; pes->pes_header_size = pes->header[8] + 9; } else { pes->state = MPEGTS_PAYLOAD; pes->data_index = 0; } } else { /* otherwise, it should be a table */ /* skip packet */ skip: pes->state = MPEGTS_SKIP; continue; } } break; /**********************************************/ /* PES packing parsing */ case MPEGTS_PESHEADER_FILL: len = pes->pes_header_size - pes->data_index; if (len < 0) return -1; if (len > buf_size) len = buf_size; memcpy(pes->header + pes->data_index, p, len); pes->data_index += len; p += len; buf_size -= len; if (pes->data_index == pes->pes_header_size) { const uint8_t *r; unsigned int flags; flags = pes->header[7]; r = pes->header + 9; pes->pts = AV_NOPTS_VALUE; pes->dts = AV_NOPTS_VALUE; if ((flags & 0xc0) == 0x80) { pes->dts = pes->pts = get_pts(r); r += 5; } else if ((flags & 0xc0) == 0xc0) { pes->pts = get_pts(r); r += 5; pes->dts = get_pts(r); r += 5; } /* we got the full header. 
We parse it and get the payload */ pes->state = MPEGTS_PAYLOAD; pes->data_index = 0; } break; case MPEGTS_PAYLOAD: if (buf_size > 0) { if (pes->data_index+buf_size > pes->total_size) { new_pes_packet(pes, ts->pkt); pes->total_size = MAX_PES_PAYLOAD; pes->buffer = av_malloc(pes->total_size+FF_INPUT_BUFFER_PADDING_SIZE); if (!pes->buffer) return AVERROR(ENOMEM); ts->stop_parse = 1; } memcpy(pes->buffer+pes->data_index, p, buf_size); pes->data_index += buf_size; } buf_size = 0; break; case MPEGTS_SKIP: buf_size = 0; break; } } return 0; }"} {"target": 0, "idx": 4708, "func": "static int proxy_fstat(FsContext *fs_ctx, int fid_type, V9fsFidOpenState *fs, struct stat *stbuf) { int fd; if (fid_type == P9_FID_DIR) { fd = dirfd(fs->dir); } else { fd = fs->fd; } return fstat(fd, stbuf); }"} {"target": 0, "idx": 4713, "func": "static uint64_t cmd646_data_read(void *opaque, target_phys_addr_t addr, unsigned size) { CMD646BAR *cmd646bar = opaque; if (size == 1) { return ide_ioport_read(cmd646bar->bus, addr); } else if (addr == 0) { if (size == 2) { return ide_data_readw(cmd646bar->bus, addr); } else { return ide_data_readl(cmd646bar->bus, addr); } } return ((uint64_t)1 << (size * 8)) - 1; }"} {"target": 0, "idx": 4723, "func": "int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp) { static const uint8_t int3 = 0xcc; if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) || cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) return -EINVAL; return 0; }"} {"target": 0, "idx": 4729, "func": "static av_always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], int lowres_flag) { int mb_x, mb_y; const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; #ifdef HAVE_XVMC if(s->avctx->xvmc_acceleration){ XVMC_decode_mb(s);//xvmc uses pblocks return; } #endif mb_x = s->mb_x; mb_y = s->mb_y; if(s->avctx->debug&FF_DEBUG_DCT_COEFF) { /* save DCT coefficients */ int i,j; DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6]; for(i=0; i<6; i++) for(j=0; j<64; j++) *dct++ = block[i][s->dsp.idct_permutation[j]]; } s->current_picture.qscale_table[mb_xy]= s->qscale; /* update DC predictors for P macroblocks */ if (!s->mb_intra) { if (s->h263_pred || s->h263_aic) { if(s->mbintra_table[mb_xy]) ff_clean_intra_table_entries(s); } else { s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 128 << s->intra_dc_precision; } } else if (s->h263_pred || s->h263_aic) s->mbintra_table[mb_xy]=1; if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc uint8_t *dest_y, *dest_cb, *dest_cr; int dct_linesize, dct_offset; op_pixels_func (*op_pix)[4]; qpel_mc_func (*op_qpix)[16]; const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics const int uvlinesize= s->current_picture.linesize[1]; const int readable= s->pict_type != B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag; const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8; /* avoid copy if macroblock skipped in last frame too */ /* skip only during decoding as we might trash the buffers during encoding a bit */ if(!s->encoding){ uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy]; const int age= s->current_picture.age; assert(age); if (s->mb_skipped) { s->mb_skipped= 0; assert(s->pict_type!=I_TYPE); (*mbskip_ptr) ++; /* indicate that this time we skipped it */ if(*mbskip_ptr >99) *mbskip_ptr= 99; /* if previous was skipped too, then nothing to do ! 
*/ if (*mbskip_ptr >= age && s->current_picture.reference){ return; } } else if(!s->current_picture.reference){ (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */ if(*mbskip_ptr >99) *mbskip_ptr= 99; } else{ *mbskip_ptr = 0; /* not skipped */ } } dct_linesize = linesize << s->interlaced_dct; dct_offset =(s->interlaced_dct)? linesize : linesize*block_size; if(readable){ dest_y= s->dest[0]; dest_cb= s->dest[1]; dest_cr= s->dest[2]; }else{ dest_y = s->b_scratchpad; dest_cb= s->b_scratchpad+16*linesize; dest_cr= s->b_scratchpad+32*linesize; } if (!s->mb_intra) { /* motion handling */ /* decoding or more than one mb_type (MC was already done otherwise) */ if(!s->encoding){ if(lowres_flag){ h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab; if (s->mv_dir & MV_DIR_FORWARD) { MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix); op_pix = s->dsp.avg_h264_chroma_pixels_tab; } if (s->mv_dir & MV_DIR_BACKWARD) { MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix); } }else{ op_qpix= s->me.qpel_put; if ((!s->no_rounding) || s->pict_type==B_TYPE){ op_pix = s->dsp.put_pixels_tab; }else{ op_pix = s->dsp.put_no_rnd_pixels_tab; } if (s->mv_dir & MV_DIR_FORWARD) { MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix); op_pix = s->dsp.avg_pixels_tab; op_qpix= s->me.qpel_avg; } if (s->mv_dir & MV_DIR_BACKWARD) { MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix); } } } /* skip dequant / idct if we are really late ;) */ if(s->hurry_up>1) goto skip_idct; if(s->avctx->skip_idct){ if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == B_TYPE) ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != I_TYPE) || s->avctx->skip_idct >= AVDISCARD_ALL) goto skip_idct; } /* add dct residue */ if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){ add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale); add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale); add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); if(!(s->flags&CODEC_FLAG_GRAY)){ if (s->chroma_y_shift){ add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); }else{ dct_linesize >>= 1; dct_offset >>=1; add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale); add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale); add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale); add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale); } } } else if(s->codec_id != CODEC_ID_WMV2){ add_dct(s, block[0], 0, dest_y , dct_linesize); add_dct(s, block[1], 1, dest_y + block_size, dct_linesize); add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize); add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize); if(!(s->flags&CODEC_FLAG_GRAY)){ if(s->chroma_y_shift){//Chroma420 add_dct(s, block[4], 4, dest_cb, uvlinesize); add_dct(s, block[5], 5, dest_cr, uvlinesize); }else{ //chroma422 dct_linesize = uvlinesize << s->interlaced_dct; dct_offset =(s->interlaced_dct)? 
uvlinesize : uvlinesize*8; add_dct(s, block[4], 4, dest_cb, dct_linesize); add_dct(s, block[5], 5, dest_cr, dct_linesize); add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize); add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize); if(!s->chroma_x_shift){//Chroma444 add_dct(s, block[8], 8, dest_cb+8, dct_linesize); add_dct(s, block[9], 9, dest_cr+8, dct_linesize); add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize); add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize); } } }//fi gray } else if (ENABLE_WMV2) { ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr); } } else { /* dct only in intra block */ if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){ put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale); put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale); put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); if(!(s->flags&CODEC_FLAG_GRAY)){ if(s->chroma_y_shift){ put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); }else{ dct_offset >>=1; dct_linesize >>=1; put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale); put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale); put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale); put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale); } } }else{ s->dsp.idct_put(dest_y , dct_linesize, block[0]); s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]); s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]); s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]); if(!(s->flags&CODEC_FLAG_GRAY)){ if(s->chroma_y_shift){ s->dsp.idct_put(dest_cb, uvlinesize, block[4]); s->dsp.idct_put(dest_cr, uvlinesize, block[5]); }else{ dct_linesize = uvlinesize << s->interlaced_dct; dct_offset =(s->interlaced_dct)? 
uvlinesize : uvlinesize*8; s->dsp.idct_put(dest_cb, dct_linesize, block[4]); s->dsp.idct_put(dest_cr, dct_linesize, block[5]); s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]); s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]); if(!s->chroma_x_shift){//Chroma444 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]); s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]); s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]); s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]); } } }//gray } } skip_idct: if(!readable){ s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16); s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift); s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift); } } }"} {"target": 1, "idx": 4734, "func": "static int compute_send_delay(HTTPContext *c) { int datarate = 8 * get_longterm_datarate(&c->datarate, c->data_count); if (datarate > c->stream->bandwidth * 2000) { return 1000; } return 0; }"} {"target": 1, "idx": 4757, "func": "void OPPROTO op_check_addo (void) { if (likely(!(((uint32_t)T2 ^ (uint32_t)T1 ^ UINT32_MAX) & ((uint32_t)T2 ^ (uint32_t)T0) & (1UL << 31)))) { xer_ov = 0; } else { xer_so = 1; xer_ov = 1; } RETURN(); }"} {"target": 1, "idx": 4767, "func": "static gpointer writeout_thread(gpointer opaque) { TraceRecord *recordptr; union { TraceRecord rec; uint8_t bytes[sizeof(TraceRecord) + sizeof(uint64_t)]; } dropped; unsigned int idx = 0; int dropped_count; size_t unused __attribute__ ((unused)); for (;;) { wait_for_trace_records_available(); if (g_atomic_int_get(&dropped_events)) { dropped.rec.event = DROPPED_EVENT_ID, dropped.rec.timestamp_ns = get_clock(); dropped.rec.length = sizeof(TraceRecord) + sizeof(uint64_t), dropped.rec.reserved = 0; while (1) { dropped_count = g_atomic_int_get(&dropped_events); if (g_atomic_int_compare_and_exchange(&dropped_events, dropped_count, 0)) { break; } } dropped.rec.arguments[0] = dropped_count; unused = fwrite(&dropped.rec, dropped.rec.length, 1, trace_fp); } while (get_trace_record(idx, &recordptr)) { unused = fwrite(recordptr, recordptr->length, 1, trace_fp); writeout_idx += recordptr->length; free(recordptr); /* dont use g_free, can deadlock when traced */ idx = writeout_idx % TRACE_BUF_LEN; } fflush(trace_fp); } return NULL; }"} {"target": 1, "idx": 4783, "func": "static int handle_renames_and_mkdirs(BDRVVVFATState* s) { int i; #ifdef DEBUG fprintf(stderr, \"handle_renames\\n\"); for (i = 0; i < s->commits.next; i++) { commit_t* commit = array_get(&(s->commits), i); fprintf(stderr, \"%d, %s (%d, %d)\\n\", i, commit->path ? 
commit->path : \"(null)\", commit->param.rename.cluster, commit->action); } #endif for (i = 0; i < s->commits.next;) { commit_t* commit = array_get(&(s->commits), i); if (commit->action == ACTION_RENAME) { mapping_t* mapping = find_mapping_for_cluster(s, commit->param.rename.cluster); char* old_path = mapping->path; assert(commit->path); mapping->path = commit->path; if (rename(old_path, mapping->path)) return -2; if (mapping->mode & MODE_DIRECTORY) { int l1 = strlen(mapping->path); int l2 = strlen(old_path); int diff = l1 - l2; direntry_t* direntry = array_get(&(s->directory), mapping->info.dir.first_dir_index); uint32_t c = mapping->begin; int i = 0; /* recurse */ while (!fat_eof(s, c)) { do { direntry_t* d = direntry + i; if (is_file(d) || (is_directory(d) && !is_dot(d))) { mapping_t* m = find_mapping_for_cluster(s, begin_of_direntry(d)); int l = strlen(m->path); char* new_path = g_malloc(l + diff + 1); assert(!strncmp(m->path, mapping->path, l2)); pstrcpy(new_path, l + diff + 1, mapping->path); pstrcpy(new_path + l1, l + diff + 1 - l1, m->path + l2); schedule_rename(s, m->begin, new_path); } i++; } while((i % (0x10 * s->sectors_per_cluster)) != 0); c = fat_get(s, c); } } free(old_path); array_remove(&(s->commits), i); continue; } else if (commit->action == ACTION_MKDIR) { mapping_t* mapping; int j, parent_path_len; #ifdef __MINGW32__ if (mkdir(commit->path)) return -5; #else if (mkdir(commit->path, 0755)) return -5; #endif mapping = insert_mapping(s, commit->param.mkdir.cluster, commit->param.mkdir.cluster + 1); if (mapping == NULL) return -6; mapping->mode = MODE_DIRECTORY; mapping->read_only = 0; mapping->path = commit->path; j = s->directory.next; assert(j); insert_direntries(s, s->directory.next, 0x10 * s->sectors_per_cluster); mapping->info.dir.first_dir_index = j; parent_path_len = strlen(commit->path) - strlen(get_basename(commit->path)) - 1; for (j = 0; j < s->mapping.next; j++) { mapping_t* m = array_get(&(s->mapping), j); if (m->first_mapping_index < 0 && m != mapping && !strncmp(m->path, mapping->path, parent_path_len) && strlen(m->path) == parent_path_len) break; } assert(j < s->mapping.next); mapping->info.dir.parent_mapping_index = j; array_remove(&(s->commits), i); continue; } i++; } return 0; }"} {"target": 1, "idx": 4794, "func": "AUXBus *aux_init_bus(DeviceState *parent, const char *name) { AUXBus *bus; bus = AUX_BUS(qbus_create(TYPE_AUX_BUS, parent, name)); bus->bridge = AUXTOI2C(qdev_create(BUS(bus), TYPE_AUXTOI2C)); /* Memory related. 
*/ bus->aux_io = g_malloc(sizeof(*bus->aux_io)); memory_region_init(bus->aux_io, OBJECT(bus), \"aux-io\", (1 << 20)); address_space_init(&bus->aux_addr_space, bus->aux_io, \"aux-io\"); return bus; }"} {"target": 1, "idx": 4816, "func": "static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size, n_slices = 0, i, ret; VC1Context *v = avctx->priv_data; MpegEncContext *s = &v->s; AVFrame *pict = data; uint8_t *buf2 = NULL; const uint8_t *buf_start = buf; int mb_height, n_slices1; struct { uint8_t *buf; GetBitContext gb; int mby_start; } *slices = NULL, *tmp; /* no supplementary picture */ if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) { /* special case for last picture */ if (s->low_delay == 0 && s->next_picture_ptr) { if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0) return ret; s->next_picture_ptr = NULL; *got_frame = 1; return 0; //for advanced profile we may need to parse and unescape data if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { int buf_size2 = 0; buf2 = av_mallocz(buf_size + AV_INPUT_BUFFER_PADDING_SIZE); if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */ const uint8_t *start, *end, *next; int size; next = buf; for (start = buf, end = buf + buf_size; next < end; start = next) { next = find_next_marker(start + 4, end); size = next - start - 4; if (size <= 0) continue; switch (AV_RB32(start)) { case VC1_CODE_FRAME: if (avctx->hwaccel) buf_start = start; buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); break; case VC1_CODE_FIELD: { int buf_size3; tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) buf_size3 = vc1_unescape_buffer(start + 4, size, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); /* assuming that the field marker is at the exact middle, hope it's correct */ slices[n_slices].mby_start = s->mb_height >> 1; n_slices1 = n_slices - 1; // index of the last slice of the first field n_slices++; break; case VC1_CODE_ENTRYPOINT: /* it should be before frame data */ buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); init_get_bits(&s->gb, buf2, buf_size2 * 8); ff_vc1_decode_entry_point(avctx, v, &s->gb); break; case VC1_CODE_SLICE: { int buf_size3; tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) buf_size3 = vc1_unescape_buffer(start + 4, size, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9); n_slices++; break; } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */ const uint8_t *divider; int buf_size3; divider = find_next_marker(buf, buf + buf_size); if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) { av_log(avctx, AV_LOG_ERROR, \"Error in WVC1 interlaced frame\\n\"); } else { // found field marker, unescape second field tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) buf_size3 = vc1_unescape_buffer(divider + 4, buf + 
buf_size - divider - 4, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); slices[n_slices].mby_start = s->mb_height >> 1; n_slices1 = n_slices - 1; n_slices++; buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2); } else { buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2); init_get_bits(&s->gb, buf2, buf_size2*8); } else init_get_bits(&s->gb, buf, buf_size*8); if (v->res_sprite) { v->new_sprite = !get_bits1(&s->gb); v->two_sprites = get_bits1(&s->gb); /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means we're using the sprite compositor. These are intentionally kept separate so you can get the raw sprites by using the wmv3 decoder for WMVP or the vc1 one for WVP2 */ if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { if (v->new_sprite) { // switch AVCodecContext parameters to those of the sprites avctx->width = avctx->coded_width = v->sprite_width; avctx->height = avctx->coded_height = v->sprite_height; } else { goto image; if (s->context_initialized && (s->width != avctx->coded_width || s->height != avctx->coded_height)) { ff_vc1_decode_end(avctx); if (!s->context_initialized) { if (ff_msmpeg4_decode_init(avctx) < 0) if (ff_vc1_decode_init_alloc_tables(v) < 0) { ff_mpv_common_end(s); s->low_delay = !avctx->has_b_frames || v->res_sprite; if (v->profile == PROFILE_ADVANCED) { s->h_edge_pos = avctx->coded_width; s->v_edge_pos = avctx->coded_height; // do parse frame header v->pic_header_flag = 0; v->first_pic_header_flag = 1; if (v->profile < PROFILE_ADVANCED) { if (ff_vc1_parse_frame_header(v, &s->gb) < 0) { } else { if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) { v->first_pic_header_flag = 0; if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) && s->pict_type != AV_PICTURE_TYPE_I) { av_log(v->s.avctx, AV_LOG_ERROR, \"Sprite decoder: expected I-frame\\n\"); // for skipping the frame s->current_picture.f->pict_type = s->pict_type; s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; /* skip B-frames if we don't have reference frames */ if (!s->last_picture_ptr && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) { goto end; if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) { goto end; if (s->next_p_frame_damaged) { if (s->pict_type == AV_PICTURE_TYPE_B) goto end; else s->next_p_frame_damaged = 0; if (ff_mpv_frame_start(s, avctx) < 0) { // process pulldown flags s->current_picture_ptr->f->repeat_pict = 0; // Pulldown flags are only valid when 'broadcast' has been set. 
// So ticks_per_frame will be 2 if (v->rff) { // repeat field s->current_picture_ptr->f->repeat_pict = 1; } else if (v->rptfrm) { // repeat frames s->current_picture_ptr->f->repeat_pict = v->rptfrm * 2; s->me.qpel_put = s->qdsp.put_qpel_pixels_tab; s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab; if (avctx->hwaccel) { if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0) if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0) if (avctx->hwaccel->end_frame(avctx) < 0) } else { int header_ret = 0; ff_mpeg_er_frame_start(s); v->bits = buf_size * 8; v->end_mb_x = s->mb_width; if (v->field_mode) { s->current_picture.f->linesize[0] <<= 1; s->current_picture.f->linesize[1] <<= 1; s->current_picture.f->linesize[2] <<= 1; s->linesize <<= 1; s->uvlinesize <<= 1; mb_height = s->mb_height >> v->field_mode; if (!mb_height) { av_log(v->s.avctx, AV_LOG_ERROR, \"Invalid mb_height.\\n\"); for (i = 0; i <= n_slices; i++) { if (i > 0 && slices[i - 1].mby_start >= mb_height) { if (v->field_mode <= 0) { av_log(v->s.avctx, AV_LOG_ERROR, \"Slice %d starts beyond \" \"picture boundary (%d >= %d)\\n\", i, slices[i - 1].mby_start, mb_height); continue; v->second_field = 1; v->blocks_off = s->mb_width * s->mb_height << 1; v->mb_off = s->mb_stride * s->mb_height >> 1; } else { v->second_field = 0; v->blocks_off = 0; v->mb_off = 0; if (i) { v->pic_header_flag = 0; if (v->field_mode && i == n_slices1 + 2) { if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) { av_log(v->s.avctx, AV_LOG_ERROR, \"Field header damaged\\n\"); if (avctx->err_recognition & AV_EF_EXPLODE) continue; } else if (get_bits1(&s->gb)) { v->pic_header_flag = 1; if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) { av_log(v->s.avctx, AV_LOG_ERROR, \"Slice header damaged\\n\"); if (avctx->err_recognition & AV_EF_EXPLODE) continue; if (header_ret < 0) continue; s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height); if (!v->field_mode || v->second_field) s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height); else s->end_mb_y = (i <= n_slices1 + 1) ? 
mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height); ff_vc1_decode_blocks(v); if (i != n_slices) s->gb = slices[i].gb; if (v->field_mode) { v->second_field = 0; s->current_picture.f->linesize[0] >>= 1; s->current_picture.f->linesize[1] >>= 1; s->current_picture.f->linesize[2] >>= 1; s->linesize >>= 1; s->uvlinesize >>= 1; if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) { FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]); FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]); ff_dlog(s->avctx, \"Consumed %i/%i bits\\n\", get_bits_count(&s->gb), s->gb.size_in_bits); // if (get_bits_count(&s->gb) > buf_size * 8) // return -1; if (!v->field_mode) ff_er_frame_end(&s->er); ff_mpv_frame_end(s); if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { image: avctx->width = avctx->coded_width = v->output_width; avctx->height = avctx->coded_height = v->output_height; if (avctx->skip_frame >= AVDISCARD_NONREF) goto end; #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER if (vc1_decode_sprites(v, &s->gb)) #endif if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0) *got_frame = 1; } else { if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0) ff_print_debug_info(s, s->current_picture_ptr); *got_frame = 1; } else if (s->last_picture_ptr) { if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0) ff_print_debug_info(s, s->last_picture_ptr); *got_frame = 1; end: av_free(buf2); for (i = 0; i < n_slices; i++) av_free(slices[i].buf); av_free(slices); return buf_size; err: av_free(buf2); for (i = 0; i < n_slices; i++) av_free(slices[i].buf); av_free(slices); return -1;"} {"target": 1, "idx": 4818, "func": "static void qdev_print_devinfo(DeviceClass *dc) { error_printf(\"name \\\"%s\\\"\", object_class_get_name(OBJECT_CLASS(dc))); if (dc->bus_type) { error_printf(\", bus %s\", dc->bus_type); } if (qdev_class_has_alias(dc)) { error_printf(\", alias \\\"%s\\\"\", qdev_class_get_alias(dc)); } if (dc->desc) { error_printf(\", desc \\\"%s\\\"\", dc->desc); } if (dc->no_user) { error_printf(\", no-user\"); } error_printf(\"\\n\"); }"} {"target": 0, "idx": 4830, "func": "static void switch_tss(int tss_selector, uint32_t e1, uint32_t e2, int source) { int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; uint8_t *tss_base; uint32_t new_regs[8], new_segs[6]; uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; uint32_t old_eflags, eflags_mask; SegmentCache *dt; int index; uint8_t *ptr; type = (e2 >> DESC_TYPE_SHIFT) & 0xf; #ifdef DEBUG_PCALL if (loglevel) fprintf(logfile, \"switch_tss: sel=0x%04x type=%d src=%d\\n\", tss_selector, type, source); #endif /* if task gate, we read the TSS segment and we load it */ if (type == 5) { if (!(e2 & DESC_P_MASK)) raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc); tss_selector = e1 >> 16; if (tss_selector & 4) raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); if (load_segment(&e1, &e2, tss_selector) != 0) raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); if (e2 & DESC_S_MASK) raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); type = (e2 >> DESC_TYPE_SHIFT) & 0xf; if ((type & 7) != 1) raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc); } if (!(e2 & DESC_P_MASK)) raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc); if (type & 8) tss_limit_max = 103; else tss_limit_max = 43; tss_limit = get_seg_limit(e1, e2); tss_base = get_seg_base(e1, e2); if ((tss_selector & 4) != 0 || 
tss_limit < tss_limit_max) raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc); old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; if (old_type & 8) old_tss_limit_max = 103; else old_tss_limit_max = 43; /* read all the registers from the new TSS */ if (type & 8) { /* 32 bit */ new_cr3 = ldl_kernel(tss_base + 0x1c); new_eip = ldl_kernel(tss_base + 0x20); new_eflags = ldl_kernel(tss_base + 0x24); for(i = 0; i < 8; i++) new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4)); for(i = 0; i < 6; i++) new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4)); new_ldt = lduw_kernel(tss_base + 0x60); new_trap = ldl_kernel(tss_base + 0x64); } else { /* 16 bit */ new_cr3 = 0; new_eip = lduw_kernel(tss_base + 0x0e); new_eflags = lduw_kernel(tss_base + 0x10); for(i = 0; i < 8; i++) new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000; for(i = 0; i < 4; i++) new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4)); new_ldt = lduw_kernel(tss_base + 0x2a); new_segs[R_FS] = 0; new_segs[R_GS] = 0; new_trap = 0; } /* NOTE: we must avoid memory exceptions during the task switch, so we make dummy accesses before */ /* XXX: it can still fail in some cases, so a bigger hack is necessary to valid the TLB after having done the accesses */ v1 = ldub_kernel(env->tr.base); v2 = ldub(env->tr.base + old_tss_limit_max); stb_kernel(env->tr.base, v1); stb_kernel(env->tr.base + old_tss_limit_max, v2); /* clear busy bit (it is restartable) */ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { uint8_t *ptr; uint32_t e2; ptr = env->gdt.base + (env->tr.selector << 3); e2 = ldl_kernel(ptr + 4); e2 &= ~DESC_TSS_BUSY_MASK; stl_kernel(ptr + 4, e2); } old_eflags = compute_eflags(); if (source == SWITCH_TSS_IRET) old_eflags &= ~NT_MASK; /* save the current state in the old TSS */ if (type & 8) { /* 32 bit */ stl_kernel(env->tr.base + 0x20, env->eip); stl_kernel(env->tr.base + 0x24, old_eflags); for(i = 0; i < 8; i++) stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]); for(i = 0; i < 6; i++) stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector); } else { /* 16 bit */ stw_kernel(env->tr.base + 0x0e, new_eip); stw_kernel(env->tr.base + 0x10, old_eflags); for(i = 0; i < 8; i++) stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]); for(i = 0; i < 4; i++) stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector); } /* now if an exception occurs, it will occurs in the next task context */ if (source == SWITCH_TSS_CALL) { stw_kernel(tss_base, env->tr.selector); new_eflags |= NT_MASK; } /* set busy bit */ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { uint8_t *ptr; uint32_t e2; ptr = env->gdt.base + (tss_selector << 3); e2 = ldl_kernel(ptr + 4); e2 |= DESC_TSS_BUSY_MASK; stl_kernel(ptr + 4, e2); } /* set the new CPU state */ /* from this point, any exception which occurs can give problems */ env->cr[0] |= CR0_TS_MASK; env->tr.selector = tss_selector; env->tr.base = tss_base; env->tr.limit = tss_limit; env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { env->cr[3] = new_cr3; cpu_x86_update_cr3(env); } /* load all registers without an exception, then reload them with possible exception */ env->eip = new_eip; eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK; if (!(type & 8)) eflags_mask &= 0xffff; load_eflags(new_eflags, eflags_mask); for(i = 0; i < 8; i++) env->regs[i] = new_regs[i]; if (new_eflags & VM_MASK) { for(i = 0; i < 6; i++) load_seg_vm(i, new_segs[i]); /* in vm86, CPL is always 3 */ 
cpu_x86_set_cpl(env, 3); } else { /* CPL is set the RPL of CS */ cpu_x86_set_cpl(env, new_segs[R_CS] & 3); /* first just selectors as the rest may trigger exceptions */ for(i = 0; i < 6; i++) cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0); } env->ldt.selector = new_ldt & ~4; env->ldt.base = NULL; env->ldt.limit = 0; env->ldt.flags = 0; /* load the LDT */ if (new_ldt & 4) raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); if ((new_ldt & 0xfffc) != 0) { dt = &env->gdt; index = new_ldt & ~7; if ((index + 7) > dt->limit) raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); ptr = dt->base + index; e1 = ldl_kernel(ptr); e2 = ldl_kernel(ptr + 4); if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); if (!(e2 & DESC_P_MASK)) raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc); load_seg_cache_raw_dt(&env->ldt, e1, e2); } /* load the segments */ if (!(new_eflags & VM_MASK)) { tss_load_seg(R_CS, new_segs[R_CS]); tss_load_seg(R_SS, new_segs[R_SS]); tss_load_seg(R_ES, new_segs[R_ES]); tss_load_seg(R_DS, new_segs[R_DS]); tss_load_seg(R_FS, new_segs[R_FS]); tss_load_seg(R_GS, new_segs[R_GS]); } /* check that EIP is in the CS segment limits */ if (new_eip > env->segs[R_CS].limit) { raise_exception_err(EXCP0D_GPF, 0); } }"} {"target": 0, "idx": 4832, "func": "uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b) { float32 f0 = float32_abs(make_float32(a)); float32 f1 = float32_abs(make_float32(b)); if (float32_compare_quiet(f0, f1, NFS) == float_relation_greater) { return ~0; } return 0; }"} {"target": 0, "idx": 4847, "func": "sosendto(struct socket *so, struct mbuf *m) { Slirp *slirp = so->slirp; int ret; struct sockaddr_in addr; DEBUG_CALL(\"sosendto\"); DEBUG_ARG(\"so = %p\", so); DEBUG_ARG(\"m = %p\", m); addr.sin_family = AF_INET; if ((so->so_faddr.s_addr & slirp->vnetwork_mask.s_addr) == slirp->vnetwork_addr.s_addr) { /* It's an alias */ if (so->so_faddr.s_addr == slirp->vnameserver_addr.s_addr) { if (get_dns_addr(&addr.sin_addr) < 0) addr.sin_addr = loopback_addr; } else { addr.sin_addr = loopback_addr; } } else addr.sin_addr = so->so_faddr; addr.sin_port = so->so_fport; DEBUG_MISC((dfd, \" sendto()ing, addr.sin_port=%d, addr.sin_addr.s_addr=%.16s\\n\", ntohs(addr.sin_port), inet_ntoa(addr.sin_addr))); /* Don't care what port we get */ ret = sendto(so->s, m->m_data, m->m_len, 0, (struct sockaddr *)&addr, sizeof (struct sockaddr)); if (ret < 0) return -1; /* * Kill the socket if there's no reply in 4 minutes, * but only if it's an expirable socket */ if (so->so_expire) so->so_expire = curtime + SO_EXPIRE; so->so_state &= SS_PERSISTENT_MASK; so->so_state |= SS_ISFCONNECTED; /* So that it gets select()ed */ return 0; }"} {"target": 0, "idx": 4848, "func": "void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size) { deblocking_filter_CTB(s, x, y); if (s->sps->sao_enabled) { int x_end = x >= s->sps->width - ctb_size; int y_end = y >= s->sps->height - ctb_size; if (y && x) sao_filter_CTB(s, x - ctb_size, y - ctb_size); if (x && y_end) sao_filter_CTB(s, x - ctb_size, y); if (y && x_end) { sao_filter_CTB(s, x, y - ctb_size); if (s->threads_type & FF_THREAD_FRAME ) ff_thread_report_progress(&s->ref->tf, y - ctb_size, 0); } if (x_end && y_end) { sao_filter_CTB(s, x , y); if (s->threads_type & FF_THREAD_FRAME ) ff_thread_report_progress(&s->ref->tf, y, 0); } } else { if (y && x >= s->sps->width - ctb_size) if (s->threads_type & FF_THREAD_FRAME ) ff_thread_report_progress(&s->ref->tf, y, 0); } }"} {"target": 1, "idx": 4861, "func": 
"static int advanced_decode_picture_primary_header(VC9Context *v) { GetBitContext *gb = &v->s.gb; static const int type_table[4] = { P_TYPE, B_TYPE, I_TYPE, BI_TYPE }; int type, i; if (v->interlace) { v->fcm = get_bits(gb, 1); if (v->fcm) v->fcm = 2+get_bits(gb, 1); } type = get_prefix(gb, 0, 4); if (type > 4 || type < 0) return FRAME_SKIPED; v->s.pict_type = type_table[type]; av_log(v->s.avctx, AV_LOG_INFO, \"AP Frame Type: %i\\n\", v->s.pict_type); if (v->tfcntrflag) v->tfcntr = get_bits(gb, 8); if (v->broadcast) { if (!v->interlace) v->rptfrm = get_bits(gb, 2); else { v->tff = get_bits(gb, 1); v->rff = get_bits(gb, 1); } } if (v->panscanflag) { #if 0 for (i=0; inumpanscanwin; i++) { v->topleftx[i] = get_bits(gb, 16); v->toplefty[i] = get_bits(gb, 16); v->bottomrightx[i] = get_bits(gb, 16); v->bottomrighty[i] = get_bits(gb, 16); } #else skip_bits(gb, 16*4*v->numpanscanwin); #endif } v->s.no_rounding = !get_bits(gb, 1); v->uvsamp = get_bits(gb, 1); if (v->finterpflag == 1) v->interpfrm = get_bits(gb, 1); switch(v->s.pict_type) { case I_TYPE: if (decode_i_picture_header(v) < 0) return -1; case P_TYPE: if (decode_p_picture_primary_header(v) < 0) return -1; case BI_TYPE: case B_TYPE: if (decode_b_picture_primary_header(v) < 0) return FRAME_SKIPED; default: break; } return 0; }"} {"target": 1, "idx": 4876, "func": "static double get_video_clock(VideoState *is) { if (is->paused) { return is->video_current_pts; } else { return is->video_current_pts + (av_gettime() - is->video_current_pts_time) / 1000000.0; } }"} {"target": 0, "idx": 4886, "func": "long do_rt_sigreturn(CPUSH4State *regs) { struct target_rt_sigframe *frame; abi_ulong frame_addr; sigset_t blocked; target_ulong r0; frame_addr = regs->gregs[15]; trace_user_do_rt_sigreturn(regs, frame_addr); if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { goto badframe; } target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); do_sigprocmask(SIG_SETMASK, &blocked, NULL); restore_sigcontext(regs, &frame->uc.tuc_mcontext, &r0); if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, uc.tuc_stack), 0, get_sp_from_cpustate(regs)) == -EFAULT) { goto badframe; } unlock_user_struct(frame, frame_addr, 0); return r0; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); return 0; }"} {"target": 0, "idx": 4891, "func": "static void patch_reloc(uint8_t *code_ptr, int type, intptr_t value, intptr_t addend) { uint32_t insn; value += addend; switch (type) { case R_SPARC_32: if (value != (uint32_t)value) { tcg_abort(); } *(uint32_t *)code_ptr = value; break; case R_SPARC_WDISP16: value -= (intptr_t)code_ptr; if (!check_fit_tl(value >> 2, 16)) { tcg_abort(); } insn = *(uint32_t *)code_ptr; insn &= ~INSN_OFF16(-1); insn |= INSN_OFF16(value); *(uint32_t *)code_ptr = insn; break; case R_SPARC_WDISP19: value -= (intptr_t)code_ptr; if (!check_fit_tl(value >> 2, 19)) { tcg_abort(); } insn = *(uint32_t *)code_ptr; insn &= ~INSN_OFF19(-1); insn |= INSN_OFF19(value); *(uint32_t *)code_ptr = insn; break; default: tcg_abort(); } }"} {"target": 0, "idx": 4903, "func": "static int v9fs_synth_unlinkat(FsContext *ctx, V9fsPath *dir, const char *name, int flags) { errno = EPERM; return -1; }"} {"target": 0, "idx": 4909, "func": "void json_start_object(QJSON *json, const char *name) { json_emit_element(json, name); qstring_append(json->str, \"{ \"); json->omit_comma = true; }"} {"target": 0, "idx": 4913, "func": "START_TEST(qdict_get_int_test) { int ret; const int value = 100; const char *key = \"int\"; qdict_put(tests_dict, key, 
qint_from_int(value)); ret = qdict_get_int(tests_dict, key); fail_unless(ret == value); }"} {"target": 0, "idx": 4916, "func": "static void float64_maddsub_update_excp(CPUPPCState *env, float64 arg1, float64 arg2, float64 arg3, unsigned int madd_flags) { if (unlikely((float64_is_infinity(arg1) && float64_is_zero(arg2)) || (float64_is_zero(arg1) && float64_is_infinity(arg2)))) { /* Multiplication of zero by infinity */ arg1 = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1); } else if (unlikely(float64_is_signaling_nan(arg1, &env->fp_status) || float64_is_signaling_nan(arg2, &env->fp_status) || float64_is_signaling_nan(arg3, &env->fp_status))) { /* sNaN operation */ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); } else if ((float64_is_infinity(arg1) || float64_is_infinity(arg2)) && float64_is_infinity(arg3)) { uint8_t aSign, bSign, cSign; aSign = float64_is_neg(arg1); bSign = float64_is_neg(arg2); cSign = float64_is_neg(arg3); if (madd_flags & float_muladd_negate_c) { cSign ^= 1; } if (aSign ^ bSign ^ cSign) { float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1); } } }"} {"target": 0, "idx": 4930, "func": "static uint32_t check_alarm(RTCState *s) { uint8_t alarm_hour, alarm_min, alarm_sec; uint8_t cur_hour, cur_min, cur_sec; alarm_sec = rtc_from_bcd(s, s->cmos_data[RTC_SECONDS_ALARM]); alarm_min = rtc_from_bcd(s, s->cmos_data[RTC_MINUTES_ALARM]); alarm_hour = rtc_from_bcd(s, s->cmos_data[RTC_HOURS_ALARM]); alarm_hour = convert_hour(s, alarm_hour); cur_sec = rtc_from_bcd(s, s->cmos_data[RTC_SECONDS]); cur_min = rtc_from_bcd(s, s->cmos_data[RTC_MINUTES]); cur_hour = rtc_from_bcd(s, s->cmos_data[RTC_HOURS]); cur_hour = convert_hour(s, cur_hour); if (((s->cmos_data[RTC_SECONDS_ALARM] & 0xc0) == 0xc0 || alarm_sec == cur_sec) && ((s->cmos_data[RTC_MINUTES_ALARM] & 0xc0) == 0xc0 || alarm_min == cur_min) && ((s->cmos_data[RTC_HOURS_ALARM] & 0xc0) == 0xc0 || alarm_hour == cur_hour)) { return 1; } return 0; }"} {"target": 0, "idx": 4937, "func": "static int get_dns_addr(struct in_addr *pdns_addr) { char buff[512]; char buff2[257]; FILE *f; int found = 0; struct in_addr tmp_addr; f = fopen(\"/etc/resolv.conf\", \"r\"); if (!f) return -1; #ifdef DEBUG lprint(\"IP address of your DNS(s): \"); #endif while (fgets(buff, 512, f) != NULL) { if (sscanf(buff, \"nameserver%*[ \\t]%256s\", buff2) == 1) { if (!inet_aton(buff2, &tmp_addr)) continue; if (tmp_addr.s_addr == loopback_addr.s_addr) tmp_addr = our_addr; /* If it's the first one, set it to dns_addr */ if (!found) *pdns_addr = tmp_addr; #ifdef DEBUG else lprint(\", \"); #endif if (++found > 3) { #ifdef DEBUG lprint(\"(more)\"); #endif break; } #ifdef DEBUG else lprint(\"%s\", inet_ntoa(tmp_addr)); #endif } } fclose(f); if (!found) return -1; return 0; }"} {"target": 0, "idx": 4945, "func": "static bool aio_epoll_try_enable(AioContext *ctx) { AioHandler *node; struct epoll_event event; QLIST_FOREACH(node, &ctx->aio_handlers, node) { int r; if (node->deleted || !node->pfd.events) { continue; } event.events = epoll_events_from_pfd(node->pfd.events); event.data.ptr = node; r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event); if (r) { return false; } } ctx->epoll_enabled = true; return true; }"} {"target": 0, "idx": 4948, "func": "static int bt_parse(const char *opt) { const char *endp, *p; int vlan; if (strstart(opt, \"hci\", &endp)) { if (!*endp || *endp == ',') { if (*endp) if (!strstart(endp, \",vlan=\", 0)) opt = endp + 1; return bt_hci_parse(opt); } } else if (strstart(opt, \"vhci\", &endp)) { if (!*endp || *endp == ',') { if 
(*endp) { if (strstart(endp, \",vlan=\", &p)) { vlan = strtol(p, (char **) &endp, 0); if (*endp) { fprintf(stderr, \"qemu: bad scatternet '%s'\\n\", p); return 1; } } else { fprintf(stderr, \"qemu: bad parameter '%s'\\n\", endp + 1); return 1; } } else vlan = 0; bt_vhci_add(vlan); return 0; } } else if (strstart(opt, \"device:\", &endp)) return !bt_device_add(endp); fprintf(stderr, \"qemu: bad bluetooth parameter '%s'\\n\", opt); return 1; }"} {"target": 0, "idx": 4949, "func": "static void omap_mpui_io_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { /* FIXME: infinite loop */ omap_badwidth_write16(opaque, addr, value); }"} {"target": 0, "idx": 4958, "func": "static int ipvideo_decode_block_opcode_0xC(IpvideoContext *s) { int x, y; /* 16-color block encoding: each 2x2 block is a different color */ CHECK_STREAM_PTR(16); for (y = 0; y < 8; y += 2) { for (x = 0; x < 8; x += 2) { s->pixel_ptr[x ] = s->pixel_ptr[x + 1 ] = s->pixel_ptr[x + s->stride] = s->pixel_ptr[x + 1 + s->stride] = *s->stream_ptr++; } s->pixel_ptr += s->stride * 2; } /* report success */ return 0; }"} {"target": 0, "idx": 4960, "func": "static void term_handle_command(char *cmdline) { char *p, *pstart; int argc; const char *args[MAX_ARGS + 1]; term_cmd_t *cmd; #ifdef DEBUG term_printf(\"command='%s'\\n\", cmdline); #endif /* split command in words */ argc = 0; p = cmdline; for(;;) { while (isspace(*p)) p++; if (*p == '\\0') break; pstart = p; while (*p != '\\0' && !isspace(*p)) p++; args[argc] = pstart; argc++; if (argc >= MAX_ARGS) break; if (*p == '\\0') break; *p++ = '\\0'; } args[argc] = NULL; #ifdef DEBUG for(i=0;iname != NULL; cmd++) { if (compare_cmd(args[0], cmd->name)) goto found; } term_printf(\"unknown command: '%s'\\n\", args[0]); return; found: cmd->handler(argc, args); }"} {"target": 1, "idx": 4971, "func": "static void m5206_mbar_writew(void *opaque, target_phys_addr_t offset, uint32_t value) { m5206_mbar_state *s = (m5206_mbar_state *)opaque; int width; offset &= 0x3ff; if (offset > 0x200) { hw_error(\"Bad MBAR write offset 0x%x\", (int)offset); } width = m5206_mbar_width[offset >> 2]; if (width > 2) { uint32_t tmp; tmp = m5206_mbar_readl(opaque, offset & ~3); if (offset & 3) { tmp = (tmp & 0xffff0000) | value; } else { tmp = (tmp & 0x0000ffff) | (value << 16); } m5206_mbar_writel(opaque, offset & ~3, tmp); return; } else if (width < 2) { m5206_mbar_writeb(opaque, offset, value >> 8); m5206_mbar_writeb(opaque, offset + 1, value & 0xff); return; } m5206_mbar_write(s, offset, value, 2); }"} {"target": 1, "idx": 4973, "func": "static void test_ivshmem_server(bool msi) { IVState state1, state2, *s1, *s2; ServerThread thread; IvshmemServer server; int ret, vm1, vm2; int nvectors = 2; guint64 end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND; ret = ivshmem_server_init(&server, tmpserver, tmpshm, true, TMPSHMSIZE, nvectors, g_test_verbose()); g_assert_cmpint(ret, ==, 0); ret = ivshmem_server_start(&server); g_assert_cmpint(ret, ==, 0); setup_vm_with_server(&state1, nvectors, msi); s1 = &state1; setup_vm_with_server(&state2, nvectors, msi); s2 = &state2; /* check state before server sends stuff */ g_assert_cmpuint(in_reg(s1, IVPOSITION), ==, 0xffffffff); g_assert_cmpuint(in_reg(s2, IVPOSITION), ==, 0xffffffff); g_assert_cmpuint(qtest_readb(s1->qtest, (uintptr_t)s1->mem_base), ==, 0x00); thread.server = &server; ret = pipe(thread.pipe); g_assert_cmpint(ret, ==, 0); thread.thread = g_thread_new(\"ivshmem-server\", server_thread, &thread); g_assert(thread.thread != NULL); /* 
waiting for devices to become operational */ while (g_get_monotonic_time() < end_time) { g_usleep(1000); if ((int)in_reg(s1, IVPOSITION) >= 0 && (int)in_reg(s2, IVPOSITION) >= 0) { break; } } /* check got different VM ids */ vm1 = in_reg(s1, IVPOSITION); vm2 = in_reg(s2, IVPOSITION); g_assert_cmpuint(vm1, !=, vm2); /* check number of MSI-X vectors */ global_qtest = s1->qtest; if (msi) { ret = qpci_msix_table_size(s1->dev); g_assert_cmpuint(ret, ==, nvectors); } /* TODO test behavior before MSI-X is enabled */ /* ping vm2 -> vm1 on vector 0 */ if (msi) { ret = qpci_msix_pending(s1->dev, 0); g_assert_cmpuint(ret, ==, 0); } else { g_assert_cmpuint(in_reg(s1, INTRSTATUS), ==, 0); } out_reg(s2, DOORBELL, vm1 << 16); do { g_usleep(10000); ret = msi ? qpci_msix_pending(s1->dev, 0) : in_reg(s1, INTRSTATUS); } while (ret == 0 && g_get_monotonic_time() < end_time); g_assert_cmpuint(ret, !=, 0); /* ping vm1 -> vm2 on vector 1 */ global_qtest = s2->qtest; if (msi) { ret = qpci_msix_pending(s2->dev, 1); g_assert_cmpuint(ret, ==, 0); } else { g_assert_cmpuint(in_reg(s2, INTRSTATUS), ==, 0); } out_reg(s1, DOORBELL, vm2 << 16 | 1); do { g_usleep(10000); ret = msi ? qpci_msix_pending(s2->dev, 1) : in_reg(s2, INTRSTATUS); } while (ret == 0 && g_get_monotonic_time() < end_time); g_assert_cmpuint(ret, !=, 0); cleanup_vm(s2); cleanup_vm(s1); if (qemu_write_full(thread.pipe[1], \"q\", 1) != 1) { g_error(\"qemu_write_full: %s\", g_strerror(errno)); } g_thread_join(thread.thread); ivshmem_server_close(&server); close(thread.pipe[1]); close(thread.pipe[0]); }"} {"target": 1, "idx": 4976, "func": "int qemu_peek_byte(QEMUFile *f, int offset) { int index = f->buf_index + offset; assert(!qemu_file_is_writable(f)); assert(offset < IO_BUF_SIZE); if (index >= f->buf_size) { qemu_fill_buffer(f); index = f->buf_index + offset; if (index >= f->buf_size) { return 0; } } return f->buf[index]; }"} {"target": 1, "idx": 4991, "func": "static void FUNCC(pred8x8_horizontal)(uint8_t *_src, int stride){ int i; pixel *src = (pixel*)_src; stride /= sizeof(pixel); for(i=0; i<8; i++){ ((pixel4*)(src+i*stride))[0]= ((pixel4*)(src+i*stride))[1]= PIXEL_SPLAT_X4(src[-1+i*stride]); } }"} {"target": 1, "idx": 4992, "func": "monitor_protocol_event_queue(MonitorEvent event, QObject *data) { MonitorEventState *evstate; int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); assert(event < QEVENT_MAX); qemu_mutex_lock(&monitor_event_state_lock); evstate = &(monitor_event_state[event]); trace_monitor_protocol_event_queue(event, data, evstate->rate, evstate->last, now); /* Rate limit of 0 indicates no throttling */ if (!evstate->rate) { monitor_protocol_event_emit(event, data); evstate->last = now; } else { int64_t delta = now - evstate->last; if (evstate->data || delta < evstate->rate) { /* If there's an existing event pending, replace * it with the new event, otherwise schedule a * timer for delayed emission */ if (evstate->data) { qobject_decref(evstate->data); } else { int64_t then = evstate->last + evstate->rate; timer_mod_ns(evstate->timer, then); } evstate->data = data; qobject_incref(evstate->data); } else { monitor_protocol_event_emit(event, data); evstate->last = now; } } qemu_mutex_unlock(&monitor_event_state_lock); }"} {"target": 1, "idx": 4996, "func": "static int qemu_gluster_create(const char *filename, QemuOpts *opts, Error **errp) { struct glfs *glfs; struct glfs_fd *fd; int ret = 0; int prealloc = 0; int64_t total_size = 0; char *tmp = NULL; GlusterConf *gconf = g_malloc0(sizeof(GlusterConf)); glfs = qemu_gluster_init(gconf, 
filename, errp); if (!glfs) { ret = -errno; goto out; } total_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0) / BDRV_SECTOR_SIZE; tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); if (!tmp || !strcmp(tmp, \"off\")) { prealloc = 0; } else if (!strcmp(tmp, \"full\") && gluster_supports_zerofill()) { prealloc = 1; } else { error_setg(errp, \"Invalid preallocation mode: '%s'\" \" or GlusterFS doesn't support zerofill API\", tmp); ret = -EINVAL; goto out; } fd = glfs_creat(glfs, gconf->image, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR); if (!fd) { ret = -errno; } else { if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) { if (prealloc && qemu_gluster_zerofill(fd, 0, total_size * BDRV_SECTOR_SIZE)) { ret = -errno; } } else { ret = -errno; } if (glfs_close(fd) != 0) { ret = -errno; } } out: g_free(tmp); qemu_gluster_gconf_free(gconf); if (glfs) { glfs_fini(glfs); } return ret; }"} {"target": 1, "idx": 5002, "func": "void *qemu_memalign(size_t alignment, size_t size) { if (!size) { abort(); } return oom_check(VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE)); }"} {"target": 0, "idx": 5006, "func": "static void decode_parameters(SiprParameters* parms, GetBitContext *pgb, const SiprModeParam *p) { int i, j; parms->ma_pred_switch = get_bits(pgb, p->ma_predictor_bits); for (i = 0; i < 5; i++) parms->vq_indexes[i] = get_bits(pgb, p->vq_indexes_bits[i]); for (i = 0; i < p->subframe_count; i++) { parms->pitch_delay[i] = get_bits(pgb, p->pitch_delay_bits[i]); parms->gp_index[i] = get_bits(pgb, p->gp_index_bits); for (j = 0; j < p->number_of_fc_indexes; j++) parms->fc_indexes[i][j] = get_bits(pgb, p->fc_index_bits[j]); parms->gc_index[i] = get_bits(pgb, p->gc_index_bits); } }"} {"target": 0, "idx": 5008, "func": "static void slow_bar_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { AssignedDevRegion *d = opaque; uint32_t *out = (uint32_t *)(d->u.r_virtbase + addr); DEBUG(\"slow_bar_writel addr=0x\" TARGET_FMT_plx \" val=0x%08x\\n\", addr, val); *out = val; }"} {"target": 0, "idx": 5020, "func": "SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, DriveInfo *dinfo, int unit) { const char *driver; DeviceState *dev; driver = bdrv_is_sg(dinfo->bdrv) ? 
\"scsi-generic\" : \"scsi-disk\"; dev = qdev_create(&bus->qbus, driver); qdev_prop_set_uint32(dev, \"scsi-id\", unit); qdev_prop_set_drive(dev, \"drive\", dinfo); if (qdev_init(dev) < 0) return NULL; return DO_UPCAST(SCSIDevice, qdev, dev); }"} {"target": 0, "idx": 5027, "func": "static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section) { XenIOState *state = container_of(listener, XenIOState, memory_listener); state->log_for_dirtybit = NULL; /* Disable dirty bit tracking */ xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL); }"} {"target": 0, "idx": 5036, "func": "static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src, MSIMessage *dst, uint16_t sid) { return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu), src, dst); }"} {"target": 0, "idx": 5041, "func": "static void core_region_nop(MemoryListener *listener, MemoryRegionSection *section) { cpu_register_physical_memory_log(section, section->readonly); }"} {"target": 0, "idx": 5048, "func": "float32 uint64_to_float32( uint64 a STATUS_PARAM ) { int8 shiftCount; if ( a == 0 ) return 0; shiftCount = countLeadingZeros64( a ) - 40; if ( 0 <= shiftCount ) { return packFloat32( 1 > 0, 0x95 - shiftCount, a< 0, 0x9C - shiftCount, a STATUS_VAR ); } }"} {"target": 0, "idx": 5051, "func": "static void do_change_vnc(const char *target, const char *arg) { if (strcmp(target, \"passwd\") == 0 || strcmp(target, \"password\") == 0) { char password[9]; if (arg) { strncpy(password, arg, sizeof(password)); password[sizeof(password) - 1] = '\\0'; } else monitor_readline(\"Password: \", 1, password, sizeof(password)); if (vnc_display_password(NULL, password) < 0) term_printf(\"could not set VNC server password\\n\"); } else { if (vnc_display_open(NULL, target) < 0) term_printf(\"could not start VNC server on %s\\n\", target); } }"} {"target": 0, "idx": 5052, "func": "static struct scsi_task *iscsi_do_inquiry(struct iscsi_context *iscsi, int lun, int evpd, int pc, Error **errp) { int full_size; struct scsi_task *task = NULL; task = iscsi_inquiry_sync(iscsi, lun, evpd, pc, 64); if (task == NULL || task->status != SCSI_STATUS_GOOD) { goto fail; } full_size = scsi_datain_getfullsize(task); if (full_size > task->datain.size) { scsi_free_scsi_task(task); /* we need more data for the full list */ task = iscsi_inquiry_sync(iscsi, lun, evpd, pc, full_size); if (task == NULL || task->status != SCSI_STATUS_GOOD) { goto fail; } } return task; fail: error_setg(errp, \"iSCSI: Inquiry command failed : %s\", iscsi_get_error(iscsi)); if (task) { scsi_free_scsi_task(task); return NULL; } return NULL; }"} {"target": 0, "idx": 5055, "func": "av_cold void rgb2rgb_init_x86(void) { #if HAVE_INLINE_ASM int cpu_flags = av_get_cpu_flags(); if (cpu_flags & AV_CPU_FLAG_MMX) rgb2rgb_init_MMX(); if (HAVE_AMD3DNOW && cpu_flags & AV_CPU_FLAG_3DNOW) rgb2rgb_init_3DNOW(); if (HAVE_MMXEXT && cpu_flags & AV_CPU_FLAG_MMXEXT) rgb2rgb_init_MMX2(); if (HAVE_SSE && cpu_flags & AV_CPU_FLAG_SSE2) rgb2rgb_init_SSE2(); #endif /* HAVE_INLINE_ASM */ }"} {"target": 0, "idx": 5066, "func": "static int mp3_header_compress(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe){ uint32_t header; int mode_extension; if(avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL){ av_log(avctx, AV_LOG_ERROR, \"not standards compliant\\n\"); return -1; } header = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; mode_extension= (header>>4)&3; if(ff_mpa_check_header(header) 
< 0 || (header&0x70000) != 0x30000){ *poutbuf= (uint8_t *) buf; *poutbuf_size= buf_size; av_log(avctx, AV_LOG_INFO, \"cannot compress %08X\\n\", header); return 0; } *poutbuf_size= buf_size - 4; *poutbuf= av_malloc(buf_size - 4 + FF_INPUT_BUFFER_PADDING_SIZE); memcpy(*poutbuf, buf + 4, buf_size - 4 + FF_INPUT_BUFFER_PADDING_SIZE); if(avctx->channels==2){ if((header & (3<<19)) != 3<<19){ (*poutbuf)[1] &= 0x3F; (*poutbuf)[1] |= mode_extension<<6; FFSWAP(int, (*poutbuf)[1], (*poutbuf)[2]); }else{ (*poutbuf)[1] &= 0x8F; (*poutbuf)[1] |= mode_extension<<4; } } return 1; }"} {"target": 1, "idx": 5072, "func": "static void virtio_net_add_queue(VirtIONet *n, int index) { VirtIODevice *vdev = VIRTIO_DEVICE(n); n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx); if (n->net_conf.tx && !strcmp(n->net_conf.tx, \"timer\")) { n->vqs[index].tx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer); n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer, &n->vqs[index]); } else { n->vqs[index].tx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh); n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]); } n->vqs[index].tx_waiting = 0; n->vqs[index].n = n; }"} {"target": 0, "idx": 5081, "func": "static int grackle_pci_host_init(PCIDevice *d) { pci_config_set_vendor_id(d->config, PCI_VENDOR_ID_MOTOROLA); pci_config_set_device_id(d->config, PCI_DEVICE_ID_MOTOROLA_MPC106); d->config[0x08] = 0x00; // revision d->config[0x09] = 0x01; pci_config_set_class(d->config, PCI_CLASS_BRIDGE_HOST); return 0; }"} {"target": 0, "idx": 5085, "func": "static void icp_realize(DeviceState *dev, Error **errp) { ICPState *icp = ICP(dev); ICPStateClass *icpc = ICP_GET_CLASS(dev); Object *obj; Error *err = NULL; obj = object_property_get_link(OBJECT(dev), ICP_PROP_XICS, &err); if (!obj) { error_setg(errp, \"%s: required link '\" ICP_PROP_XICS \"' not found: %s\", __func__, error_get_pretty(err)); return; } icp->xics = XICS_FABRIC(obj); if (icpc->realize) { icpc->realize(dev, errp); } qemu_register_reset(icp_reset, dev); }"} {"target": 0, "idx": 5087, "func": "static void test_visitor_in_native_list_int(TestInputVisitorData *data, const void *unused) { test_native_list_integer_helper(data, unused, USER_DEF_NATIVE_LIST_UNION_KIND_INTEGER); }"} {"target": 0, "idx": 5091, "func": "static void test_qemu_strtoull_max(void) { char *str = g_strdup_printf(\"%llu\", ULLONG_MAX); char f = 'X'; const char *endptr = &f; uint64_t res = 999; int err; err = qemu_strtoull(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, ULLONG_MAX); g_assert(endptr == str + strlen(str)); g_free(str); }"} {"target": 0, "idx": 5098, "func": "static inline void* array_get_next(array_t* array) { unsigned int next = array->next; void* result; if (array_ensure_allocated(array, next) < 0) return NULL; array->next = next + 1; result = array_get(array, next); return result; }"} {"target": 0, "idx": 5101, "func": "static int raw_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors) { return bdrv_read(bs->file, sector_num, buf, nb_sectors); }"} {"target": 1, "idx": 5116, "func": "static uint32_t omap2_gpio_module_readp(void *opaque, target_phys_addr_t addr) { return omap2_gpio_module_readp(opaque, addr) >> ((addr & 3) << 3); }"} {"target": 1, "idx": 5122, "func": "static void guest_fsfreeze_cleanup(void) { int64_t ret; Error *err = NULL; if (ga_is_frozen(ga_state) == GUEST_FSFREEZE_STATUS_FROZEN) { ret = qmp_guest_fsfreeze_thaw(&err); if (ret < 0 || err) { 
slog(\"failed to clean up frozen filesystems\"); } } }"} {"target": 1, "idx": 5137, "func": "static int set_segment_filename(AVFormatContext *s) { SegmentContext *seg = s->priv_data; AVFormatContext *oc = seg->avf; size_t size; if (seg->segment_idx_wrap) seg->segment_idx %= seg->segment_idx_wrap; if (seg->use_strftime) { time_t now0; struct tm *tm, tmpbuf; time(&now0); tm = localtime_r(&now0, &tmpbuf); if (!strftime(oc->filename, sizeof(oc->filename), s->filename, tm)) { av_log(oc, AV_LOG_ERROR, \"Could not get segment filename with strftime\\n\"); return AVERROR(EINVAL); } } else if (av_get_frame_filename(oc->filename, sizeof(oc->filename), s->filename, seg->segment_idx) < 0) { av_log(oc, AV_LOG_ERROR, \"Invalid segment filename template '%s'\\n\", s->filename); return AVERROR(EINVAL); } /* copy modified name in list entry */ size = strlen(av_basename(oc->filename)) + 1; if (seg->entry_prefix) size += strlen(seg->entry_prefix); seg->cur_entry.filename = av_mallocz(size); if (!seg->cur_entry.filename) return AVERROR(ENOMEM); snprintf(seg->cur_entry.filename, size, \"%s%s\", seg->entry_prefix ? seg->entry_prefix : \"\", av_basename(oc->filename)); return 0; }"} {"target": 1, "idx": 5156, "func": "petalogix_s3adsp1800_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; DeviceState *dev; MicroBlazeCPU *cpu; DriveInfo *dinfo; int i; hwaddr ddr_base = MEMORY_BASEADDR; MemoryRegion *phys_lmb_bram = g_new(MemoryRegion, 1); MemoryRegion *phys_ram = g_new(MemoryRegion, 1); qemu_irq irq[32]; MemoryRegion *sysmem = get_system_memory(); cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU)); object_property_set_bool(OBJECT(cpu), true, \"realized\", &error_abort); /* Attach emulated BRAM through the LMB. */ memory_region_init_ram(phys_lmb_bram, NULL, \"petalogix_s3adsp1800.lmb_bram\", LMB_BRAM_SIZE, &error_abort); vmstate_register_ram_global(phys_lmb_bram); memory_region_add_subregion(sysmem, 0x00000000, phys_lmb_bram); memory_region_init_ram(phys_ram, NULL, \"petalogix_s3adsp1800.ram\", ram_size, &error_abort); vmstate_register_ram_global(phys_ram); memory_region_add_subregion(sysmem, ddr_base, phys_ram); dinfo = drive_get(IF_PFLASH, 0, 0); pflash_cfi01_register(FLASH_BASEADDR, NULL, \"petalogix_s3adsp1800.flash\", FLASH_SIZE, dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, (64 * 1024), FLASH_SIZE >> 16, 1, 0x89, 0x18, 0x0000, 0x0, 1); dev = qdev_create(NULL, \"xlnx.xps-intc\"); qdev_prop_set_uint32(dev, \"kind-of-intr\", 1 << ETHLITE_IRQ | 1 << UARTLITE_IRQ); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(DEVICE(cpu), MB_CPU_IRQ)); for (i = 0; i < 32; i++) { irq[i] = qdev_get_gpio_in(dev, i); } sysbus_create_simple(\"xlnx.xps-uartlite\", UARTLITE_BASEADDR, irq[UARTLITE_IRQ]); /* 2 timers at irq 2 @ 62 Mhz. 
*/ dev = qdev_create(NULL, \"xlnx.xps-timer\"); qdev_prop_set_uint32(dev, \"one-timer-only\", 0); qdev_prop_set_uint32(dev, \"clock-frequency\", 62 * 1000000); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]); qemu_check_nic_model(&nd_table[0], \"xlnx.xps-ethernetlite\"); dev = qdev_create(NULL, \"xlnx.xps-ethernetlite\"); qdev_set_nic_properties(dev, &nd_table[0]); qdev_prop_set_uint32(dev, \"tx-ping-pong\", 0); qdev_prop_set_uint32(dev, \"rx-ping-pong\", 0); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ETHLITE_BASEADDR); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[ETHLITE_IRQ]); microblaze_load_kernel(cpu, ddr_base, ram_size, machine->initrd_filename, BINARY_DEVICE_TREE_FILE, NULL); }"} {"target": 1, "idx": 5157, "func": "static ssize_t flush_buf(VirtIOSerialPort *port, const uint8_t *buf, ssize_t len) { VirtConsole *vcon = VIRTIO_CONSOLE(port); ssize_t ret; if (!vcon->chr) { /* If there's no backend, we can just say we consumed all data. */ return len; } ret = qemu_chr_fe_write(vcon->chr, buf, len); trace_virtio_console_flush_buf(port->id, len, ret); if (ret < len) { VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_GET_CLASS(port); /* * Ideally we'd get a better error code than just -1, but * that's what the chardev interface gives us right now. If * we had a finer-grained message, like -EPIPE, we could close * this connection. */ if (ret < 0) ret = 0; if (!k->is_console) { virtio_serial_throttle_port(port, true); if (!vcon->watch) { vcon->watch = qemu_chr_fe_add_watch(vcon->chr, G_IO_OUT|G_IO_HUP, chr_write_unblocked, vcon); } } } return ret; }"} {"target": 1, "idx": 5158, "func": "static void tcp_chr_read(void *opaque) { CharDriverState *chr = opaque; TCPCharDriver *s = chr->opaque; uint8_t buf[READ_BUF_LEN]; int len, size; if (!s->connected || s->max_size <= 0) return; len = sizeof(buf); if (len > s->max_size) len = s->max_size; size = tcp_chr_recv(chr, (void *)buf, len); if (size == 0) { /* connection closed */ s->connected = 0; if (s->listen_fd >= 0) { qemu_set_fd_handler(s->listen_fd, tcp_chr_accept, NULL, chr); } qemu_set_fd_handler(s->fd, NULL, NULL, NULL); closesocket(s->fd); s->fd = -1; qemu_chr_event(chr, CHR_EVENT_CLOSED); } else if (size > 0) { if (s->do_telnetopt) tcp_chr_process_IAC_bytes(chr, s, buf, &size); if (size > 0) qemu_chr_read(chr, buf, size); if (s->msgfd != -1) { close(s->msgfd); s->msgfd = -1; } } }"} {"target": 1, "idx": 5176, "func": "static int hwmap_filter_frame(AVFilterLink *link, AVFrame *input) { AVFilterContext *avctx = link->dst; AVFilterLink *outlink = avctx->outputs[0]; HWMapContext *ctx = avctx->priv; AVFrame *map = NULL; int err; av_log(ctx, AV_LOG_DEBUG, \"Filter input: %s, %ux%u (%\"PRId64\").\\n\", av_get_pix_fmt_name(input->format), input->width, input->height, input->pts); map = av_frame_alloc(); if (!map) { err = AVERROR(ENOMEM); goto fail; } map->format = outlink->format; map->hw_frames_ctx = av_buffer_ref(ctx->hwframes_ref); if (!map->hw_frames_ctx) { err = AVERROR(ENOMEM); goto fail; } if (ctx->map_backwards && !input->hw_frames_ctx) { // If we mapped backwards from hardware to software, we need // to attach the hardware frame context to the input frame to // make the mapping visible to av_hwframe_map(). 
input->hw_frames_ctx = av_buffer_ref(ctx->hwframes_ref); if (!input->hw_frames_ctx) { err = AVERROR(ENOMEM); goto fail; } } err = av_hwframe_map(map, input, ctx->mode); if (err < 0) { av_log(avctx, AV_LOG_ERROR, \"Failed to map frame: %d.\\n\", err); goto fail; } err = av_frame_copy_props(map, input); if (err < 0) goto fail; av_frame_free(&input); av_log(ctx, AV_LOG_DEBUG, \"Filter output: %s, %ux%u (%\"PRId64\").\\n\", av_get_pix_fmt_name(map->format), map->width, map->height, map->pts); return ff_filter_frame(outlink, map); fail: av_frame_free(&input); av_frame_free(&map); return err; }"} {"target": 1, "idx": 5177, "func": "void virtio_panic(const char *string) { sclp_print(string); disabled_wait(); while (1) { } }"} {"target": 1, "idx": 5178, "func": "static int opus_decode_packet(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { OpusContext *c = avctx->priv_data; AVFrame *frame = data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; int coded_samples = 0; int decoded_samples = 0; int i, ret; for (i = 0; i < c->nb_streams; i++) { OpusStreamContext *s = &c->streams[i]; s->out[0] = s->out[1] = NULL; } /* decode the header of the first sub-packet to find out the sample count */ if (buf) { OpusPacket *pkt = &c->streams[0].packet; ret = ff_opus_parse_packet(pkt, buf, buf_size, c->nb_streams > 1); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Error parsing the packet header.\\n\"); return ret; } coded_samples += pkt->frame_count * pkt->frame_duration; c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config); } frame->nb_samples = coded_samples + c->streams[0].delayed_samples; /* no input or buffered data => nothing to do */ if (!frame->nb_samples) { *got_frame_ptr = 0; return 0; } /* setup the data buffers */ ret = ff_get_buffer(avctx, frame, 0); if (ret < 0) return ret; frame->nb_samples = 0; for (i = 0; i < avctx->channels; i++) { ChannelMap *map = &c->channel_maps[i]; if (!map->copy) c->streams[map->stream_idx].out[map->channel_idx] = (float*)frame->extended_data[i]; } for (i = 0; i < c->nb_streams; i++) c->streams[i].out_size = frame->linesize[0]; /* decode each sub-packet */ for (i = 0; i < c->nb_streams; i++) { OpusStreamContext *s = &c->streams[i]; if (i && buf) { ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->nb_streams - 1); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Error parsing the packet header.\\n\"); return ret; } if (coded_samples != s->packet.frame_count * s->packet.frame_duration) { av_log(avctx, AV_LOG_ERROR, \"Mismatching coded sample count in substream %d.\\n\", i); return AVERROR_INVALIDDATA; } s->silk_samplerate = get_silk_samplerate(s->packet.config); } ret = opus_decode_subpacket(&c->streams[i], buf, s->packet.data_size, coded_samples); if (ret < 0) return ret; if (decoded_samples && ret != decoded_samples) { av_log(avctx, AV_LOG_ERROR, \"Different numbers of decoded samples \" \"in a multi-channel stream\\n\"); return AVERROR_INVALIDDATA; } decoded_samples = ret; buf += s->packet.packet_size; buf_size -= s->packet.packet_size; } for (i = 0; i < avctx->channels; i++) { ChannelMap *map = &c->channel_maps[i]; /* handle copied channels */ if (map->copy) { memcpy(frame->extended_data[i], frame->extended_data[map->copy_idx], frame->linesize[0]); } else if (map->silence) { memset(frame->extended_data[i], 0, frame->linesize[0]); } if (c->gain_i) { c->fdsp->vector_fmul_scalar((float*)frame->extended_data[i], (float*)frame->extended_data[i], c->gain, FFALIGN(decoded_samples, 8)); } } frame->nb_samples = 
decoded_samples; *got_frame_ptr = !!decoded_samples; return avpkt->size; }"} {"target": 1, "idx": 5180, "func": "bool migrate_zero_blocks(void) { MigrationState *s; s = migrate_get_current(); return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS]; }"} {"target": 0, "idx": 5195, "func": "static void av_always_inline filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int dir) { MpegEncContext * const s = &h->s; int edge; const int mbm_xy = dir == 0 ? mb_xy -1 : h->top_mb_xy; const int mbm_type = s->current_picture.mb_type[mbm_xy]; int (*ref2frm) [64] = h->ref2frm[ h->slice_num &(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2); int (*ref2frmm)[64] = h->ref2frm[ h->slice_table[mbm_xy]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2); int start = h->slice_table[mbm_xy] == 0xFFFF ? 1 : 0; const int edges = (mb_type & (MB_TYPE_16x16|MB_TYPE_SKIP)) == (MB_TYPE_16x16|MB_TYPE_SKIP) ? 1 : 4; // how often to recheck mv-based bS when iterating between edges const int mask_edge = (mb_type & (MB_TYPE_16x16 | (MB_TYPE_16x8 << dir))) ? 3 : (mb_type & (MB_TYPE_8x16 >> dir)) ? 1 : 0; // how often to recheck mv-based bS when iterating along each edge const int mask_par0 = mb_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)); if (first_vertical_edge_done) { start = 1; } if (h->deblocking_filter==2 && h->slice_table[mbm_xy] != h->slice_table[mb_xy]) start = 1; if (FRAME_MBAFF && (dir == 1) && ((mb_y&1) == 0) && start == 0 && !IS_INTERLACED(mb_type) && IS_INTERLACED(mbm_type) ) { // This is a special case in the norm where the filtering must // be done twice (one each of the field) even if we are in a // frame macroblock. // static const int nnz_idx[4] = {4,5,6,3}; unsigned int tmp_linesize = 2 * linesize; unsigned int tmp_uvlinesize = 2 * uvlinesize; int mbn_xy = mb_xy - 2 * s->mb_stride; int qp; int i, j; int16_t bS[4]; for(j=0; j<2; j++, mbn_xy += s->mb_stride){ if( IS_INTRA(mb_type) || IS_INTRA(s->current_picture.mb_type[mbn_xy]) ) { bS[0] = bS[1] = bS[2] = bS[3] = 3; } else { const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy]; for( i = 0; i < 4; i++ ) { if( h->non_zero_count_cache[scan8[0]+i] != 0 || mbn_nnz[nnz_idx[i]] != 0 ) bS[i] = 2; else bS[i] = 1; } } // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; tprintf(s->avctx, \"filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d\", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, \" bS[%d]:%d\", i, bS[i]); tprintf(s->avctx, \"\\n\"); } filter_mb_edgeh( h, &img_y[j*linesize], tmp_linesize, bS, qp ); filter_mb_edgech( h, &img_cb[j*uvlinesize], tmp_uvlinesize, bS, ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); filter_mb_edgech( h, &img_cr[j*uvlinesize], tmp_uvlinesize, bS, ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); } start = 1; } /* Calculate bS */ for( edge = start; edge < edges; edge++ ) { /* mbn_xy: neighbor macroblock */ const int mbn_xy = edge > 0 ? mb_xy : mbm_xy; const int mbn_type = s->current_picture.mb_type[mbn_xy]; int (*ref2frmn)[64] = edge > 0 ? 
ref2frm : ref2frmm; int16_t bS[4]; int qp; if( (edge&1) && IS_8x8DCT(mb_type) ) continue; if( IS_INTRA(mb_type) || IS_INTRA(mbn_type) ) { int value; if (edge == 0) { if ( (!IS_INTERLACED(mb_type) && !IS_INTERLACED(mbm_type)) || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0)) ) { value = 4; } else { value = 3; } } else { value = 3; } bS[0] = bS[1] = bS[2] = bS[3] = value; } else { int i, l; int mv_done; if( edge & mask_edge ) { bS[0] = bS[1] = bS[2] = bS[3] = 0; mv_done = 1; } else if( FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbn_type)) { bS[0] = bS[1] = bS[2] = bS[3] = 1; mv_done = 1; } else if( mask_par0 && (edge || (mbn_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) { int b_idx= 8 + 4 + edge * (dir ? 8:1); int bn_idx= b_idx - (dir ? 8:1); int v = 0; for( l = 0; !v && l < 1 + (h->slice_type_nos == FF_B_TYPE); l++ ) { v |= ref2frm[l][h->ref_cache[l][b_idx]] != ref2frmn[l][h->ref_cache[l][bn_idx]] || FFABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 || FFABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= mvy_limit; } if(h->slice_type_nos == FF_B_TYPE && v){ v=0; for( l = 0; !v && l < 2; l++ ) { int ln= 1-l; v |= ref2frm[l][h->ref_cache[l][b_idx]] != ref2frmn[ln][h->ref_cache[ln][bn_idx]] || FFABS( h->mv_cache[l][b_idx][0] - h->mv_cache[ln][bn_idx][0] ) >= 4 || FFABS( h->mv_cache[l][b_idx][1] - h->mv_cache[ln][bn_idx][1] ) >= mvy_limit; } } bS[0] = bS[1] = bS[2] = bS[3] = v; mv_done = 1; } else mv_done = 0; for( i = 0; i < 4; i++ ) { int x = dir == 0 ? edge : i; int y = dir == 0 ? i : edge; int b_idx= 8 + 4 + x + 8*y; int bn_idx= b_idx - (dir ? 8:1); if( h->non_zero_count_cache[b_idx] != 0 || h->non_zero_count_cache[bn_idx] != 0 ) { bS[i] = 2; } else if(!mv_done) { bS[i] = 0; for( l = 0; l < 1 + (h->slice_type_nos == FF_B_TYPE); l++ ) { if( ref2frm[l][h->ref_cache[l][b_idx]] != ref2frmn[l][h->ref_cache[l][bn_idx]] || FFABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 || FFABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= mvy_limit ) { bS[i] = 1; break; } } if(h->slice_type_nos == FF_B_TYPE && bS[i]){ bS[i] = 0; for( l = 0; l < 2; l++ ) { int ln= 1-l; if( ref2frm[l][h->ref_cache[l][b_idx]] != ref2frmn[ln][h->ref_cache[ln][bn_idx]] || FFABS( h->mv_cache[l][b_idx][0] - h->mv_cache[ln][bn_idx][0] ) >= 4 || FFABS( h->mv_cache[l][b_idx][1] - h->mv_cache[ln][bn_idx][1] ) >= mvy_limit ) { bS[i] = 1; break; } } } } } if(bS[0]+bS[1]+bS[2]+bS[3] == 0) continue; } /* Filter edge */ // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. 
qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; //tprintf(s->avctx, \"filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\\n\", mb_x, mb_y, dir, edge, qp, h->chroma_qp, s->current_picture.qscale_table[mbn_xy]); tprintf(s->avctx, \"filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d\", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, \" bS[%d]:%d\", i, bS[i]); tprintf(s->avctx, \"\\n\"); } if( dir == 0 ) { filter_mb_edgev( h, &img_y[4*edge], linesize, bS, qp ); if( (edge&1) == 0 ) { filter_mb_edgecv( h, &img_cb[2*edge], uvlinesize, bS, ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); filter_mb_edgecv( h, &img_cr[2*edge], uvlinesize, bS, ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); } } else { filter_mb_edgeh( h, &img_y[4*edge*linesize], linesize, bS, qp ); if( (edge&1) == 0 ) { filter_mb_edgech( h, &img_cb[2*edge*uvlinesize], uvlinesize, bS, ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); filter_mb_edgech( h, &img_cr[2*edge*uvlinesize], uvlinesize, bS, ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); } } } }"} {"target": 0, "idx": 5197, "func": "static int ehci_state_executing(EHCIQueue *q) { EHCIPacket *p = QTAILQ_FIRST(&q->packets); int again = 0; assert(p != NULL); assert(p->qtdaddr == q->qtdaddr); ehci_execute_complete(q); if (p->usb_status == USB_RET_ASYNC) { goto out; } if (p->usb_status == USB_RET_PROCERR) { again = -1; goto out; } // 4.10.3 if (!q->async) { int transactCtr = get_field(q->qh.epcap, QH_EPCAP_MULT); transactCtr--; set_field(&q->qh.epcap, transactCtr, QH_EPCAP_MULT); // 4.10.3, bottom of page 82, should exit this state when transaction // counter decrements to 0 } /* 4.10.5 */ if (p->usb_status == USB_RET_NAK) { ehci_set_state(q->ehci, q->async, EST_HORIZONTALQH); } else { ehci_set_state(q->ehci, q->async, EST_WRITEBACK); } again = 1; out: ehci_flush_qh(q); return again; }"} {"target": 0, "idx": 5230, "func": "static void qemu_spice_create_update(SimpleSpiceDisplay *ssd) { static const int blksize = 32; int blocks = (surface_width(ssd->ds) + blksize - 1) / blksize; int dirty_top[blocks]; int y, yoff, x, xoff, blk, bw; int bpp = surface_bytes_per_pixel(ssd->ds); uint8_t *guest, *mirror; if (qemu_spice_rect_is_empty(&ssd->dirty)) { return; }; for (blk = 0; blk < blocks; blk++) { dirty_top[blk] = -1; } guest = surface_data(ssd->ds); mirror = (void *)pixman_image_get_data(ssd->mirror); for (y = ssd->dirty.top; y < ssd->dirty.bottom; y++) { yoff = y * surface_stride(ssd->ds); for (x = ssd->dirty.left; x < ssd->dirty.right; x += blksize) { xoff = x * bpp; blk = x / blksize; bw = MIN(blksize, ssd->dirty.right - x); if (memcmp(guest + yoff + xoff, mirror + yoff + xoff, bw * bpp) == 0) { if (dirty_top[blk] != -1) { QXLRect update = { .top = dirty_top[blk], .bottom = y, .left = x, .right = x + bw, }; qemu_spice_create_one_update(ssd, &update); dirty_top[blk] = -1; } } else { if (dirty_top[blk] == -1) { dirty_top[blk] = y; } } } } for (x = ssd->dirty.left; x < ssd->dirty.right; x += blksize) { blk = x / blksize; bw = MIN(blksize, ssd->dirty.right - x); if (dirty_top[blk] != -1) { QXLRect update = { .top = dirty_top[blk], .bottom = ssd->dirty.bottom, .left = x, .right = x + bw, }; qemu_spice_create_one_update(ssd, &update); dirty_top[blk] = -1; } } memset(&ssd->dirty, 0, 
sizeof(ssd->dirty)); }"} {"target": 0, "idx": 5239, "func": "void tcp_connect(struct socket *inso) { Slirp *slirp = inso->slirp; struct socket *so; struct sockaddr_in addr; socklen_t addrlen = sizeof(struct sockaddr_in); struct tcpcb *tp; int s, opt; DEBUG_CALL(\"tcp_connect\"); DEBUG_ARG(\"inso = %p\", inso); /* * If it's an SS_ACCEPTONCE socket, no need to socreate() * another socket, just use the accept() socket. */ if (inso->so_state & SS_FACCEPTONCE) { /* FACCEPTONCE already have a tcpcb */ so = inso; } else { so = socreate(slirp); if (so == NULL) { /* If it failed, get rid of the pending connection */ closesocket(accept(inso->s, (struct sockaddr *)&addr, &addrlen)); return; } if (tcp_attach(so) < 0) { free(so); /* NOT sofree */ return; } so->so_lfamily = AF_INET; so->so_laddr = inso->so_laddr; so->so_lport = inso->so_lport; } tcp_mss(sototcpcb(so), 0); s = accept(inso->s, (struct sockaddr *)&addr, &addrlen); if (s < 0) { tcp_close(sototcpcb(so)); /* This will sofree() as well */ return; } qemu_set_nonblock(s); socket_set_fast_reuse(s); opt = 1; qemu_setsockopt(s, SOL_SOCKET, SO_OOBINLINE, &opt, sizeof(int)); socket_set_nodelay(s); so->so_ffamily = AF_INET; so->so_fport = addr.sin_port; so->so_faddr = addr.sin_addr; /* Translate connections from localhost to the real hostname */ if (so->so_faddr.s_addr == 0 || (so->so_faddr.s_addr & loopback_mask) == (loopback_addr.s_addr & loopback_mask)) { so->so_faddr = slirp->vhost_addr; } /* Close the accept() socket, set right state */ if (inso->so_state & SS_FACCEPTONCE) { /* If we only accept once, close the accept() socket */ closesocket(so->s); /* Don't select it yet, even though we have an FD */ /* if it's not FACCEPTONCE, it's already NOFDREF */ so->so_state = SS_NOFDREF; } so->s = s; so->so_state |= SS_INCOMING; so->so_iptos = tcp_tos(so); tp = sototcpcb(so); tcp_template(tp); tp->t_state = TCPS_SYN_SENT; tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT; tp->iss = slirp->tcp_iss; slirp->tcp_iss += TCP_ISSINCR/2; tcp_sendseqinit(tp); tcp_output(tp); }"} {"target": 0, "idx": 5248, "func": "static int vorbis_parse_setup_hdr_modes(vorbis_context *vc) { GetBitContext *gb=&vc->gb; uint_fast8_t i; vc->mode_count=get_bits(gb, 6)+1; vc->modes=(vorbis_mode *)av_mallocz(vc->mode_count * sizeof(vorbis_mode)); AV_DEBUG(\" There are %d modes.\\n\", vc->mode_count); for(i=0;i<vc->mode_count;++i) { vorbis_mode *mode_setup=&vc->modes[i]; mode_setup->blockflag=get_bits1(gb); mode_setup->windowtype=get_bits(gb, 16); //FIXME check mode_setup->transformtype=get_bits(gb, 16); //FIXME check mode_setup->mapping=get_bits(gb, 8); //FIXME check AV_DEBUG(\" %d mode: blockflag %d, windowtype %d, transformtype %d, mapping %d \\n\", i, mode_setup->blockflag, mode_setup->windowtype, mode_setup->transformtype, mode_setup->mapping); } return 0; }"} {"target": 0, "idx": 5252, "func": "static void check_exception(sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { uint32_t mask, buf, len; uint64_t xinfo; if ((nargs < 6) || (nargs > 7) || nret != 1) { rtas_st(rets, 0, -3); return; } xinfo = rtas_ld(args, 1); mask = rtas_ld(args, 2); buf = rtas_ld(args, 4); len = rtas_ld(args, 5); if (nargs == 7) { xinfo |= (uint64_t)rtas_ld(args, 6) << 32; } if ((mask & EVENT_MASK_EPOW) && pending_epow) { if (sizeof(*pending_epow) < len) { len = sizeof(*pending_epow); } cpu_physical_memory_write(buf, pending_epow, len); g_free(pending_epow); pending_epow = NULL; rtas_st(rets, 0, 0); } else { rtas_st(rets, 0, 1); } }"} {"target": 1, "idx": 5255, 
"func": "static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets, GArray *table_data, BIOSLinker *linker) { GArray *structures = nvdimm_build_device_structure(device_list); unsigned int header; acpi_add_table(table_offsets, table_data); /* NFIT header. */ header = table_data->len; acpi_data_push(table_data, sizeof(NvdimmNfitHeader)); /* NVDIMM device structures. */ g_array_append_vals(table_data, structures->data, structures->len); build_header(linker, table_data, (void *)(table_data->data + header), \"NFIT\", sizeof(NvdimmNfitHeader) + structures->len, 1, NULL, NULL); g_array_free(structures, true); }"} {"target": 0, "idx": 5264, "func": "const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length) { int i, si, di; uint8_t *dst; int bufidx; // src[0]&0x80; // forbidden bit h->nal_ref_idc = src[0] >> 5; h->nal_unit_type = src[0] & 0x1F; src++; length--; #define STARTCODE_TEST \\ if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \\ if (src[i + 2] != 3) { \\ /* startcode, so we must be past the end */ \\ length = i; \\ } \\ break; \\ } #if HAVE_FAST_UNALIGNED #define FIND_FIRST_ZERO \\ if (i > 0 && !src[i]) \\ i--; \\ while (src[i]) \\ i++ #if HAVE_FAST_64BIT for (i = 0; i + 1 < length; i += 9) { if (!((~AV_RN64A(src + i) & (AV_RN64A(src + i) - 0x0100010001000101ULL)) & 0x8000800080008080ULL)) continue; FIND_FIRST_ZERO; STARTCODE_TEST; i -= 7; } #else for (i = 0; i + 1 < length; i += 5) { if (!((~AV_RN32A(src + i) & (AV_RN32A(src + i) - 0x01000101U)) & 0x80008080U)) continue; FIND_FIRST_ZERO; STARTCODE_TEST; i -= 3; } #endif #else for (i = 0; i + 1 < length; i += 2) { if (src[i]) continue; if (i > 0 && src[i - 1] == 0) i--; STARTCODE_TEST; } #endif if (i >= length - 1) { // no escaped 0 *dst_length = length; *consumed = length + 1; // +1 for the header return src; } // use second escape buffer for inter data bufidx = h->nal_unit_type == NAL_DPC ? 
1 : 0; av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length + FF_INPUT_BUFFER_PADDING_SIZE); dst = h->rbsp_buffer[bufidx]; if (dst == NULL) return NULL; memcpy(dst, src, i); si = di = i; while (si + 2 < length) { // remove escapes (very rare 1:2^22) if (src[si + 2] > 3) { dst[di++] = src[si++]; dst[di++] = src[si++]; } else if (src[si] == 0 && src[si + 1] == 0) { if (src[si + 2] == 3) { // escape dst[di++] = 0; dst[di++] = 0; si += 3; continue; } else // next start code goto nsc; } dst[di++] = src[si++]; } while (si < length) dst[di++] = src[si++]; nsc: memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE); *dst_length = di; *consumed = si + 1; // +1 for the header /* FIXME store exact number of bits in the getbitcontext * (it is needed for decoding) */ return dst; }"} {"target": 1, "idx": 5275, "func": "static coroutine_fn int qcow2_co_writev(BlockDriverState *bs, int64_t sector_num, int remaining_sectors, QEMUIOVector *qiov) { BDRVQcowState *s = bs->opaque; int index_in_cluster; int ret; int cur_nr_sectors; /* number of sectors in current iteration */ uint64_t cluster_offset; QEMUIOVector hd_qiov; uint64_t bytes_done = 0; uint8_t *cluster_data = NULL; QCowL2Meta *l2meta = NULL; trace_qcow2_writev_start_req(qemu_coroutine_self(), sector_num, remaining_sectors); qemu_iovec_init(&hd_qiov, qiov->niov); s->cluster_cache_offset = -1; /* disable compressed cache */ qemu_co_mutex_lock(&s->lock); while (remaining_sectors != 0) { l2meta = NULL; trace_qcow2_writev_start_part(qemu_coroutine_self()); index_in_cluster = sector_num & (s->cluster_sectors - 1); cur_nr_sectors = remaining_sectors; if (s->crypt_method && cur_nr_sectors > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors - index_in_cluster) { cur_nr_sectors = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors - index_in_cluster; } ret = qcow2_alloc_cluster_offset(bs, sector_num << 9, &cur_nr_sectors, &cluster_offset, &l2meta); if (ret < 0) { goto fail; } assert((cluster_offset & 511) == 0); qemu_iovec_reset(&hd_qiov); qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_nr_sectors * 512); if (s->crypt_method) { if (!cluster_data) { cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); } assert(hd_qiov.size <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size); qcow2_encrypt_sectors(s, sector_num, cluster_data, cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key); qemu_iovec_reset(&hd_qiov); qemu_iovec_add(&hd_qiov, cluster_data, cur_nr_sectors * 512); } ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset + index_in_cluster * BDRV_SECTOR_SIZE, cur_nr_sectors * BDRV_SECTOR_SIZE); if (ret < 0) { goto fail; } qemu_co_mutex_unlock(&s->lock); BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); trace_qcow2_writev_data(qemu_coroutine_self(), (cluster_offset >> 9) + index_in_cluster); ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + index_in_cluster, cur_nr_sectors, &hd_qiov); qemu_co_mutex_lock(&s->lock); if (ret < 0) { goto fail; } while (l2meta != NULL) { QCowL2Meta *next; ret = qcow2_alloc_cluster_link_l2(bs, l2meta); if (ret < 0) { goto fail; } /* Take the request off the list of running requests */ if (l2meta->nb_clusters != 0) { QLIST_REMOVE(l2meta, next_in_flight); } qemu_co_queue_restart_all(&l2meta->dependent_requests); next = l2meta->next; g_free(l2meta); l2meta = next; } remaining_sectors -= cur_nr_sectors; sector_num += cur_nr_sectors; bytes_done += cur_nr_sectors * 512; trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_nr_sectors); } ret = 0; 
fail: qemu_co_mutex_unlock(&s->lock); while (l2meta != NULL) { QCowL2Meta *next; if (l2meta->nb_clusters != 0) { QLIST_REMOVE(l2meta, next_in_flight); } qemu_co_queue_restart_all(&l2meta->dependent_requests); next = l2meta->next; g_free(l2meta); l2meta = next; } qemu_iovec_destroy(&hd_qiov); qemu_vfree(cluster_data); trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); return ret; }"} {"target": 0, "idx": 5289, "func": "do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send) { #if defined CONFIG_IOVEC && defined CONFIG_POSIX ssize_t ret; struct msghdr msg; memset(&msg, 0, sizeof(msg)); msg.msg_iov = iov; msg.msg_iovlen = iov_cnt; do { ret = do_send ? sendmsg(sockfd, &msg, 0) : recvmsg(sockfd, &msg, 0); } while (ret < 0 && errno == EINTR); return ret; #else /* else send piece-by-piece */ /*XXX Note: windows has WSASend() and WSARecv() */ unsigned i = 0; ssize_t ret = 0; while (i < iov_cnt) { ssize_t r = do_send ? send(sockfd, iov[i].iov_base, iov[i].iov_len, 0) : recv(sockfd, iov[i].iov_base, iov[i].iov_len, 0); if (r > 0) { ret += r; } else if (!r) { break; } else if (errno == EINTR) { continue; } else { /* else it is some \"other\" error, * only return if there was no data processed. */ if (ret == 0) { ret = -1; } break; } i++; } return ret; #endif }"} {"target": 0, "idx": 5290, "func": "START_TEST(keyword_literal) { QObject *obj; QBool *qbool; QString *str; obj = qobject_from_json(\"true\"); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QBOOL); qbool = qobject_to_qbool(obj); fail_unless(qbool_get_int(qbool) != 0); str = qobject_to_json(obj); fail_unless(strcmp(qstring_get_str(str), \"true\") == 0); QDECREF(str); QDECREF(qbool); obj = qobject_from_json(\"false\"); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QBOOL); qbool = qobject_to_qbool(obj); fail_unless(qbool_get_int(qbool) == 0); str = qobject_to_json(obj); fail_unless(strcmp(qstring_get_str(str), \"false\") == 0); QDECREF(str); QDECREF(qbool); obj = qobject_from_jsonf(\"%i\", false); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QBOOL); qbool = qobject_to_qbool(obj); fail_unless(qbool_get_int(qbool) == 0); QDECREF(qbool); obj = qobject_from_jsonf(\"%i\", true); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QBOOL); qbool = qobject_to_qbool(obj); fail_unless(qbool_get_int(qbool) != 0); QDECREF(qbool); }"} {"target": 0, "idx": 5292, "func": "static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order, uint64_t param, uint64_t *status_reg) { SigpInfo si = { .param = param, .status_reg = status_reg, }; /* cpu available? 
*/ if (dst_cpu == NULL) { return SIGP_CC_NOT_OPERATIONAL; } /* only resets can break pending orders */ if (dst_cpu->env.sigp_order != 0 && order != SIGP_CPU_RESET && order != SIGP_INITIAL_CPU_RESET) { return SIGP_CC_BUSY; } switch (order) { case SIGP_START: run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si)); break; case SIGP_STOP: run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si)); break; case SIGP_RESTART: run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si)); break; case SIGP_STOP_STORE_STATUS: run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si)); break; case SIGP_STORE_STATUS_ADDR: run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si)); break; case SIGP_STORE_ADTL_STATUS: run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si)); break; case SIGP_SET_PREFIX: run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si)); break; case SIGP_INITIAL_CPU_RESET: run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si)); break; case SIGP_CPU_RESET: run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si)); break; default: set_sigp_status(&si, SIGP_STAT_INVALID_ORDER); } return si.cc; }"} {"target": 0, "idx": 5333, "func": "static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics, GetBitContext *gb) { if (get_bits1(gb)) { av_log(ac->avctx, AV_LOG_ERROR, \"Reserved bit set.\\n\"); return AVERROR_INVALIDDATA; } ics->window_sequence[1] = ics->window_sequence[0]; ics->window_sequence[0] = get_bits(gb, 2); ics->use_kb_window[1] = ics->use_kb_window[0]; ics->use_kb_window[0] = get_bits1(gb); ics->num_window_groups = 1; ics->group_len[0] = 1; if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) { int i; ics->max_sfb = get_bits(gb, 4); for (i = 0; i < 7; i++) { if (get_bits1(gb)) { ics->group_len[ics->num_window_groups - 1]++; } else { ics->num_window_groups++; ics->group_len[ics->num_window_groups - 1] = 1; } } ics->num_windows = 8; ics->swb_offset = ff_swb_offset_128[ac->m4ac.sampling_index]; ics->num_swb = ff_aac_num_swb_128[ac->m4ac.sampling_index]; ics->tns_max_bands = ff_tns_max_bands_128[ac->m4ac.sampling_index]; ics->predictor_present = 0; } else { ics->max_sfb = get_bits(gb, 6); ics->num_windows = 1; ics->swb_offset = ff_swb_offset_1024[ac->m4ac.sampling_index]; ics->num_swb = ff_aac_num_swb_1024[ac->m4ac.sampling_index]; ics->tns_max_bands = ff_tns_max_bands_1024[ac->m4ac.sampling_index]; ics->predictor_present = get_bits1(gb); ics->predictor_reset_group = 0; if (ics->predictor_present) { if (ac->m4ac.object_type == AOT_AAC_MAIN) { if (decode_prediction(ac, ics, gb)) { return AVERROR_INVALIDDATA; } } else if (ac->m4ac.object_type == AOT_AAC_LC) { av_log(ac->avctx, AV_LOG_ERROR, \"Prediction is not allowed in AAC-LC.\\n\"); return AVERROR_INVALIDDATA; } else { if ((ics->ltp.present = get_bits(gb, 1))) decode_ltp(ac, &ics->ltp, gb, ics->max_sfb); } } } if (ics->max_sfb > ics->num_swb) { av_log(ac->avctx, AV_LOG_ERROR, \"Number of scalefactor bands in group (%d) exceeds limit (%d).\\n\", ics->max_sfb, ics->num_swb); return AVERROR_INVALIDDATA; } return 0; }"} {"target": 1, "idx": 5359, "func": "long do_rt_sigreturn(CPUAlphaState *env) { abi_ulong frame_addr = env->ir[IR_A0]; struct target_rt_sigframe *frame; sigset_t set; if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { goto badframe; } target_to_host_sigset(&set, &frame->uc.tuc_sigmask); do_sigprocmask(SIG_SETMASK, &set, NULL); if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { goto 
badframe; } if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, uc.tuc_stack), 0, env->ir[IR_SP]) == -EFAULT) { goto badframe; } unlock_user_struct(frame, frame_addr, 0); return env->ir[IR_V0]; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); }"} {"target": 1, "idx": 5363, "func": "static int mov_read_udta_string(MOVContext *c, AVIOContext *pb, MOVAtom atom) { char tmp_key[5]; char key2[32], language[4] = {0}; char *str = NULL; const char *key = NULL; uint16_t langcode = 0; uint32_t data_type = 0, str_size, str_size_alloc; int (*parse)(MOVContext*, AVIOContext*, unsigned, const char*) = NULL; int raw = 0; int num = 0; switch (atom.type) { case MKTAG( '@','P','R','M'): key = \"premiere_version\"; raw = 1; break; case MKTAG( '@','P','R','Q'): key = \"quicktime_version\"; raw = 1; break; case MKTAG( 'X','M','P','_'): if (c->export_xmp) { key = \"xmp\"; raw = 1; } break; case MKTAG( 'a','A','R','T'): key = \"album_artist\"; break; case MKTAG( 'a','k','I','D'): key = \"account_type\"; parse = mov_metadata_int8_no_padding; break; case MKTAG( 'a','p','I','D'): key = \"account_id\"; break; case MKTAG( 'c','a','t','g'): key = \"category\"; break; case MKTAG( 'c','p','i','l'): key = \"compilation\"; parse = mov_metadata_int8_no_padding; break; case MKTAG( 'c','p','r','t'): key = \"copyright\"; break; case MKTAG( 'd','e','s','c'): key = \"description\"; break; case MKTAG( 'd','i','s','k'): key = \"disc\"; parse = mov_metadata_track_or_disc_number; break; case MKTAG( 'e','g','i','d'): key = \"episode_uid\"; parse = mov_metadata_int8_no_padding; break; case MKTAG( 'g','n','r','e'): key = \"genre\"; parse = mov_metadata_gnre; break; case MKTAG( 'h','d','v','d'): key = \"hd_video\"; parse = mov_metadata_int8_no_padding; break; case MKTAG( 'k','e','y','w'): key = \"keywords\"; break; case MKTAG( 'l','d','e','s'): key = \"synopsis\"; break; case MKTAG( 'l','o','c','i'): return mov_metadata_loci(c, pb, atom.size); case MKTAG( 'p','c','s','t'): key = \"podcast\"; parse = mov_metadata_int8_no_padding; break; case MKTAG( 'p','g','a','p'): key = \"gapless_playback\"; parse = mov_metadata_int8_no_padding; break; case MKTAG( 'p','u','r','d'): key = \"purchase_date\"; break; case MKTAG( 'r','t','n','g'): key = \"rating\"; parse = mov_metadata_int8_no_padding; break; case MKTAG( 's','o','a','a'): key = \"sort_album_artist\"; break; case MKTAG( 's','o','a','l'): key = \"sort_album\"; break; case MKTAG( 's','o','a','r'): key = \"sort_artist\"; break; case MKTAG( 's','o','c','o'): key = \"sort_composer\"; break; case MKTAG( 's','o','n','m'): key = \"sort_name\"; break; case MKTAG( 's','o','s','n'): key = \"sort_show\"; break; case MKTAG( 's','t','i','k'): key = \"media_type\"; parse = mov_metadata_int8_no_padding; break; case MKTAG( 't','r','k','n'): key = \"track\"; parse = mov_metadata_track_or_disc_number; break; case MKTAG( 't','v','e','n'): key = \"episode_id\"; break; case MKTAG( 't','v','e','s'): key = \"episode_sort\"; parse = mov_metadata_int8_bypass_padding; break; case MKTAG( 't','v','n','n'): key = \"network\"; break; case MKTAG( 't','v','s','h'): key = \"show\"; break; case MKTAG( 't','v','s','n'): key = \"season_number\"; parse = mov_metadata_int8_bypass_padding; break; case MKTAG(0xa9,'A','R','T'): key = \"artist\"; break; case MKTAG(0xa9,'P','R','D'): key = \"producer\"; break; case MKTAG(0xa9,'a','l','b'): key = \"album\"; break; case MKTAG(0xa9,'a','u','t'): key = \"artist\"; break; case MKTAG(0xa9,'c','h','p'): key = \"chapter\"; break; case 
MKTAG(0xa9,'c','m','t'): key = \"comment\"; break; case MKTAG(0xa9,'c','o','m'): key = \"composer\"; break; case MKTAG(0xa9,'c','p','y'): key = \"copyright\"; break; case MKTAG(0xa9,'d','a','y'): key = \"date\"; break; case MKTAG(0xa9,'d','i','r'): key = \"director\"; break; case MKTAG(0xa9,'d','i','s'): key = \"disclaimer\"; break; case MKTAG(0xa9,'e','d','1'): key = \"edit_date\"; break; case MKTAG(0xa9,'e','n','c'): key = \"encoder\"; break; case MKTAG(0xa9,'f','m','t'): key = \"original_format\"; break; case MKTAG(0xa9,'g','e','n'): key = \"genre\"; break; case MKTAG(0xa9,'g','r','p'): key = \"grouping\"; break; case MKTAG(0xa9,'h','s','t'): key = \"host_computer\"; break; case MKTAG(0xa9,'i','n','f'): key = \"comment\"; break; case MKTAG(0xa9,'l','y','r'): key = \"lyrics\"; break; case MKTAG(0xa9,'m','a','k'): key = \"make\"; break; case MKTAG(0xa9,'m','o','d'): key = \"model\"; break; case MKTAG(0xa9,'n','a','m'): key = \"title\"; break; case MKTAG(0xa9,'o','p','e'): key = \"original_artist\"; break; case MKTAG(0xa9,'p','r','d'): key = \"producer\"; break; case MKTAG(0xa9,'p','r','f'): key = \"performers\"; break; case MKTAG(0xa9,'r','e','q'): key = \"playback_requirements\"; break; case MKTAG(0xa9,'s','r','c'): key = \"original_source\"; break; case MKTAG(0xa9,'s','t','3'): key = \"subtitle\"; break; case MKTAG(0xa9,'s','w','r'): key = \"encoder\"; break; case MKTAG(0xa9,'t','o','o'): key = \"encoder\"; break; case MKTAG(0xa9,'t','r','k'): key = \"track\"; break; case MKTAG(0xa9,'u','r','l'): key = \"URL\"; break; case MKTAG(0xa9,'w','r','n'): key = \"warning\"; break; case MKTAG(0xa9,'w','r','t'): key = \"composer\"; break; case MKTAG(0xa9,'x','y','z'): key = \"location\"; break; } retry: if (c->itunes_metadata && atom.size > 8) { int data_size = avio_rb32(pb); int tag = avio_rl32(pb); if (tag == MKTAG('d','a','t','a') && data_size <= atom.size) { data_type = avio_rb32(pb); // type avio_rb32(pb); // unknown str_size = data_size - 16; atom.size -= 16; if (atom.type == MKTAG('c', 'o', 'v', 'r')) { int ret = mov_read_covr(c, pb, data_type, str_size); if (ret < 0) { av_log(c->fc, AV_LOG_ERROR, \"Error parsing cover art.\\n\"); } return ret; } else if (!key && c->found_hdlr_mdta && c->meta_keys) { uint32_t index = AV_RB32(&atom.type); if (index < c->meta_keys_count) { key = c->meta_keys[index]; } else { av_log(c->fc, AV_LOG_WARNING, \"The index of 'data' is out of range: %d >= %d.\\n\", index, c->meta_keys_count); } } } else return 0; } else if (atom.size > 4 && key && !c->itunes_metadata && !raw) { str_size = avio_rb16(pb); // string length if (str_size > atom.size) { raw = 1; avio_seek(pb, -2, SEEK_CUR); av_log(c->fc, AV_LOG_WARNING, \"UDTA parsing failed retrying raw\\n\"); goto retry; } langcode = avio_rb16(pb); ff_mov_lang_to_iso639(langcode, language); atom.size -= 4; } else str_size = atom.size; if (c->export_all && !key) { snprintf(tmp_key, 5, \"%.4s\", (char*)&atom.type); key = tmp_key; } if (!key) return 0; if (atom.size < 0 || str_size >= INT_MAX/2) return AVERROR_INVALIDDATA; // Allocates enough space if data_type is a float32 number, otherwise // worst-case requirement for output string in case of utf8 coded input num = (data_type == 23); str_size_alloc = (num ? 512 : (raw ? 
str_size : str_size * 2)) + 1; str = av_mallocz(str_size_alloc); if (!str) return AVERROR(ENOMEM); if (parse) parse(c, pb, str_size, key); else { if (!raw && (data_type == 3 || (data_type == 0 && (langcode < 0x400 || langcode == 0x7fff)))) { // MAC Encoded mov_read_mac_string(c, pb, str_size, str, str_size_alloc); } else if (data_type == 23 && str_size >= 4) { // BE float32 float val = av_int2float(avio_rb32(pb)); if (snprintf(str, str_size_alloc, \"%f\", val) >= str_size_alloc) { av_log(c->fc, AV_LOG_ERROR, \"Failed to store the float32 number (%f) in string.\\n\", val); return AVERROR_INVALIDDATA; } } else { int ret = ffio_read_size(pb, str, str_size); if (ret < 0) { return ret; } str[str_size] = 0; } c->fc->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED; av_dict_set(&c->fc->metadata, key, str, 0); if (*language && strcmp(language, \"und\")) { snprintf(key2, sizeof(key2), \"%s-%s\", key, language); av_dict_set(&c->fc->metadata, key2, str, 0); } } av_log(c->fc, AV_LOG_TRACE, \"lang \\\"%3s\\\" \", language); av_log(c->fc, AV_LOG_TRACE, \"tag \\\"%s\\\" value \\\"%s\\\" atom \\\"%.4s\\\" %d %\"PRId64\"\\n\", key, str, (char*)&atom.type, str_size_alloc, atom.size); av_freep(&str); return 0; }"} {"target": 0, "idx": 5366, "func": "static void cortex_a9_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); cpu->dtb_compatible = \"arm,cortex-a9\"; set_feature(&cpu->env, ARM_FEATURE_V7); set_feature(&cpu->env, ARM_FEATURE_VFP3); set_feature(&cpu->env, ARM_FEATURE_VFP_FP16); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); /* Note that A9 supports the MP extensions even for * A9UP and single-core A9MP (which are both different * and valid configurations; we don't model A9UP). */ set_feature(&cpu->env, ARM_FEATURE_V7MP); set_feature(&cpu->env, ARM_FEATURE_CBAR); cpu->midr = 0x410fc090; cpu->reset_fpsid = 0x41033090; cpu->mvfr0 = 0x11110222; cpu->mvfr1 = 0x01111111; cpu->ctr = 0x80038003; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x1031; cpu->id_pfr1 = 0x11; cpu->id_dfr0 = 0x000; cpu->id_afr0 = 0; cpu->id_mmfr0 = 0x00100103; cpu->id_mmfr1 = 0x20000000; cpu->id_mmfr2 = 0x01230000; cpu->id_mmfr3 = 0x00002111; cpu->id_isar0 = 0x00101111; cpu->id_isar1 = 0x13112111; cpu->id_isar2 = 0x21232041; cpu->id_isar3 = 0x11112131; cpu->id_isar4 = 0x00111142; cpu->dbgdidr = 0x35141000; cpu->clidr = (1 << 27) | (1 << 24) | 3; cpu->ccsidr[0] = 0xe00fe015; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x200fe015; /* 16k L1 icache. 
*/ define_arm_cp_regs(cpu, cortexa9_cp_reginfo); }"} {"target": 0, "idx": 5379, "func": "static void bt_hci_reset(struct bt_hci_s *hci) { hci->acl_len = 0; hci->last_cmd = 0; hci->lm.connecting = 0; hci->event_mask[0] = 0xff; hci->event_mask[1] = 0xff; hci->event_mask[2] = 0xff; hci->event_mask[3] = 0xff; hci->event_mask[4] = 0xff; hci->event_mask[5] = 0x1f; hci->event_mask[6] = 0x00; hci->event_mask[7] = 0x00; hci->device.inquiry_scan = 0; hci->device.page_scan = 0; if (hci->device.lmp_name) g_free((void *) hci->device.lmp_name); hci->device.lmp_name = NULL; hci->device.class[0] = 0x00; hci->device.class[1] = 0x00; hci->device.class[2] = 0x00; hci->voice_setting = 0x0000; hci->conn_accept_tout = 0x1f40; hci->lm.inquiry_mode = 0x00; hci->psb_handle = 0x000; hci->asb_handle = 0x000; /* XXX: timer_del(sl->acl_mode_timer); for all links */ timer_del(hci->lm.inquiry_done); timer_del(hci->lm.inquiry_next); timer_del(hci->conn_accept_timer); }"} {"target": 0, "idx": 5380, "func": "static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt) { struct iovec *iov = &pkt->vec[NET_TX_PKT_L2HDR_FRAG]; uint32_t csum_cntr; uint16_t csum = 0; /* num of iovec without vhdr */ uint32_t iov_len = pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1; uint16_t csl; struct ip_header *iphdr; size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset; /* Put zero to checksum field */ iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum); /* Calculate L4 TCP/UDP checksum */ csl = pkt->payload_len; /* data checksum */ csum_cntr = net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl); /* add pseudo header to csum */ iphdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base; csum_cntr += eth_calc_pseudo_hdr_csum(iphdr, csl); /* Put the checksum obtained into the packet */ csum = cpu_to_be16(net_checksum_finish(csum_cntr)); iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum); }"} {"target": 1, "idx": 5402, "func": "void apic_reset_irq_delivered(void) { trace_apic_reset_irq_delivered(apic_irq_delivered); apic_irq_delivered = 0; }"} {"target": 1, "idx": 5403, "func": "static TAPState *net_tap_fd_init(VLANState *vlan, int fd) { TAPState *s; s = qemu_mallocz(sizeof(TAPState)); if (!s) return NULL; s->fd = fd; s->vc = qemu_new_vlan_client(vlan, tap_receive, s); qemu_set_fd_handler(s->fd, tap_send, NULL, s); snprintf(s->vc->info_str, sizeof(s->vc->info_str), \"tap: fd=%d\", fd); return s; }"} {"target": 1, "idx": 5407, "func": "static void mov_text_style_cb(void *priv, const char style, int close) { MovTextContext *s = priv; if (!close) { if (!(s->box_flags & STYL_BOX)) { //first style entry s->style_attributes_temp = av_malloc(sizeof(*s->style_attributes_temp)); if (!s->style_attributes_temp) { av_bprint_clear(&s->buffer); s->box_flags &= ~STYL_BOX; } s->style_attributes_temp->style_flag = 0; s->style_attributes_temp->style_start = AV_RB16(&s->text_pos); } else { if (s->style_attributes_temp->style_flag) { //break the style record here and start a new one s->style_attributes_temp->style_end = AV_RB16(&s->text_pos); av_dynarray_add(&s->style_attributes, &s->count, s->style_attributes_temp); s->style_attributes_temp = av_malloc(sizeof(*s->style_attributes_temp)); if (!s->style_attributes_temp) { mov_text_cleanup(s); av_bprint_clear(&s->buffer); s->box_flags &= ~STYL_BOX; } s->style_attributes_temp->style_flag = s->style_attributes[s->count - 1]->style_flag; s->style_attributes_temp->style_start = AV_RB16(&s->text_pos); } else { s->style_attributes_temp->style_flag = 0; 
s->style_attributes_temp->style_start = AV_RB16(&s->text_pos); } } switch (style){ case 'b': s->style_attributes_temp->style_flag |= STYLE_FLAG_BOLD; break; case 'i': s->style_attributes_temp->style_flag |= STYLE_FLAG_ITALIC; break; case 'u': s->style_attributes_temp->style_flag |= STYLE_FLAG_UNDERLINE; break; } } else { s->style_attributes_temp->style_end = AV_RB16(&s->text_pos); av_dynarray_add(&s->style_attributes, &s->count, s->style_attributes_temp); s->style_attributes_temp = av_malloc(sizeof(*s->style_attributes_temp)); if (!s->style_attributes_temp) { mov_text_cleanup(s); av_bprint_clear(&s->buffer); s->box_flags &= ~STYL_BOX; } s->style_attributes_temp->style_flag = s->style_attributes[s->count - 1]->style_flag; switch (style){ case 'b': s->style_attributes_temp->style_flag &= ~STYLE_FLAG_BOLD; break; case 'i': s->style_attributes_temp->style_flag &= ~STYLE_FLAG_ITALIC; break; case 'u': s->style_attributes_temp->style_flag &= ~STYLE_FLAG_UNDERLINE; break; } if (s->style_attributes_temp->style_flag) { //start of new style record s->style_attributes_temp->style_start = AV_RB16(&s->text_pos); } } s->box_flags |= STYL_BOX; }"} {"target": 1, "idx": 5409, "func": "int qcow2_check_metadata_overlap(BlockDriverState *bs, int chk, int64_t offset, int64_t size) { BDRVQcowState *s = bs->opaque; int i, j; if (!size) { return 0; } if (chk & QCOW2_OL_MAIN_HEADER) { if (offset < s->cluster_size) { return QCOW2_OL_MAIN_HEADER; } } /* align range to test to cluster boundaries */ size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size); offset = start_of_cluster(s, offset); if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) { if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) { return QCOW2_OL_ACTIVE_L1; } } if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) { if (overlaps_with(s->refcount_table_offset, s->refcount_table_size * sizeof(uint64_t))) { return QCOW2_OL_REFCOUNT_TABLE; } } if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) { if (overlaps_with(s->snapshots_offset, s->snapshots_size)) { return QCOW2_OL_SNAPSHOT_TABLE; } } if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) { for (i = 0; i < s->nb_snapshots; i++) { if (s->snapshots[i].l1_size && overlaps_with(s->snapshots[i].l1_table_offset, s->snapshots[i].l1_size * sizeof(uint64_t))) { return QCOW2_OL_INACTIVE_L1; } } } if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) { for (i = 0; i < s->l1_size; i++) { if ((s->l1_table[i] & L1E_OFFSET_MASK) && overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK, s->cluster_size)) { return QCOW2_OL_ACTIVE_L2; } } } if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) { for (i = 0; i < s->refcount_table_size; i++) { if ((s->refcount_table[i] & REFT_OFFSET_MASK) && overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK, s->cluster_size)) { return QCOW2_OL_REFCOUNT_BLOCK; } } } if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) { for (i = 0; i < s->nb_snapshots; i++) { uint64_t l1_ofs = s->snapshots[i].l1_table_offset; uint32_t l1_sz = s->snapshots[i].l1_size; uint64_t l1_sz2 = l1_sz * sizeof(uint64_t); uint64_t *l1 = g_malloc(l1_sz2); int ret; ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2); if (ret < 0) { g_free(l1); return ret; } for (j = 0; j < l1_sz; j++) { uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK; if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) { g_free(l1); return QCOW2_OL_INACTIVE_L2; } } g_free(l1); } } return 0; }"} {"target": 1, "idx": 5421, "func": "static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){ int delay = 
FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames); int num, den, frame_size, i; // av_log(st->codec, AV_LOG_DEBUG, \"av_write_frame: pts:%\"PRId64\" dts:%\"PRId64\" cur_dts:%\"PRId64\" b:%d size:%d st:%d\\n\", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index); /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE) return -1;*/ /* duration field */ if (pkt->duration == 0) { compute_frame_duration(&num, &den, st, NULL, pkt); if (den && num) { pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num); } } if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0) pkt->pts= pkt->dts; //XXX/FIXME this is a temporary hack until all encoders output pts if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){ pkt->dts= // pkt->pts= st->cur_dts; pkt->pts= st->pts.val; } //calculate dts from pts if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){ st->pts_buffer[0]= pkt->pts; for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++) st->pts_buffer[i]= (i-delay-1) * pkt->duration; for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++) FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); pkt->dts= st->pts_buffer[0]; } if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){ av_log(st->codec, AV_LOG_ERROR, \"error, non monotone timestamps %\"PRId64\" >= %\"PRId64\"\\n\", st->cur_dts, pkt->dts); return -1; } if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){ av_log(st->codec, AV_LOG_ERROR, \"error, pts < dts\\n\"); return -1; } // av_log(NULL, AV_LOG_DEBUG, \"av_write_frame: pts2:%\"PRId64\" dts2:%\"PRId64\"\\n\", pkt->pts, pkt->dts); st->cur_dts= pkt->dts; st->pts.val= pkt->dts; /* update pts */ switch (st->codec->codec_type) { case CODEC_TYPE_AUDIO: frame_size = get_audio_frame_size(st->codec, pkt->size); /* HACK/FIXME, we skip the initial 0 size packets as they are most likely equal to the encoder delay, but it would be better if we had the real timestamps from the encoder */ if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) { av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size); } break; case CODEC_TYPE_VIDEO: av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num); break; default: break; } return 0; }"} {"target": 0, "idx": 5439, "func": "static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx) { int max_bits = 0; int ret, x, y; if ((ret = dnxhd_find_qscale(ctx)) < 0) return ret; for (y = 0; y < ctx->m.mb_height; y++) { for (x = 0; x < ctx->m.mb_width; x++) { int mb = y * ctx->m.mb_width + x; int rc = (ctx->qscale * ctx->m.mb_num ) + mb; int delta_bits; ctx->mb_qscale[mb] = ctx->qscale; ctx->mb_bits[mb] = ctx->mb_rc[rc].bits; max_bits += ctx->mb_rc[rc].bits; if (!RC_VARIANCE) { delta_bits = ctx->mb_rc[rc].bits - ctx->mb_rc[rc + ctx->m.mb_num].bits; ctx->mb_cmp[mb].mb = mb; ctx->mb_cmp[mb].value = delta_bits ? 
((ctx->mb_rc[rc].ssd - ctx->mb_rc[rc + ctx->m.mb_num].ssd) * 100) / delta_bits : INT_MIN; // avoid increasing qscale } } max_bits += 31; // worst padding } if (!ret) { if (RC_VARIANCE) avctx->execute2(avctx, dnxhd_mb_var_thread, NULL, NULL, ctx->m.mb_height); radix_sort(ctx->mb_cmp, ctx->m.mb_num); for (x = 0; x < ctx->m.mb_num && max_bits > ctx->frame_bits; x++) { int mb = ctx->mb_cmp[x].mb; int rc = (ctx->qscale * ctx->m.mb_num ) + mb; max_bits -= ctx->mb_rc[rc].bits - ctx->mb_rc[rc + ctx->m.mb_num].bits; ctx->mb_qscale[mb] = ctx->qscale + 1; ctx->mb_bits[mb] = ctx->mb_rc[rc + ctx->m.mb_num].bits; } } return 0; }"} {"target": 0, "idx": 5446, "func": "static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap) { VideoData *s = s1->priv_data; int ret, first_index, last_index; char buf[1024]; ByteIOContext pb1, *f = &pb1; AVStream *st; st = av_new_stream(s1, 0); if (!st) { av_free(s); return -ENOMEM; } if (ap && ap->image_format) s->img_fmt = ap->image_format; strcpy(s->path, s1->filename); s->img_number = 0; /* find format */ if (s1->iformat->flags & AVFMT_NOFILE) s->is_pipe = 0; else s->is_pipe = 1; if (!ap || !ap->frame_rate) { st->codec.frame_rate = 25; st->codec.frame_rate_base = 1; } else { st->codec.frame_rate = ap->frame_rate; st->codec.frame_rate_base = ap->frame_rate_base; } if (!s->is_pipe) { if (find_image_range(&first_index, &last_index, s->path) < 0) goto fail; s->img_number = first_index; /* compute duration */ st->start_time = 0; st->duration = ((int64_t)AV_TIME_BASE * (last_index - first_index + 1) * st->codec.frame_rate_base) / st->codec.frame_rate; if (get_frame_filename(buf, sizeof(buf), s->path, s->img_number) < 0) goto fail; if (url_fopen(f, buf, URL_RDONLY) < 0) goto fail; } else { f = &s1->pb; } ret = av_read_image(f, s1->filename, s->img_fmt, read_header_alloc_cb, s); if (ret < 0) goto fail1; if (!s->is_pipe) { url_fclose(f); } else { url_fseek(f, 0, SEEK_SET); } st->codec.codec_type = CODEC_TYPE_VIDEO; st->codec.codec_id = CODEC_ID_RAWVIDEO; st->codec.width = s->width; st->codec.height = s->height; st->codec.pix_fmt = s->pix_fmt; s->img_size = avpicture_get_size(s->pix_fmt, s->width, s->height); return 0; fail1: if (!s->is_pipe) url_fclose(f); fail: av_free(s); return -EIO; }"} {"target": 0, "idx": 5459, "func": "static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev) { VirtIODevice *vdev = dev->vdev; int num_vq; for (num_vq = 0; num_vq < VIRTIO_PCI_QUEUE_MAX; num_vq++) { if (!virtio_queue_get_num(vdev, num_vq)) { break; } } return num_vq; }"} {"target": 0, "idx": 5460, "func": "static int kvm_get_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_xsave* xsave = env->kvm_xsave_buf; int ret, i; const uint8_t *xmm, *ymmh, *zmmh; uint16_t cwd, swd, twd; if (!kvm_has_xsave()) { return kvm_get_fpu(cpu); } ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave); if (ret < 0) { return ret; } cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW]; swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16); twd = (uint16_t)xsave->region[XSAVE_FTW_FOP]; env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16); env->fpstt = (swd >> 11) & 7; env->fpus = swd; env->fpuc = cwd; for (i = 0; i < 8; ++i) { env->fptags[i] = !((twd >> i) & 1); } memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip)); memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp)); env->mxcsr = xsave->region[XSAVE_MXCSR]; memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE], sizeof env->fpregs); env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV]; 
memcpy(env->bnd_regs, &xsave->region[XSAVE_BNDREGS], sizeof env->bnd_regs); memcpy(&env->bndcs_regs, &xsave->region[XSAVE_BNDCSR], sizeof(env->bndcs_regs)); memcpy(env->opmask_regs, &xsave->region[XSAVE_OPMASK], sizeof env->opmask_regs); xmm = (const uint8_t *)&xsave->region[XSAVE_XMM_SPACE]; ymmh = (const uint8_t *)&xsave->region[XSAVE_YMMH_SPACE]; zmmh = (const uint8_t *)&xsave->region[XSAVE_ZMM_Hi256]; for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) { env->xmm_regs[i].XMM_Q(0) = ldq_p(xmm); env->xmm_regs[i].XMM_Q(1) = ldq_p(xmm+8); env->xmm_regs[i].XMM_Q(2) = ldq_p(ymmh); env->xmm_regs[i].XMM_Q(3) = ldq_p(ymmh+8); env->xmm_regs[i].XMM_Q(4) = ldq_p(zmmh); env->xmm_regs[i].XMM_Q(5) = ldq_p(zmmh+8); env->xmm_regs[i].XMM_Q(6) = ldq_p(zmmh+16); env->xmm_regs[i].XMM_Q(7) = ldq_p(zmmh+24); } #ifdef TARGET_X86_64 memcpy(&env->xmm_regs[16], &xsave->region[XSAVE_Hi16_ZMM], 16 * sizeof env->xmm_regs[16]); #endif return 0; }"} {"target": 0, "idx": 5464, "func": "static int jazz_led_init(SysBusDevice *dev) { LedState *s = FROM_SYSBUS(LedState, dev); memory_region_init_io(&s->iomem, &led_ops, s, \"led\", 1); sysbus_init_mmio(dev, &s->iomem); s->con = graphic_console_init(jazz_led_update_display, jazz_led_invalidate_display, NULL, jazz_led_text_update, s); return 0; }"} {"target": 0, "idx": 5467, "func": "static uint32_t hpet_ram_readb(void *opaque, target_phys_addr_t addr) { printf(\"qemu: hpet_read b at %\" PRIx64 \"\\n\", addr); return 0; }"} {"target": 0, "idx": 5470, "func": "static int read_restart_header(MLPDecodeContext *m, BitstreamContext *bc, const uint8_t *buf, unsigned int substr) { SubStream *s = &m->substream[substr]; unsigned int ch; int sync_word, tmp; uint8_t checksum; uint8_t lossless_check; int start_count = bitstream_tell(bc); int min_channel, max_channel, max_matrix_channel; const int std_max_matrix_channel = m->avctx->codec_id == AV_CODEC_ID_MLP ? MAX_MATRIX_CHANNEL_MLP : MAX_MATRIX_CHANNEL_TRUEHD; sync_word = bitstream_read(bc, 13); if (sync_word != 0x31ea >> 1) { av_log(m->avctx, AV_LOG_ERROR, \"restart header sync incorrect (got 0x%04x)\\n\", sync_word); return AVERROR_INVALIDDATA; } s->noise_type = bitstream_read_bit(bc); if (m->avctx->codec_id == AV_CODEC_ID_MLP && s->noise_type) { av_log(m->avctx, AV_LOG_ERROR, \"MLP must have 0x31ea sync word.\\n\"); return AVERROR_INVALIDDATA; } bitstream_skip(bc, 16); /* Output timestamp */ min_channel = bitstream_read(bc, 4); max_channel = bitstream_read(bc, 4); max_matrix_channel = bitstream_read(bc, 4); if (max_matrix_channel > std_max_matrix_channel) { av_log(m->avctx, AV_LOG_ERROR, \"Max matrix channel cannot be greater than %d.\\n\", max_matrix_channel); return AVERROR_INVALIDDATA; } if (max_channel != max_matrix_channel) { av_log(m->avctx, AV_LOG_ERROR, \"Max channel must be equal max matrix channel.\\n\"); return AVERROR_INVALIDDATA; } /* This should happen for TrueHD streams with >6 channels and MLP's noise * type. It is not yet known if this is allowed. 
*/ if (s->max_channel > MAX_MATRIX_CHANNEL_MLP && !s->noise_type) { avpriv_request_sample(m->avctx, \"%d channels (more than the \" \"maximum supported by the decoder)\", s->max_channel + 2); return AVERROR_PATCHWELCOME; } if (min_channel > max_channel) { av_log(m->avctx, AV_LOG_ERROR, \"Substream min channel cannot be greater than max channel.\\n\"); return AVERROR_INVALIDDATA; } s->min_channel = min_channel; s->max_channel = max_channel; s->max_matrix_channel = max_matrix_channel; if (m->avctx->request_channel_layout && (s->mask & m->avctx->request_channel_layout) == m->avctx->request_channel_layout && m->max_decoded_substream > substr) { av_log(m->avctx, AV_LOG_DEBUG, \"Extracting %d-channel downmix (0x%\"PRIx64\") from substream %d. \" \"Further substreams will be skipped.\\n\", s->max_channel + 1, s->mask, substr); m->max_decoded_substream = substr; } s->noise_shift = bitstream_read(bc, 4); s->noisegen_seed = bitstream_read(bc, 23); bitstream_skip(bc, 19); s->data_check_present = bitstream_read_bit(bc); lossless_check = bitstream_read(bc, 8); if (substr == m->max_decoded_substream && s->lossless_check_data != 0xffffffff) { tmp = xor_32_to_8(s->lossless_check_data); if (tmp != lossless_check) av_log(m->avctx, AV_LOG_WARNING, \"Lossless check failed - expected %02x, calculated %02x.\\n\", lossless_check, tmp); } bitstream_skip(bc, 16); memset(s->ch_assign, 0, sizeof(s->ch_assign)); for (ch = 0; ch <= s->max_matrix_channel; ch++) { int ch_assign = bitstream_read(bc, 6); if (m->avctx->codec_id == AV_CODEC_ID_TRUEHD) { uint64_t channel = thd_channel_layout_extract_channel(s->mask, ch_assign); ch_assign = av_get_channel_layout_channel_index(s->mask, channel); } if (ch_assign < 0 || ch_assign > s->max_matrix_channel) { avpriv_request_sample(m->avctx, \"Assignment of matrix channel %d to invalid output channel %d\", ch, ch_assign); return AVERROR_PATCHWELCOME; } s->ch_assign[ch_assign] = ch; } checksum = ff_mlp_restart_checksum(buf, bitstream_tell(bc) - start_count); if (checksum != bitstream_read(bc, 8)) av_log(m->avctx, AV_LOG_ERROR, \"restart header checksum error\\n\"); /* Set default decoding parameters. */ s->param_presence_flags = 0xff; s->num_primitive_matrices = 0; s->blocksize = 8; s->lossless_check_data = 0; memset(s->output_shift , 0, sizeof(s->output_shift )); memset(s->quant_step_size, 0, sizeof(s->quant_step_size)); for (ch = s->min_channel; ch <= s->max_channel; ch++) { ChannelParams *cp = &s->channel_params[ch]; cp->filter_params[FIR].order = 0; cp->filter_params[IIR].order = 0; cp->filter_params[FIR].shift = 0; cp->filter_params[IIR].shift = 0; /* Default audio coding is 24-bit raw PCM. 
*/ cp->huff_offset = 0; cp->sign_huff_offset = -(1 << 23); cp->codebook = 0; cp->huff_lsbs = 24; } if (substr == m->max_decoded_substream) { m->avctx->channels = s->max_matrix_channel + 1; m->avctx->channel_layout = s->mask; m->dsp.mlp_pack_output = m->dsp.mlp_select_pack_output(s->ch_assign, s->output_shift, s->max_matrix_channel, m->avctx->sample_fmt == AV_SAMPLE_FMT_S32); } return 0; }"} {"target": 0, "idx": 5484, "func": "static bool x86_cpu_has_work(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; return ((cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_POLL)) && (env->eflags & IF_MASK)) || (cs->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_INIT | CPU_INTERRUPT_SIPI | CPU_INTERRUPT_MCE)); }"} {"target": 0, "idx": 5487, "func": "static void sd_reset(SDState *sd, BlockBackend *blk) { uint64_t size; uint64_t sect; if (blk) { blk_get_geometry(blk, §); } else { sect = 0; } size = sect << 9; sect = sd_addr_to_wpnum(size) + 1; sd->state = sd_idle_state; sd->rca = 0x0000; sd_set_ocr(sd); sd_set_scr(sd); sd_set_cid(sd); sd_set_csd(sd, size); sd_set_cardstatus(sd); sd_set_sdstatus(sd); sd->blk = blk; if (sd->wp_groups) g_free(sd->wp_groups); sd->wp_switch = blk ? blk_is_read_only(blk) : false; sd->wpgrps_size = sect; sd->wp_groups = bitmap_new(sd->wpgrps_size); memset(sd->function_group, 0, sizeof(sd->function_group)); sd->erase_start = 0; sd->erase_end = 0; sd->size = size; sd->blk_len = 0x200; sd->pwd_len = 0; sd->expecting_acmd = false; }"} {"target": 1, "idx": 5503, "func": "static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt) { OutputFile *of = output_files[ost->file_index]; int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base); AVPacket opkt; av_init_packet(&opkt); if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !ost->copy_initial_nonkeyframes) return; if (of->recording_time != INT64_MAX && ist->last_dts >= of->recording_time + of->start_time) { ost->finished = 1; return; } /* force the input stream PTS */ if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) audio_size += pkt->size; else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { video_size += pkt->size; ost->sync_opts++; } if (pkt->pts != AV_NOPTS_VALUE) opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time; else opkt.pts = AV_NOPTS_VALUE; if (pkt->dts == AV_NOPTS_VALUE) opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base); else opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base); opkt.dts -= ost_tb_start_time; opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base); opkt.flags = pkt->flags; // FIXME remove the following 2 lines they shall be replaced by the bitstream filters if ( ost->st->codec->codec_id != AV_CODEC_ID_H264 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO && ost->st->codec->codec_id != AV_CODEC_ID_VC1 ) { if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY)) { opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0); if (!opkt.buf) exit(1); } } else { opkt.data = pkt->data; opkt.size = pkt->size; } write_frame(of->ctx, &opkt, ost); ost->st->codec->frame_number++; }"} {"target": 1, "idx": 5510, "func": "struct GuestAgentInfo *qmp_guest_info(Error **errp) { GuestAgentInfo *info = 
g_malloc0(sizeof(GuestAgentInfo)); info->version = g_strdup(QEMU_VERSION); qmp_for_each_command(qmp_command_info, info); return info; }"} {"target": 1, "idx": 5517, "func": "static int init_pass2(MpegEncContext *s) { RateControlContext *rcc = &s->rc_context; AVCodecContext *a = s->avctx; int i, toobig; double fps = get_fps(s->avctx); double complexity[5] = { 0 }; // approximate bits at quant=1 uint64_t const_bits[5] = { 0 }; // quantizer independent bits uint64_t all_const_bits; uint64_t all_available_bits = (uint64_t)(s->bit_rate * (double)rcc->num_entries / fps); double rate_factor = 0; double step; const int filter_size = (int)(a->qblur * 4) | 1; double expected_bits; double *qscale, *blurred_qscale, qscale_sum; /* find complexity & const_bits & decide the pict_types */ for (i = 0; i < rcc->num_entries; i++) { RateControlEntry *rce = &rcc->entry[i]; rce->new_pict_type = rce->pict_type; rcc->i_cplx_sum[rce->pict_type] += rce->i_tex_bits * rce->qscale; rcc->p_cplx_sum[rce->pict_type] += rce->p_tex_bits * rce->qscale; rcc->mv_bits_sum[rce->pict_type] += rce->mv_bits; rcc->frame_count[rce->pict_type]++; complexity[rce->new_pict_type] += (rce->i_tex_bits + rce->p_tex_bits) * (double)rce->qscale; const_bits[rce->new_pict_type] += rce->mv_bits + rce->misc_bits; } all_const_bits = const_bits[AV_PICTURE_TYPE_I] + const_bits[AV_PICTURE_TYPE_P] + const_bits[AV_PICTURE_TYPE_B]; if (all_available_bits < all_const_bits) { av_log(s->avctx, AV_LOG_ERROR, \"requested bitrate is too low\\n\"); return -1; } qscale = av_malloc(sizeof(double) * rcc->num_entries); blurred_qscale = av_malloc(sizeof(double) * rcc->num_entries); toobig = 0; for (step = 256 * 256; step > 0.0000001; step *= 0.5) { expected_bits = 0; rate_factor += step; rcc->buffer_index = s->avctx->rc_buffer_size / 2; /* find qscale */ for (i = 0; i < rcc->num_entries; i++) { RateControlEntry *rce = &rcc->entry[i]; qscale[i] = get_qscale(s, &rcc->entry[i], rate_factor, i); rcc->last_qscale_for[rce->pict_type] = qscale[i]; } assert(filter_size % 2 == 1); /* fixed I/B QP relative to P mode */ for (i = FFMAX(0, rcc->num_entries - 300); i < rcc->num_entries; i++) { RateControlEntry *rce = &rcc->entry[i]; qscale[i] = get_diff_limited_q(s, rce, qscale[i]); } for (i = rcc->num_entries - 1; i >= 0; i--) { RateControlEntry *rce = &rcc->entry[i]; qscale[i] = get_diff_limited_q(s, rce, qscale[i]); } /* smooth curve */ for (i = 0; i < rcc->num_entries; i++) { RateControlEntry *rce = &rcc->entry[i]; const int pict_type = rce->new_pict_type; int j; double q = 0.0, sum = 0.0; for (j = 0; j < filter_size; j++) { int index = i + j - filter_size / 2; double d = index - i; double coeff = a->qblur == 0 ? 
1.0 : exp(-d * d / (a->qblur * a->qblur)); if (index < 0 || index >= rcc->num_entries) continue; if (pict_type != rcc->entry[index].new_pict_type) continue; q += qscale[index] * coeff; sum += coeff; } blurred_qscale[i] = q / sum; } /* find expected bits */ for (i = 0; i < rcc->num_entries; i++) { RateControlEntry *rce = &rcc->entry[i]; double bits; rce->new_qscale = modify_qscale(s, rce, blurred_qscale[i], i); bits = qp2bits(rce, rce->new_qscale) + rce->mv_bits + rce->misc_bits; bits += 8 * ff_vbv_update(s, bits); rce->expected_bits = expected_bits; expected_bits += bits; } av_dlog(s->avctx, \"expected_bits: %f all_available_bits: %d rate_factor: %f\\n\", expected_bits, (int)all_available_bits, rate_factor); if (expected_bits > all_available_bits) { rate_factor -= step; ++toobig; } } av_free(qscale); av_free(blurred_qscale); /* check bitrate calculations and print info */ qscale_sum = 0.0; for (i = 0; i < rcc->num_entries; i++) { av_dlog(s, \"[lavc rc] entry[%d].new_qscale = %.3f qp = %.3f\\n\", i, rcc->entry[i].new_qscale, rcc->entry[i].new_qscale / FF_QP2LAMBDA); qscale_sum += av_clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA, s->avctx->qmin, s->avctx->qmax); } assert(toobig <= 40); av_log(s->avctx, AV_LOG_DEBUG, \"[lavc rc] requested bitrate: %d bps expected bitrate: %d bps\\n\", s->bit_rate, (int)(expected_bits / ((double)all_available_bits / s->bit_rate))); av_log(s->avctx, AV_LOG_DEBUG, \"[lavc rc] estimated target average qp: %.3f\\n\", (float)qscale_sum / rcc->num_entries); if (toobig == 0) { av_log(s->avctx, AV_LOG_INFO, \"[lavc rc] Using all of requested bitrate is not \" \"necessary for this video with these parameters.\\n\"); } else if (toobig == 40) { av_log(s->avctx, AV_LOG_ERROR, \"[lavc rc] Error: bitrate too low for this video \" \"with these parameters.\\n\"); return -1; } else if (fabs(expected_bits / all_available_bits - 1.0) > 0.01) { av_log(s->avctx, AV_LOG_ERROR, \"[lavc rc] Error: 2pass curve failed to converge\\n\"); return -1; } return 0; }"} {"target": 1, "idx": 5521, "func": "void replay_configure(QemuOpts *opts) { const char *fname; const char *rr; ReplayMode mode = REPLAY_MODE_NONE; Location loc; if (!opts) { return; } loc_push_none(&loc); qemu_opts_loc_restore(opts); rr = qemu_opt_get(opts, \"rr\"); if (!rr) { /* Just enabling icount */ return; } else if (!strcmp(rr, \"record\")) { mode = REPLAY_MODE_RECORD; } else if (!strcmp(rr, \"replay\")) { mode = REPLAY_MODE_PLAY; } else { error_report(\"Invalid icount rr option: %s\", rr); exit(1); } fname = qemu_opt_get(opts, \"rrfile\"); if (!fname) { error_report(\"File name not specified for replay\"); exit(1); } replay_enable(fname, mode); loc_pop(&loc); }"} {"target": 0, "idx": 5523, "func": "static void paint_mouse_pointer(XImage *image, AVFormatContext *s1) { X11GrabContext *s = s1->priv_data; int x_off = s->x_off; int y_off = s->y_off; int width = s->width; int height = s->height; Display *dpy = s->dpy; XFixesCursorImage *xcim; int x, y; int line, column; int to_line, to_column; int pixstride = image->bits_per_pixel >> 3; /* Warning: in its insanity, xlib provides unsigned image data through a * char* pointer, so we have to make it uint8_t to make things not break. * Anyone who performs further investigation of the xlib API likely risks * permanent brain damage. 
*/ uint8_t *pix = image->data; Window root; XSetWindowAttributes attr; /* Code doesn't currently support 16-bit or PAL8 */ if (image->bits_per_pixel != 24 && image->bits_per_pixel != 32) return; if (!s->c) s->c = XCreateFontCursor(dpy, XC_left_ptr); root = DefaultRootWindow(dpy); attr.cursor = s->c; XChangeWindowAttributes(dpy, root, CWCursor, &attr); xcim = XFixesGetCursorImage(dpy); if (!xcim) { av_log(s1, AV_LOG_WARNING, \"XFixes extension not available, impossible to draw cursor\\n\"); s->draw_mouse = 0; return; } x = xcim->x - xcim->xhot; y = xcim->y - xcim->yhot; to_line = FFMIN((y + xcim->height), (height + y_off)); to_column = FFMIN((x + xcim->width), (width + x_off)); for (line = FFMAX(y, y_off); line < to_line; line++) { for (column = FFMAX(x, x_off); column < to_column; column++) { int xcim_addr = (line - y) * xcim->width + column - x; int image_addr = ((line - y_off) * width + column - x_off) * pixstride; int r = (uint8_t)(xcim->pixels[xcim_addr] >> 0); int g = (uint8_t)(xcim->pixels[xcim_addr] >> 8); int b = (uint8_t)(xcim->pixels[xcim_addr] >> 16); int a = (uint8_t)(xcim->pixels[xcim_addr] >> 24); if (a == 255) { pix[image_addr + 0] = r; pix[image_addr + 1] = g; pix[image_addr + 2] = b; } else if (a) { /* pixel values from XFixesGetCursorImage come premultiplied by alpha */ pix[image_addr + 0] = r + (pix[image_addr + 0] * (255 - a) + 255 / 2) / 255; pix[image_addr + 1] = g + (pix[image_addr + 1] * (255 - a) + 255 / 2) / 255; pix[image_addr + 2] = b + (pix[image_addr + 2] * (255 - a) + 255 / 2) / 255; } } } XFree(xcim); xcim = NULL; }"} {"target": 1, "idx": 5526, "func": "void scsi_req_dequeue(SCSIRequest *req) { trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag); if (req->enqueued) { QTAILQ_REMOVE(&req->dev->requests, req, next); req->enqueued = false; scsi_req_unref(req); } }"} {"target": 1, "idx": 5529, "func": "static int pci_rocker_init(PCIDevice *dev) { Rocker *r = to_rocker(dev); const MACAddr zero = { .a = { 0, 0, 0, 0, 0, 0 } }; const MACAddr dflt = { .a = { 0x52, 0x54, 0x00, 0x12, 0x35, 0x01 } }; static int sw_index; int i, err = 0; /* allocate worlds */ r->worlds[ROCKER_WORLD_TYPE_OF_DPA] = of_dpa_world_alloc(r); if (!r->world_name) { r->world_name = g_strdup(world_name(r->worlds[ROCKER_WORLD_TYPE_OF_DPA])); } r->world_dflt = rocker_world_type_by_name(r, r->world_name); if (!r->world_dflt) { fprintf(stderr, \"rocker: requested world \\\"%s\\\" does not exist\\n\", r->world_name); err = -EINVAL; goto err_world_type_by_name; } /* set up memory-mapped region at BAR0 */ memory_region_init_io(&r->mmio, OBJECT(r), &rocker_mmio_ops, r, \"rocker-mmio\", ROCKER_PCI_BAR0_SIZE); pci_register_bar(dev, ROCKER_PCI_BAR0_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY, &r->mmio); /* set up memory-mapped region for MSI-X */ memory_region_init(&r->msix_bar, OBJECT(r), \"rocker-msix-bar\", ROCKER_PCI_MSIX_BAR_SIZE); pci_register_bar(dev, ROCKER_PCI_MSIX_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY, &r->msix_bar); /* MSI-X init */ err = rocker_msix_init(r); if (err) { goto err_msix_init; } /* validate switch properties */ if (!r->name) { r->name = g_strdup(ROCKER); } if (rocker_find(r->name)) { err = -EEXIST; goto err_duplicate; } /* Rocker name is passed in port name requests to OS with the intention * that the name is used in interface names. Limit the length of the * rocker name to avoid naming problems in the OS. 
Also, adding the * port number as p# and unganged breakout b#, where # is at most 2 * digits, so leave room for it too (-1 for string terminator, -3 for * p# and -3 for b#) */ #define ROCKER_IFNAMSIZ 16 #define MAX_ROCKER_NAME_LEN (ROCKER_IFNAMSIZ - 1 - 3 - 3) if (strlen(r->name) > MAX_ROCKER_NAME_LEN) { fprintf(stderr, \"rocker: name too long; please shorten to at most %d chars\\n\", MAX_ROCKER_NAME_LEN); return -EINVAL; } if (memcmp(&r->fp_start_macaddr, &zero, sizeof(zero)) == 0) { memcpy(&r->fp_start_macaddr, &dflt, sizeof(dflt)); r->fp_start_macaddr.a[4] += (sw_index++); } if (!r->switch_id) { memcpy(&r->switch_id, &r->fp_start_macaddr, sizeof(r->fp_start_macaddr)); } if (r->fp_ports > ROCKER_FP_PORTS_MAX) { r->fp_ports = ROCKER_FP_PORTS_MAX; } r->rings = g_new(DescRing *, rocker_pci_ring_count(r)); /* Rings are ordered like this: * - command ring * - event ring * - port0 tx ring * - port0 rx ring * - port1 tx ring * - port1 rx ring * ..... */ for (i = 0; i < rocker_pci_ring_count(r); i++) { DescRing *ring = desc_ring_alloc(r, i); if (i == ROCKER_RING_CMD) { desc_ring_set_consume(ring, cmd_consume, ROCKER_MSIX_VEC_CMD); } else if (i == ROCKER_RING_EVENT) { desc_ring_set_consume(ring, NULL, ROCKER_MSIX_VEC_EVENT); } else if (i % 2 == 0) { desc_ring_set_consume(ring, tx_consume, ROCKER_MSIX_VEC_TX((i - 2) / 2)); } else if (i % 2 == 1) { desc_ring_set_consume(ring, NULL, ROCKER_MSIX_VEC_RX((i - 3) / 2)); } r->rings[i] = ring; } for (i = 0; i < r->fp_ports; i++) { FpPort *port = fp_port_alloc(r, r->name, &r->fp_start_macaddr, i, &r->fp_ports_peers[i]); r->fp_port[i] = port; fp_port_set_world(port, r->world_dflt); } QLIST_INSERT_HEAD(&rockers, r, next); return 0; err_duplicate: rocker_msix_uninit(r); err_msix_init: object_unparent(OBJECT(&r->msix_bar)); object_unparent(OBJECT(&r->mmio)); err_world_type_by_name: for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) { if (r->worlds[i]) { world_free(r->worlds[i]); } } return err; }"} {"target": 0, "idx": 5566, "func": "static int cow_test_bit(int64_t bitnum, const uint8_t *bitmap) { return (bitmap[bitnum / 8] & (1 << (bitnum & 7))) != 0; }"} {"target": 0, "idx": 5570, "func": "static void create_gic(VirtBoardInfo *vbi, qemu_irq *pic) { /* We create a standalone GIC v2 */ DeviceState *gicdev; SysBusDevice *gicbusdev; const char *gictype; int i; gictype = gic_class_name(); gicdev = qdev_create(NULL, gictype); qdev_prop_set_uint32(gicdev, \"revision\", 2); qdev_prop_set_uint32(gicdev, \"num-cpu\", smp_cpus); /* Note that the num-irq property counts both internal and external * interrupts; there are always 32 of the former (mandated by GIC spec). */ qdev_prop_set_uint32(gicdev, \"num-irq\", NUM_IRQS + 32); qdev_init_nofail(gicdev); gicbusdev = SYS_BUS_DEVICE(gicdev); sysbus_mmio_map(gicbusdev, 0, vbi->memmap[VIRT_GIC_DIST].base); sysbus_mmio_map(gicbusdev, 1, vbi->memmap[VIRT_GIC_CPU].base); /* Wire the outputs from each CPU's generic timer to the * appropriate GIC PPI inputs, and the GIC's IRQ output to * the CPU's IRQ input. */ for (i = 0; i < smp_cpus; i++) { DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS; /* physical timer; we wire it up to the non-secure timer's ID, * since a real A15 always has TrustZone but QEMU doesn't. */ qdev_connect_gpio_out(cpudev, 0, qdev_get_gpio_in(gicdev, ppibase + ARCH_TIMER_NS_EL1_IRQ)); /* virtual timer */ qdev_connect_gpio_out(cpudev, 1, qdev_get_gpio_in(gicdev, ppibase + ARCH_TIMER_VIRT_IRQ)); /* Hypervisor timer. 
*/ qdev_connect_gpio_out(cpudev, 2, qdev_get_gpio_in(gicdev, ppibase + ARCH_TIMER_NS_EL2_IRQ)); sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); sysbus_connect_irq(gicbusdev, i + smp_cpus, qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); } for (i = 0; i < NUM_IRQS; i++) { pic[i] = qdev_get_gpio_in(gicdev, i); } fdt_add_gic_node(vbi); create_v2m(vbi, pic); }"} {"target": 0, "idx": 5573, "func": "static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); uint32_t vmx = kvmppc_get_vmx(); uint32_t dfp = kvmppc_get_dfp(); uint32_t dcache_size = kvmppc_read_int_cpu_dt(\"d-cache-size\"); uint32_t icache_size = kvmppc_read_int_cpu_dt(\"i-cache-size\"); /* Now fix up the class with information we can query from the host */ pcc->pvr = mfpvr(); if (vmx != -1) { /* Only override when we know what the host supports */ alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0); alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1); } if (dfp != -1) { /* Only override when we know what the host supports */ alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp); } if (dcache_size != -1) { pcc->l1_dcache_size = dcache_size; } if (icache_size != -1) { pcc->l1_icache_size = icache_size; } /* Reason: kvmppc_host_cpu_initfn() dies when !kvm_enabled() */ dc->cannot_destroy_with_object_finalize_yet = true; }"} {"target": 1, "idx": 5596, "func": "static float128 addFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM) { int32 aExp, bExp, zExp; uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2; int32 expDiff; aSig1 = extractFloat128Frac1( a ); aSig0 = extractFloat128Frac0( a ); aExp = extractFloat128Exp( a ); bSig1 = extractFloat128Frac1( b ); bSig0 = extractFloat128Frac0( b ); bExp = extractFloat128Exp( b ); expDiff = aExp - bExp; if ( 0 < expDiff ) { if ( aExp == 0x7FFF ) { if ( aSig0 | aSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); return a; } if ( bExp == 0 ) { --expDiff; } else { bSig0 |= LIT64( 0x0001000000000000 ); } shift128ExtraRightJamming( bSig0, bSig1, 0, expDiff, &bSig0, &bSig1, &zSig2 ); zExp = aExp; } else if ( expDiff < 0 ) { if ( bExp == 0x7FFF ) { if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); return packFloat128( zSign, 0x7FFF, 0, 0 ); } if ( aExp == 0 ) { ++expDiff; } else { aSig0 |= LIT64( 0x0001000000000000 ); } shift128ExtraRightJamming( aSig0, aSig1, 0, - expDiff, &aSig0, &aSig1, &zSig2 ); zExp = bExp; } else { if ( aExp == 0x7FFF ) { if ( aSig0 | aSig1 | bSig0 | bSig1 ) { return propagateFloat128NaN( a, b STATUS_VAR ); } return a; } add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 ); if ( aExp == 0 ) { if ( STATUS(flush_to_zero) ) return packFloat128( zSign, 0, 0, 0 ); return packFloat128( zSign, 0, zSig0, zSig1 ); } zSig2 = 0; zSig0 |= LIT64( 0x0002000000000000 ); zExp = aExp; goto shiftRight1; } aSig0 |= LIT64( 0x0001000000000000 ); add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 ); --zExp; if ( zSig0 < LIT64( 0x0002000000000000 ) ) goto roundAndPack; ++zExp; shiftRight1: shift128ExtraRightJamming( zSig0, zSig1, zSig2, 1, &zSig0, &zSig1, &zSig2 ); roundAndPack: return roundAndPackFloat128( zSign, zExp, zSig0, zSig1, zSig2 STATUS_VAR ); }"} {"target": 1, "idx": 5601, "func": "static void test_acpi_one(const char *params) { char *args; uint8_t signature_low; uint8_t signature_high; uint16_t signature; int i; uint32_t off; args = g_strdup_printf(\"-net none -display none %s %s\", params ? 
params : \"\", disk); qtest_start(args); /* Wait at most 1 minute */ #define TEST_DELAY (1 * G_USEC_PER_SEC / 10) #define TEST_CYCLES MAX((60 * G_USEC_PER_SEC / TEST_DELAY), 1) /* Poll until code has run and modified memory. Once it has we know BIOS * initialization is done. TODO: check that IP reached the halt * instruction. */ for (i = 0; i < TEST_CYCLES; ++i) { signature_low = readb(BOOT_SECTOR_ADDRESS + SIGNATURE_OFFSET); signature_high = readb(BOOT_SECTOR_ADDRESS + SIGNATURE_OFFSET + 1); signature = (signature_high << 8) | signature_low; if (signature == SIGNATURE) { break; } g_usleep(TEST_DELAY); } g_assert_cmphex(signature, ==, SIGNATURE); /* OK, now find RSDP */ for (off = 0xf0000; off < 0x100000; off += 0x10) { uint8_t sig[] = \"RSD PTR \"; int i; for (i = 0; i < sizeof sig - 1; ++i) { sig[i] = readb(off + i); } if (!memcmp(sig, \"RSD PTR \", sizeof sig)) { break; } } g_assert_cmphex(off, <, 0x100000); qtest_quit(global_qtest); g_free(args); }"} {"target": 1, "idx": 5611, "func": "static FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx, const int16_t *audio, const int16_t *la, int channel, int prev_type) { int i, j; int br = ctx->avctx->bit_rate / ctx->avctx->channels; int attack_ratio = br <= 16000 ? 18 : 10; Psy3gppContext *pctx = (Psy3gppContext*) ctx->model_priv_data; Psy3gppChannel *pch = &pctx->ch[channel]; uint8_t grouping = 0; FFPsyWindowInfo wi; memset(&wi, 0, sizeof(wi)); if (la) { float s[8], v; int switch_to_eight = 0; float sum = 0.0, sum2 = 0.0; int attack_n = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 128; j++) { v = iir_filter(la[(i*128+j)*ctx->avctx->channels], pch->iir_state); sum += v*v; } s[i] = sum; sum2 += sum; } for (i = 0; i < 8; i++) { if (s[i] > pch->win_energy * attack_ratio) { attack_n = i + 1; switch_to_eight = 1; break; } } pch->win_energy = pch->win_energy*7/8 + sum2/64; wi.window_type[1] = prev_type; switch (prev_type) { case ONLY_LONG_SEQUENCE: wi.window_type[0] = switch_to_eight ? LONG_START_SEQUENCE : ONLY_LONG_SEQUENCE; break; case LONG_START_SEQUENCE: wi.window_type[0] = EIGHT_SHORT_SEQUENCE; grouping = pch->next_grouping; break; case LONG_STOP_SEQUENCE: wi.window_type[0] = ONLY_LONG_SEQUENCE; break; case EIGHT_SHORT_SEQUENCE: wi.window_type[0] = switch_to_eight ? EIGHT_SHORT_SEQUENCE : LONG_STOP_SEQUENCE; grouping = switch_to_eight ? pch->next_grouping : 0; break; } pch->next_grouping = window_grouping[attack_n]; } else { for (i = 0; i < 3; i++) wi.window_type[i] = prev_type; grouping = (prev_type == EIGHT_SHORT_SEQUENCE) ? 
window_grouping[0] : 0; } wi.window_shape = 1; if (wi.window_type[0] != EIGHT_SHORT_SEQUENCE) { wi.num_windows = 1; wi.grouping[0] = 1; } else { int lastgrp = 0; wi.num_windows = 8; for (i = 0; i < 8; i++) { if (!((grouping >> i) & 1)) lastgrp = i; wi.grouping[lastgrp]++; } } return wi; }"} {"target": 0, "idx": 5634, "func": "void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size, ram_addr_t phys_offset) { struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1; uint64_t end; int ret, io_index; end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; start_addr &= TARGET_PAGE_MASK; kphys_mem->phys_addr = start_addr; kphys_mem->size = end - start_addr; kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK; io_index = phys_offset & ~TARGET_PAGE_MASK; switch(io_index) { case IO_MEM_RAM: kphys_mem->io_index = KQEMU_IO_MEM_RAM; break; case IO_MEM_ROM: kphys_mem->io_index = KQEMU_IO_MEM_ROM; break; default: if (qpi_io_memory == io_index) { kphys_mem->io_index = KQEMU_IO_MEM_COMM; } else { kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED; } break; } #ifdef _WIN32 { DWORD temp; ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem, sizeof(*kphys_mem), NULL, 0, &temp, NULL) == TRUE ? 0 : -1; } #else ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem); #endif if (ret < 0) { fprintf(stderr, \"kqemu: KQEMU_SET_PHYS_PAGE error=%d: start_addr=0x%016\" PRIx64 \" size=0x%08lx phys_offset=0x%08lx\\n\", ret, start_addr, (unsigned long)size, (unsigned long)phys_offset); } }"} {"target": 0, "idx": 5638, "func": "static void omap_dpll_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct dpll_ctl_s *s = (struct dpll_ctl_s *) opaque; uint16_t diff; static const int bypass_div[4] = { 1, 2, 4, 4 }; int div, mult; if (size != 2) { return omap_badwidth_write16(opaque, addr, value); } if (addr == 0x00) { /* CTL_REG */ /* See omap_ulpd_pm_write() too */ diff = s->mode & value; s->mode = value & 0x2fff; if (diff & (0x3ff << 2)) { if (value & (1 << 4)) { /* PLL_ENABLE */ div = ((value >> 5) & 3) + 1; /* PLL_DIV */ mult = MIN((value >> 7) & 0x1f, 1); /* PLL_MULT */ } else { div = bypass_div[((value >> 2) & 3)]; /* BYPASS_DIV */ mult = 1; } omap_clk_setrate(s->dpll, div, mult); } /* Enter the desired mode. */ s->mode = (s->mode & 0xfffe) | ((s->mode >> 4) & 1); /* Act as if the lock is restored. */ s->mode |= 2; } else { OMAP_BAD_REG(addr); } }"} {"target": 0, "idx": 5640, "func": "static inline void gen_evfsneg(DisasContext *ctx) { if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_APU); return; } #if defined(TARGET_PPC64) tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x8000000080000000LL); #else tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], 0x80000000); #endif }"} {"target": 0, "idx": 5648, "func": "static void qmp_input_start_list(Visitor *v, const char *name, Error **errp) { QmpInputVisitor *qiv = to_qiv(v); QObject *qobj = qmp_input_get_object(qiv, name, true); if (!qobj || qobject_type(qobj) != QTYPE_QLIST) { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? 
name : \"null\", \"list\"); return; } qmp_input_push(qiv, qobj, errp); }"} {"target": 0, "idx": 5660, "func": "static void nvic_sysreg_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { uint32_t offset = addr; if (size == 4) { nvic_writel(opaque, offset, value); return; } hw_error(\"NVIC: Bad write of size %d at offset 0x%x\\n\", size, offset); }"} {"target": 0, "idx": 5666, "func": "static void pci_init_bus_master(PCIDevice *pci_dev) { AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev); memory_region_init_alias(&pci_dev->bus_master_enable_region, OBJECT(pci_dev), \"bus master\", dma_as->root, 0, memory_region_size(dma_as->root)); memory_region_set_enabled(&pci_dev->bus_master_enable_region, false); address_space_init(&pci_dev->bus_master_as, &pci_dev->bus_master_enable_region, pci_dev->name); }"} {"target": 0, "idx": 5674, "func": "void aio_set_event_notifier_poll(AioContext *ctx, EventNotifier *notifier, EventNotifierHandler *io_poll_begin, EventNotifierHandler *io_poll_end) { aio_set_fd_poll(ctx, event_notifier_get_fd(notifier), (IOHandler *)io_poll_begin, (IOHandler *)io_poll_end); }"} {"target": 0, "idx": 5682, "func": "static uint64_t m5208_sys_read(void *opaque, target_phys_addr_t addr, unsigned size) { switch (addr) { case 0x110: /* SDCS0 */ { int n; for (n = 0; n < 32; n++) { if (ram_size < (2u << n)) break; } return (n - 1) | 0x40000000; } case 0x114: /* SDCS1 */ return 0; default: hw_error(\"m5208_sys_read: Bad offset 0x%x\\n\", (int)addr); return 0; } }"} {"target": 1, "idx": 5713, "func": "static int vhost_user_set_vring_addr(struct vhost_dev *dev, struct vhost_vring_addr *addr) { VhostUserMsg msg = { .request = VHOST_USER_SET_VRING_ADDR, .flags = VHOST_USER_VERSION, .payload.addr = *addr, .size = sizeof(msg.payload.addr), }; vhost_user_write(dev, &msg, NULL, 0); return 0; }"} {"target": 1, "idx": 5715, "func": "static int amr_read_packet(AVFormatContext *s, AVPacket *pkt) { AVCodecContext *enc = s->streams[0]->codec; int read, size, toc, mode; if (url_feof(&s->pb)) { return AVERROR_IO; } //FIXME this is wrong, this should rather be in a AVParset toc=get_byte(&s->pb); mode = (toc >> 3) & 0x0F; if (enc->codec_id == CODEC_ID_AMR_NB) { static const uint8_t packed_size[16] = {12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0}; size=packed_size[mode]+1; } else if(enc->codec_id == CODEC_ID_AMR_WB) { static uint8_t packed_size[16] = {18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1}; size=packed_size[mode]; } else { assert(0); } if ( (size==0) || av_new_packet(pkt, size)) { return AVERROR_IO; } pkt->stream_index = 0; pkt->pos= url_ftell(&s->pb); pkt->data[0]=toc; pkt->duration= enc->codec_id == CODEC_ID_AMR_NB ? 
160 : 320; read = get_buffer(&s->pb, pkt->data+1, size-1); if (read != size-1) { av_free_packet(pkt); return AVERROR_IO; } return 0; }"} {"target": 1, "idx": 5729, "func": "static void park_frame_worker_threads(FrameThreadContext *fctx, int thread_count) { int i; for (i = 0; i < thread_count; i++) { PerThreadContext *p = &fctx->threads[i]; if (p->state != STATE_INPUT_READY) { pthread_mutex_lock(&p->progress_mutex); while (p->state != STATE_INPUT_READY) pthread_cond_wait(&p->output_cond, &p->progress_mutex); pthread_mutex_unlock(&p->progress_mutex); } } }"} {"target": 1, "idx": 5741, "func": "static int nut_read_packet(AVFormatContext *s, AVPacket *pkt) { NUTContext *nut = s->priv_data; ByteIOContext *bc = &s->pb; int i, frame_code=0, ret, skip; int64_t ts, back_ptr; for(;;){ int64_t pos= url_ftell(bc); uint64_t tmp= nut->next_startcode; nut->next_startcode=0; if (url_feof(bc)) return -1; if(tmp){ pos-=8; }else{ frame_code = get_byte(bc); if(frame_code == 'N'){ tmp= frame_code; for(i=1; i<8; i++) tmp = (tmp<<8) + get_byte(bc); } } switch(tmp){ case MAIN_STARTCODE: case STREAM_STARTCODE: case INDEX_STARTCODE: skip= get_packetheader(nut, bc, 0); url_fseek(bc, skip, SEEK_CUR); break; case INFO_STARTCODE: if(decode_info_header(nut)<0) goto resync; break; case SYNCPOINT_STARTCODE: if(decode_syncpoint(nut, &ts, &back_ptr)<0) goto resync; frame_code = get_byte(bc); case 0: ret= decode_frame(nut, pkt, frame_code); if(ret==0) return 0; else if(ret==1) //ok but discard packet break; default: resync: av_log(s, AV_LOG_DEBUG, \"syncing from %\"PRId64\"\\n\", pos); tmp= find_any_startcode(bc, nut->last_syncpoint_pos+1); if(tmp==0) return -1; av_log(s, AV_LOG_DEBUG, \"sync\\n\"); nut->next_startcode= tmp; } } }"} {"target": 1, "idx": 5757, "func": "void rgb8tobgr8(const uint8_t *src, uint8_t *dst, long src_size) { long i; long num_pixels = src_size; for(i=0; i<num_pixels; i++) { unsigned b,g,r; register uint8_t rgb; rgb = src[i]; r = (rgb&0x07); g = (rgb&0x38)>>3; b = (rgb&0xC0)>>6; dst[i] = ((b<<1)&0x07) | ((g&0x07)<<3) | ((r&0x03)<<6); } }"} {"target": 0, "idx": 5761, "func": "static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple){ MpegEncContext * const s = &h->s; const int mb_x= s->mb_x; const int mb_y= s->mb_y; const int mb_xy= h->mb_xy; const int mb_type= s->current_picture.mb_type[mb_xy]; uint8_t *dest_y, *dest_cb, *dest_cr; int linesize, uvlinesize /*dct_offset*/; int i; int *block_offset = &h->block_offset[0]; const int transform_bypass = !simple && (s->qscale == 0 && h->sps.transform_bypass); const int is_h264 = simple || s->codec_id == CODEC_ID_H264; void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride); void (*idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride); dest_y = s->current_picture.data[0] + (mb_x + mb_y * s->linesize ) * 16; dest_cb = s->current_picture.data[1] + (mb_x + mb_y * s->uvlinesize) * 8; dest_cr = s->current_picture.data[2] + (mb_x + mb_y * s->uvlinesize) * 8; s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + 64, s->linesize, 4); s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + 64, dest_cr - dest_cb, 2); if (!simple && MB_FIELD) { linesize = h->mb_linesize = s->linesize * 2; uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2; block_offset = &h->block_offset[24]; if(mb_y&1){ //FIXME move out of this function? 
dest_y -= s->linesize*15; dest_cb-= s->uvlinesize*7; dest_cr-= s->uvlinesize*7; } if(FRAME_MBAFF) { int list; for(list=0; listlist_count; list++){ if(!USES_LIST(mb_type, list)) continue; if(IS_16X16(mb_type)){ int8_t *ref = &h->ref_cache[list][scan8[0]]; fill_rectangle(ref, 4, 4, 8, (16+*ref)^(s->mb_y&1), 1); }else{ for(i=0; i<16; i+=4){ int ref = h->ref_cache[list][scan8[i]]; if(ref >= 0) fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2, 8, (16+ref)^(s->mb_y&1), 1); } } } } } else { linesize = h->mb_linesize = s->linesize; uvlinesize = h->mb_uvlinesize = s->uvlinesize; // dct_offset = s->linesize * 16; } if (!simple && IS_INTRA_PCM(mb_type)) { for (i=0; i<16; i++) { memcpy(dest_y + i* linesize, h->mb + i*8, 16); } for (i=0; i<8; i++) { memcpy(dest_cb+ i*uvlinesize, h->mb + 128 + i*4, 8); memcpy(dest_cr+ i*uvlinesize, h->mb + 160 + i*4, 8); } } else { if(IS_INTRA(mb_type)){ if(h->deblocking_filter) xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1, simple); if(simple || !ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ h->hpc.pred8x8[ h->chroma_pred_mode ](dest_cb, uvlinesize); h->hpc.pred8x8[ h->chroma_pred_mode ](dest_cr, uvlinesize); } if(IS_INTRA4x4(mb_type)){ if(simple || !s->encoding){ if(IS_8x8DCT(mb_type)){ if(transform_bypass){ idct_dc_add = idct_add = s->dsp.add_pixels8; }else if(IS_8x8DCT(mb_type)){ idct_dc_add = s->dsp.h264_idct8_dc_add; idct_add = s->dsp.h264_idct8_add; } for(i=0; i<16; i+=4){ uint8_t * const ptr= dest_y + block_offset[i]; const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ]; if(transform_bypass && h->sps.profile_idc==244 && dir<=1){ h->hpc.pred8x8l_add[dir](ptr, h->mb + i*16, linesize); }else{ const int nnz = h->non_zero_count_cache[ scan8[i] ]; h->hpc.pred8x8l[ dir ](ptr, (h->topleft_samples_available<topright_samples_available<mb[i*16]) idct_dc_add(ptr, h->mb + i*16, linesize); else idct_add (ptr, h->mb + i*16, linesize); } } } }else{ if(transform_bypass){ idct_dc_add = idct_add = s->dsp.add_pixels4; }else{ idct_dc_add = s->dsp.h264_idct_dc_add; idct_add = s->dsp.h264_idct_add; } for(i=0; i<16; i++){ uint8_t * const ptr= dest_y + block_offset[i]; const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ]; if(transform_bypass && h->sps.profile_idc==244 && dir<=1){ h->hpc.pred4x4_add[dir](ptr, h->mb + i*16, linesize); }else{ uint8_t *topright; int nnz, tr; if(dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED){ const int topright_avail= (h->topright_samples_available<hpc.pred4x4[ dir ](ptr, topright, linesize); nnz = h->non_zero_count_cache[ scan8[i] ]; if(nnz){ if(is_h264){ if(nnz == 1 && h->mb[i*16]) idct_dc_add(ptr, h->mb + i*16, linesize); else idct_add (ptr, h->mb + i*16, linesize); }else svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, 0); } } } } } }else{ h->hpc.pred16x16[ h->intra16x16_pred_mode ](dest_y , linesize); if(is_h264){ if(!transform_bypass) h264_luma_dc_dequant_idct_c(h->mb, s->qscale, h->dequant4_coeff[0][s->qscale][0]); }else svq3_luma_dc_dequant_idct_c(h->mb, s->qscale); } if(h->deblocking_filter) xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0, simple); }else if(is_h264){ hl_motion(h, dest_y, dest_cb, dest_cr, s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab, s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab, s->dsp.weight_h264_pixels_tab, s->dsp.biweight_h264_pixels_tab); } if(!IS_INTRA4x4(mb_type)){ if(is_h264){ if(IS_INTRA16x16(mb_type)){ if(transform_bypass){ if(h->sps.profile_idc==244 && (h->intra16x16_pred_mode==VERT_PRED8x8 || h->intra16x16_pred_mode==HOR_PRED8x8)){ 
h->hpc.pred16x16_add[h->intra16x16_pred_mode](dest_y, block_offset, h->mb, linesize); }else{ for(i=0; i<16; i++){ if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]) s->dsp.add_pixels4(dest_y + block_offset[i], h->mb + i*16, linesize); } } }else{ s->dsp.h264_idct_add16intra(dest_y, block_offset, h->mb, linesize, h->non_zero_count_cache); } }else if(h->cbp&15){ if(transform_bypass){ const int di = IS_8x8DCT(mb_type) ? 4 : 1; idct_add= IS_8x8DCT(mb_type) ? s->dsp.add_pixels8 : s->dsp.add_pixels4; for(i=0; i<16; i+=di){ if(h->non_zero_count_cache[ scan8[i] ]){ idct_add(dest_y + block_offset[i], h->mb + i*16, linesize); } } }else{ if(IS_8x8DCT(mb_type)){ s->dsp.h264_idct8_add4(dest_y, block_offset, h->mb, linesize, h->non_zero_count_cache); }else{ s->dsp.h264_idct_add16(dest_y, block_offset, h->mb, linesize, h->non_zero_count_cache); } } } }else{ for(i=0; i<16; i++){ if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below uint8_t * const ptr= dest_y + block_offset[i]; svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, IS_INTRA(mb_type) ? 1 : 0); } } } } if((simple || !ENABLE_GRAY || !(s->flags&CODEC_FLAG_GRAY)) && (h->cbp&0x30)){ uint8_t *dest[2] = {dest_cb, dest_cr}; if(transform_bypass){ idct_add = idct_dc_add = s->dsp.add_pixels4; }else{ idct_add = s->dsp.h264_idct_add; idct_dc_add = s->dsp.h264_idct_dc_add; chroma_dc_dequant_idct_c(h->mb + 16*16, h->chroma_qp[0], h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][h->chroma_qp[0]][0]); chroma_dc_dequant_idct_c(h->mb + 16*16+4*16, h->chroma_qp[1], h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][h->chroma_qp[1]][0]); } if(is_h264){ if(transform_bypass && IS_INTRA(mb_type) && h->sps.profile_idc==244 && (h->chroma_pred_mode==VERT_PRED8x8 || h->chroma_pred_mode==HOR_PRED8x8)){ h->hpc.pred8x8_add[h->chroma_pred_mode](dest[0], block_offset + 16, h->mb + 16*16, uvlinesize); h->hpc.pred8x8_add[h->chroma_pred_mode](dest[1], block_offset + 20, h->mb + 20*16, uvlinesize); }else{ for(i=16; i<16+8; i++){ if(h->non_zero_count_cache[ scan8[i] ]) idct_add (dest[(i&4)>>2] + block_offset[i], h->mb + i*16, uvlinesize); else if(h->mb[i*16]) idct_dc_add(dest[(i&4)>>2] + block_offset[i], h->mb + i*16, uvlinesize); } } }else{ for(i=16; i<16+8; i++){ if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ uint8_t * const ptr= dest[(i&4)>>2] + block_offset[i]; svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2); } } } } } if(h->deblocking_filter) { backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, simple); fill_caches(h, mb_type, 1); //FIXME don't fill stuff which isn't used by filter_mb h->chroma_qp[0] = get_chroma_qp(h, 0, s->current_picture.qscale_table[mb_xy]); h->chroma_qp[1] = get_chroma_qp(h, 1, s->current_picture.qscale_table[mb_xy]); if (!simple && FRAME_MBAFF) { filter_mb (h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } else { filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } } }"} {"target": 1, "idx": 5768, "func": "static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, void *opaque) { const VMStateSubsection *sub = vmsd->subsections; while (sub && sub->needed) { if (sub->needed(opaque)) { const VMStateDescription *vmsd = sub->vmsd; uint8_t len; qemu_put_byte(f, QEMU_VM_SUBSECTION); len = strlen(vmsd->name); qemu_put_byte(f, len); qemu_put_buffer(f, (uint8_t *)vmsd->name, len); qemu_put_be32(f, vmsd->version_id); assert(!vmsd->subsections); vmstate_save_state(f, vmsd, opaque); } sub++; } }"} {"target": 
1, "idx": 5790, "func": "static void sbr_hf_g_filt_c(int (*Y)[2], const int (*X_high)[40][2], const SoftFloat *g_filt, int m_max, intptr_t ixh) { int m; int64_t accu; for (m = 0; m < m_max; m++) { int64_t r = 1LL << (22-g_filt[m].exp); accu = (int64_t)X_high[m][ixh][0] * ((g_filt[m].mant + 0x40)>>7); Y[m][0] = (int)((accu + r) >> (23-g_filt[m].exp)); accu = (int64_t)X_high[m][ixh][1] * ((g_filt[m].mant + 0x40)>>7); Y[m][1] = (int)((accu + r) >> (23-g_filt[m].exp)); } }"} {"target": 0, "idx": 5815, "func": "static int output_frame(AVFilterLink *outlink, int need_request) { AVFilterContext *ctx = outlink->src; MixContext *s = ctx->priv; AVFrame *out_buf, *in_buf; int nb_samples, ns, ret, i; ret = calc_active_inputs(s); if (ret < 0) return ret; if (s->input_state[0] & INPUT_ON) { /* first input live: use the corresponding frame size */ nb_samples = frame_list_next_frame_size(s->frame_list); for (i = 1; i < s->nb_inputs; i++) { if (s->input_state[i] & INPUT_ON) { ns = av_audio_fifo_size(s->fifos[i]); if (ns < nb_samples) { if (!(s->input_state[i] & INPUT_EOF)) /* unclosed input with not enough samples */ return need_request ? ff_request_frame(ctx->inputs[i]) : 0; /* closed input to drain */ nb_samples = ns; } } } } else { /* first input closed: use the available samples */ nb_samples = INT_MAX; for (i = 1; i < s->nb_inputs; i++) { if (s->input_state[i] & INPUT_ON) { ns = av_audio_fifo_size(s->fifos[i]); nb_samples = FFMIN(nb_samples, ns); } } if (nb_samples == INT_MAX) return AVERROR_EOF; } s->next_pts = frame_list_next_pts(s->frame_list); frame_list_remove_samples(s->frame_list, nb_samples); calculate_scales(s, nb_samples); if (nb_samples == 0) return 0; out_buf = ff_get_audio_buffer(outlink, nb_samples); if (!out_buf) return AVERROR(ENOMEM); in_buf = ff_get_audio_buffer(outlink, nb_samples); if (!in_buf) { av_frame_free(&out_buf); return AVERROR(ENOMEM); } for (i = 0; i < s->nb_inputs; i++) { if (s->input_state[i] & INPUT_ON) { int planes, plane_size, p; av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data, nb_samples); planes = s->planar ? s->nb_channels : 1; plane_size = nb_samples * (s->planar ? 1 : s->nb_channels); plane_size = FFALIGN(plane_size, 16); if (out_buf->format == AV_SAMPLE_FMT_FLT || out_buf->format == AV_SAMPLE_FMT_FLTP) { for (p = 0; p < planes; p++) { s->fdsp->vector_fmac_scalar((float *)out_buf->extended_data[p], (float *) in_buf->extended_data[p], s->input_scale[i], plane_size); } } else { for (p = 0; p < planes; p++) { s->fdsp->vector_dmac_scalar((double *)out_buf->extended_data[p], (double *) in_buf->extended_data[p], s->input_scale[i], plane_size); } } } } av_frame_free(&in_buf); out_buf->pts = s->next_pts; if (s->next_pts != AV_NOPTS_VALUE) s->next_pts += nb_samples; return ff_filter_frame(outlink, out_buf); }"} {"target": 0, "idx": 5816, "func": "static const char *token_get_value(QObject *obj) { return qdict_get_str(qobject_to_qdict(obj), \"token\"); }"} {"target": 0, "idx": 5825, "func": "static void channel_store_d(struct fs_dma_ctrl *ctrl, int c) { target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA); /* Encode and store. FIXME: handle endianness. 
*/ D(printf(\"%s ch=%d addr=\" TARGET_FMT_plx \"\\n\", __func__, c, addr)); cpu_physical_memory_write (addr, (void *) &ctrl->channels[c].current_d, sizeof ctrl->channels[c].current_d); }"} {"target": 0, "idx": 5827, "func": "static void omap_ulpd_pm_init(MemoryRegion *system_memory, target_phys_addr_t base, struct omap_mpu_state_s *mpu) { memory_region_init_io(&mpu->ulpd_pm_iomem, &omap_ulpd_pm_ops, mpu, \"omap-ulpd-pm\", 0x800); memory_region_add_subregion(system_memory, base, &mpu->ulpd_pm_iomem); omap_ulpd_pm_reset(mpu); }"} {"target": 0, "idx": 5829, "func": "static void ahci_reset_port(AHCIState *s, int port) { AHCIDevice *d = &s->dev[port]; AHCIPortRegs *pr = &d->port_regs; IDEState *ide_state = &d->port.ifs[0]; int i; DPRINTF(port, \"reset port\\n\"); ide_bus_reset(&d->port); ide_state->ncq_queues = AHCI_MAX_CMDS; pr->scr_stat = 0; pr->scr_err = 0; pr->scr_act = 0; pr->tfdata = 0x7F; pr->sig = 0xFFFFFFFF; d->busy_slot = -1; d->init_d2h_sent = false; ide_state = &s->dev[port].port.ifs[0]; if (!ide_state->bs) { return; } /* reset ncq queue */ for (i = 0; i < AHCI_MAX_CMDS; i++) { NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i]; if (!ncq_tfs->used) { continue; } if (ncq_tfs->aiocb) { bdrv_aio_cancel(ncq_tfs->aiocb); ncq_tfs->aiocb = NULL; } /* Maybe we just finished the request thanks to bdrv_aio_cancel() */ if (!ncq_tfs->used) { continue; } qemu_sglist_destroy(&ncq_tfs->sglist); ncq_tfs->used = 0; } s->dev[port].port_state = STATE_RUN; if (!ide_state->bs) { pr->sig = 0; ide_state->status = SEEK_STAT | WRERR_STAT; } else if (ide_state->drive_kind == IDE_CD) { pr->sig = SATA_SIGNATURE_CDROM; ide_state->lcyl = 0x14; ide_state->hcyl = 0xeb; DPRINTF(port, \"set lcyl = %d\\n\", ide_state->lcyl); ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT; } else { pr->sig = SATA_SIGNATURE_DISK; ide_state->status = SEEK_STAT | WRERR_STAT; } ide_state->error = 1; ahci_init_d2h(d); }"} {"target": 0, "idx": 5831, "func": "int usb_device_detach(USBDevice *dev) { USBBus *bus = usb_bus_from_device(dev); USBPort *port; if (!dev->attached) { error_report(\"Error: tried to detach unattached usb device %s\\n\", dev->product_desc); return -1; } dev->attached--; QTAILQ_FOREACH(port, &bus->used, next) { if (port->dev == dev) break; } assert(port != NULL); QTAILQ_REMOVE(&bus->used, port, next); bus->nused--; usb_attach(port, NULL); QTAILQ_INSERT_TAIL(&bus->free, port, next); bus->nfree++; return 0; }"} {"target": 0, "idx": 5840, "func": "make_setup_request (AVFormatContext *s, const char *host, int port, int protocol) { RTSPState *rt = s->priv_data; int j, i, err; RTSPStream *rtsp_st; AVStream *st; RTSPHeader reply1, *reply = &reply1; char cmd[2048]; /* for each stream, make the setup request */ /* XXX: we assume the same server is used for the control of each RTSP stream */ for(j = RTSP_RTP_PORT_MIN, i = 0; i < rt->nb_rtsp_streams; ++i) { char transport[2048]; rtsp_st = rt->rtsp_streams[i]; /* compute available transports */ transport[0] = '\\0'; /* RTP/UDP */ if (protocol == RTSP_PROTOCOL_RTP_UDP) { char buf[256]; /* first try in specified port range */ if (RTSP_RTP_PORT_MIN != 0) { while(j <= RTSP_RTP_PORT_MAX) { snprintf(buf, sizeof(buf), \"rtp://%s?localport=%d\", host, j); j += 2; /* we will use two port by rtp stream (rtp and rtcp) */ if (url_open(&rtsp_st->rtp_handle, buf, URL_RDWR) == 0) { goto rtp_opened; } } } /* then try on any port ** if (url_open(&rtsp_st->rtp_handle, \"rtp://\", URL_RDONLY) < 0) { ** err = AVERROR_INVALIDDATA; ** goto fail; ** } */ rtp_opened: port = 
rtp_get_local_port(rtsp_st->rtp_handle); if (transport[0] != '\\0') av_strlcat(transport, \",\", sizeof(transport)); snprintf(transport + strlen(transport), sizeof(transport) - strlen(transport) - 1, \"RTP/AVP/UDP;unicast;client_port=%d-%d\", port, port + 1); } /* RTP/TCP */ else if (protocol == RTSP_PROTOCOL_RTP_TCP) { if (transport[0] != '\\0') av_strlcat(transport, \",\", sizeof(transport)); snprintf(transport + strlen(transport), sizeof(transport) - strlen(transport) - 1, \"RTP/AVP/TCP\"); } else if (protocol == RTSP_PROTOCOL_RTP_UDP_MULTICAST) { if (transport[0] != '\\0') av_strlcat(transport, \",\", sizeof(transport)); snprintf(transport + strlen(transport), sizeof(transport) - strlen(transport) - 1, \"RTP/AVP/UDP;multicast\"); } snprintf(cmd, sizeof(cmd), \"SETUP %s RTSP/1.0\\r\\n\" \"Transport: %s\\r\\n\", rtsp_st->control_url, transport); rtsp_send_cmd(s, cmd, reply, NULL); if (reply->status_code == 461 /* Unsupported protocol */ && i == 0) { err = 1; goto fail; } else if (reply->status_code != RTSP_STATUS_OK || reply->nb_transports != 1) { err = AVERROR_INVALIDDATA; goto fail; } /* XXX: same protocol for all streams is required */ if (i > 0) { if (reply->transports[0].protocol != rt->protocol) { err = AVERROR_INVALIDDATA; goto fail; } } else { rt->protocol = reply->transports[0].protocol; } /* close RTP connection if not choosen */ if (reply->transports[0].protocol != RTSP_PROTOCOL_RTP_UDP && (protocol == RTSP_PROTOCOL_RTP_UDP)) { url_close(rtsp_st->rtp_handle); rtsp_st->rtp_handle = NULL; } switch(reply->transports[0].protocol) { case RTSP_PROTOCOL_RTP_TCP: rtsp_st->interleaved_min = reply->transports[0].interleaved_min; rtsp_st->interleaved_max = reply->transports[0].interleaved_max; break; case RTSP_PROTOCOL_RTP_UDP: { char url[1024]; /* XXX: also use address if specified */ snprintf(url, sizeof(url), \"rtp://%s:%d\", host, reply->transports[0].server_port_min); if (rtp_set_remote_url(rtsp_st->rtp_handle, url) < 0) { err = AVERROR_INVALIDDATA; goto fail; } } break; case RTSP_PROTOCOL_RTP_UDP_MULTICAST: { char url[1024]; struct in_addr in; in.s_addr = htonl(reply->transports[0].destination); snprintf(url, sizeof(url), \"rtp://%s:%d?ttl=%d\", inet_ntoa(in), reply->transports[0].port_min, reply->transports[0].ttl); if (url_open(&rtsp_st->rtp_handle, url, URL_RDWR) < 0) { err = AVERROR_INVALIDDATA; goto fail; } } break; } /* open the RTP context */ st = NULL; if (rtsp_st->stream_index >= 0) st = s->streams[rtsp_st->stream_index]; if (!st) s->ctx_flags |= AVFMTCTX_NOHEADER; rtsp_st->rtp_ctx = rtp_parse_open(s, st, rtsp_st->rtp_handle, rtsp_st->sdp_payload_type, &rtsp_st->rtp_payload_data); if (!rtsp_st->rtp_ctx) { err = AVERROR(ENOMEM); goto fail; } else { if(rtsp_st->dynamic_handler) { rtsp_st->rtp_ctx->dynamic_protocol_context= rtsp_st->dynamic_protocol_context; rtsp_st->rtp_ctx->parse_packet= rtsp_st->dynamic_handler->parse_packet; } } } return 0; fail: for (i=0; inb_rtsp_streams; i++) { if (rt->rtsp_streams[i]->rtp_handle) { url_close(rt->rtsp_streams[i]->rtp_handle); rt->rtsp_streams[i]->rtp_handle = NULL; } } return err; }"} {"target": 1, "idx": 5847, "func": "static int add_metadata(int count, int type, const char *name, const char *sep, TiffContext *s) { switch(type) { case TIFF_DOUBLE: return add_doubles_metadata(count, name, sep, s); case TIFF_SHORT : return add_shorts_metadata(count, name, sep, s); case TIFF_STRING: return add_string_metadata(count, name, s); default : return AVERROR_INVALIDDATA; }; }"} {"target": 1, "idx": 5852, "func": "e1000_receive(NetClientState 
*nc, const uint8_t *buf, size_t size) { E1000State *s = qemu_get_nic_opaque(nc); struct e1000_rx_desc desc; dma_addr_t base; unsigned int n, rdt; uint32_t rdh_start; uint16_t vlan_special = 0; uint8_t vlan_status = 0, vlan_offset = 0; uint8_t min_buf[MIN_BUF_SIZE]; size_t desc_offset; size_t desc_size; size_t total_size; if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) return -1; /* Pad to minimum Ethernet frame length */ if (size < sizeof(min_buf)) { memcpy(min_buf, buf, size); memset(&min_buf[size], 0, sizeof(min_buf) - size); buf = min_buf; size = sizeof(min_buf); } /* Discard oversized packets if !LPE and !SBP. */ if ((size > MAXIMUM_ETHERNET_LPE_SIZE || (size > MAXIMUM_ETHERNET_VLAN_SIZE && !(s->mac_reg[RCTL] & E1000_RCTL_LPE))) && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) { return size; } if (!receive_filter(s, buf, size)) return size; if (vlan_enabled(s) && is_vlan_packet(s, buf)) { vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14))); memmove((uint8_t *)buf + 4, buf, 12); vlan_status = E1000_RXD_STAT_VP; vlan_offset = 4; size -= 4; } rdh_start = s->mac_reg[RDH]; desc_offset = 0; total_size = size + fcs_len(s); if (!e1000_has_rxbufs(s, total_size)) { set_ics(s, 0, E1000_ICS_RXO); return -1; } do { desc_size = total_size - desc_offset; if (desc_size > s->rxbuf_size) { desc_size = s->rxbuf_size; } base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH]; pci_dma_read(&s->dev, base, &desc, sizeof(desc)); desc.special = vlan_special; desc.status |= (vlan_status | E1000_RXD_STAT_DD); if (desc.buffer_addr) { if (desc_offset < size) { size_t copy_size = size - desc_offset; if (copy_size > s->rxbuf_size) { copy_size = s->rxbuf_size; } pci_dma_write(&s->dev, le64_to_cpu(desc.buffer_addr), buf + desc_offset + vlan_offset, copy_size); } desc_offset += desc_size; desc.length = cpu_to_le16(desc_size); if (desc_offset >= total_size) { desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM; } else { /* Guest zeroing out status is not a hardware requirement. Clear EOP in case guest didn't do it. */ desc.status &= ~E1000_RXD_STAT_EOP; } } else { // as per intel docs; skip descriptors with null buf addr DBGOUT(RX, \"Null RX descriptor!!\\n\"); } pci_dma_write(&s->dev, base, &desc, sizeof(desc)); if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN]) s->mac_reg[RDH] = 0; /* see comment in start_xmit; same here */ if (s->mac_reg[RDH] == rdh_start) { DBGOUT(RXERR, \"RDH wraparound @%x, RDT %x, RDLEN %x\\n\", rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]); set_ics(s, 0, E1000_ICS_RXO); return -1; } } while (desc_offset < total_size); s->mac_reg[GPRC]++; s->mac_reg[TPR]++; /* TOR - Total Octets Received: * This register includes bytes received in a packet from the field through the field, inclusively. */ n = s->mac_reg[TORL] + size + /* Always include FCS length. 
*/ 4; if (n < s->mac_reg[TORL]) s->mac_reg[TORH]++; s->mac_reg[TORL] = n; n = E1000_ICS_RXT0; if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH]) rdt += s->mac_reg[RDLEN] / sizeof(desc); if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >> s->rxbuf_min_shift) n |= E1000_ICS_RXDMT0; set_ics(s, 0, n); return size; }"} {"target": 0, "idx": 5860, "func": "real_parse_asm_rule(AVStream *st, const char *p, const char *end) { do { /* can be either averagebandwidth= or AverageBandwidth= */ #if AV_HAVE_INCOMPATIBLE_LIBAV_ABI if (sscanf(p, \" %*1[Aa]verage%*1[Bb]andwidth=%d\", &st->codec->bit_rate) == 1) #else if (sscanf(p, \" %*1[Aa]verage%*1[Bb]andwidth=%\"SCNd64, &st->codec->bit_rate) == 1) #endif break; if (!(p = strchr(p, ',')) || p > end) p = end; p++; } while (p < end); }"} {"target": 1, "idx": 5867, "func": "int select_watchdog(const char *p) { WatchdogTimerModel *model; if (watchdog) { fprintf(stderr, \"qemu: only one watchdog option may be given\\n\"); return 1; } /* -watchdog ? lists available devices and exits cleanly. */ if (strcmp(p, \"?\") == 0) { LIST_FOREACH(model, &watchdog_list, entry) { fprintf(stderr, \"\\t%s\\t%s\\n\", model->wdt_name, model->wdt_description); } return 2; } LIST_FOREACH(model, &watchdog_list, entry) { if (strcasecmp(model->wdt_name, p) == 0) { watchdog = model; return 0; } } fprintf(stderr, \"Unknown -watchdog device. Supported devices are:\\n\"); LIST_FOREACH(model, &watchdog_list, entry) { fprintf(stderr, \"\\t%s\\t%s\\n\", model->wdt_name, model->wdt_description); } return 1; }"} {"target": 1, "idx": 5869, "func": "static int vp3_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; Vp3DecodeContext *s = avctx->priv_data; GetBitContext gb; static int counter = 0; int i; init_get_bits(&gb, buf, buf_size * 8); if (s->theora && get_bits1(&gb)) { av_log(avctx, AV_LOG_ERROR, \"Header packet passed to frame decoder, skipping\\n\"); return -1; } s->keyframe = !get_bits1(&gb); if (!s->theora) skip_bits(&gb, 1); for (i = 0; i < 3; i++) s->last_qps[i] = s->qps[i]; s->nqps=0; do{ s->qps[s->nqps++]= get_bits(&gb, 6); } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb)); for (i = s->nqps; i < 3; i++) s->qps[i] = -1; if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, \" VP3 %sframe #%d: Q index = %d\\n\", s->keyframe?\"key\":\"\", counter, s->qps[0]); counter++; if (s->qps[0] != s->last_qps[0]) init_loop_filter(s); for (i = 0; i < s->nqps; i++) // reinit all dequantizers if the first one changed, because // the DC of the first quantizer must be used for all matrices if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0]) init_dequantizer(s, i); if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe) return buf_size; s->current_frame.reference = 3; if (avctx->get_buffer(avctx, &s->current_frame) < 0) { av_log(s->avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return -1; } if (s->keyframe) { if (!s->theora) { skip_bits(&gb, 4); /* width code */ skip_bits(&gb, 4); /* height code */ if (s->version) { s->version = get_bits(&gb, 5); if (counter == 1) av_log(s->avctx, AV_LOG_DEBUG, \"VP version: %d\\n\", s->version); } } if (s->version || s->theora) { if (get_bits1(&gb)) av_log(s->avctx, AV_LOG_ERROR, \"Warning, unsupported keyframe coding type?!\\n\"); skip_bits(&gb, 2); /* reserved? 
*/ } } else { if (!s->golden_frame.data[0]) { av_log(s->avctx, AV_LOG_ERROR, \"vp3: first frame not a keyframe\\n\"); avctx->release_buffer(avctx, &s->current_frame); return -1; } } s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame s->current_frame.qstride= 0; init_frame(s, &gb); if (unpack_superblocks(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, \"error in unpack_superblocks\\n\"); return -1; } if (unpack_modes(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, \"error in unpack_modes\\n\"); return -1; } if (unpack_vectors(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, \"error in unpack_vectors\\n\"); return -1; } if (unpack_block_qpis(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, \"error in unpack_block_qpis\\n\"); return -1; } if (unpack_dct_coeffs(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, \"error in unpack_dct_coeffs\\n\"); return -1; } for (i = 0; i < 3; i++) { if (s->flipped_image) s->data_offset[i] = 0; else s->data_offset[i] = ((s->height>>!!i)-1) * s->current_frame.linesize[i]; } s->last_slice_end = 0; for (i = 0; i < s->c_superblock_height; i++) render_slice(s, i); // filter the last row for (i = 0; i < 3; i++) { int row = (s->height >> (3+!!i)) - 1; apply_loop_filter(s, i, row, row+1); } vp3_draw_horiz_band(s, s->height); *data_size=sizeof(AVFrame); *(AVFrame*)data= s->current_frame; /* release the last frame, if it is allocated and if it is not the * golden frame */ if ((s->last_frame.data[0]) && (s->last_frame.data[0] != s->golden_frame.data[0])) avctx->release_buffer(avctx, &s->last_frame); /* shuffle frames (last = current) */ s->last_frame= s->current_frame; if (s->keyframe) { if (s->golden_frame.data[0]) avctx->release_buffer(avctx, &s->golden_frame); s->golden_frame = s->current_frame; } s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */ return buf_size; }"} {"target": 0, "idx": 5874, "func": "static void mainstone_common_init(MemoryRegion *address_space_mem, ram_addr_t ram_size, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, enum mainstone_model_e model, int arm_id) { uint32_t sector_len = 256 * 1024; target_phys_addr_t mainstone_flash_base[] = { MST_FLASH_0, MST_FLASH_1 }; PXA2xxState *mpu; DeviceState *mst_irq; DriveInfo *dinfo; int i; int be; MemoryRegion *rom = g_new(MemoryRegion, 1); if (!cpu_model) cpu_model = \"pxa270-c5\"; /* Setup CPU & memory */ mpu = pxa270_init(address_space_mem, mainstone_binfo.ram_size, cpu_model); memory_region_init_ram(rom, \"mainstone.rom\", MAINSTONE_ROM); vmstate_register_ram_global(rom); memory_region_set_readonly(rom, true); memory_region_add_subregion(address_space_mem, 0, rom); #ifdef TARGET_WORDS_BIGENDIAN be = 1; #else be = 0; #endif /* There are two 32MiB flash devices on the board */ for (i = 0; i < 2; i ++) { dinfo = drive_get(IF_PFLASH, 0, i); if (!dinfo) { fprintf(stderr, \"Two flash images must be given with the \" \"'pflash' parameter\\n\"); exit(1); } if (!pflash_cfi01_register(mainstone_flash_base[i], NULL, i ? 
\"mainstone.flash1\" : \"mainstone.flash0\", MAINSTONE_FLASH, dinfo->bdrv, sector_len, MAINSTONE_FLASH / sector_len, 4, 0, 0, 0, 0, be)) { fprintf(stderr, \"qemu: Error registering flash memory.\\n\"); exit(1); } } mst_irq = sysbus_create_simple(\"mainstone-fpga\", MST_FPGA_PHYS, qdev_get_gpio_in(mpu->gpio, 0)); /* setup keypad */ printf(\"map addr %p\\n\", &map); pxa27x_register_keypad(mpu->kp, map, 0xe0); /* MMC/SD host */ pxa2xx_mmci_handlers(mpu->mmc, NULL, qdev_get_gpio_in(mst_irq, MMC_IRQ)); pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[0], qdev_get_gpio_in(mst_irq, S0_IRQ), qdev_get_gpio_in(mst_irq, S0_CD_IRQ)); pxa2xx_pcmcia_set_irq_cb(mpu->pcmcia[1], qdev_get_gpio_in(mst_irq, S1_IRQ), qdev_get_gpio_in(mst_irq, S1_CD_IRQ)); smc91c111_init(&nd_table[0], MST_ETH_PHYS, qdev_get_gpio_in(mst_irq, ETHERNET_IRQ)); mainstone_binfo.kernel_filename = kernel_filename; mainstone_binfo.kernel_cmdline = kernel_cmdline; mainstone_binfo.initrd_filename = initrd_filename; mainstone_binfo.board_id = arm_id; arm_load_kernel(mpu->cpu, &mainstone_binfo); }"} {"target": 0, "idx": 5880, "func": "static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque) { /* Do nothing for now. */ }"} {"target": 0, "idx": 5887, "func": "static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size) { unsigned desc[2]; target_phys_addr_t packet_desc_addr, last_desc_addr; GemState *s; unsigned rxbufsize, bytes_to_copy; unsigned rxbuf_offset; uint8_t rxbuf[2048]; uint8_t *rxbuf_ptr; s = DO_UPCAST(NICState, nc, nc)->opaque; /* Do nothing if receive is not enabled. */ if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_RXENA)) { return -1; } /* Is this destination MAC address \"for us\" ? */ if (gem_mac_address_filter(s, buf) == GEM_RX_REJECT) { return -1; } /* Discard packets with receive length error enabled ? */ if (s->regs[GEM_NWCFG] & GEM_NWCFG_LERR_DISC) { unsigned type_len; /* Fish the ethertype / length field out of the RX packet */ type_len = buf[12] << 8 | buf[13]; /* It is a length field, not an ethertype */ if (type_len < 0x600) { if (size < type_len) { /* discard */ return -1; } } } /* * Determine configured receive buffer offset (probably 0) */ rxbuf_offset = (s->regs[GEM_NWCFG] & GEM_NWCFG_BUFF_OFST_M) >> GEM_NWCFG_BUFF_OFST_S; /* The configure size of each receive buffer. Determines how many * buffers needed to hold this packet. */ rxbufsize = ((s->regs[GEM_DMACFG] & GEM_DMACFG_RBUFSZ_M) >> GEM_DMACFG_RBUFSZ_S) * GEM_DMACFG_RBUFSZ_MUL; bytes_to_copy = size; /* Strip of FCS field ? (usually yes) */ if (s->regs[GEM_NWCFG] & GEM_NWCFG_STRIP_FCS) { rxbuf_ptr = (void *)buf; } else { unsigned crc_val; int crc_offset; /* The application wants the FCS field, which QEMU does not provide. * We must try and caclculate one. */ memcpy(rxbuf, buf, size); memset(rxbuf + size, 0, sizeof(rxbuf) - size); rxbuf_ptr = rxbuf; crc_val = cpu_to_le32(crc32(0, rxbuf, MAX(size, 60))); if (size < 60) { crc_offset = 60; } else { crc_offset = size; } memcpy(rxbuf + crc_offset, &crc_val, sizeof(crc_val)); bytes_to_copy += 4; size += 4; } /* Pad to minimum length */ if (size < 64) { size = 64; } DB_PRINT(\"config bufsize: %d packet size: %ld\\n\", rxbufsize, size); packet_desc_addr = s->rx_desc_addr; while (1) { DB_PRINT(\"read descriptor 0x%x\\n\", packet_desc_addr); /* read current descriptor */ cpu_physical_memory_read(packet_desc_addr, (uint8_t *)&desc[0], sizeof(desc)); /* Descriptor owned by software ? 
*/ if (rx_desc_get_ownership(desc) == 1) { DB_PRINT(\"descriptor 0x%x owned by sw.\\n\", packet_desc_addr); s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_NOBUF; /* Handle interrupt consequences */ gem_update_int_status(s); return -1; } DB_PRINT(\"copy %d bytes to 0x%x\\n\", MIN(bytes_to_copy, rxbufsize), rx_desc_get_buffer(desc)); /* * Let's have QEMU lend a helping hand. */ if (rx_desc_get_buffer(desc) == 0) { DB_PRINT(\"Invalid RX buffer (NULL) for descriptor 0x%x\\n\", packet_desc_addr); break; } /* Copy packet data to emulated DMA buffer */ cpu_physical_memory_write(rx_desc_get_buffer(desc) + rxbuf_offset, rxbuf_ptr, MIN(bytes_to_copy, rxbufsize)); bytes_to_copy -= MIN(bytes_to_copy, rxbufsize); rxbuf_ptr += MIN(bytes_to_copy, rxbufsize); if (bytes_to_copy == 0) { break; } /* Next descriptor */ if (rx_desc_get_wrap(desc)) { packet_desc_addr = s->regs[GEM_RXQBASE]; } else { packet_desc_addr += 8; } } DB_PRINT(\"set length: %ld, EOF on descriptor 0x%x\\n\", size, (unsigned)packet_desc_addr); /* Update last descriptor with EOF and total length */ rx_desc_set_eof(desc); rx_desc_set_length(desc, size); cpu_physical_memory_write(packet_desc_addr, (uint8_t *)&desc[0], sizeof(desc)); /* Advance RX packet descriptor Q */ last_desc_addr = packet_desc_addr; packet_desc_addr = s->rx_desc_addr; s->rx_desc_addr = last_desc_addr; if (rx_desc_get_wrap(desc)) { s->rx_desc_addr = s->regs[GEM_RXQBASE]; } else { s->rx_desc_addr += 8; } DB_PRINT(\"set SOF, OWN on descriptor 0x%08x\\n\", packet_desc_addr); /* Count it */ gem_receive_updatestats(s, buf, size); /* Update first descriptor (which could also be the last) */ /* read descriptor */ cpu_physical_memory_read(packet_desc_addr, (uint8_t *)&desc[0], sizeof(desc)); rx_desc_set_sof(desc); rx_desc_set_ownership(desc); cpu_physical_memory_write(packet_desc_addr, (uint8_t *)&desc[0], sizeof(desc)); s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_FRMRCVD; /* Handle interrupt consequences */ gem_update_int_status(s); return size; }"} {"target": 1, "idx": 5910, "func": "static int dxva2_vc1_end_frame(AVCodecContext *avctx) { VC1Context *v = avctx->priv_data; struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private; int ret; if (ctx_pic->bitstream_size <= 0) return -1; ret = ff_dxva2_common_end_frame(avctx, &v->s.current_picture_ptr->f, &ctx_pic->pp, sizeof(ctx_pic->pp), NULL, 0, commit_bitstream_and_slice_buffer); if (!ret) ff_mpeg_draw_horiz_band(&v->s, 0, avctx->height); return ret; }"} {"target": 1, "idx": 5913, "func": "static char *vnc_socket_remote_addr(const char *format, int fd) { struct sockaddr_storage sa; socklen_t salen; salen = sizeof(sa); if (getpeername(fd, (struct sockaddr*)&sa, &salen) < 0) return NULL; return addr_to_string(format, &sa, salen); }"} {"target": 1, "idx": 5915, "func": "static int qemu_gluster_open(BlockDriverState *bs, QDict *options, int bdrv_flags, Error **errp) { BDRVGlusterState *s = bs->opaque; int open_flags = 0; int ret = 0; BlockdevOptionsGluster *gconf = NULL; QemuOpts *opts; Error *local_err = NULL; const char *filename; opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto out; } filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME); s->debug_level = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG, GLUSTER_DEBUG_DEFAULT); if (s->debug_level < 0) { s->debug_level = 0; } else if (s->debug_level > GLUSTER_DEBUG_MAX) { s->debug_level = GLUSTER_DEBUG_MAX; } gconf = 
g_new0(BlockdevOptionsGluster, 1); gconf->debug_level = s->debug_level; gconf->has_debug_level = true; s->glfs = qemu_gluster_init(gconf, filename, options, errp); if (!s->glfs) { ret = -errno; goto out; } #ifdef CONFIG_GLUSTERFS_XLATOR_OPT /* Without this, if fsync fails for a recoverable reason (for instance, * ENOSPC), gluster will dump its cache, preventing retries. This means * almost certain data loss. Not all gluster versions support the * 'resync-failed-syncs-after-fsync' key value, but there is no way to * discover during runtime if it is supported (this api returns success for * unknown key/value pairs) */ ret = glfs_set_xlator_option(s->glfs, \"*-write-behind\", \"resync-failed-syncs-after-fsync\", \"on\"); if (ret < 0) { error_setg_errno(errp, errno, \"Unable to set xlator key/value pair\"); ret = -errno; goto out; } #endif qemu_gluster_parse_flags(bdrv_flags, &open_flags); s->fd = glfs_open(s->glfs, gconf->path, open_flags); if (!s->fd) { ret = -errno; } s->supports_seek_data = qemu_gluster_test_seek(s->fd); out: qemu_opts_del(opts); qapi_free_BlockdevOptionsGluster(gconf); if (!ret) { return ret; } if (s->fd) { glfs_close(s->fd); } if (s->glfs) { glfs_fini(s->glfs); } return ret; }"} {"target": 0, "idx": 5923, "func": "static int kmvc_decode_intra_8x8(KmvcContext * ctx, int w, int h) { BitBuf bb; int res, val; int i, j; int bx, by; int l0x, l1x, l0y, l1y; int mx, my; kmvc_init_getbits(bb, &ctx->g); for (by = 0; by < h; by += 8) for (bx = 0; bx < w; bx += 8) { if (!bytestream2_get_bytes_left(&ctx->g)) { av_log(ctx->avctx, AV_LOG_ERROR, \"Data overrun\\n\"); return AVERROR_INVALIDDATA; } kmvc_getbit(bb, &ctx->g, res); if (!res) { // fill whole 8x8 block val = bytestream2_get_byte(&ctx->g); for (i = 0; i < 64; i++) BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) = val; } else { // handle four 4x4 subblocks for (i = 0; i < 4; i++) { l0x = bx + (i & 1) * 4; l0y = by + (i & 2) * 2; kmvc_getbit(bb, &ctx->g, res); if (!res) { kmvc_getbit(bb, &ctx->g, res); if (!res) { // fill whole 4x4 block val = bytestream2_get_byte(&ctx->g); for (j = 0; j < 16; j++) BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) = val; } else { // copy block from already decoded place val = bytestream2_get_byte(&ctx->g); mx = val & 0xF; my = val >> 4; if ((l0x-mx) + 320*(l0y-my) < 0 || (l0x-mx) + 320*(l0y-my) > 316*196) { av_log(ctx->avctx, AV_LOG_ERROR, \"Invalid MV\\n\"); return AVERROR_INVALIDDATA; } for (j = 0; j < 16; j++) BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) = BLK(ctx->cur, l0x + (j & 3) - mx, l0y + (j >> 2) - my); } } else { // descend to 2x2 sub-sub-blocks for (j = 0; j < 4; j++) { l1x = l0x + (j & 1) * 2; l1y = l0y + (j & 2); kmvc_getbit(bb, &ctx->g, res); if (!res) { kmvc_getbit(bb, &ctx->g, res); if (!res) { // fill whole 2x2 block val = bytestream2_get_byte(&ctx->g); BLK(ctx->cur, l1x, l1y) = val; BLK(ctx->cur, l1x + 1, l1y) = val; BLK(ctx->cur, l1x, l1y + 1) = val; BLK(ctx->cur, l1x + 1, l1y + 1) = val; } else { // copy block from already decoded place val = bytestream2_get_byte(&ctx->g); mx = val & 0xF; my = val >> 4; if ((l1x-mx) + 320*(l1y-my) < 0 || (l1x-mx) + 320*(l1y-my) > 318*198) { av_log(ctx->avctx, AV_LOG_ERROR, \"Invalid MV\\n\"); return AVERROR_INVALIDDATA; } BLK(ctx->cur, l1x, l1y) = BLK(ctx->cur, l1x - mx, l1y - my); BLK(ctx->cur, l1x + 1, l1y) = BLK(ctx->cur, l1x + 1 - mx, l1y - my); BLK(ctx->cur, l1x, l1y + 1) = BLK(ctx->cur, l1x - mx, l1y + 1 - my); BLK(ctx->cur, l1x + 1, l1y + 1) = BLK(ctx->cur, l1x + 1 - mx, l1y + 1 - my); } } else { // read values for block BLK(ctx->cur, l1x, 
l1y) = bytestream2_get_byte(&ctx->g); BLK(ctx->cur, l1x + 1, l1y) = bytestream2_get_byte(&ctx->g); BLK(ctx->cur, l1x, l1y + 1) = bytestream2_get_byte(&ctx->g); BLK(ctx->cur, l1x + 1, l1y + 1) = bytestream2_get_byte(&ctx->g); } } } } } } return 0; }"} {"target": 1, "idx": 5928, "func": "static uint32_t pci_unin_main_config_readl (void *opaque, target_phys_addr_t addr) { UNINState *s = opaque; uint32_t val; val = s->config_reg; #ifdef TARGET_WORDS_BIGENDIAN val = bswap32(val); #endif UNIN_DPRINTF(\"config_readl addr \" TARGET_FMT_plx \" val %x\\n\", addr, val); return val; }"} {"target": 1, "idx": 5934, "func": "static int ram_save_init_globals(void) { int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */ dirty_rate_high_cnt = 0; bitmap_sync_count = 0; migration_bitmap_sync_init(); qemu_mutex_init(&migration_bitmap_mutex); if (migrate_use_xbzrle()) { XBZRLE_cache_lock(); ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE); XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() / TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); if (!XBZRLE.cache) { XBZRLE_cache_unlock(); error_report(\"Error creating cache\"); return -1; } XBZRLE_cache_unlock(); /* We prefer not to abort if there is no memory */ XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); if (!XBZRLE.encoded_buf) { error_report(\"Error allocating encoded_buf\"); return -1; } XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE); if (!XBZRLE.current_buf) { error_report(\"Error allocating current_buf\"); g_free(XBZRLE.encoded_buf); XBZRLE.encoded_buf = NULL; return -1; } acct_clear(); } /* For memory_global_dirty_log_start below. */ qemu_mutex_lock_iothread(); qemu_mutex_lock_ramlist(); rcu_read_lock(); bytes_transferred = 0; reset_ram_globals(); ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS; migration_bitmap_rcu = g_new0(struct BitmapRcu, 1); migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages); bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages); if (migrate_postcopy_ram()) { migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages); bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages); } /* * Count the total number of pages used by ram blocks not including any * gaps due to alignment or unplugs. 
*/ migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS; memory_global_dirty_log_start(); migration_bitmap_sync(); qemu_mutex_unlock_ramlist(); qemu_mutex_unlock_iothread(); rcu_read_unlock(); return 0; }"} {"target": 1, "idx": 5937, "func": "static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len, uint64_t prp1, uint64_t prp2) { QEMUSGList qsg; QEMUIOVector iov; uint16_t status = NVME_SUCCESS; if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) { return NVME_INVALID_FIELD | NVME_DNR; } if (qsg.nsg > 0) { if (dma_buf_read(ptr, len, &qsg)) { status = NVME_INVALID_FIELD | NVME_DNR; } qemu_sglist_destroy(&qsg); } else { if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) { status = NVME_INVALID_FIELD | NVME_DNR; } qemu_iovec_destroy(&iov); } return status; }"} {"target": 1, "idx": 5948, "func": "static int asf_read_metadata_obj(AVFormatContext *s, const GUIDParseTable *g) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; uint64_t size = avio_rl64(pb); uint16_t nb_recs = avio_rl16(pb); // number of records in the Description Records list int i, ret; for (i = 0; i < nb_recs; i++) { uint16_t name_len, buflen, type, val_len, st_num; uint8_t *name = NULL; avio_skip(pb, 2); // skip reserved field st_num = avio_rl16(pb); name_len = avio_rl16(pb); buflen = 2 * name_len + 1; if (!name_len) break; type = avio_rl16(pb); val_len = avio_rl32(pb); name = av_malloc(name_len); if (!name) return AVERROR(ENOMEM); avio_get_str16le(pb, name_len, name, buflen); if (!strcmp(name, \"AspectRatioX\") || !strcmp(name, \"AspectRatioY\")) { asf_store_aspect_ratio(s, st_num, name); } else { if (st_num < ASF_MAX_STREAMS) { if ((ret = process_metadata(s, name, name_len, val_len, type, &asf->asf_sd[st_num].asf_met)) < 0) break; } else av_freep(&name); } } align_position(pb, asf->offset, size); return 0; }"} {"target": 1, "idx": 5967, "func": "static int check_refcounts_l1(BlockDriverState *bs, BdrvCheckResult *res, uint16_t *refcount_table, int64_t refcount_table_size, int64_t l1_table_offset, int l1_size, int flags) { BDRVQcowState *s = bs->opaque; uint64_t *l1_table, l2_offset, l1_size2; int i, ret; l1_size2 = l1_size * sizeof(uint64_t); /* Mark L1 table as used */ inc_refcounts(bs, res, refcount_table, refcount_table_size, l1_table_offset, l1_size2); /* Read L1 table entries from disk */ if (l1_size2 == 0) { l1_table = NULL; } else { l1_table = g_try_malloc(l1_size2); if (l1_table == NULL) { ret = -ENOMEM; goto fail; } if (bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2) != l1_size2) goto fail; for(i = 0;i < l1_size; i++) be64_to_cpus(&l1_table[i]); } /* Do the actual checks */ for(i = 0; i < l1_size; i++) { l2_offset = l1_table[i]; if (l2_offset) { /* Mark L2 table as used */ l2_offset &= L1E_OFFSET_MASK; inc_refcounts(bs, res, refcount_table, refcount_table_size, l2_offset, s->cluster_size); /* L2 tables are cluster aligned */ if (offset_into_cluster(s, l2_offset)) { fprintf(stderr, \"ERROR l2_offset=%\" PRIx64 \": Table is not \" \"cluster aligned; L1 entry corrupted\\n\", l2_offset); res->corruptions++; } /* Process and check L2 entries */ ret = check_refcounts_l2(bs, res, refcount_table, refcount_table_size, l2_offset, flags); if (ret < 0) { goto fail; } } } g_free(l1_table); return 0; fail: fprintf(stderr, \"ERROR: I/O error in check_refcounts_l1\\n\"); res->check_errors++; g_free(l1_table); return -EIO; }"} {"target": 1, "idx": 5970, "func": "DeviceState *i2c_create_slave(i2c_bus *bus, const char *name, uint8_t addr) { DeviceState *dev; dev = qdev_create(&bus->qbus, name); 
qdev_prop_set_uint8(dev, \"address\", addr); qdev_init(dev); return dev; }"} {"target": 1, "idx": 5983, "func": "static void strongarm_gpio_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { StrongARMGPIOInfo *s = opaque; switch (offset) { case GPDR: /* GPIO Pin-Direction registers */ s->dir = value; strongarm_gpio_handler_update(s); break; case GPSR: /* GPIO Pin-Output Set registers */ s->olevel |= value; strongarm_gpio_handler_update(s); s->gpsr = value; break; case GPCR: /* GPIO Pin-Output Clear registers */ s->olevel &= ~value; strongarm_gpio_handler_update(s); break; case GRER: /* GPIO Rising-Edge Detect Enable registers */ s->rising = value; break; case GFER: /* GPIO Falling-Edge Detect Enable registers */ s->falling = value; break; case GAFR: /* GPIO Alternate Function registers */ s->gafr = value; break; case GEDR: /* GPIO Edge Detect Status registers */ s->status &= ~value; strongarm_gpio_irq_update(s); break; default: printf(\"%s: Bad offset 0x\" TARGET_FMT_plx \"\\n\", __func__, offset); } }"} {"target": 1, "idx": 5995, "func": "static void gen_bitops (DisasContext *ctx, uint32_t opc, int rt, int rs, int lsb, int msb) { TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); gen_load_gpr(t1, rs); switch (opc) { case OPC_EXT: if (lsb + msb > 31) goto fail; tcg_gen_shri_tl(t0, t1, lsb); if (msb != 31) { tcg_gen_andi_tl(t0, t0, (1 << (msb + 1)) - 1); } else { tcg_gen_ext32s_tl(t0, t0); } break; #if defined(TARGET_MIPS64) case OPC_DEXTM: tcg_gen_shri_tl(t0, t1, lsb); if (msb != 31) { tcg_gen_andi_tl(t0, t0, (1ULL << (msb + 1 + 32)) - 1); } break; case OPC_DEXTU: tcg_gen_shri_tl(t0, t1, lsb + 32); tcg_gen_andi_tl(t0, t0, (1ULL << (msb + 1)) - 1); break; case OPC_DEXT: tcg_gen_shri_tl(t0, t1, lsb); tcg_gen_andi_tl(t0, t0, (1ULL << (msb + 1)) - 1); break; #endif case OPC_INS: if (lsb > msb) goto fail; gen_load_gpr(t0, rt); tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1); tcg_gen_ext32s_tl(t0, t0); break; #if defined(TARGET_MIPS64) case OPC_DINSM: gen_load_gpr(t0, rt); tcg_gen_deposit_tl(t0, t0, t1, lsb, msb + 32 - lsb + 1); break; case OPC_DINSU: gen_load_gpr(t0, rt); tcg_gen_deposit_tl(t0, t0, t1, lsb + 32, msb - lsb + 1); break; case OPC_DINS: gen_load_gpr(t0, rt); tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1); break; #endif default: fail: MIPS_INVAL(\"bitops\"); generate_exception(ctx, EXCP_RI); tcg_temp_free(t0); tcg_temp_free(t1); return; } gen_store_gpr(t0, rt); tcg_temp_free(t0); tcg_temp_free(t1); }"} {"target": 1, "idx": 5996, "func": "static int vhost_scsi_exit(DeviceState *qdev) { VirtIODevice *vdev = VIRTIO_DEVICE(qdev); VHostSCSI *s = VHOST_SCSI(qdev); VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(qdev); migrate_del_blocker(s->migration_blocker); error_free(s->migration_blocker); /* This will stop vhost backend. 
*/ vhost_scsi_set_status(vdev, 0); g_free(s->dev.vqs); return virtio_scsi_common_exit(vs); }"} {"target": 1, "idx": 5997, "func": "static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, unsigned int width, unsigned int height, int lumStride, int chromStride, int srcStride) { unsigned y; const unsigned chromWidth= width>>1; #ifdef HAVE_MMX for(y=0; y>RGB2YUV_SHIFT) + 16; unsigned int V = ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128; unsigned int U = ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128; udst[i] = U; vdst[i] = V; ydst[2*i] = Y; b= src[6*i+3]; g= src[6*i+4]; r= src[6*i+5]; Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; ydst[2*i+1] = Y; } ydst += lumStride; src += srcStride; for(i=0; i>RGB2YUV_SHIFT) + 16; ydst[2*i] = Y; b= src[6*i+3]; g= src[6*i+4]; r= src[6*i+5]; Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; ydst[2*i+1] = Y; } udst += chromStride; vdst += chromStride; ydst += lumStride; src += srcStride; } }"} {"target": 1, "idx": 5999, "func": "static void dec_wcsr(DisasContext *dc) { int no; LOG_DIS(\"wcsr r%d, %d\\n\", dc->r1, dc->csr); switch (dc->csr) { case CSR_IE: tcg_gen_mov_tl(cpu_ie, cpu_R[dc->r1]); tcg_gen_movi_tl(cpu_pc, dc->pc + 4); dc->is_jmp = DISAS_UPDATE; break; case CSR_IM: /* mark as an io operation because it could cause an interrupt */ if (use_icount) { gen_io_start(); } gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]); tcg_gen_movi_tl(cpu_pc, dc->pc + 4); if (use_icount) { gen_io_end(); } dc->is_jmp = DISAS_UPDATE; break; case CSR_IP: /* mark as an io operation because it could cause an interrupt */ if (use_icount) { gen_io_start(); } gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]); tcg_gen_movi_tl(cpu_pc, dc->pc + 4); if (use_icount) { gen_io_end(); } dc->is_jmp = DISAS_UPDATE; break; case CSR_ICC: /* TODO */ break; case CSR_DCC: /* TODO */ break; case CSR_EBA: tcg_gen_mov_tl(cpu_eba, cpu_R[dc->r1]); break; case CSR_DEBA: tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]); break; case CSR_JTX: gen_helper_wcsr_jtx(cpu_env, cpu_R[dc->r1]); break; case CSR_JRX: gen_helper_wcsr_jrx(cpu_env, cpu_R[dc->r1]); break; case CSR_DC: tcg_gen_mov_tl(cpu_dc, cpu_R[dc->r1]); break; case CSR_BP0: case CSR_BP1: case CSR_BP2: case CSR_BP3: no = dc->csr - CSR_BP0; if (dc->env->num_bps <= no) { cpu_abort(dc->env, \"breakpoint #%i is not available\\n\", no); } tcg_gen_mov_tl(cpu_bp[no], cpu_R[dc->r1]); break; case CSR_WP0: case CSR_WP1: case CSR_WP2: case CSR_WP3: no = dc->csr - CSR_WP0; if (dc->env->num_wps <= no) { cpu_abort(dc->env, \"watchpoint #%i is not available\\n\", no); } tcg_gen_mov_tl(cpu_wp[no], cpu_R[dc->r1]); break; case CSR_CC: case CSR_CFG: cpu_abort(dc->env, \"invalid write access csr=%x\\n\", dc->csr); break; default: cpu_abort(dc->env, \"write_csr unknown csr=%x\\n\", dc->csr); break; } }"} {"target": 0, "idx": 6005, "func": "void pmac_format_nvram_partition (MacIONVRAMState *nvr, int len) { unsigned int i; uint32_t start = 0, end; struct OpenBIOS_nvpart_v1 *part_header; // OpenBIOS nvram variables // Variable partition part_header = (struct OpenBIOS_nvpart_v1 *)nvr->data; part_header->signature = OPENBIOS_PART_SYSTEM; pstrcpy(part_header->name, sizeof(part_header->name), \"system\"); end = start + sizeof(struct OpenBIOS_nvpart_v1); for (i = 0; i < nb_prom_envs; i++) end = OpenBIOS_set_var(nvr->data, end, prom_envs[i]); // End marker nvr->data[end++] = '\\0'; end = start + ((end - start + 15) & ~15); /* XXX: OpenBIOS is not able to grow up a partition. Leave some space for new variables. 
*/ if (end < DEF_SYSTEM_SIZE) end = DEF_SYSTEM_SIZE; OpenBIOS_finish_partition(part_header, end - start); // free partition start = end; part_header = (struct OpenBIOS_nvpart_v1 *)&nvr->data[start]; part_header->signature = OPENBIOS_PART_FREE; pstrcpy(part_header->name, sizeof(part_header->name), \"free\"); end = len; OpenBIOS_finish_partition(part_header, end - start); }"} {"target": 0, "idx": 6012, "func": "static void gen_slq(DisasContext *ctx) { int l1 = gen_new_label(); TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1); tcg_gen_subfi_tl(t1, 32, t1); tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1); tcg_gen_or_tl(t1, t0, t1); gen_store_spr(SPR_MQ, t1); tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20); tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0); tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1); tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0); gen_set_label(l1); tcg_temp_free(t0); tcg_temp_free(t1); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); }"} {"target": 1, "idx": 6027, "func": "vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen) { uint8_t ring_gen; struct Vmxnet3_RxCompDesc rxcd; hwaddr daddr = vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring); pci_dma_read(PCI_DEVICE(s), daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc)); ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring); if (rxcd.gen != ring_gen) { *descr_gen = ring_gen; vmxnet3_inc_rx_completion_counter(s, qidx); return daddr; } return 0; }"} {"target": 0, "idx": 6034, "func": "build_madt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu) { PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); int madt_start = table_data->len; AcpiMultipleApicTable *madt; AcpiMadtIoApic *io_apic; AcpiMadtIntsrcovr *intsrcovr; AcpiMadtLocalNmi *local_nmi; int i; madt = acpi_data_push(table_data, sizeof *madt); madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS); madt->flags = cpu_to_le32(1); for (i = 0; i < pcms->apic_id_limit; i++) { AcpiMadtProcessorApic *apic = acpi_data_push(table_data, sizeof *apic); apic->type = ACPI_APIC_PROCESSOR; apic->length = sizeof(*apic); apic->processor_id = i; apic->local_apic_id = i; if (test_bit(i, cpu->found_cpus)) { apic->flags = cpu_to_le32(1); } else { apic->flags = cpu_to_le32(0); } } io_apic = acpi_data_push(table_data, sizeof *io_apic); io_apic->type = ACPI_APIC_IO; io_apic->length = sizeof(*io_apic); #define ACPI_BUILD_IOAPIC_ID 0x0 io_apic->io_apic_id = ACPI_BUILD_IOAPIC_ID; io_apic->address = cpu_to_le32(IO_APIC_DEFAULT_ADDRESS); io_apic->interrupt = cpu_to_le32(0); if (pcms->apic_xrupt_override) { intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr); intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE; intsrcovr->length = sizeof(*intsrcovr); intsrcovr->source = 0; intsrcovr->gsi = cpu_to_le32(2); intsrcovr->flags = cpu_to_le16(0); /* conforms to bus specifications */ } for (i = 1; i < 16; i++) { #define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11)) if (!(ACPI_BUILD_PCI_IRQS & (1 << i))) { /* No need for a INT source override structure. 
*/ continue; } intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr); intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE; intsrcovr->length = sizeof(*intsrcovr); intsrcovr->source = i; intsrcovr->gsi = cpu_to_le32(i); intsrcovr->flags = cpu_to_le16(0xd); /* active high, level triggered */ } local_nmi = acpi_data_push(table_data, sizeof *local_nmi); local_nmi->type = ACPI_APIC_LOCAL_NMI; local_nmi->length = sizeof(*local_nmi); local_nmi->processor_id = 0xff; /* all processors */ local_nmi->flags = cpu_to_le16(0); local_nmi->lint = 1; /* ACPI_LINT1 */ build_header(linker, table_data, (void *)(table_data->data + madt_start), \"APIC\", table_data->len - madt_start, 1, NULL); }"} {"target": 0, "idx": 6040, "func": "matroska_parse_blockgroup (MatroskaDemuxContext *matroska, uint64_t cluster_time) { int res = 0; uint32_t id; AVPacket *pkt = NULL; int is_keyframe = PKT_FLAG_KEY, last_num_packets = matroska->num_packets; uint64_t duration = AV_NOPTS_VALUE; int track = -1; uint8_t *data; int size = 0; int64_t pos = 0; av_log(matroska->ctx, AV_LOG_DEBUG, \"parsing blockgroup...\\n\"); while (res == 0) { if (!(id = ebml_peek_id(matroska, &matroska->level_up))) { res = AVERROR_IO; break; } else if (matroska->level_up) { matroska->level_up--; break; } switch (id) { /* one block inside the group. Note, block parsing is one * of the harder things, so this code is a bit complicated. * See http://www.matroska.org/ for documentation. */ case MATROSKA_ID_BLOCK: { pos = url_ftell(&matroska->ctx->pb); res = ebml_read_binary(matroska, &id, &data, &size); break; } case MATROSKA_ID_BLOCKDURATION: { if ((res = ebml_read_uint(matroska, &id, &duration)) < 0) break; break; } case MATROSKA_ID_BLOCKREFERENCE: /* We've found a reference, so not even the first frame in * the lace is a key frame. 
*/ is_keyframe = 0; if (last_num_packets != matroska->num_packets) matroska->packets[last_num_packets]->flags = 0; res = ebml_read_skip(matroska); break; default: av_log(matroska->ctx, AV_LOG_INFO, \"Unknown entry 0x%x in blockgroup data\\n\", id); /* fall-through */ case EBML_ID_VOID: res = ebml_read_skip(matroska); break; } if (matroska->level_up) { matroska->level_up--; break; } } if (res) return res; if (size > 0) res = matroska_parse_block(matroska, data, size, pos, cluster_time, is_keyframe, &track, &pkt); if (pkt) { if (duration != AV_NOPTS_VALUE) pkt->duration = duration; else if (track >= 0 && track < matroska->num_tracks) pkt->duration = matroska->tracks[track]->default_duration / matroska->time_scale; } return res; }"} {"target": 0, "idx": 6047, "func": "void vncws_tls_handshake_peek(void *opaque) { VncState *vs = opaque; long ret; if (!vs->ws_tls.session) { char peek[4]; ret = qemu_recv(vs->csock, peek, sizeof(peek), MSG_PEEK); if (ret && (strncmp(peek, \"\\x16\", 1) == 0 || strncmp(peek, \"\\x80\", 1) == 0)) { VNC_DEBUG(\"TLS Websocket connection recognized\"); vnc_tls_client_setup(vs, 1); vncws_start_tls_handshake(vs); } else { vncws_handshake_read(vs); } } else { qemu_set_fd_handler2(vs->csock, NULL, vncws_handshake_read, NULL, vs); } }"} {"target": 0, "idx": 6053, "func": "bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update) { target_ulong dr6; int reg; bool hit_enabled = false; dr6 = env->dr[6] & ~0xf; for (reg = 0; reg < DR7_MAX_BP; reg++) { bool bp_match = false; bool wp_match = false; switch (hw_breakpoint_type(env->dr[7], reg)) { case DR7_TYPE_BP_INST: if (env->dr[reg] == env->eip) { bp_match = true; } break; case DR7_TYPE_DATA_WR: case DR7_TYPE_DATA_RW: if (env->cpu_watchpoint[reg] && env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) { wp_match = true; } break; case DR7_TYPE_IO_RW: break; } if (bp_match || wp_match) { dr6 |= 1 << reg; if (hw_breakpoint_enabled(env->dr[7], reg)) { hit_enabled = true; } } } if (hit_enabled || force_dr6_update) { env->dr[6] = dr6; } return hit_enabled; }"} {"target": 0, "idx": 6059, "func": "static int ipvideo_decode_block_opcode_0x7(IpvideoContext *s) { int x, y; unsigned char P[2]; unsigned int flags; /* 2-color encoding */ CHECK_STREAM_PTR(2); P[0] = *s->stream_ptr++; P[1] = *s->stream_ptr++; if (P[0] <= P[1]) { /* need 8 more bytes from the stream */ CHECK_STREAM_PTR(8); for (y = 0; y < 8; y++) { flags = *s->stream_ptr++ | 0x100; for (; flags != 1; flags >>= 1) *s->pixel_ptr++ = P[flags & 1]; s->pixel_ptr += s->line_inc; } } else { /* need 2 more bytes from the stream */ CHECK_STREAM_PTR(2); flags = bytestream_get_le16(&s->stream_ptr); for (y = 0; y < 8; y += 2) { for (x = 0; x < 8; x += 2, flags >>= 1) { s->pixel_ptr[x ] = s->pixel_ptr[x + 1 ] = s->pixel_ptr[x + s->stride] = s->pixel_ptr[x + 1 + s->stride] = P[flags & 1]; } s->pixel_ptr += s->stride * 2; } } /* report success */ return 0; }"} {"target": 0, "idx": 6072, "func": "void arm_translate_init(void) { cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, \"env\"); cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, \"T0\"); cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, \"T1\"); }"} {"target": 0, "idx": 6074, "func": "void virtio_9p_push_and_notify(V9fsPDU *pdu) { V9fsState *s = pdu->s; V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); VirtQueueElement *elem = &v->elems[pdu->idx]; /* push onto queue and notify */ virtqueue_push(v->vq, elem, pdu->size); /* FIXME: we should batch these completions */ virtio_notify(VIRTIO_DEVICE(v), 
v->vq); }"} {"target": 0, "idx": 6083, "func": "int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { BdrvTrackedRequest req; uint64_t align = bs->request_alignment; uint8_t *head_buf = NULL; uint8_t *tail_buf = NULL; QEMUIOVector local_qiov; bool use_local_qiov = false; int ret; if (!bs->drv) { return -ENOMEDIUM; } if (bs->read_only) { return -EPERM; } assert(!(bs->open_flags & BDRV_O_INACTIVE)); ret = bdrv_check_byte_request(bs, offset, bytes); if (ret < 0) { return ret; } /* * Align write if necessary by performing a read-modify-write cycle. * Pad qiov with the read parts and be sure to have a tracked request not * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. */ tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE); if (!qiov) { ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req); goto out; } if (offset & (align - 1)) { QEMUIOVector head_qiov; struct iovec head_iov; mark_request_serialising(&req, align); wait_serialising_requests(&req); head_buf = qemu_blockalign(bs, align); head_iov = (struct iovec) { .iov_base = head_buf, .iov_len = align, }; qemu_iovec_init_external(&head_qiov, &head_iov, 1); bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD); ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, align, &head_qiov, 0); if (ret < 0) { goto fail; } bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); qemu_iovec_init(&local_qiov, qiov->niov + 2); qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); use_local_qiov = true; bytes += offset & (align - 1); offset = offset & ~(align - 1); /* We have read the tail already if the request is smaller * than one aligned block. */ if (bytes < align) { qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes); bytes = align; } } if ((offset + bytes) & (align - 1)) { QEMUIOVector tail_qiov; struct iovec tail_iov; size_t tail_bytes; bool waited; mark_request_serialising(&req, align); waited = wait_serialising_requests(&req); assert(!waited || !use_local_qiov); tail_buf = qemu_blockalign(bs, align); tail_iov = (struct iovec) { .iov_base = tail_buf, .iov_len = align, }; qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL); ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, align, &tail_qiov, 0); if (ret < 0) { goto fail; } bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); if (!use_local_qiov) { qemu_iovec_init(&local_qiov, qiov->niov + 1); qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); use_local_qiov = true; } tail_bytes = (offset + bytes) & (align - 1); qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); bytes = ROUND_UP(bytes, align); } ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align, use_local_qiov ? 
&local_qiov : qiov, flags); fail: if (use_local_qiov) { qemu_iovec_destroy(&local_qiov); } qemu_vfree(head_buf); qemu_vfree(tail_buf); out: tracked_request_end(&req); return ret; }"} {"target": 0, "idx": 6087, "func": "static int kvm_s390_check_clear_cmma(KVMState *s) { struct kvm_device_attr attr = { .group = KVM_S390_VM_MEM_CTRL, .attr = KVM_S390_VM_MEM_CLR_CMMA, }; return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr); }"} {"target": 0, "idx": 6097, "func": "static int bdrv_fill_options(QDict **options, const char **pfilename, int flags, BlockDriver *drv, Error **errp) { const char *filename = *pfilename; const char *drvname; bool protocol = flags & BDRV_O_PROTOCOL; bool parse_filename = false; Error *local_err = NULL; /* Parse json: pseudo-protocol */ if (filename && g_str_has_prefix(filename, \"json:\")) { QDict *json_options = parse_json_filename(filename, &local_err); if (local_err) { error_propagate(errp, local_err); return -EINVAL; } /* Options given in the filename have lower priority than options * specified directly */ qdict_join(*options, json_options, false); QDECREF(json_options); *pfilename = filename = NULL; } /* Fetch the file name from the options QDict if necessary */ if (protocol && filename) { if (!qdict_haskey(*options, \"filename\")) { qdict_put(*options, \"filename\", qstring_from_str(filename)); parse_filename = true; } else { error_setg(errp, \"Can't specify 'file' and 'filename' options at \" \"the same time\"); return -EINVAL; } } /* Find the right block driver */ filename = qdict_get_try_str(*options, \"filename\"); drvname = qdict_get_try_str(*options, \"driver\"); if (drv) { if (drvname) { error_setg(errp, \"Driver specified twice\"); return -EINVAL; } drvname = drv->format_name; qdict_put(*options, \"driver\", qstring_from_str(drvname)); } else { if (!drvname && protocol) { if (filename) { drv = bdrv_find_protocol(filename, parse_filename, errp); if (!drv) { return -EINVAL; } drvname = drv->format_name; qdict_put(*options, \"driver\", qstring_from_str(drvname)); } else { error_setg(errp, \"Must specify either driver or file\"); return -EINVAL; } } else if (drvname) { drv = bdrv_find_format(drvname); if (!drv) { error_setg(errp, \"Unknown driver '%s'\", drvname); return -ENOENT; } } } assert(drv || !protocol); /* Driver-specific filename parsing */ if (drv && drv->bdrv_parse_filename && parse_filename) { drv->bdrv_parse_filename(filename, *options, &local_err); if (local_err) { error_propagate(errp, local_err); return -EINVAL; } if (!drv->bdrv_needs_filename) { qdict_del(*options, \"filename\"); } } return 0; }"} {"target": 0, "idx": 6102, "func": "int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t * const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[]) { int i, ret; const uint8_t *src2[4] = { srcSlice[0], srcSlice[1], srcSlice[2], srcSlice[3] }; uint8_t *dst2[4] = { dst[0], dst[1], dst[2], dst[3] }; uint8_t *rgb0_tmp = NULL; // do not mess up sliceDir if we have a \"trailing\" 0-size slice if (srcSliceH == 0) return 0; if (!check_image_pointers(srcSlice, c->srcFormat, srcStride)) { av_log(c, AV_LOG_ERROR, \"bad src image pointers\\n\"); return 0; } if (!check_image_pointers((const uint8_t* const*)dst, c->dstFormat, dstStride)) { av_log(c, AV_LOG_ERROR, \"bad dst image pointers\\n\"); return 0; } if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) { av_log(c, AV_LOG_ERROR, \"Slices start in the middle!\\n\"); return 0; } if (c->sliceDir == 0) { if (srcSliceY 
== 0) c->sliceDir = 1; else c->sliceDir = -1; } if (usePal(c->srcFormat)) { for (i = 0; i < 256; i++) { int p, r, g, b, y, u, v, a = 0xff; if (c->srcFormat == AV_PIX_FMT_PAL8) { p = ((const uint32_t *)(srcSlice[1]))[i]; a = (p >> 24) & 0xFF; r = (p >> 16) & 0xFF; g = (p >> 8) & 0xFF; b = p & 0xFF; } else if (c->srcFormat == AV_PIX_FMT_RGB8) { r = ( i >> 5 ) * 36; g = ((i >> 2) & 7) * 36; b = ( i & 3) * 85; } else if (c->srcFormat == AV_PIX_FMT_BGR8) { b = ( i >> 6 ) * 85; g = ((i >> 3) & 7) * 36; r = ( i & 7) * 36; } else if (c->srcFormat == AV_PIX_FMT_RGB4_BYTE) { r = ( i >> 3 ) * 255; g = ((i >> 1) & 3) * 85; b = ( i & 1) * 255; } else if (c->srcFormat == AV_PIX_FMT_GRAY8 || c->srcFormat == AV_PIX_FMT_GRAY8A) { r = g = b = i; } else { av_assert1(c->srcFormat == AV_PIX_FMT_BGR4_BYTE); b = ( i >> 3 ) * 255; g = ((i >> 1) & 3) * 85; r = ( i & 1) * 255; } #define RGB2YUV_SHIFT 15 #define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5)) #define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5)) #define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5)) #define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5)) #define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5)) #define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5)) #define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5)) #define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5)) #define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5)) y = av_clip_uint8((RY * r + GY * g + BY * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); u = av_clip_uint8((RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); v = av_clip_uint8((RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); c->pal_yuv[i]= y + (u<<8) + (v<<16) + ((unsigned)a<<24); switch (c->dstFormat) { case AV_PIX_FMT_BGR32: #if !HAVE_BIGENDIAN case AV_PIX_FMT_RGB24: #endif c->pal_rgb[i]= r + (g<<8) + (b<<16) + ((unsigned)a<<24); break; case AV_PIX_FMT_BGR32_1: #if HAVE_BIGENDIAN case AV_PIX_FMT_BGR24: #endif c->pal_rgb[i]= a + (r<<8) + (g<<16) + ((unsigned)b<<24); break; case AV_PIX_FMT_RGB32_1: #if HAVE_BIGENDIAN case AV_PIX_FMT_RGB24: #endif c->pal_rgb[i]= a + (b<<8) + (g<<16) + ((unsigned)r<<24); break; case AV_PIX_FMT_RGB32: #if !HAVE_BIGENDIAN case AV_PIX_FMT_BGR24: #endif default: c->pal_rgb[i]= b + (g<<8) + (r<<16) + ((unsigned)a<<24); } } } if (c->src0Alpha && !c->dst0Alpha && isALPHA(c->dstFormat)) { uint8_t *base; int x,y; rgb0_tmp = av_malloc(FFABS(srcStride[0]) * srcSliceH + 32); base = srcStride[0] < 0 ? 
rgb0_tmp - srcStride[0] * (srcSliceH-1) : rgb0_tmp; for (y=0; ysrcW); for (x=c->src0Alpha-1; x<4*c->srcW; x+=4) { base[ srcStride[0]*y + x] = 0xFF; } } src2[0] = base; } // copy strides, so they can safely be modified if (c->sliceDir == 1) { // slices go from top to bottom int srcStride2[4] = { srcStride[0], srcStride[1], srcStride[2], srcStride[3] }; int dstStride2[4] = { dstStride[0], dstStride[1], dstStride[2], dstStride[3] }; reset_ptr(src2, c->srcFormat); reset_ptr((void*)dst2, c->dstFormat); /* reset slice direction at end of frame */ if (srcSliceY + srcSliceH == c->srcH) c->sliceDir = 0; ret = c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst2, dstStride2); } else { // slices go from bottom to top => we flip the image internally int srcStride2[4] = { -srcStride[0], -srcStride[1], -srcStride[2], -srcStride[3] }; int dstStride2[4] = { -dstStride[0], -dstStride[1], -dstStride[2], -dstStride[3] }; src2[0] += (srcSliceH - 1) * srcStride[0]; if (!usePal(c->srcFormat)) src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1]; src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2]; src2[3] += (srcSliceH - 1) * srcStride[3]; dst2[0] += ( c->dstH - 1) * dstStride[0]; dst2[1] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[1]; dst2[2] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[2]; dst2[3] += ( c->dstH - 1) * dstStride[3]; reset_ptr(src2, c->srcFormat); reset_ptr((void*)dst2, c->dstFormat); /* reset slice direction at end of frame */ if (!srcSliceY) c->sliceDir = 0; ret = c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH, srcSliceH, dst2, dstStride2); } av_free(rgb0_tmp); return ret; }"} {"target": 1, "idx": 6107, "func": "static inline uint32_t vmsvga_fifo_read_raw(struct vmsvga_state_s *s) { uint32_t cmd = s->fifo[CMD(stop) >> 2]; s->cmd->stop = cpu_to_le32(CMD(stop) + 4); if (CMD(stop) >= CMD(max)) { s->cmd->stop = s->cmd->min; } return cmd; }"} {"target": 0, "idx": 6129, "func": "static void do_rematrixing(AC3DecodeContext *s) { int bnd, i; int end, bndend; int tmp0, tmp1; end = FFMIN(s->end_freq[1], s->end_freq[2]); for(bnd=0; bndnum_rematrixing_bands; bnd++) { if(s->rematrixing_flags[bnd]) { bndend = FFMIN(end, ff_ac3_rematrix_band_tab[bnd+1]); for(i=ff_ac3_rematrix_band_tab[bnd]; ifixed_coeffs[1][i]; tmp1 = s->fixed_coeffs[2][i]; s->fixed_coeffs[1][i] = tmp0 + tmp1; s->fixed_coeffs[2][i] = tmp0 - tmp1; } } } }"} {"target": 1, "idx": 6133, "func": "static void dead_tmp(TCGv tmp) { tcg_temp_free(tmp); num_temps--; }"} {"target": 1, "idx": 6153, "func": "int bdrv_has_zero_init(BlockDriverState *bs) { assert(bs->drv); if (bs->drv->bdrv_has_zero_init) { return bs->drv->bdrv_has_zero_init(bs); } return 1; }"} {"target": 1, "idx": 6155, "func": "static CharDriverState *qemu_chr_open_mux(CharDriverState *drv) { CharDriverState *chr; MuxDriver *d; chr = qemu_chr_alloc(); d = g_malloc0(sizeof(MuxDriver)); chr->opaque = d; d->drv = drv; d->focus = -1; chr->chr_write = mux_chr_write; chr->chr_update_read_handler = mux_chr_update_read_handler; chr->chr_accept_input = mux_chr_accept_input; /* Frontend guest-open / -close notification is not support with muxes */ chr->chr_set_fe_open = NULL; if (drv->chr_add_watch) { chr->chr_add_watch = mux_chr_add_watch; } /* only default to opened state if we've realized the initial * set of muxes */ chr->explicit_be_open = muxes_realized ? 
0 : 1; chr->is_mux = 1; return chr; }"} {"target": 1, "idx": 6158, "func": "static int scan_file(AVFormatContext *avctx, AVStream *vst, AVStream *ast, int file) { MlvContext *mlv = avctx->priv_data; AVIOContext *pb = mlv->pb[file]; int ret; while (!avio_feof(pb)) { int type; unsigned int size; type = avio_rl32(pb); size = avio_rl32(pb); avio_skip(pb, 8); //timestamp if (size < 16) break; size -= 16; if (vst && type == MKTAG('R','A','W','I') && size >= 164) { vst->codec->width = avio_rl16(pb); vst->codec->height = avio_rl16(pb); if (avio_rl32(pb) != 1) avpriv_request_sample(avctx, \"raw api version\"); avio_skip(pb, 20); // pointer, width, height, pitch, frame_size vst->codec->bits_per_coded_sample = avio_rl32(pb); avio_skip(pb, 8 + 16 + 24); // black_level, white_level, xywh, active_area, exposure_bias if (avio_rl32(pb) != 0x2010100) /* RGGB */ avpriv_request_sample(avctx, \"cfa_pattern\"); avio_skip(pb, 80); // calibration_illuminant1, color_matrix1, dynamic_range vst->codec->pix_fmt = AV_PIX_FMT_BAYER_RGGB16LE; vst->codec->codec_tag = MKTAG('B', 'I', 'T', 16); size -= 164; } else if (ast && type == MKTAG('W', 'A', 'V', 'I') && size >= 16) { ret = ff_get_wav_header(avctx, pb, ast->codec, 16, 0); size -= 16; } else if (type == MKTAG('I','N','F','O')) { if (size > 0) read_string(avctx, pb, \"info\", size); continue; } else if (type == MKTAG('I','D','N','T') && size >= 36) { read_string(avctx, pb, \"cameraName\", 32); read_uint32(avctx, pb, \"cameraModel\", \"0x%\"PRIx32); size -= 36; if (size >= 32) { read_string(avctx, pb, \"cameraSerial\", 32); size -= 32; } else if (type == MKTAG('L','E','N','S') && size >= 48) { read_uint16(avctx, pb, \"focalLength\", \"%i\"); read_uint16(avctx, pb, \"focalDist\", \"%i\"); read_uint16(avctx, pb, \"aperture\", \"%i\"); read_uint8(avctx, pb, \"stabilizerMode\", \"%i\"); read_uint8(avctx, pb, \"autofocusMode\", \"%i\"); read_uint32(avctx, pb, \"flags\", \"0x%\"PRIx32); read_uint32(avctx, pb, \"lensID\", \"%\"PRIi32); read_string(avctx, pb, \"lensName\", 32); size -= 48; if (size >= 32) { read_string(avctx, pb, \"lensSerial\", 32); size -= 32; } else if (vst && type == MKTAG('V', 'I', 'D', 'F') && size >= 4) { uint64_t pts = avio_rl32(pb); ff_add_index_entry(&vst->index_entries, &vst->nb_index_entries, &vst->index_entries_allocated_size, avio_tell(pb) - 20, pts, file, 0, AVINDEX_KEYFRAME); size -= 4; } else if (ast && type == MKTAG('A', 'U', 'D', 'F') && size >= 4) { uint64_t pts = avio_rl32(pb); ff_add_index_entry(&ast->index_entries, &ast->nb_index_entries, &ast->index_entries_allocated_size, avio_tell(pb) - 20, pts, file, 0, AVINDEX_KEYFRAME); size -= 4; } else if (vst && type == MKTAG('W','B','A','L') && size >= 28) { read_uint32(avctx, pb, \"wb_mode\", \"%\"PRIi32); read_uint32(avctx, pb, \"kelvin\", \"%\"PRIi32); read_uint32(avctx, pb, \"wbgain_r\", \"%\"PRIi32); read_uint32(avctx, pb, \"wbgain_g\", \"%\"PRIi32); read_uint32(avctx, pb, \"wbgain_b\", \"%\"PRIi32); read_uint32(avctx, pb, \"wbs_gm\", \"%\"PRIi32); read_uint32(avctx, pb, \"wbs_ba\", \"%\"PRIi32); size -= 28; } else if (type == MKTAG('R','T','C','I') && size >= 20) { char str[32]; struct tm time = { 0 }; time.tm_sec = avio_rl16(pb); time.tm_min = avio_rl16(pb); time.tm_hour = avio_rl16(pb); time.tm_mday = avio_rl16(pb); time.tm_mon = avio_rl16(pb); time.tm_year = avio_rl16(pb); time.tm_wday = avio_rl16(pb); time.tm_yday = avio_rl16(pb); time.tm_isdst = avio_rl16(pb); avio_skip(pb, 2); if (strftime(str, sizeof(str), \"%Y-%m-%d %H:%M:%S\", &time)) av_dict_set(&avctx->metadata, \"time\", str, 
0); size -= 20; } else if (type == MKTAG('E','X','P','O') && size >= 16) { av_dict_set(&avctx->metadata, \"isoMode\", avio_rl32(pb) ? \"auto\" : \"manual\", 0); read_uint32(avctx, pb, \"isoValue\", \"%\"PRIi32); read_uint32(avctx, pb, \"isoAnalog\", \"%\"PRIi32); read_uint32(avctx, pb, \"digitalGain\", \"%\"PRIi32); size -= 16; if (size >= 8) { read_uint64(avctx, pb, \"shutterValue\", \"%\"PRIi64); size -= 8; } else if (type == MKTAG('S','T','Y','L') && size >= 36) { read_uint32(avctx, pb, \"picStyleId\", \"%\"PRIi32); read_uint32(avctx, pb, \"contrast\", \"%\"PRIi32); read_uint32(avctx, pb, \"sharpness\", \"%\"PRIi32); read_uint32(avctx, pb, \"saturation\", \"%\"PRIi32); read_uint32(avctx, pb, \"colortone\", \"%\"PRIi32); read_string(avctx, pb, \"picStyleName\", 16); size -= 36; } else if (type == MKTAG('M','A','R','K')) { } else if (type == MKTAG('N','U','L','L')) { } else if (type == MKTAG('M','L','V','I')) { /* occurs when MLV and Mnn files are concatenated */ } else { av_log(avctx, AV_LOG_INFO, \"unsupported tag %c%c%c%c, size %u\\n\", type&0xFF, (type>>8)&0xFF, (type>>16)&0xFF, (type>>24)&0xFF, size); avio_skip(pb, size); return 0;"} {"target": 1, "idx": 6174, "func": "static void qmp_query_auth(VncDisplay *vd, VncInfo2 *info) { switch (vd->auth) { case VNC_AUTH_VNC: info->auth = VNC_PRIMARY_AUTH_VNC; break; case VNC_AUTH_RA2: info->auth = VNC_PRIMARY_AUTH_RA2; break; case VNC_AUTH_RA2NE: info->auth = VNC_PRIMARY_AUTH_RA2NE; break; case VNC_AUTH_TIGHT: info->auth = VNC_PRIMARY_AUTH_TIGHT; break; case VNC_AUTH_ULTRA: info->auth = VNC_PRIMARY_AUTH_ULTRA; break; case VNC_AUTH_TLS: info->auth = VNC_PRIMARY_AUTH_TLS; break; case VNC_AUTH_VENCRYPT: info->auth = VNC_PRIMARY_AUTH_VENCRYPT; #ifdef CONFIG_VNC_TLS info->has_vencrypt = true; switch (vd->subauth) { case VNC_AUTH_VENCRYPT_PLAIN: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_PLAIN; break; case VNC_AUTH_VENCRYPT_TLSNONE: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_TLS_NONE; break; case VNC_AUTH_VENCRYPT_TLSVNC: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_TLS_VNC; break; case VNC_AUTH_VENCRYPT_TLSPLAIN: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_TLS_PLAIN; break; case VNC_AUTH_VENCRYPT_X509NONE: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_X509_NONE; break; case VNC_AUTH_VENCRYPT_X509VNC: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_X509_VNC; break; case VNC_AUTH_VENCRYPT_X509PLAIN: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_X509_PLAIN; break; case VNC_AUTH_VENCRYPT_TLSSASL: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_TLS_SASL; break; case VNC_AUTH_VENCRYPT_X509SASL: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_X509_SASL; break; default: info->has_vencrypt = false; break; } #endif break; case VNC_AUTH_SASL: info->auth = VNC_PRIMARY_AUTH_SASL; break; case VNC_AUTH_NONE: default: info->auth = VNC_PRIMARY_AUTH_NONE; break; } }"} {"target": 0, "idx": 6183, "func": "int hvf_vcpu_exec(CPUState *cpu) { X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; int ret = 0; uint64_t rip = 0; cpu->halted = 0; if (hvf_process_events(cpu)) { return EXCP_HLT; } do { if (cpu->vcpu_dirty) { hvf_put_registers(cpu); cpu->vcpu_dirty = false; } env->hvf_emul->interruptable = !(rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) & (VMCS_INTERRUPTIBILITY_STI_BLOCKING | VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)); hvf_inject_interrupts(cpu); vmx_update_tpr(cpu); qemu_mutex_unlock_iothread(); if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) { qemu_mutex_lock_iothread(); return EXCP_HLT; } hv_return_t r = hv_vcpu_run(cpu->hvf_fd); assert_hvf_ok(r); /* handle VMEXIT */ uint64_t exit_reason = 
rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON); uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION); uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH); uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO); rip = rreg(cpu->hvf_fd, HV_X86_RIP); RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS); env->eflags = RFLAGS(env); qemu_mutex_lock_iothread(); update_apic_tpr(cpu); current_cpu = cpu; ret = 0; switch (exit_reason) { case EXIT_REASON_HLT: { macvm_set_rip(cpu, rip + ins_len); if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && (EFLAGS(env) & IF_MASK)) && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) && !(idtvec_info & VMCS_IDT_VEC_VALID)) { cpu->halted = 1; ret = EXCP_HLT; } ret = EXCP_INTERRUPT; break; } case EXIT_REASON_MWAIT: { ret = EXCP_INTERRUPT; break; } /* Need to check if MMIO or unmmaped fault */ case EXIT_REASON_EPT_FAULT: { hvf_slot *slot; addr_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS); if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) && ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) { vmx_set_nmi_blocking(cpu); } slot = hvf_find_overlap_slot(gpa, gpa); /* mmio */ if (ept_emulation_fault(slot, gpa, exit_qual)) { struct x86_decode decode; load_regs(cpu); env->hvf_emul->fetch_rip = rip; decode_instruction(env, &decode); exec_instruction(env, &decode); store_regs(cpu); break; } break; } case EXIT_REASON_INOUT: { uint32_t in = (exit_qual & 8) != 0; uint32_t size = (exit_qual & 7) + 1; uint32_t string = (exit_qual & 16) != 0; uint32_t port = exit_qual >> 16; /*uint32_t rep = (exit_qual & 0x20) != 0;*/ #if 1 if (!string && in) { uint64_t val = 0; load_regs(cpu); hvf_handle_io(env, port, &val, 0, size, 1); if (size == 1) { AL(env) = val; } else if (size == 2) { AX(env) = val; } else if (size == 4) { RAX(env) = (uint32_t)val; } else { VM_PANIC(\"size\"); } RIP(env) += ins_len; store_regs(cpu); break; } else if (!string && !in) { RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX); hvf_handle_io(env, port, &RAX(env), 1, size, 1); macvm_set_rip(cpu, rip + ins_len); break; } #endif struct x86_decode decode; load_regs(cpu); env->hvf_emul->fetch_rip = rip; decode_instruction(env, &decode); VM_PANIC_ON(ins_len != decode.len); exec_instruction(env, &decode); store_regs(cpu); break; } case EXIT_REASON_CPUID: { uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX); uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX); uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX); uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX); cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx); wreg(cpu->hvf_fd, HV_X86_RAX, rax); wreg(cpu->hvf_fd, HV_X86_RBX, rbx); wreg(cpu->hvf_fd, HV_X86_RCX, rcx); wreg(cpu->hvf_fd, HV_X86_RDX, rdx); macvm_set_rip(cpu, rip + ins_len); break; } case EXIT_REASON_XSETBV: { X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX); uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX); uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX); if (ecx) { macvm_set_rip(cpu, rip + ins_len); break; } env->xcr0 = ((uint64_t)edx << 32) | eax; wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1); macvm_set_rip(cpu, rip + ins_len); break; } case EXIT_REASON_INTR_WINDOW: vmx_clear_int_window_exiting(cpu); ret = EXCP_INTERRUPT; break; case EXIT_REASON_NMI_WINDOW: vmx_clear_nmi_window_exiting(cpu); ret = EXCP_INTERRUPT; break; case EXIT_REASON_EXT_INTR: /* force exit and allow io handling */ ret = EXCP_INTERRUPT; break; case EXIT_REASON_RDMSR: case EXIT_REASON_WRMSR: { load_regs(cpu); if 
(exit_reason == EXIT_REASON_RDMSR) { simulate_rdmsr(cpu); } else { simulate_wrmsr(cpu); } RIP(env) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH); store_regs(cpu); break; } case EXIT_REASON_CR_ACCESS: { int cr; int reg; load_regs(cpu); cr = exit_qual & 15; reg = (exit_qual >> 8) & 15; switch (cr) { case 0x0: { macvm_set_cr0(cpu->hvf_fd, RRX(env, reg)); break; } case 4: { macvm_set_cr4(cpu->hvf_fd, RRX(env, reg)); break; } case 8: { X86CPU *x86_cpu = X86_CPU(cpu); if (exit_qual & 0x10) { RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state); } else { int tpr = RRX(env, reg); cpu_set_apic_tpr(x86_cpu->apic_state, tpr); ret = EXCP_INTERRUPT; } break; } default: error_report(\"Unrecognized CR %d\\n\", cr); abort(); } RIP(env) += ins_len; store_regs(cpu); break; } case EXIT_REASON_APIC_ACCESS: { /* TODO */ struct x86_decode decode; load_regs(cpu); env->hvf_emul->fetch_rip = rip; decode_instruction(env, &decode); exec_instruction(env, &decode); store_regs(cpu); break; } case EXIT_REASON_TPR: { ret = 1; break; } case EXIT_REASON_TASK_SWITCH: { uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO); x68_segment_selector sel = {.sel = exit_qual & 0xffff}; vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3, vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo & VMCS_INTR_T_MASK); break; } case EXIT_REASON_TRIPLE_FAULT: { qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); ret = EXCP_INTERRUPT; break; } case EXIT_REASON_RDPMC: wreg(cpu->hvf_fd, HV_X86_RAX, 0); wreg(cpu->hvf_fd, HV_X86_RDX, 0); macvm_set_rip(cpu, rip + ins_len); break; case VMX_REASON_VMCALL: /* TODO: inject #GP fault */ break; default: error_report(\"%llx: unhandled exit %llx\\n\", rip, exit_reason); } } while (ret == 0); return ret; }"} {"target": 0, "idx": 6185, "func": "static void dummy_event_handler(void *opaque) { }"} {"target": 0, "idx": 6197, "func": "void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint) { TAILQ_REMOVE(&env->watchpoints, watchpoint, entry); tlb_flush_page(env, watchpoint->vaddr); qemu_free(watchpoint); }"} {"target": 0, "idx": 6198, "func": "void armv7m_nvic_set_pending(void *opaque, int irq, bool secure) { NVICState *s = (NVICState *)opaque; bool banked = exc_is_banked(irq); VecInfo *vec; assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); assert(!secure || banked); vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq]; trace_nvic_set_pending(irq, secure, vec->enabled, vec->prio); if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) { /* If a synchronous exception is pending then it may be * escalated to HardFault if: * * it is equal or lower priority to current execution * * it is disabled * (ie we need to take it immediately but we can't do so). * Asynchronous exceptions (and interrupts) simply remain pending. * * For QEMU, we don't have any imprecise (asynchronous) faults, * so we can assume that PREFETCH_ABORT and DATA_ABORT are always * synchronous. * Debug exceptions are awkward because only Debug exceptions * resulting from the BKPT instruction should be escalated, * but we don't currently implement any Debug exceptions other * than those that result from BKPT, so we treat all debug exceptions * as needing escalation. * * This all means we can identify whether to escalate based only on * the exception number and don't (yet) need the caller to explicitly * tell us whether this exception is synchronous or not. 
*/ int running = nvic_exec_prio(s); bool escalate = false; if (vec->prio >= running) { trace_nvic_escalate_prio(irq, vec->prio, running); escalate = true; } else if (!vec->enabled) { trace_nvic_escalate_disabled(irq); escalate = true; } if (escalate) { if (running < 0) { /* We want to escalate to HardFault but we can't take a * synchronous HardFault at this point either. This is a * Lockup condition due to a guest bug. We don't model * Lockup, so report via cpu_abort() instead. */ cpu_abort(&s->cpu->parent_obj, \"Lockup: can't escalate %d to HardFault \" \"(current priority %d)\\n\", irq, running); } /* We can do the escalation, so we take HardFault instead. * If BFHFNMINS is set then we escalate to the banked HF for * the target security state of the original exception; otherwise * we take a Secure HardFault. */ irq = ARMV7M_EXCP_HARD; if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) && (secure || !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) { vec = &s->sec_vectors[irq]; } else { vec = &s->vectors[irq]; } /* HF may be banked but there is only one shared HFSR */ s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK; } } if (!vec->pending) { vec->pending = 1; nvic_irq_update(s); } }"} {"target": 0, "idx": 6199, "func": "void portio_list_init(PortioList *piolist, const MemoryRegionPortio *callbacks, void *opaque, const char *name) { unsigned n = 0; while (callbacks[n].size) { ++n; } piolist->ports = callbacks; piolist->nr = 0; piolist->regions = g_new0(MemoryRegion *, n); piolist->aliases = g_new0(MemoryRegion *, n); piolist->address_space = NULL; piolist->opaque = opaque; piolist->name = name; }"} {"target": 1, "idx": 6202, "func": "static int mch_init(PCIDevice *d) { int i; hwaddr pci_hole64_size; MCHPCIState *mch = MCH_PCI_DEVICE(d); /* Leave enough space for the biggest MCFG BAR */ /* TODO: this matches current bios behaviour, but * it's not a power of two, which means an MTRR * can't cover it exactly. */ mch->guest_info->pci_info.w32.begin = MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT + MCH_HOST_BRIDGE_PCIEXBAR_MAX; /* setup pci memory regions */ memory_region_init_alias(&mch->pci_hole, OBJECT(mch), \"pci-hole\", mch->pci_address_space, mch->below_4g_mem_size, 0x100000000ULL - mch->below_4g_mem_size); memory_region_add_subregion(mch->system_memory, mch->below_4g_mem_size, &mch->pci_hole); pci_hole64_size = (sizeof(hwaddr) == 4 ? 
0 : ((uint64_t)1 << 62)); memory_region_init_alias(&mch->pci_hole_64bit, OBJECT(mch), \"pci-hole64\", mch->pci_address_space, 0x100000000ULL + mch->above_4g_mem_size, pci_hole64_size); if (pci_hole64_size) { memory_region_add_subregion(mch->system_memory, 0x100000000ULL + mch->above_4g_mem_size, &mch->pci_hole_64bit); } /* smram */ cpu_smm_register(&mch_set_smm, mch); memory_region_init_alias(&mch->smram_region, OBJECT(mch), \"smram-region\", mch->pci_address_space, 0xa0000, 0x20000); memory_region_add_subregion_overlap(mch->system_memory, 0xa0000, &mch->smram_region, 1); memory_region_set_enabled(&mch->smram_region, false); init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory, mch->pci_address_space, &mch->pam_regions[0], PAM_BIOS_BASE, PAM_BIOS_SIZE); for (i = 0; i < 12; ++i) { init_pam(DEVICE(mch), mch->ram_memory, mch->system_memory, mch->pci_address_space, &mch->pam_regions[i+1], PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE); } return 0; }"} {"target": 1, "idx": 6207, "func": "static void gen_mtsr(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); #else TCGv t0; if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(t0); #endif }"} {"target": 1, "idx": 6215, "func": "static int get_codec_data(ByteIOContext *pb, AVStream *vst, AVStream *ast, int myth) { frametype_t frametype; if (!vst && !myth) return 1; // no codec data needed while (!url_feof(pb)) { int size, subtype; frametype = get_byte(pb); switch (frametype) { case NUV_EXTRADATA: subtype = get_byte(pb); url_fskip(pb, 6); size = PKTSIZE(get_le32(pb)); if (subtype == 'R') { vst->codec->extradata_size = size; vst->codec->extradata = av_malloc(size); get_buffer(pb, vst->codec->extradata, size); size = 0; if (!myth) return 1; } break; case NUV_MYTHEXT: url_fskip(pb, 7); size = PKTSIZE(get_le32(pb)); if (size != 128 * 4) break; get_le32(pb); // version if (vst) { vst->codec->codec_tag = get_le32(pb); vst->codec->codec_id = codec_get_id(codec_bmp_tags, vst->codec->codec_tag); } else url_fskip(pb, 4); if (ast) { ast->codec->codec_tag = get_le32(pb); ast->codec->sample_rate = get_le32(pb); ast->codec->bits_per_sample = get_le32(pb); ast->codec->channels = get_le32(pb); ast->codec->codec_id = wav_codec_get_id(ast->codec->codec_tag, ast->codec->bits_per_sample); } else url_fskip(pb, 4 * 4); size -= 6 * 4; url_fskip(pb, size); return 1; case NUV_SEEKP: size = 11; break; default: url_fskip(pb, 7); size = PKTSIZE(get_le32(pb)); break; } url_fskip(pb, size); } return 0; }"} {"target": 1, "idx": 6269, "func": "static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, unsigned src_size) { const uint8_t *s = src; const uint8_t *end; #ifdef HAVE_MMX const uint8_t *mm_end; #endif uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX __asm __volatile(PREFETCH\" %0\"::\"m\"(*src):\"memory\"); __asm __volatile( \"movq %0, %%mm7\\n\\t\" \"movq %1, %%mm6\\n\\t\" ::\"m\"(red_15mask),\"m\"(green_15mask)); mm_end = end - 11; while(s < mm_end) { __asm __volatile( PREFETCH\" 32%1\\n\\t\" \"movd %1, %%mm0\\n\\t\" \"movd 3%1, %%mm3\\n\\t\" \"punpckldq 6%1, %%mm0\\n\\t\" \"punpckldq 9%1, %%mm3\\n\\t\" \"movq %%mm0, %%mm1\\n\\t\" \"movq %%mm0, %%mm2\\n\\t\" \"movq %%mm3, %%mm4\\n\\t\" \"movq %%mm3, %%mm5\\n\\t\" \"psrlq $3, %%mm0\\n\\t\" \"psrlq $3, %%mm3\\n\\t\" \"pand %2, %%mm0\\n\\t\" \"pand %2, %%mm3\\n\\t\" \"psrlq $6, %%mm1\\n\\t\" 
\"psrlq $6, %%mm4\\n\\t\" \"pand %%mm6, %%mm1\\n\\t\" \"pand %%mm6, %%mm4\\n\\t\" \"psrlq $9, %%mm2\\n\\t\" \"psrlq $9, %%mm5\\n\\t\" \"pand %%mm7, %%mm2\\n\\t\" \"pand %%mm7, %%mm5\\n\\t\" \"por %%mm1, %%mm0\\n\\t\" \"por %%mm4, %%mm3\\n\\t\" \"por %%mm2, %%mm0\\n\\t\" \"por %%mm5, %%mm3\\n\\t\" \"psllq $16, %%mm3\\n\\t\" \"por %%mm3, %%mm0\\n\\t\" MOVNTQ\" %%mm0, %0\\n\\t\" :\"=m\"(*d):\"m\"(*s),\"m\"(blue_15mask):\"memory\"); d += 4; s += 12; } __asm __volatile(SFENCE:::\"memory\"); __asm __volatile(EMMS:::\"memory\"); #endif while(s < end) { const int b= *s++; const int g= *s++; const int r= *s++; *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); } }"} {"target": 1, "idx": 6274, "func": "static int rsd_read_header(AVFormatContext *s) { AVIOContext *pb = s->pb; int i, ret, version, start = 0x800; AVCodecParameters *par; AVStream *st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); avio_skip(pb, 3); // \"RSD\" version = avio_r8(pb) - '0'; par = st->codecpar; par->codec_type = AVMEDIA_TYPE_AUDIO; par->codec_tag = avio_rl32(pb); par->codec_id = ff_codec_get_id(rsd_tags, par->codec_tag); if (!par->codec_id) { char tag_buf[32]; av_get_codec_tag_string(tag_buf, sizeof(tag_buf), par->codec_tag); for (i=0; i < FF_ARRAY_ELEMS(rsd_unsupported_tags); i++) { if (par->codec_tag == rsd_unsupported_tags[i]) { avpriv_request_sample(s, \"Codec tag: %s\", tag_buf); return AVERROR_PATCHWELCOME; } } av_log(s, AV_LOG_ERROR, \"Unknown codec tag: %s\\n\", tag_buf); return AVERROR_INVALIDDATA; } par->channels = avio_rl32(pb); if (!par->channels) return AVERROR_INVALIDDATA; avio_skip(pb, 4); // Bit depth par->sample_rate = avio_rl32(pb); if (!par->sample_rate) return AVERROR_INVALIDDATA; avio_skip(pb, 4); // Unknown switch (par->codec_id) { case AV_CODEC_ID_XMA2: par->block_align = 2048; ff_alloc_extradata(par, 34); if (!par->extradata) return AVERROR(ENOMEM); memset(par->extradata, 0, 34); break; case AV_CODEC_ID_ADPCM_PSX: par->block_align = 16 * par->channels; if (pb->seekable) st->duration = av_get_audio_frame_duration2(par, avio_size(pb) - start); break; case AV_CODEC_ID_ADPCM_IMA_RAD: par->block_align = 20 * par->channels; if (pb->seekable) st->duration = av_get_audio_frame_duration2(par, avio_size(pb) - start); break; case AV_CODEC_ID_ADPCM_IMA_WAV: if (version == 2) start = avio_rl32(pb); par->bits_per_coded_sample = 4; par->block_align = 36 * par->channels; if (pb->seekable) st->duration = av_get_audio_frame_duration2(par, avio_size(pb) - start); break; case AV_CODEC_ID_ADPCM_THP_LE: /* RSD3GADP is mono, so only alloc enough memory to store the coeff table for a single channel. 
*/ start = avio_rl32(pb); if ((ret = ff_get_extradata(s, par, s->pb, 32)) < 0) return ret; if (pb->seekable) st->duration = av_get_audio_frame_duration2(par, avio_size(pb) - start); break; case AV_CODEC_ID_ADPCM_THP: par->block_align = 8 * par->channels; avio_skip(s->pb, 0x1A4 - avio_tell(s->pb)); if ((ret = ff_alloc_extradata(st->codecpar, 32 * par->channels)) < 0) return ret; for (i = 0; i < par->channels; i++) { avio_read(s->pb, st->codecpar->extradata + 32 * i, 32); avio_skip(s->pb, 8); } if (pb->seekable) st->duration = (avio_size(pb) - start) / (8 * par->channels) * 14; break; case AV_CODEC_ID_PCM_S16LE: case AV_CODEC_ID_PCM_S16BE: if (version != 4) start = avio_rl32(pb); if (pb->seekable) st->duration = (avio_size(pb) - start) / 2 / par->channels; break; } avio_skip(pb, start - avio_tell(pb)); if (par->codec_id == AV_CODEC_ID_XMA2) { avio_skip(pb, avio_rb32(pb) + avio_rb32(pb)); st->duration = avio_rb32(pb); } avpriv_set_pts_info(st, 64, 1, par->sample_rate); return 0; }"} {"target": 1, "idx": 6275, "func": "static void device_initfn(Object *obj) { DeviceState *dev = DEVICE(obj); ObjectClass *class; Property *prop; if (qdev_hotplug) { dev->hotplugged = 1; qdev_hot_added = true; } dev->instance_id_alias = -1; dev->realized = false; object_property_add_bool(obj, \"realized\", device_get_realized, device_set_realized, NULL); object_property_add_bool(obj, \"hotpluggable\", device_get_hotpluggable, NULL, NULL); object_property_add_bool(obj, \"hotplugged\", device_get_hotplugged, device_set_hotplugged, &error_abort); class = object_get_class(OBJECT(dev)); do { for (prop = DEVICE_CLASS(class)->props; prop && prop->name; prop++) { qdev_property_add_legacy(dev, prop, &error_abort); qdev_property_add_static(dev, prop, &error_abort); } class = object_class_get_parent(class); } while (class != object_class_by_name(TYPE_DEVICE)); object_property_add_link(OBJECT(dev), \"parent_bus\", TYPE_BUS, (Object **)&dev->parent_bus, NULL, 0, &error_abort); QLIST_INIT(&dev->gpios); }"} {"target": 0, "idx": 6282, "func": "static int http_prepare_data(HTTPContext *c) { int i, len, ret; AVFormatContext *ctx; switch(c->state) { case HTTPSTATE_SEND_DATA_HEADER: memset(&c->fmt_ctx, 0, sizeof(c->fmt_ctx)); pstrcpy(c->fmt_ctx.author, sizeof(c->fmt_ctx.author), c->stream->author); pstrcpy(c->fmt_ctx.comment, sizeof(c->fmt_ctx.comment), c->stream->comment); pstrcpy(c->fmt_ctx.copyright, sizeof(c->fmt_ctx.copyright), c->stream->copyright); pstrcpy(c->fmt_ctx.title, sizeof(c->fmt_ctx.title), c->stream->title); /* open output stream by using specified codecs */ c->fmt_ctx.oformat = c->stream->fmt; c->fmt_ctx.nb_streams = c->stream->nb_streams; for(i=0;ifmt_ctx.nb_streams;i++) { AVStream *st; st = av_mallocz(sizeof(AVStream)); c->fmt_ctx.streams[i] = st; /* if file or feed, then just take streams from FFStream struct */ if (!c->stream->feed || c->stream->feed == c->stream) memcpy(st, c->stream->streams[i], sizeof(AVStream)); else memcpy(st, c->stream->feed->streams[c->stream->feed_streams[i]], sizeof(AVStream)); st->codec.frame_number = 0; /* XXX: should be done in AVStream, not in codec */ } c->got_key_frame = 0; /* prepare header and save header data in a stream */ if (url_open_dyn_buf(&c->fmt_ctx.pb) < 0) { /* XXX: potential leak */ return -1; } c->fmt_ctx.pb.is_streamed = 1; av_write_header(&c->fmt_ctx); len = url_close_dyn_buf(&c->fmt_ctx.pb, &c->pb_buffer); c->buffer_ptr = c->pb_buffer; c->buffer_end = c->pb_buffer + len; c->state = HTTPSTATE_SEND_DATA; c->last_packet_sent = 0; break; case HTTPSTATE_SEND_DATA: /* 
find a new packet */ { AVPacket pkt; /* read a packet from the input stream */ if (c->stream->feed) { ffm_set_write_index(c->fmt_in, c->stream->feed->feed_write_index, c->stream->feed->feed_size); } if (c->stream->max_time && c->stream->max_time + c->start_time - cur_time < 0) { /* We have timed out */ c->state = HTTPSTATE_SEND_DATA_TRAILER; } else { if (c->is_packetized) { if (compute_send_delay(c) > 0) { c->state = HTTPSTATE_WAIT; return 1; /* state changed */ } } if (av_read_frame(c->fmt_in, &pkt) < 0) { if (c->stream->feed && c->stream->feed->feed_opened) { /* if coming from feed, it means we reached the end of the ffm file, so must wait for more data */ c->state = HTTPSTATE_WAIT_FEED; return 1; /* state changed */ } else { /* must send trailer now because eof or error */ c->state = HTTPSTATE_SEND_DATA_TRAILER; } } else { /* update first pts if needed */ if (c->first_pts == AV_NOPTS_VALUE) c->first_pts = pkt.pts; /* send it to the appropriate stream */ if (c->stream->feed) { /* if coming from a feed, select the right stream */ if (c->switch_pending) { c->switch_pending = 0; for(i=0;istream->nb_streams;i++) { if (c->switch_feed_streams[i] == pkt.stream_index) { if (pkt.flags & PKT_FLAG_KEY) { do_switch_stream(c, i); } } if (c->switch_feed_streams[i] >= 0) { c->switch_pending = 1; } } } for(i=0;istream->nb_streams;i++) { if (c->feed_streams[i] == pkt.stream_index) { pkt.stream_index = i; if (pkt.flags & PKT_FLAG_KEY) { c->got_key_frame |= 1 << i; } /* See if we have all the key frames, then * we start to send. This logic is not quite * right, but it works for the case of a * single video stream with one or more * audio streams (for which every frame is * typically a key frame). */ if (!c->stream->send_on_key || ((c->got_key_frame + 1) >> c->stream->nb_streams)) { goto send_it; } } } } else { AVCodecContext *codec; send_it: /* specific handling for RTP: we use several output stream (one for each RTP connection). XXX: need more abstract handling */ if (c->is_packetized) { c->packet_stream_index = pkt.stream_index; ctx = c->rtp_ctx[c->packet_stream_index]; codec = &ctx->streams[0]->codec; } else { ctx = &c->fmt_ctx; /* Fudge here */ codec = &ctx->streams[pkt.stream_index]->codec; } codec->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0); #ifdef PJSG if (codec->codec_type == CODEC_TYPE_AUDIO) { codec->frame_size = (codec->sample_rate * pkt.duration + 500000) / 1000000; /* printf(\"Calculated size %d, from sr %d, duration %d\\n\", codec->frame_size, codec->sample_rate, pkt.duration); */ } #endif if (c->is_packetized) { ret = url_open_dyn_packet_buf(&ctx->pb, url_get_max_packet_size(c->rtp_handles[c->packet_stream_index])); c->packet_byte_count = 0; c->packet_start_time_us = av_gettime(); } else { ret = url_open_dyn_buf(&ctx->pb); } if (ret < 0) { /* XXX: potential leak */ return -1; } if (av_write_packet(ctx, &pkt, pkt.pts)) { c->state = HTTPSTATE_SEND_DATA_TRAILER; } len = url_close_dyn_buf(&ctx->pb, &c->pb_buffer); c->buffer_ptr = c->pb_buffer; c->buffer_end = c->pb_buffer + len; codec->frame_number++; } #ifndef AV_READ_FRAME av_free_packet(&pkt); #endif } } } break; default: case HTTPSTATE_SEND_DATA_TRAILER: /* last packet test ? 
*/ if (c->last_packet_sent || c->is_packetized) return -1; ctx = &c->fmt_ctx; /* prepare header */ if (url_open_dyn_buf(&ctx->pb) < 0) { /* XXX: potential leak */ return -1; } av_write_trailer(ctx); len = url_close_dyn_buf(&ctx->pb, &c->pb_buffer); c->buffer_ptr = c->pb_buffer; c->buffer_end = c->pb_buffer + len; c->last_packet_sent = 1; break; } return 0; }"} {"target": 0, "idx": 6286, "func": "static inline void cris_update_result(DisasContext *dc, TCGv res) { if (dc->update_cc) { if (dc->cc_size == 4 && (dc->cc_op == CC_OP_SUB || dc->cc_op == CC_OP_ADD)) return; tcg_gen_mov_tl(cc_result, res); } }"} {"target": 0, "idx": 6287, "func": "static inline TranslationBlock *tb_find_fast(CPUState *cpu, TranslationBlock *last_tb, int tb_exit) { CPUArchState *env = (CPUArchState *)cpu->env_ptr; TranslationBlock *tb; target_ulong cs_base, pc; uint32_t flags; /* we record a subset of the CPU state. It will always be the same before a given translated block is executed. */ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); tb_lock(); tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]); if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base || tb->flags != flags)) { tb = tb_find_slow(cpu, pc, cs_base, flags); } #ifndef CONFIG_USER_ONLY /* We don't take care of direct jumps when address mapping changes in * system emulation. So it's not safe to make a direct jump to a TB * spanning two pages because the mapping for the second page can change. */ if (tb->page_addr[1] != -1) { last_tb = NULL; } #endif /* See if we can patch the calling TB. */ if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { /* Check if translation buffer has been flushed */ if (cpu->tb_flushed) { cpu->tb_flushed = false; } else { tb_add_jump(last_tb, tb_exit, tb); } } tb_unlock(); return tb; }"} {"target": 1, "idx": 6308, "func": "static int proxy_init(FsContext *ctx) { V9fsProxy *proxy = g_malloc(sizeof(V9fsProxy)); int sock_id; if (ctx->export_flags & V9FS_PROXY_SOCK_NAME) { sock_id = connect_namedsocket(ctx->fs_root); } else { sock_id = atoi(ctx->fs_root); if (sock_id < 0) { fprintf(stderr, \"socket descriptor not initialized\\n\"); return -1; } } g_free(ctx->fs_root); proxy->in_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ); proxy->in_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ; proxy->out_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ); proxy->out_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ; ctx->private = proxy; proxy->sockfd = sock_id; qemu_mutex_init(&proxy->mutex); ctx->export_flags |= V9FS_PATHNAME_FSCONTEXT; ctx->exops.get_st_gen = proxy_ioc_getversion; return 0; }"} {"target": 1, "idx": 6314, "func": "int kvm_arch_init(MachineState *ms, KVMState *s) { MachineClass *mc = MACHINE_GET_CLASS(ms); mc->default_cpu_type = S390_CPU_TYPE_NAME(\"host\"); cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS); cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF); cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP); cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ); if (!kvm_check_extension(s, KVM_CAP_S390_GMAP) || !kvm_check_extension(s, KVM_CAP_S390_COW)) { phys_mem_set_alloc(legacy_s390_alloc); } kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0); kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0); kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0); if (ri_allowed()) { if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) { cap_ri = 1; } } if (gs_allowed()) { if (kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0) == 0) { cap_gs = 1; } } /* * The migration interface for 
ais was introduced with kernel 4.13 * but the capability itself had been active since 4.12. As migration * support is considered necessary let's disable ais in the 2.10 * machine. */ /* kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); */ return 0; }"} {"target": 0, "idx": 6322, "func": "static void decode_fixed_sparse(AMRFixed *fixed_sparse, const uint16_t *pulses, const enum Mode mode, const int subframe) { av_assert1(MODE_4k75 <= mode && mode <= MODE_12k2); if (mode == MODE_12k2) { ff_decode_10_pulses_35bits(pulses, fixed_sparse, gray_decode, 5, 3); } else if (mode == MODE_10k2) { decode_8_pulses_31bits(pulses, fixed_sparse); } else { int *pulse_position = fixed_sparse->x; int i, pulse_subset; const int fixed_index = pulses[0]; if (mode <= MODE_5k15) { pulse_subset = ((fixed_index >> 3) & 8) + (subframe << 1); pulse_position[0] = ( fixed_index & 7) * 5 + track_position[pulse_subset]; pulse_position[1] = ((fixed_index >> 3) & 7) * 5 + track_position[pulse_subset + 1]; fixed_sparse->n = 2; } else if (mode == MODE_5k9) { pulse_subset = ((fixed_index & 1) << 1) + 1; pulse_position[0] = ((fixed_index >> 1) & 7) * 5 + pulse_subset; pulse_subset = (fixed_index >> 4) & 3; pulse_position[1] = ((fixed_index >> 6) & 7) * 5 + pulse_subset + (pulse_subset == 3 ? 1 : 0); fixed_sparse->n = pulse_position[0] == pulse_position[1] ? 1 : 2; } else if (mode == MODE_6k7) { pulse_position[0] = (fixed_index & 7) * 5; pulse_subset = (fixed_index >> 2) & 2; pulse_position[1] = ((fixed_index >> 4) & 7) * 5 + pulse_subset + 1; pulse_subset = (fixed_index >> 6) & 2; pulse_position[2] = ((fixed_index >> 8) & 7) * 5 + pulse_subset + 2; fixed_sparse->n = 3; } else { // mode <= MODE_7k95 pulse_position[0] = gray_decode[ fixed_index & 7]; pulse_position[1] = gray_decode[(fixed_index >> 3) & 7] + 1; pulse_position[2] = gray_decode[(fixed_index >> 6) & 7] + 2; pulse_subset = (fixed_index >> 9) & 1; pulse_position[3] = gray_decode[(fixed_index >> 10) & 7] + pulse_subset + 3; fixed_sparse->n = 4; } for (i = 0; i < fixed_sparse->n; i++) fixed_sparse->y[i] = (pulses[1] >> i) & 1 ? 1.0 : -1.0; } }"} {"target": 1, "idx": 6339, "func": "static void cg3_reg_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { CG3State *s = opaque; uint8_t regval; int i; DPRINTF(\"write %\" PRIx64 \" to reg %\" HWADDR_PRIx \" size %d\\n\", val, addr, size); switch (addr) { case CG3_REG_BT458_ADDR: s->dac_index = val; s->dac_state = 0; break; case CG3_REG_BT458_COLMAP: /* This register can be written to as either a long word or a byte */ if (size == 1) { val <<= 24; } for (i = 0; i < size; i++) { regval = val >> 24; switch (s->dac_state) { case 0: s->r[s->dac_index] = regval; s->dac_state++; break; case 1: s->g[s->dac_index] = regval; s->dac_state++; break; case 2: s->b[s->dac_index] = regval; /* Index autoincrement */ s->dac_index = (s->dac_index + 1) & 0xff; default: s->dac_state = 0; break; } val <<= 8; } s->full_update = 1; break; case CG3_REG_FBC_CTRL: s->regs[0] = val; break; case CG3_REG_FBC_STATUS: if (s->regs[1] & CG3_SR_PENDING_INT) { /* clear interrupt */ s->regs[1] &= ~CG3_SR_PENDING_INT; qemu_irq_lower(s->irq); } break; case CG3_REG_FBC_CURSTART ... 
CG3_REG_SIZE: s->regs[addr - 0x10] = val; break; default: qemu_log_mask(LOG_UNIMP, \"cg3: Unimplemented register write \" \"reg 0x%\" HWADDR_PRIx \" size 0x%x value 0x%\" PRIx64 \"\\n\", addr, size, val); break; } }"} {"target": 1, "idx": 6352, "func": "static void qxl_dirty_surfaces(PCIQXLDevice *qxl) { intptr_t vram_start; int i; if (qxl->mode != QXL_MODE_NATIVE && qxl->mode != QXL_MODE_COMPAT) { return; } /* dirty the primary surface */ qxl_set_dirty(&qxl->vga.vram, qxl->shadow_rom.draw_area_offset, qxl->shadow_rom.surface0_area_size); vram_start = (intptr_t)memory_region_get_ram_ptr(&qxl->vram_bar); /* dirty the off-screen surfaces */ for (i = 0; i < qxl->ssd.num_surfaces; i++) { QXLSurfaceCmd *cmd; intptr_t surface_offset; int surface_size; if (qxl->guest_surfaces.cmds[i] == 0) { continue; } cmd = qxl_phys2virt(qxl, qxl->guest_surfaces.cmds[i], MEMSLOT_GROUP_GUEST); assert(cmd); assert(cmd->type == QXL_SURFACE_CMD_CREATE); surface_offset = (intptr_t)qxl_phys2virt(qxl, cmd->u.surface_create.data, MEMSLOT_GROUP_GUEST); assert(surface_offset); surface_offset -= vram_start; surface_size = cmd->u.surface_create.height * abs(cmd->u.surface_create.stride); trace_qxl_surfaces_dirty(qxl->id, i, (int)surface_offset, surface_size); qxl_set_dirty(&qxl->vram_bar, surface_offset, surface_size); } }"} {"target": 1, "idx": 6361, "func": "VirtIODevice *virtio_blk_init(DeviceState *dev, VirtIOBlkConf *blk) { VirtIOBlock *s; static int virtio_blk_id; if (!blk->conf.bs) { error_report(\"drive property not set\"); return NULL; } if (!bdrv_is_inserted(blk->conf.bs)) { error_report(\"Device needs media, but drive is empty\"); return NULL; } blkconf_serial(&blk->conf, &blk->serial); if (blkconf_geometry(&blk->conf, NULL, 65535, 255, 255) < 0) { return NULL; } s = (VirtIOBlock *)virtio_common_init(\"virtio-blk\", VIRTIO_ID_BLOCK, sizeof(struct virtio_blk_config), sizeof(VirtIOBlock)); s->vdev.get_config = virtio_blk_update_config; s->vdev.set_config = virtio_blk_set_config; s->vdev.get_features = virtio_blk_get_features; s->vdev.set_status = virtio_blk_set_status; s->vdev.reset = virtio_blk_reset; s->bs = blk->conf.bs; s->conf = &blk->conf; s->blk = blk; s->rq = NULL; s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1; s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output); #ifdef CONFIG_VIRTIO_BLK_DATA_PLANE if (!virtio_blk_data_plane_create(&s->vdev, blk, &s->dataplane)) { virtio_cleanup(&s->vdev); return NULL; } #endif qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s); s->qdev = dev; register_savevm(dev, \"virtio-blk\", virtio_blk_id++, 2, virtio_blk_save, virtio_blk_load, s); bdrv_set_dev_ops(s->bs, &virtio_block_ops, s); bdrv_set_buffer_alignment(s->bs, s->conf->logical_block_size); bdrv_iostatus_enable(s->bs); add_boot_device_path(s->conf->bootindex, dev, \"/disk@0,0\"); return &s->vdev; }"} {"target": 1, "idx": 6377, "func": "void _decode_opc(DisasContext * ctx) { #if 0 fprintf(stderr, \"Translating opcode 0x%04x\\n\", ctx->opcode); #endif switch (ctx->opcode) { case 0x0019: /* div0u */ tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T)); return; case 0x000b: /* rts */ CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0028: /* clrmac */ tcg_gen_movi_i32(cpu_mach, 0); tcg_gen_movi_i32(cpu_macl, 0); return; case 0x0048: /* clrs */ tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S); return; case 0x0008: /* clrt */ gen_clr_t(); return; case 0x0038: /* ldtlb */ #if 
defined(CONFIG_USER_ONLY) assert(0); /* XXXXX */ #else tcg_gen_helper_0_0(helper_ldtlb); #endif return; case 0x002b: /* rte */ CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_sr, cpu_ssr); tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0058: /* sets */ tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S); return; case 0x0018: /* sett */ gen_set_t(); return; case 0xfbfd: /* frchg */ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR); ctx->bstate = BS_STOP; return; case 0xf3fd: /* fschg */ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ); ctx->bstate = BS_STOP; return; case 0x0009: /* nop */ return; case 0x001b: /* sleep */ if (ctx->memidx) { tcg_gen_helper_0_1(helper_sleep, tcg_const_i32(ctx->pc + 2)); } else { tcg_gen_helper_0_0(helper_raise_illegal_instruction); ctx->bstate = BS_EXCP; } return; } switch (ctx->opcode & 0xf000) { case 0x1000: /* mov.l Rm,@(disp,Rn) */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4); tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x5000: /* mov.l @(disp,Rm),Rn */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4); tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xe000: /* mov #imm,Rn */ tcg_gen_movi_i32(REG(B11_8), B7_0s); return; case 0x9000: /* mov.w @(disp,PC),Rn */ { TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2); tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xd000: /* mov.l @(disp,PC),Rn */ { TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3); tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x7000: /* add #imm,Rn */ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s); return; case 0xa000: /* bra disp */ CHECK_NOT_DELAY_SLOT ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2; tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc); ctx->flags |= DELAY_SLOT; return; case 0xb000: /* bsr disp */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2; tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc); ctx->flags |= DELAY_SLOT; return; } switch (ctx->opcode & 0xf00f) { case 0x6003: /* mov Rm,Rn */ tcg_gen_mov_i32(REG(B11_8), REG(B7_4)); return; case 0x2000: /* mov.b Rm,@Rn */ tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx); return; case 0x2001: /* mov.w Rm,@Rn */ tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx); return; case 0x2002: /* mov.l Rm,@Rn */ tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx); return; case 0x6000: /* mov.b @Rm,Rn */ tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx); return; case 0x6001: /* mov.w @Rm,Rn */ tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx); return; case 0x6002: /* mov.l @Rm,Rn */ tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx); return; case 0x2004: /* mov.b Rm,@-Rn */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_subi_i32(addr, REG(B11_8), 1); tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */ tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1); /* modify register status */ tcg_temp_free(addr); } return; case 0x2005: /* mov.w Rm,@-Rn */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_subi_i32(addr, REG(B11_8), 2); tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 2); tcg_temp_free(addr); } return; case 0x2006: /* mov.l Rm,@-Rn */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_subi_i32(addr, REG(B11_8), 4); 
tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); } return; case 0x6004: /* mov.b @Rm+,Rn */ tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx); if ( B11_8 != B7_4 ) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1); return; case 0x6005: /* mov.w @Rm+,Rn */ tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx); if ( B11_8 != B7_4 ) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2); return; case 0x6006: /* mov.l @Rm+,Rn */ tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx); if ( B11_8 != B7_4 ) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); return; case 0x0004: /* mov.b Rm,@(R0,Rn) */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x0005: /* mov.w Rm,@(R0,Rn) */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x0006: /* mov.l Rm,@(R0,Rn) */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x000c: /* mov.b @(R0,Rm),Rn */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x000d: /* mov.w @(R0,Rm),Rn */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x000e: /* mov.l @(R0,Rm),Rn */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x6008: /* swap.b Rm,Rn */ { TCGv high, low; high = tcg_temp_new(TCG_TYPE_I32); tcg_gen_ext8u_i32(high, REG(B7_4)); tcg_gen_shli_i32(high, high, 8); low = tcg_temp_new(TCG_TYPE_I32); tcg_gen_shri_i32(low, REG(B7_4), 8); tcg_gen_ext8u_i32(low, low); tcg_gen_or_i32(REG(B11_8), high, low); tcg_temp_free(low); tcg_temp_free(high); } return; case 0x6009: /* swap.w Rm,Rn */ { TCGv high, low; high = tcg_temp_new(TCG_TYPE_I32); tcg_gen_ext16u_i32(high, REG(B7_4)); tcg_gen_shli_i32(high, high, 16); low = tcg_temp_new(TCG_TYPE_I32); tcg_gen_shri_i32(low, REG(B7_4), 16); tcg_gen_ext16u_i32(low, low); tcg_gen_or_i32(REG(B11_8), high, low); tcg_temp_free(low); tcg_temp_free(high); } return; case 0x200d: /* xtrct Rm,Rn */ { TCGv high, low; high = tcg_temp_new(TCG_TYPE_I32); tcg_gen_ext16u_i32(high, REG(B7_4)); tcg_gen_shli_i32(high, high, 16); low = tcg_temp_new(TCG_TYPE_I32); tcg_gen_shri_i32(low, REG(B11_8), 16); tcg_gen_ext16u_i32(low, low); tcg_gen_or_i32(REG(B11_8), high, low); tcg_temp_free(low); tcg_temp_free(high); } return; case 0x300c: /* add Rm,Rn */ tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x300e: /* addc Rm,Rn */ tcg_gen_helper_1_2(helper_addc, REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x300f: /* addv Rm,Rn */ tcg_gen_helper_1_2(helper_addv, REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x2009: /* and Rm,Rn */ tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x3000: /* cmp/eq Rm,Rn */ gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8)); return; case 0x3003: /* cmp/ge Rm,Rn */ gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8)); return; case 0x3007: /* cmp/gt Rm,Rn */ gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8)); return; case 0x3006: /* cmp/hi Rm,Rn */ gen_cmp(TCG_COND_GTU, REG(B7_4), 
REG(B11_8)); return; case 0x3002: /* cmp/hs Rm,Rn */ gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8)); return; case 0x200c: /* cmp/str Rm,Rn */ { int label1 = gen_new_label(); int label2 = gen_new_label(); TCGv cmp1 = tcg_temp_local_new(TCG_TYPE_I32); TCGv cmp2 = tcg_temp_local_new(TCG_TYPE_I32); tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8)); tcg_gen_andi_i32(cmp2, cmp1, 0xff000000); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T); tcg_gen_br(label2); gen_set_label(label1); tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T); gen_set_label(label2); tcg_temp_free(cmp2); tcg_temp_free(cmp1); } return; case 0x2007: /* div0s Rm,Rn */ { gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */ gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */ TCGv val = tcg_temp_new(TCG_TYPE_I32); tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8)); gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */ tcg_temp_free(val); } return; case 0x3004: /* div1 Rm,Rn */ tcg_gen_helper_1_2(helper_div1, REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x300d: /* dmuls.l Rm,Rn */ { TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64); TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64); tcg_gen_ext_i32_i64(tmp1, REG(B7_4)); tcg_gen_ext_i32_i64(tmp2, REG(B11_8)); tcg_gen_mul_i64(tmp1, tmp1, tmp2); tcg_gen_trunc_i64_i32(cpu_macl, tmp1); tcg_gen_shri_i64(tmp1, tmp1, 32); tcg_gen_trunc_i64_i32(cpu_mach, tmp1); tcg_temp_free(tmp2); tcg_temp_free(tmp1); } return; case 0x3005: /* dmulu.l Rm,Rn */ { TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64); TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64); tcg_gen_extu_i32_i64(tmp1, REG(B7_4)); tcg_gen_extu_i32_i64(tmp2, REG(B11_8)); tcg_gen_mul_i64(tmp1, tmp1, tmp2); tcg_gen_trunc_i64_i32(cpu_macl, tmp1); tcg_gen_shri_i64(tmp1, tmp1, 32); tcg_gen_trunc_i64_i32(cpu_mach, tmp1); tcg_temp_free(tmp2); tcg_temp_free(tmp1); } return; case 0x600e: /* exts.b Rm,Rn */ tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4)); return; case 0x600f: /* exts.w Rm,Rn */ tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4)); return; case 0x600c: /* extu.b Rm,Rn */ tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4)); return; case 0x600d: /* extu.w Rm,Rn */ tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4)); return; case 0x000f: /* mac.l @Rm+,@Rn+ */ { TCGv arg0, arg1; arg0 = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx); arg1 = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx); tcg_gen_helper_0_2(helper_macl, arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); } return; case 0x400f: /* mac.w @Rm+,@Rn+ */ { TCGv arg0, arg1; arg0 = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx); arg1 = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx); tcg_gen_helper_0_2(helper_macw, arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2); } return; case 0x0007: /* mul.l Rm,Rn */ tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8)); return; case 0x200f: /* muls.w Rm,Rn */ { TCGv arg0, arg1; arg0 = tcg_temp_new(TCG_TYPE_I32); tcg_gen_ext16s_i32(arg0, REG(B7_4)); arg1 = tcg_temp_new(TCG_TYPE_I32); tcg_gen_ext16s_i32(arg1, REG(B11_8)); 
tcg_gen_mul_i32(cpu_macl, arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); } return; case 0x200e: /* mulu.w Rm,Rn */ { TCGv arg0, arg1; arg0 = tcg_temp_new(TCG_TYPE_I32); tcg_gen_ext16u_i32(arg0, REG(B7_4)); arg1 = tcg_temp_new(TCG_TYPE_I32); tcg_gen_ext16u_i32(arg1, REG(B11_8)); tcg_gen_mul_i32(cpu_macl, arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); } return; case 0x600b: /* neg Rm,Rn */ tcg_gen_neg_i32(REG(B11_8), REG(B7_4)); return; case 0x600a: /* negc Rm,Rn */ tcg_gen_helper_1_1(helper_negc, REG(B11_8), REG(B7_4)); return; case 0x6007: /* not Rm,Rn */ tcg_gen_not_i32(REG(B11_8), REG(B7_4)); return; case 0x200b: /* or Rm,Rn */ tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x400c: /* shad Rm,Rn */ { int label1 = gen_new_label(); int label2 = gen_new_label(); int label3 = gen_new_label(); int label4 = gen_new_label(); TCGv shift = tcg_temp_local_new(TCG_TYPE_I32); tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1); /* Rm positive, shift to the left */ tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift); tcg_gen_br(label4); /* Rm negative, shift to the right */ gen_set_label(label1); tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2); tcg_gen_not_i32(shift, REG(B7_4)); tcg_gen_andi_i32(shift, shift, 0x1f); tcg_gen_addi_i32(shift, shift, 1); tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift); tcg_gen_br(label4); /* Rm = -32 */ gen_set_label(label2); tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3); tcg_gen_movi_i32(REG(B11_8), 0); tcg_gen_br(label4); gen_set_label(label3); tcg_gen_movi_i32(REG(B11_8), 0xffffffff); gen_set_label(label4); tcg_temp_free(shift); } return; case 0x400d: /* shld Rm,Rn */ { int label1 = gen_new_label(); int label2 = gen_new_label(); int label3 = gen_new_label(); TCGv shift = tcg_temp_local_new(TCG_TYPE_I32); tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1); /* Rm positive, shift to the left */ tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift); tcg_gen_br(label3); /* Rm negative, shift to the right */ gen_set_label(label1); tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2); tcg_gen_not_i32(shift, REG(B7_4)); tcg_gen_andi_i32(shift, shift, 0x1f); tcg_gen_addi_i32(shift, shift, 1); tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift); tcg_gen_br(label3); /* Rm = -32 */ gen_set_label(label2); tcg_gen_movi_i32(REG(B11_8), 0); gen_set_label(label3); tcg_temp_free(shift); } return; case 0x3008: /* sub Rm,Rn */ tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x300a: /* subc Rm,Rn */ tcg_gen_helper_1_2(helper_subc, REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x300b: /* subv Rm,Rn */ tcg_gen_helper_1_2(helper_subv, REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x2008: /* tst Rm,Rn */ { TCGv val = tcg_temp_new(TCG_TYPE_I32); tcg_gen_and_i32(val, REG(B7_4), REG(B11_8)); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_temp_free(val); } return; case 0x200a: /* xor Rm,Rn */ tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */ if (ctx->fpscr & FPSCR_SZ) { TCGv fp = tcg_temp_new(TCG_TYPE_I64); gen_load_fpr64(fp, XREG(B7_4)); gen_store_fpr64(fp, XREG(B11_8)); tcg_temp_free(fp); } else { TCGv fp = tcg_temp_new(TCG_TYPE_I32); gen_load_fpr32(fp, FREG(B7_4)); gen_store_fpr32(fp, FREG(B11_8)); tcg_temp_free(fp); } return; case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */ if (ctx->fpscr & FPSCR_SZ) { 
TCGv fp = tcg_temp_new(TCG_TYPE_I64); gen_load_fpr64(fp, XREG(B7_4)); tcg_gen_qemu_st64(fp, REG(B11_8), ctx->memidx); tcg_temp_free(fp); } else { TCGv fp = tcg_temp_new(TCG_TYPE_I32); gen_load_fpr32(fp, FREG(B7_4)); tcg_gen_qemu_st32(fp, REG(B11_8), ctx->memidx); tcg_temp_free(fp); } return; case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */ if (ctx->fpscr & FPSCR_SZ) { TCGv fp = tcg_temp_new(TCG_TYPE_I64); tcg_gen_qemu_ld64(fp, REG(B7_4), ctx->memidx); gen_store_fpr64(fp, XREG(B11_8)); tcg_temp_free(fp); } else { TCGv fp = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32u(fp, REG(B7_4), ctx->memidx); gen_store_fpr32(fp, FREG(B11_8)); tcg_temp_free(fp); } return; case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */ if (ctx->fpscr & FPSCR_SZ) { TCGv fp = tcg_temp_new(TCG_TYPE_I64); tcg_gen_qemu_ld64(fp, REG(B7_4), ctx->memidx); gen_store_fpr64(fp, XREG(B11_8)); tcg_temp_free(fp); tcg_gen_addi_i32(REG(B7_4),REG(B7_4), 8); } else { TCGv fp = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32u(fp, REG(B7_4), ctx->memidx); gen_store_fpr32(fp, FREG(B11_8)); tcg_temp_free(fp); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); } return; case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */ if (ctx->fpscr & FPSCR_SZ) { TCGv addr, fp; addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_subi_i32(addr, REG(B11_8), 8); fp = tcg_temp_new(TCG_TYPE_I64); gen_load_fpr64(fp, XREG(B7_4)); tcg_gen_qemu_st64(fp, addr, ctx->memidx); tcg_temp_free(fp); tcg_temp_free(addr); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 8); } else { TCGv addr, fp; addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_subi_i32(addr, REG(B11_8), 4); fp = tcg_temp_new(TCG_TYPE_I32); gen_load_fpr32(fp, FREG(B7_4)); tcg_gen_qemu_st32(fp, addr, ctx->memidx); tcg_temp_free(fp); tcg_temp_free(addr); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); } return; case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); if (ctx->fpscr & FPSCR_SZ) { TCGv fp = tcg_temp_new(TCG_TYPE_I64); tcg_gen_qemu_ld64(fp, addr, ctx->memidx); gen_store_fpr64(fp, XREG(B11_8)); tcg_temp_free(fp); } else { TCGv fp = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32u(fp, addr, ctx->memidx); gen_store_fpr32(fp, FREG(B11_8)); tcg_temp_free(fp); } tcg_temp_free(addr); } return; case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); if (ctx->fpscr & FPSCR_SZ) { TCGv fp = tcg_temp_new(TCG_TYPE_I64); gen_load_fpr64(fp, XREG(B7_4)); tcg_gen_qemu_st64(fp, addr, ctx->memidx); tcg_temp_free(fp); } else { TCGv fp = tcg_temp_new(TCG_TYPE_I32); gen_load_fpr32(fp, FREG(B7_4)); tcg_gen_qemu_st32(fp, addr, ctx->memidx); tcg_temp_free(fp); } tcg_temp_free(addr); } return; case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ { TCGv fp0, fp1; if (ctx->fpscr & FPSCR_PR) { if (ctx->opcode & 0x0110) break; /* illegal instruction */ fp0 = tcg_temp_new(TCG_TYPE_I64); fp1 = tcg_temp_new(TCG_TYPE_I64); gen_load_fpr64(fp0, DREG(B11_8)); gen_load_fpr64(fp1, DREG(B7_4)); } else { fp0 = tcg_temp_new(TCG_TYPE_I32); fp1 = tcg_temp_new(TCG_TYPE_I32); gen_load_fpr32(fp0, FREG(B11_8)); 
gen_load_fpr32(fp1, FREG(B7_4)); } switch (ctx->opcode & 0xf00f) { case 0xf000: /* fadd Rm,Rn */ if (ctx->fpscr & FPSCR_PR) tcg_gen_helper_1_2(helper_fadd_DT, fp0, fp0, fp1); else tcg_gen_helper_1_2(helper_fadd_FT, fp0, fp0, fp1); break; case 0xf001: /* fsub Rm,Rn */ if (ctx->fpscr & FPSCR_PR) tcg_gen_helper_1_2(helper_fsub_DT, fp0, fp0, fp1); else tcg_gen_helper_1_2(helper_fsub_FT, fp0, fp0, fp1); break; case 0xf002: /* fmul Rm,Rn */ if (ctx->fpscr & FPSCR_PR) tcg_gen_helper_1_2(helper_fmul_DT, fp0, fp0, fp1); else tcg_gen_helper_1_2(helper_fmul_FT, fp0, fp0, fp1); break; case 0xf003: /* fdiv Rm,Rn */ if (ctx->fpscr & FPSCR_PR) tcg_gen_helper_1_2(helper_fdiv_DT, fp0, fp0, fp1); else tcg_gen_helper_1_2(helper_fdiv_FT, fp0, fp0, fp1); break; case 0xf004: /* fcmp/eq Rm,Rn */ if (ctx->fpscr & FPSCR_PR) tcg_gen_helper_0_2(helper_fcmp_eq_DT, fp0, fp1); else tcg_gen_helper_0_2(helper_fcmp_eq_FT, fp0, fp1); return; case 0xf005: /* fcmp/gt Rm,Rn */ if (ctx->fpscr & FPSCR_PR) tcg_gen_helper_0_2(helper_fcmp_gt_DT, fp0, fp1); else tcg_gen_helper_0_2(helper_fcmp_gt_FT, fp0, fp1); return; } if (ctx->fpscr & FPSCR_PR) { gen_store_fpr64(fp0, DREG(B11_8)); } else { gen_store_fpr32(fp0, FREG(B11_8)); } tcg_temp_free(fp1); tcg_temp_free(fp0); } return; } switch (ctx->opcode & 0xff00) { case 0xc900: /* and #imm,R0 */ tcg_gen_andi_i32(REG(0), REG(0), B7_0); return; case 0xcd00: /* and.b #imm,@(R0,GBR) */ { TCGv addr, val; addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(addr, REG(0), cpu_gbr); val = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); tcg_gen_andi_i32(val, val, B7_0); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; case 0x8b00: /* bf label */ CHECK_NOT_DELAY_SLOT gen_conditional_jump(ctx, ctx->pc + 2, ctx->pc + 4 + B7_0s * 2); ctx->bstate = BS_BRANCH; return; case 0x8f00: /* bf/s label */ CHECK_NOT_DELAY_SLOT gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0); ctx->flags |= DELAY_SLOT_CONDITIONAL; return; case 0x8900: /* bt label */ CHECK_NOT_DELAY_SLOT gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, ctx->pc + 2); ctx->bstate = BS_BRANCH; return; case 0x8d00: /* bt/s label */ CHECK_NOT_DELAY_SLOT gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1); ctx->flags |= DELAY_SLOT_CONDITIONAL; return; case 0x8800: /* cmp/eq #imm,R0 */ gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s); return; case 0xc400: /* mov.b @(disp,GBR),R0 */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, cpu_gbr, B7_0); tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc500: /* mov.w @(disp,GBR),R0 */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2); tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc600: /* mov.l @(disp,GBR),R0 */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4); tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc000: /* mov.b R0,@(disp,GBR) */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, cpu_gbr, B7_0); tcg_gen_qemu_st8(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc100: /* mov.w R0,@(disp,GBR) */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2); tcg_gen_qemu_st16(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc200: /* mov.l R0,@(disp,GBR) */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4); 
tcg_gen_qemu_st32(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8000: /* mov.b R0,@(disp,Rn) */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, REG(B7_4), B3_0); tcg_gen_qemu_st8(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8100: /* mov.w R0,@(disp,Rn) */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); tcg_gen_qemu_st16(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8400: /* mov.b @(disp,Rn),R0 */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, REG(B7_4), B3_0); tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8500: /* mov.w @(disp,Rn),R0 */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc700: /* mova @(disp,PC),R0 */ tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3); return; case 0xcb00: /* or #imm,R0 */ tcg_gen_ori_i32(REG(0), REG(0), B7_0); return; case 0xcf00: /* or.b #imm,@(R0,GBR) */ { TCGv addr, val; addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(addr, REG(0), cpu_gbr); val = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); tcg_gen_ori_i32(val, val, B7_0); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; case 0xc300: /* trapa #imm */ { TCGv imm; CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pc, ctx->pc); imm = tcg_const_i32(B7_0); tcg_gen_helper_0_1(helper_trapa, imm); tcg_temp_free(imm); ctx->bstate = BS_BRANCH; } return; case 0xc800: /* tst #imm,R0 */ { TCGv val = tcg_temp_new(TCG_TYPE_I32); tcg_gen_andi_i32(val, REG(0), B7_0); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_temp_free(val); } return; case 0xcc00: /* tst.b #imm,@(R0,GBR) */ { TCGv val = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(val, REG(0), cpu_gbr); tcg_gen_qemu_ld8u(val, val, ctx->memidx); tcg_gen_andi_i32(val, val, B7_0); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_temp_free(val); } return; case 0xca00: /* xor #imm,R0 */ tcg_gen_xori_i32(REG(0), REG(0), B7_0); return; case 0xce00: /* xor.b #imm,@(R0,GBR) */ { TCGv addr, val; addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_add_i32(addr, REG(0), cpu_gbr); val = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); tcg_gen_xori_i32(val, val, B7_0); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; } switch (ctx->opcode & 0xf08f) { case 0x408e: /* ldc Rm,Rn_BANK */ tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8)); return; case 0x4087: /* ldc.l @Rm+,Rn_BANK */ tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); return; case 0x0082: /* stc Rm_BANK,Rn */ tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4)); return; case 0x4083: /* stc.l Rm_BANK,@-Rn */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx); tcg_temp_free(addr); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); } return; } switch (ctx->opcode & 0xf0ff) { case 0x0023: /* braf Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0003: /* bsrf Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x4015: /* cmp/pl Rn */ gen_cmp_imm(TCG_COND_GT, 
REG(B11_8), 0); return; case 0x4011: /* cmp/pz Rn */ gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0); return; case 0x4010: /* dt Rn */ tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1); gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0); return; case 0x402b: /* jmp @Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x400b: /* jsr @Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x400e: /* lds Rm,SR */ tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3); ctx->bstate = BS_STOP; return; case 0x4007: /* lds.l @Rm+,SR */ { TCGv val = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx); tcg_gen_andi_i32(cpu_sr, val, 0x700083f3); tcg_temp_free(val); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); ctx->bstate = BS_STOP; } return; case 0x0002: /* sts SR,Rn */ tcg_gen_mov_i32(REG(B11_8), cpu_sr); return; case 0x4003: /* sts SR,@-Rn */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx); tcg_temp_free(addr); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); } return; #define LDST(reg,ldnum,ldpnum,stnum,stpnum) \\ case ldnum: \\ tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \\ return; \\ case ldpnum: \\ tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \\ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \\ return; \\ case stnum: \\ tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \\ return; \\ case stpnum: \\ { \\ TCGv addr = tcg_temp_new(TCG_TYPE_I32); \\ tcg_gen_subi_i32(addr, REG(B11_8), 4); \\ tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \\ tcg_temp_free(addr); \\ tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); \\ } \\ return; LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013) LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023) LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033) LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043) LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2) LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002) LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012) LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022) LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052) case 0x406a: /* lds Rm,FPSCR */ tcg_gen_helper_0_1(helper_ld_fpscr, REG(B11_8)); ctx->bstate = BS_STOP; return; case 0x4066: /* lds.l @Rm+,FPSCR */ { TCGv addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); tcg_gen_helper_0_1(helper_ld_fpscr, addr); tcg_temp_free(addr); ctx->bstate = BS_STOP; } return; case 0x006a: /* sts FPSCR,Rn */ tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff); return; case 0x4062: /* sts FPSCR,@-Rn */ { TCGv addr, val; val = tcg_temp_new(TCG_TYPE_I32); tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff); addr = tcg_temp_new(TCG_TYPE_I32); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(val, addr, ctx->memidx); tcg_temp_free(addr); tcg_temp_free(val); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); } return; case 0x00c3: /* movca.l R0,@Rm */ tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx); return; case 0x0029: /* movt Rn */ tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T); return; case 0x0093: /* ocbi @Rn */ { TCGv dummy = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx); tcg_temp_free(dummy); } return; case 0x00a3: /* ocbp @Rn */ { TCGv dummy = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx); tcg_temp_free(dummy); } return; case 0x00b3: /* ocbwb @Rn */ { TCGv 
dummy = tcg_temp_new(TCG_TYPE_I32); tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx); tcg_temp_free(dummy); } return; case 0x0083: /* pref @Rn */ return; case 0x4024: /* rotcl Rn */ { TCGv tmp = tcg_temp_new(TCG_TYPE_I32); tcg_gen_mov_i32(tmp, cpu_sr); gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31); tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 0, tmp, 0); tcg_temp_free(tmp); } return; case 0x4025: /* rotcr Rn */ { TCGv tmp = tcg_temp_new(TCG_TYPE_I32); tcg_gen_mov_i32(tmp, cpu_sr); gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 31, tmp, 0); tcg_temp_free(tmp); } return; case 0x4004: /* rotl Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31); tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 0, cpu_sr, 0); return; case 0x4005: /* rotr Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 31, cpu_sr, 0); return; case 0x4000: /* shll Rn */ case 0x4020: /* shal Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31); tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1); return; case 0x4021: /* shar Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1); return; case 0x4001: /* shlr Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1); return; case 0x4008: /* shll2 Rn */ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2); return; case 0x4018: /* shll8 Rn */ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8); return; case 0x4028: /* shll16 Rn */ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16); return; case 0x4009: /* shlr2 Rn */ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2); return; case 0x4019: /* shlr8 Rn */ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8); return; case 0x4029: /* shlr16 Rn */ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16); return; case 0x401b: /* tas.b @Rn */ { TCGv addr, val; addr = tcg_temp_local_new(TCG_TYPE_I32); tcg_gen_mov_i32(addr, REG(B11_8)); val = tcg_temp_local_new(TCG_TYPE_I32); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_gen_ori_i32(val, val, 0x80); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */ { TCGv fp = tcg_temp_new(TCG_TYPE_I32); tcg_gen_mov_i32(fp, cpu_fpul); gen_store_fpr32(fp, FREG(B11_8)); tcg_temp_free(fp); } return; case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */ { TCGv fp = tcg_temp_new(TCG_TYPE_I32); gen_load_fpr32(fp, FREG(B11_8)); tcg_gen_mov_i32(cpu_fpul, fp); tcg_temp_free(fp); } return; case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */ if (ctx->fpscr & FPSCR_PR) { TCGv fp; if (ctx->opcode & 0x0100) break; /* illegal instruction */ fp = tcg_temp_new(TCG_TYPE_I64); tcg_gen_helper_1_1(helper_float_DT, fp, cpu_fpul); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free(fp); } else { TCGv fp = tcg_temp_new(TCG_TYPE_I32); tcg_gen_helper_1_1(helper_float_FT, fp, cpu_fpul); gen_store_fpr32(fp, FREG(B11_8)); tcg_temp_free(fp); } return; case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ if (ctx->fpscr & FPSCR_PR) { TCGv fp; if (ctx->opcode & 0x0100) break; /* illegal instruction */ fp = tcg_temp_new(TCG_TYPE_I64); gen_load_fpr64(fp, DREG(B11_8)); tcg_gen_helper_1_1(helper_ftrc_DT, cpu_fpul, fp); tcg_temp_free(fp); } else { TCGv fp = tcg_temp_new(TCG_TYPE_I32); gen_load_fpr32(fp, FREG(B11_8)); tcg_gen_helper_1_1(helper_ftrc_FT, cpu_fpul, fp); 
tcg_temp_free(fp); } return; case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */ { TCGv fp = tcg_temp_new(TCG_TYPE_I32); gen_load_fpr32(fp, FREG(B11_8)); tcg_gen_helper_1_1(helper_fneg_T, fp, fp); gen_store_fpr32(fp, FREG(B11_8)); tcg_temp_free(fp); } return; case 0xf05d: /* fabs FRn/DRn */ if (ctx->fpscr & FPSCR_PR) { if (ctx->opcode & 0x0100) break; /* illegal instruction */ TCGv fp = tcg_temp_new(TCG_TYPE_I64); gen_load_fpr64(fp, DREG(B11_8)); tcg_gen_helper_1_1(helper_fabs_DT, fp, fp); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free(fp); } else { TCGv fp = tcg_temp_new(TCG_TYPE_I32); gen_load_fpr32(fp, FREG(B11_8)); tcg_gen_helper_1_1(helper_fabs_FT, fp, fp); gen_store_fpr32(fp, FREG(B11_8)); tcg_temp_free(fp); } return; case 0xf06d: /* fsqrt FRn */ if (ctx->fpscr & FPSCR_PR) { if (ctx->opcode & 0x0100) break; /* illegal instruction */ TCGv fp = tcg_temp_new(TCG_TYPE_I64); gen_load_fpr64(fp, DREG(B11_8)); tcg_gen_helper_1_1(helper_fsqrt_DT, fp, fp); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free(fp); } else { TCGv fp = tcg_temp_new(TCG_TYPE_I32); gen_load_fpr32(fp, FREG(B11_8)); tcg_gen_helper_1_1(helper_fsqrt_FT, fp, fp); gen_store_fpr32(fp, FREG(B11_8)); tcg_temp_free(fp); } return; case 0xf07d: /* fsrra FRn */ break; case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */ if (!(ctx->fpscr & FPSCR_PR)) { TCGv val = tcg_const_i32(0); gen_load_fpr32(val, FREG(B11_8)); tcg_temp_free(val); return; } break; case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */ if (!(ctx->fpscr & FPSCR_PR)) { TCGv val = tcg_const_i32(0x3f800000); gen_load_fpr32(val, FREG(B11_8)); tcg_temp_free(val); return; } break; case 0xf0ad: /* fcnvsd FPUL,DRn */ { TCGv fp = tcg_temp_new(TCG_TYPE_I64); tcg_gen_helper_1_1(helper_fcnvsd_FT_DT, fp, cpu_fpul); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free(fp); } return; case 0xf0bd: /* fcnvds DRn,FPUL */ { TCGv fp = tcg_temp_new(TCG_TYPE_I64); gen_load_fpr64(fp, DREG(B11_8)); tcg_gen_helper_1_1(helper_fcnvds_DT_FT, cpu_fpul, fp); tcg_temp_free(fp); } return; } fprintf(stderr, \"unknown instruction 0x%04x at pc 0x%08x\\n\", ctx->opcode, ctx->pc); tcg_gen_helper_0_0(helper_raise_illegal_instruction); ctx->bstate = BS_EXCP; }"} {"target": 0, "idx": 6401, "func": "static void a9_scu_write(void *opaque, target_phys_addr_t offset, uint64_t value, unsigned size) { a9mp_priv_state *s = (a9mp_priv_state *)opaque; uint32_t mask; uint32_t shift; switch (size) { case 1: mask = 0xff; break; case 2: mask = 0xffff; break; case 4: mask = 0xffffffff; break; default: fprintf(stderr, \"Invalid size %u in write to a9 scu register %x\\n\", size, (unsigned)offset); return; } switch (offset) { case 0x00: /* Control */ s->scu_control = value & 1; break; case 0x4: /* Configuration: RO */ break; case 0x08: case 0x09: case 0x0A: case 0x0B: /* Power Control */ shift = (offset - 0x8) * 8; s->scu_status &= ~(mask << shift); s->scu_status |= ((value & mask) << shift); break; case 0x0c: /* Invalidate All Registers In Secure State */ /* no-op as we do not implement caches */ break; case 0x40: /* Filtering Start Address Register */ case 0x44: /* Filtering End Address Register */ /* RAZ/WI, like an implementation with only one AXI master */ break; case 0x50: /* SCU Access Control Register */ case 0x54: /* SCU Non-secure Access Control Register */ /* unimplemented, fall through */ default: break; } }"} {"target": 0, "idx": 6404, "func": "static void s390_virtio_net_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass); k->init = 
s390_virtio_net_init; dc->props = s390_virtio_net_properties; dc->alias = \"virtio-net\"; }"} {"target": 0, "idx": 6409, "func": "static void hb_synthesis(AMRWBContext *ctx, int subframe, float *samples, const float *exc, const float *isf, const float *isf_past) { float hb_lpc[LP_ORDER_16k]; enum Mode mode = ctx->fr_cur_mode; if (mode == MODE_6k60) { float e_isf[LP_ORDER_16k]; // ISF vector for extrapolation double e_isp[LP_ORDER_16k]; ff_weighted_vector_sumf(e_isf, isf_past, isf, isfp_inter[subframe], 1.0 - isfp_inter[subframe], LP_ORDER); extrapolate_isf(e_isf, e_isf); e_isf[LP_ORDER_16k - 1] *= 2.0; ff_acelp_lsf2lspd(e_isp, e_isf, LP_ORDER_16k); ff_amrwb_lsp2lpc(e_isp, hb_lpc, LP_ORDER_16k); lpc_weighting(hb_lpc, hb_lpc, 0.9, LP_ORDER_16k); } else { lpc_weighting(hb_lpc, ctx->lp_coef[subframe], 0.6, LP_ORDER); } ff_celp_lp_synthesis_filterf(samples, hb_lpc, exc, AMRWB_SFR_SIZE_16k, (mode == MODE_6k60) ? LP_ORDER_16k : LP_ORDER); }"} {"target": 1, "idx": 6425, "func": "static int qemu_savevm_state(QEMUFile *f, Error **errp) { int ret; MigrationParams params = { .blk = 0, .shared = 0 }; MigrationState *ms = migrate_init(¶ms); ms->to_dst_file = f; if (qemu_savevm_state_blocked(errp)) { return -EINVAL; } qemu_mutex_unlock_iothread(); qemu_savevm_state_header(f); qemu_savevm_state_begin(f, ¶ms); qemu_mutex_lock_iothread(); while (qemu_file_get_error(f) == 0) { if (qemu_savevm_state_iterate(f, false) > 0) { break; } } ret = qemu_file_get_error(f); if (ret == 0) { qemu_savevm_state_complete_precopy(f, false); ret = qemu_file_get_error(f); } qemu_savevm_state_cleanup(); if (ret != 0) { error_setg_errno(errp, -ret, \"Error while writing VM state\"); } return ret; }"} {"target": 0, "idx": 6431, "func": "static always_inline void dv_set_class_number(DCTELEM* blk, EncBlockInfo* bi, const uint8_t* zigzag_scan, int bias) { int i, area; int run; int classes[] = {12, 24, 36, 0xffff}; run = 0; bi->mb[0] = blk[0]; bi->cno = 0; for (area = 0; area < 4; area++) { bi->prev_run[area] = run; bi->bit_size[area] = 0; for (i=mb_area_start[area]; imb[i] = (blk[zigzag_scan[i]] / 16); while ((bi->mb[i] ^ (bi->mb[i] >> 8)) > classes[bi->cno]) bi->cno++; if (bi->mb[i]) { bi->bit_size[area] += dv_rl2vlc_size(run, bi->mb[i]); run = 0; } else ++run; } } bi->bit_size[3] += 4; /* EOB marker */ bi->cno += bias; if (bi->cno >= 3) { /* FIXME: we have to recreate bit_size[], prev_run[] */ bi->cno = 3; for (i=1; i<64; i++) bi->mb[i] /= 2; } }"} {"target": 1, "idx": 6440, "func": "static void t_gen_lsl(TCGv d, TCGv a, TCGv b) { TCGv t0, t_31; t0 = tcg_temp_new(TCG_TYPE_TL); t_31 = tcg_temp_new(TCG_TYPE_TL); tcg_gen_shl_tl(d, a, b); tcg_gen_movi_tl(t_31, 31); tcg_gen_sub_tl(t0, t_31, b); tcg_gen_sar_tl(t0, t0, t_31); tcg_gen_and_tl(t0, t0, d); tcg_gen_xor_tl(d, d, t0); tcg_temp_free(t0); tcg_temp_free(t_31); }"} {"target": 1, "idx": 6445, "func": "bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq) { VirtIOSCSIReq *req; bool progress = false; virtio_scsi_acquire(s); while ((req = virtio_scsi_pop_req(s, vq))) { progress = true; virtio_scsi_handle_ctrl_req(s, req); } virtio_scsi_release(s); return progress; }"} {"target": 1, "idx": 6461, "func": "static void gus_realizefn (DeviceState *dev, Error **errp) { ISADevice *d = ISA_DEVICE(dev); GUSState *s = GUS (dev); IsaDmaClass *k; struct audsettings as; AUD_register_card (\"gus\", &s->card); as.freq = s->freq; as.nchannels = 2; as.fmt = AUD_FMT_S16; as.endianness = GUS_ENDIANNESS; s->voice = AUD_open_out ( &s->card, NULL, \"gus\", s, GUS_callback, &as ); if (!s->voice) { 
AUD_remove_card (&s->card); error_setg(errp, \"No voice\"); return; } s->shift = 2; s->samples = AUD_get_buffer_size_out (s->voice) >> s->shift; s->mixbuf = g_malloc0 (s->samples << s->shift); isa_register_portio_list (d, s->port, gus_portio_list1, s, \"gus\"); isa_register_portio_list (d, (s->port + 0x100) & 0xf00, gus_portio_list2, s, \"gus\"); s->isa_dma = isa_get_dma(isa_bus_from_device(d), s->emu.gusdma); k = ISADMA_GET_CLASS(s->isa_dma); k->register_channel(s->isa_dma, s->emu.gusdma, GUS_read_DMA, s); s->emu.himemaddr = s->himem; s->emu.gusdatapos = s->emu.himemaddr + 1024 * 1024 + 32; s->emu.opaque = s; isa_init_irq (d, &s->pic, s->emu.gusirq); AUD_set_active_out (s->voice, 1); }"} {"target": 1, "idx": 6467, "func": "static inline bool migration_bitmap_clear_dirty(RAMState *rs, ram_addr_t addr) { bool ret; int nr = addr >> TARGET_PAGE_BITS; unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; ret = test_and_clear_bit(nr, bitmap); if (ret) { rs->migration_dirty_pages--; } return ret; }"} {"target": 0, "idx": 6491, "func": "static int test_vector_fmul_window(AVFloatDSPContext *fdsp, AVFloatDSPContext *cdsp, const float *v1, const float *v2, const float *v3) { LOCAL_ALIGNED(32, float, cdst, [LEN]); LOCAL_ALIGNED(32, float, odst, [LEN]); int ret; cdsp->vector_fmul_window(cdst, v1, v2, v3, LEN / 2); fdsp->vector_fmul_window(odst, v1, v2, v3, LEN / 2); if (ret = compare_floats(cdst, odst, LEN, ARBITRARY_FMUL_WINDOW_CONST)) av_log(NULL, AV_LOG_ERROR, \"vector_fmul_window failed\\n\"); return ret; }"} {"target": 1, "idx": 6495, "func": "static void unassign_storage(SCLPDevice *sclp, SCCB *sccb) { MemoryRegion *mr = NULL; AssignStorage *assign_info = (AssignStorage *) sccb; sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev(); ram_addr_t unassign_addr; MemoryRegion *sysmem = get_system_memory(); if (!mhd) { sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND); return; } unassign_addr = (assign_info->rn - 1) * mhd->rzm; /* if the addr is a multiple of 256 MB */ if ((unassign_addr % MEM_SECTION_SIZE == 0) && (unassign_addr >= mhd->padded_ram_size)) { mhd->standby_state_map[(unassign_addr - mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0; /* find the specified memory region and destroy it */ mr = memory_region_find(sysmem, unassign_addr, 1).mr; memory_region_unref(mr); if (mr) { int i; int is_removable = 1; ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size - (unassign_addr - mhd->padded_ram_size) % mhd->standby_subregion_size); /* Mark all affected subregions as 'standby' once again */ for (i = 0; i < (mhd->standby_subregion_size / MEM_SECTION_SIZE); i++) { if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) { is_removable = 0; break; } } if (is_removable) { memory_region_del_subregion(sysmem, mr); object_unref(OBJECT(mr)); } } } sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION); }"} {"target": 1, "idx": 6504, "func": "matroska_parse_block(MatroskaDemuxContext *matroska, uint8_t *data, int size, int64_t pos, uint64_t cluster_time, uint64_t duration, int is_keyframe, int is_bframe) { int res = 0; int track; AVStream *st; AVPacket *pkt; uint8_t *origdata = data; int16_t block_time; uint32_t *lace_size = NULL; int n, flags, laces = 0; uint64_t num; int stream_index; /* first byte(s): tracknum */ if ((n = matroska_ebmlnum_uint(data, size, &num)) < 0) { av_log(matroska->ctx, AV_LOG_ERROR, \"EBML block data error\\n\"); av_free(origdata); return res; } data += n; size -= n; /* fetch track from num */ track = 
matroska_find_track_by_num(matroska, num); if (size <= 3 || track < 0 || track >= matroska->num_tracks) { av_log(matroska->ctx, AV_LOG_INFO, \"Invalid stream %d or size %u\\n\", track, size); av_free(origdata); return res; } stream_index = matroska->tracks[track]->stream_index; if (stream_index < 0 || stream_index >= matroska->ctx->nb_streams) { av_free(origdata); return res; } st = matroska->ctx->streams[stream_index]; if (st->discard >= AVDISCARD_ALL) { av_free(origdata); return res; } if (duration == AV_NOPTS_VALUE) duration = matroska->tracks[track]->default_duration / matroska->time_scale; /* block_time (relative to cluster time) */ block_time = AV_RB16(data); data += 2; flags = *data++; size -= 3; if (is_keyframe == -1) is_keyframe = flags & 0x80 ? PKT_FLAG_KEY : 0; if (matroska->skip_to_keyframe) { if (!is_keyframe || st != matroska->skip_to_stream) { av_free(origdata); return res; } matroska->skip_to_keyframe = 0; } switch ((flags & 0x06) >> 1) { case 0x0: /* no lacing */ laces = 1; lace_size = av_mallocz(sizeof(int)); lace_size[0] = size; break; case 0x1: /* xiph lacing */ case 0x2: /* fixed-size lacing */ case 0x3: /* EBML lacing */ assert(size>0); // size <=3 is checked before size-=3 above laces = (*data) + 1; data += 1; size -= 1; lace_size = av_mallocz(laces * sizeof(int)); switch ((flags & 0x06) >> 1) { case 0x1: /* xiph lacing */ { uint8_t temp; uint32_t total = 0; for (n = 0; res == 0 && n < laces - 1; n++) { while (1) { if (size == 0) { res = -1; break; } temp = *data; lace_size[n] += temp; data += 1; size -= 1; if (temp != 0xff) break; } total += lace_size[n]; } lace_size[n] = size - total; break; } case 0x2: /* fixed-size lacing */ for (n = 0; n < laces; n++) lace_size[n] = size / laces; break; case 0x3: /* EBML lacing */ { uint32_t total; n = matroska_ebmlnum_uint(data, size, &num); if (n < 0) { av_log(matroska->ctx, AV_LOG_INFO, \"EBML block data error\\n\"); break; } data += n; size -= n; total = lace_size[0] = num; for (n = 1; res == 0 && n < laces - 1; n++) { int64_t snum; int r; r = matroska_ebmlnum_sint (data, size, &snum); if (r < 0) { av_log(matroska->ctx, AV_LOG_INFO, \"EBML block data error\\n\"); break; } data += r; size -= r; lace_size[n] = lace_size[n - 1] + snum; total += lace_size[n]; } lace_size[n] = size - total; break; } } break; } if (res == 0) { uint64_t timecode = AV_NOPTS_VALUE; if (cluster_time != (uint64_t)-1 && (block_time >= 0 || cluster_time >= -block_time)) timecode = cluster_time + block_time; for (n = 0; n < laces; n++) { if (st->codec->codec_id == CODEC_ID_RA_288 || st->codec->codec_id == CODEC_ID_COOK || st->codec->codec_id == CODEC_ID_ATRAC3) { MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *)matroska->tracks[track]; int a = st->codec->block_align; int sps = audiotrack->sub_packet_size; int cfs = audiotrack->coded_framesize; int h = audiotrack->sub_packet_h; int y = audiotrack->sub_packet_cnt; int w = audiotrack->frame_size; int x; if (!audiotrack->pkt_cnt) { if (st->codec->codec_id == CODEC_ID_RA_288) for (x=0; xbuf+x*2*w+y*cfs, data+x*cfs, cfs); else for (x=0; xbuf+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), data+x*sps, sps); if (++audiotrack->sub_packet_cnt >= h) { audiotrack->sub_packet_cnt = 0; audiotrack->pkt_cnt = h*w / a; } } while (audiotrack->pkt_cnt) { pkt = av_mallocz(sizeof(AVPacket)); av_new_packet(pkt, a); memcpy(pkt->data, audiotrack->buf + a * (h*w / a - audiotrack->pkt_cnt--), a); pkt->pos = pos; pkt->stream_index = stream_index; matroska_queue_packet(matroska, pkt); } } else { int result, offset = 0, ilen, olen, pkt_size 
= lace_size[n]; uint8_t *pkt_data = data; if (matroska->tracks[track]->encoding_scope & 1) { switch (matroska->tracks[track]->encoding_algo) { case MATROSKA_TRACK_ENCODING_COMP_HEADERSTRIP: offset = matroska->tracks[track]->encoding_settings_len; break; case MATROSKA_TRACK_ENCODING_COMP_LZO: pkt_data = NULL; do { ilen = lace_size[n]; olen = pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size+LZO_OUTPUT_PADDING); result = lzo1x_decode(pkt_data, &olen, data, &ilen); } while (result==LZO_OUTPUT_FULL && pkt_size<10000000); if (result) { av_free(pkt_data); continue; } pkt_size -= olen; break; #ifdef CONFIG_ZLIB case MATROSKA_TRACK_ENCODING_COMP_ZLIB: { z_stream zstream = {0}; pkt_data = NULL; if (inflateInit(&zstream) != Z_OK) continue; zstream.next_in = data; zstream.avail_in = lace_size[n]; do { pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size); zstream.avail_out = pkt_size - zstream.total_out; zstream.next_out = pkt_data + zstream.total_out; result = inflate(&zstream, Z_NO_FLUSH); } while (result==Z_OK && pkt_size<10000000); pkt_size = zstream.total_out; inflateEnd(&zstream); if (result != Z_STREAM_END) { av_free(pkt_data); continue; } break; } #endif #ifdef CONFIG_BZLIB case MATROSKA_TRACK_ENCODING_COMP_BZLIB: { bz_stream bzstream = {0}; pkt_data = NULL; if (BZ2_bzDecompressInit(&bzstream, 0, 0) != BZ_OK) continue; bzstream.next_in = data; bzstream.avail_in = lace_size[n]; do { pkt_size *= 3; pkt_data = av_realloc(pkt_data, pkt_size); bzstream.avail_out = pkt_size - bzstream.total_out_lo32; bzstream.next_out = pkt_data + bzstream.total_out_lo32; result = BZ2_bzDecompress(&bzstream); } while (result==BZ_OK && pkt_size<10000000); pkt_size = bzstream.total_out_lo32; BZ2_bzDecompressEnd(&bzstream); if (result != BZ_STREAM_END) { av_free(pkt_data); continue; } break; } #endif } } pkt = av_mallocz(sizeof(AVPacket)); /* XXX: prevent data copy... */ if (av_new_packet(pkt, pkt_size+offset) < 0) { res = AVERROR(ENOMEM); n = laces-1; break; } if (offset) memcpy (pkt->data, matroska->tracks[track]->encoding_settings, offset); memcpy (pkt->data+offset, pkt_data, pkt_size); if (n == 0) pkt->flags = is_keyframe; pkt->stream_index = stream_index; pkt->pts = timecode; pkt->pos = pos; pkt->duration = duration; matroska_queue_packet(matroska, pkt); } if (timecode != AV_NOPTS_VALUE) timecode = duration ? 
timecode + duration : AV_NOPTS_VALUE; data += lace_size[n]; } } av_free(lace_size); av_free(origdata); return res; }"} {"target": 1, "idx": 6508, "func": "static void do_flush_queued_data(VirtIOSerialPort *port, VirtQueue *vq, VirtIODevice *vdev) { VirtIOSerialPortClass *vsc; assert(port); assert(virtio_queue_ready(vq)); vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port); while (!port->throttled) { unsigned int i; /* Pop an elem only if we haven't left off a previous one mid-way */ if (!port->elem) { port->elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); if (!port->elem) { break; port->iov_idx = 0; port->iov_offset = 0; for (i = port->iov_idx; i < port->elem->out_num; i++) { size_t buf_size; ssize_t ret; buf_size = port->elem->out_sg[i].iov_len - port->iov_offset; ret = vsc->have_data(port, port->elem->out_sg[i].iov_base + port->iov_offset, buf_size); if (port->throttled) { port->iov_idx = i; if (ret > 0) { port->iov_offset += ret; break; port->iov_offset = 0; if (port->throttled) { break; virtqueue_push(vq, port->elem, 0); g_free(port->elem); port->elem = NULL; virtio_notify(vdev, vq);"} {"target": 1, "idx": 6515, "func": "yuv2planeX_16_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits) { int i; int shift = 15; av_assert0(output_bits == 16); for (i = 0; i < dstW; i++) { int val = 1 << (shift - 1); int j; /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline * filters (or anything with negative coeffs, the range can be slightly * wider in both directions. To account for this overflow, we subtract * a constant so it always fits in the signed range (assuming a * reasonable filterSize), and re-add that at the end. */ val -= 0x40000000; for (j = 0; j < filterSize; j++) val += src[j][i] * filter[j]; output_pixel(&dest[i], val, 0x8000, int); } }"} {"target": 0, "idx": 6517, "func": "static int generate_coupling_coordinates(AC3DecodeContext * ctx) { ac3_audio_block *ab = &ctx->audio_block; uint8_t exp, mstrcplco; int16_t mant; uint32_t cplbndstrc = (1 << ab->ncplsubnd) >> 1; int ch, bnd, sbnd; float cplco; if (ab->cplcoe) for (ch = 0; ch < ctx->bsi.nfchans; ch++) if (ab->cplcoe & (1 << ch)) { mstrcplco = 3 * ab->mstrcplco[ch]; sbnd = ab->cplbegf; for (bnd = 0; bnd < ab->ncplbnd; bnd++) { exp = ab->cplcoexp[ch][bnd]; if (exp == 15) mant = ab->cplcomant[ch][bnd] <<= 14; else mant = (ab->cplcomant[ch][bnd] | 0x10) << 13; cplco = to_float(exp + mstrcplco, mant); if (ctx->bsi.acmod == 0x02 && (ab->flags & AC3_AB_PHSFLGINU) && ch == 1 && (ab->phsflg & (1 << bnd))) cplco = -cplco; /* invert the right channel */ ab->cplco[ch][sbnd++] = cplco; while (cplbndstrc & ab->cplbndstrc) { cplbndstrc >>= 1; ab->cplco[ch][sbnd++] = cplco; } cplbndstrc >>= 1; } } return 0; }"} {"target": 1, "idx": 6523, "func": "static void fifo_deinit(AVFormatContext *avf) { FifoContext *fifo = avf->priv_data; av_dict_free(&fifo->format_options); avformat_free_context(fifo->avf); av_thread_message_queue_free(&fifo->queue); pthread_mutex_destroy(&fifo->overflow_flag_lock); }"} {"target": 0, "idx": 6541, "func": "static int v9fs_receive_response(V9fsProxy *proxy, int type, int *status, void *response) { int retval; ProxyHeader header; struct iovec *reply = &proxy->in_iovec; *status = 0; reply->iov_len = 0; retval = socket_read(proxy->sockfd, reply->iov_base, PROXY_HDR_SZ); if (retval < 0) { return retval; } reply->iov_len = PROXY_HDR_SZ; proxy_unmarshal(reply, 0, \"dd\", &header.type, &header.size); /* * if response size > PROXY_MAX_IO_SZ, read 
the response but ignore it and * return -ENOBUFS */ if (header.size > PROXY_MAX_IO_SZ) { int count; while (header.size > 0) { count = MIN(PROXY_MAX_IO_SZ, header.size); count = socket_read(proxy->sockfd, reply->iov_base, count); if (count < 0) { return count; } header.size -= count; } *status = -ENOBUFS; return 0; } retval = socket_read(proxy->sockfd, reply->iov_base + PROXY_HDR_SZ, header.size); if (retval < 0) { return retval; } reply->iov_len += header.size; /* there was an error during processing request */ if (header.type == T_ERROR) { int ret; ret = proxy_unmarshal(reply, PROXY_HDR_SZ, \"d\", status); if (ret < 0) { *status = ret; } return 0; } switch (type) { case T_LSTAT: { ProxyStat prstat; retval = proxy_unmarshal(reply, PROXY_HDR_SZ, \"qqqdddqqqqqqqqqq\", &prstat.st_dev, &prstat.st_ino, &prstat.st_nlink, &prstat.st_mode, &prstat.st_uid, &prstat.st_gid, &prstat.st_rdev, &prstat.st_size, &prstat.st_blksize, &prstat.st_blocks, &prstat.st_atim_sec, &prstat.st_atim_nsec, &prstat.st_mtim_sec, &prstat.st_mtim_nsec, &prstat.st_ctim_sec, &prstat.st_ctim_nsec); prstat_to_stat(response, &prstat); break; } case T_STATFS: { ProxyStatFS prstfs; retval = proxy_unmarshal(reply, PROXY_HDR_SZ, \"qqqqqqqqqqq\", &prstfs.f_type, &prstfs.f_bsize, &prstfs.f_blocks, &prstfs.f_bfree, &prstfs.f_bavail, &prstfs.f_files, &prstfs.f_ffree, &prstfs.f_fsid[0], &prstfs.f_fsid[1], &prstfs.f_namelen, &prstfs.f_frsize); prstatfs_to_statfs(response, &prstfs); break; } case T_READLINK: { V9fsString target; v9fs_string_init(&target); retval = proxy_unmarshal(reply, PROXY_HDR_SZ, \"s\", &target); strcpy(response, target.data); v9fs_string_free(&target); break; } case T_LGETXATTR: case T_LLISTXATTR: { V9fsString xattr; v9fs_string_init(&xattr); retval = proxy_unmarshal(reply, PROXY_HDR_SZ, \"s\", &xattr); memcpy(response, xattr.data, xattr.size); v9fs_string_free(&xattr); break; } case T_GETVERSION: proxy_unmarshal(reply, PROXY_HDR_SZ, \"q\", response); break; default: return -1; } if (retval < 0) { *status = retval; } return 0; }"} {"target": 1, "idx": 6553, "func": "static av_cold int vpx_init(AVCodecContext *avctx, const struct vpx_codec_iface *iface) { VP8Context *ctx = avctx->priv_data; struct vpx_codec_enc_cfg enccfg; int res; av_log(avctx, AV_LOG_INFO, \"%s\\n\", vpx_codec_version_str()); av_log(avctx, AV_LOG_VERBOSE, \"%s\\n\", vpx_codec_build_config()); if ((res = vpx_codec_enc_config_default(iface, &enccfg, 0)) != VPX_CODEC_OK) { av_log(avctx, AV_LOG_ERROR, \"Failed to get config: %s\\n\", vpx_codec_err_to_string(res)); if(!avctx->bit_rate) if(avctx->rc_max_rate || avctx->rc_buffer_size || avctx->rc_initial_buffer_occupancy) { av_log( avctx, AV_LOG_ERROR, \"Rate control parameters set without a bitrate\\n\"); dump_enc_cfg(avctx, &enccfg); enccfg.g_w = avctx->width; enccfg.g_h = avctx->height; enccfg.g_timebase.num = avctx->time_base.num; enccfg.g_timebase.den = avctx->time_base.den; enccfg.g_threads = avctx->thread_count; enccfg.g_lag_in_frames= ctx->lag_in_frames; if (avctx->flags & CODEC_FLAG_PASS1) enccfg.g_pass = VPX_RC_FIRST_PASS; else if (avctx->flags & CODEC_FLAG_PASS2) enccfg.g_pass = VPX_RC_LAST_PASS; else enccfg.g_pass = VPX_RC_ONE_PASS; if (avctx->rc_min_rate == avctx->rc_max_rate && avctx->rc_min_rate == avctx->bit_rate && avctx->bit_rate) enccfg.rc_end_usage = VPX_CBR; else if (ctx->crf) enccfg.rc_end_usage = VPX_CQ; if (avctx->bit_rate) { enccfg.rc_target_bitrate = av_rescale_rnd(avctx->bit_rate, 1, 1000, AV_ROUND_NEAR_INF); } else { enccfg.rc_target_bitrate = 1000000; } else { avctx->bit_rate = 
enccfg.rc_target_bitrate * 1000; av_log(avctx, AV_LOG_WARNING, \"Neither bitrate nor constrained quality specified, using default bitrate of %dkbit/sec\\n\", enccfg.rc_target_bitrate); if (avctx->qmin >= 0) enccfg.rc_min_quantizer = avctx->qmin; if (avctx->qmax > 0) enccfg.rc_max_quantizer = avctx->qmax; enccfg.rc_dropframe_thresh = avctx->frame_skip_threshold; //0-100 (0 => CBR, 100 => VBR) enccfg.rc_2pass_vbr_bias_pct = round(avctx->qcompress * 100); if (avctx->bit_rate) enccfg.rc_2pass_vbr_minsection_pct = avctx->rc_min_rate * 100LL / avctx->bit_rate; if (avctx->rc_max_rate) enccfg.rc_2pass_vbr_maxsection_pct = avctx->rc_max_rate * 100LL / avctx->bit_rate; if (avctx->rc_buffer_size) enccfg.rc_buf_sz = avctx->rc_buffer_size * 1000LL / avctx->bit_rate; if (avctx->rc_initial_buffer_occupancy) enccfg.rc_buf_initial_sz = avctx->rc_initial_buffer_occupancy * 1000LL / avctx->bit_rate; enccfg.rc_buf_optimal_sz = enccfg.rc_buf_sz * 5 / 6; enccfg.rc_undershoot_pct = round(avctx->rc_buffer_aggressivity * 100); //_enc_init() will balk if kf_min_dist differs from max w/VPX_KF_AUTO if (avctx->keyint_min >= 0 && avctx->keyint_min == avctx->gop_size) enccfg.kf_min_dist = avctx->keyint_min; if (avctx->gop_size >= 0) enccfg.kf_max_dist = avctx->gop_size; if (enccfg.g_pass == VPX_RC_FIRST_PASS) enccfg.g_lag_in_frames = 0; else if (enccfg.g_pass == VPX_RC_LAST_PASS) { int decode_size; if (!avctx->stats_in) { av_log(avctx, AV_LOG_ERROR, \"No stats file for second pass\\n\"); return AVERROR_INVALIDDATA; ctx->twopass_stats.sz = strlen(avctx->stats_in) * 3 / 4; ctx->twopass_stats.buf = av_malloc(ctx->twopass_stats.sz); if (!ctx->twopass_stats.buf) { \"Stat buffer alloc (%zu bytes) failed\\n\", ctx->twopass_stats.sz); return AVERROR(ENOMEM); decode_size = av_base64_decode(ctx->twopass_stats.buf, avctx->stats_in, ctx->twopass_stats.sz); if (decode_size < 0) { av_log(avctx, AV_LOG_ERROR, \"Stat buffer decode failed\\n\"); return AVERROR_INVALIDDATA; ctx->twopass_stats.sz = decode_size; enccfg.rc_twopass_stats_in = ctx->twopass_stats; /* 0-3: For non-zero values the encoder increasingly optimizes for reduced complexity playback on low powered devices at the expense of encode quality. 
*/ if (avctx->profile != FF_PROFILE_UNKNOWN) enccfg.g_profile = avctx->profile; enccfg.g_error_resilient = ctx->error_resilient || ctx->flags & VP8F_ERROR_RESILIENT; dump_enc_cfg(avctx, &enccfg); /* Construct Encoder Context */ res = vpx_codec_enc_init(&ctx->encoder, iface, &enccfg, 0); if (res != VPX_CODEC_OK) { log_encoder_error(avctx, \"Failed to initialize encoder\"); //codec control failures are currently treated only as warnings av_log(avctx, AV_LOG_DEBUG, \"vpx_codec_control\\n\"); if (ctx->cpu_used != INT_MIN) codecctl_int(avctx, VP8E_SET_CPUUSED, ctx->cpu_used); if (ctx->flags & VP8F_AUTO_ALT_REF) ctx->auto_alt_ref = 1; if (ctx->auto_alt_ref >= 0) codecctl_int(avctx, VP8E_SET_ENABLEAUTOALTREF, ctx->auto_alt_ref); if (ctx->arnr_max_frames >= 0) codecctl_int(avctx, VP8E_SET_ARNR_MAXFRAMES, ctx->arnr_max_frames); if (ctx->arnr_strength >= 0) codecctl_int(avctx, VP8E_SET_ARNR_STRENGTH, ctx->arnr_strength); if (ctx->arnr_type >= 0) codecctl_int(avctx, VP8E_SET_ARNR_TYPE, ctx->arnr_type); codecctl_int(avctx, VP8E_SET_NOISE_SENSITIVITY, avctx->noise_reduction); codecctl_int(avctx, VP8E_SET_TOKEN_PARTITIONS, av_log2(avctx->slices)); codecctl_int(avctx, VP8E_SET_STATIC_THRESHOLD, avctx->mb_threshold); codecctl_int(avctx, VP8E_SET_CQ_LEVEL, ctx->crf); if (ctx->max_intra_rate >= 0) codecctl_int(avctx, VP8E_SET_MAX_INTRA_BITRATE_PCT, ctx->max_intra_rate); av_log(avctx, AV_LOG_DEBUG, \"Using deadline: %d\\n\", ctx->deadline); //provide dummy value to initialize wrapper, values will be updated each _encode() vpx_img_wrap(&ctx->rawimg, VPX_IMG_FMT_I420, avctx->width, avctx->height, 1, (unsigned char*)1); avctx->coded_frame = avcodec_alloc_frame(); if (!avctx->coded_frame) { av_log(avctx, AV_LOG_ERROR, \"Error allocating coded frame\\n\"); vp8_free(avctx); return AVERROR(ENOMEM); return 0;"} {"target": 1, "idx": 6557, "func": "static int get_metadata_size(const uint8_t *buf, int buf_size) { int metadata_last, metadata_size; const uint8_t *buf_end = buf + buf_size; buf += 4; do { ff_flac_parse_block_header(buf, &metadata_last, NULL, &metadata_size); buf += 4; if (buf + metadata_size > buf_end) { /* need more data in order to read the complete header */ return 0; } buf += metadata_size; } while (!metadata_last); return buf_size - (buf_end - buf); }"} {"target": 1, "idx": 6563, "func": "static int usbredir_handle_interrupt_data(USBRedirDevice *dev, USBPacket *p, uint8_t ep) { if (ep & USB_DIR_IN) { /* Input interrupt endpoint, buffered packet input */ struct buf_packet *intp; int status, len; if (!dev->endpoint[EP2I(ep)].interrupt_started && !dev->endpoint[EP2I(ep)].interrupt_error) { struct usb_redir_start_interrupt_receiving_header start_int = { .endpoint = ep, }; /* No id, we look at the ep when receiving a status back */ usbredirparser_send_start_interrupt_receiving(dev->parser, 0, &start_int); usbredirparser_do_write(dev->parser); DPRINTF(\"interrupt recv started ep %02X\\n\", ep); dev->endpoint[EP2I(ep)].interrupt_started = 1; } intp = QTAILQ_FIRST(&dev->endpoint[EP2I(ep)].bufpq); if (intp == NULL) { DPRINTF2(\"interrupt-token-in ep %02X, no intp\\n\", ep); /* Check interrupt_error for stream errors */ status = dev->endpoint[EP2I(ep)].interrupt_error; dev->endpoint[EP2I(ep)].interrupt_error = 0; return usbredir_handle_status(dev, status, 0); } DPRINTF(\"interrupt-token-in ep %02X status %d len %d\\n\", ep, intp->status, intp->len); status = intp->status; if (status != usb_redir_success) { bufp_free(dev, intp, ep); return usbredir_handle_status(dev, status, 0); } len = intp->len; if (len > 
p->len) { ERROR(\"received int data is larger then packet ep %02X\\n\", ep); bufp_free(dev, intp, ep); return USB_RET_NAK; } memcpy(p->data, intp->data, len); bufp_free(dev, intp, ep); return len; } else { /* Output interrupt endpoint, normal async operation */ AsyncURB *aurb = async_alloc(dev, p); struct usb_redir_interrupt_packet_header interrupt_packet; DPRINTF(\"interrupt-out ep %02X len %d id %u\\n\", ep, p->len, aurb->packet_id); interrupt_packet.endpoint = ep; interrupt_packet.length = p->len; aurb->interrupt_packet = interrupt_packet; usbredir_log_data(dev, \"interrupt data out:\", p->data, p->len); usbredirparser_send_interrupt_packet(dev->parser, aurb->packet_id, &interrupt_packet, p->data, p->len); usbredirparser_do_write(dev->parser); return USB_RET_ASYNC; } }"} {"target": 1, "idx": 6569, "func": "static av_cold int msrle_decode_init(AVCodecContext *avctx) { MsrleContext *s = avctx->priv_data; int i; s->avctx = avctx; switch (avctx->bits_per_coded_sample) { case 1: avctx->pix_fmt = AV_PIX_FMT_MONOWHITE; break; case 4: case 8: avctx->pix_fmt = AV_PIX_FMT_PAL8; break; case 24: avctx->pix_fmt = AV_PIX_FMT_BGR24; break; default: av_log(avctx, AV_LOG_ERROR, \"unsupported bits per sample\\n\"); return -1; } avcodec_get_frame_defaults(&s->frame); s->frame.data[0] = NULL; if (avctx->extradata_size >= AVPALETTE_SIZE) for (i = 0; i < AVPALETTE_SIZE/4; i++) s->pal[i] = 0xFF<<24 | AV_RL32(avctx->extradata+4*i); return 0; }"} {"target": 0, "idx": 6595, "func": "static void nvme_clear_ctrl(NvmeCtrl *n) { int i; for (i = 0; i < n->num_queues; i++) { if (n->sq[i] != NULL) { nvme_free_sq(n->sq[i], n); } } for (i = 0; i < n->num_queues; i++) { if (n->cq[i] != NULL) { nvme_free_cq(n->cq[i], n); } } bdrv_flush(n->conf.bs); n->bar.cc = 0; }"} {"target": 0, "idx": 6609, "func": "void r4k_helper_tlbwi(CPUMIPSState *env) { r4k_tlb_t *tlb; int idx; target_ulong VPN; uint16_t ASID; bool G, V0, D0, V1, D1; idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; tlb = &env->tlb->mmu.r4k.tlb[idx]; VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1); #if defined(TARGET_MIPS64) VPN &= env->SEGMask; #endif ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; V0 = (env->CP0_EntryLo0 & 2) != 0; D0 = (env->CP0_EntryLo0 & 4) != 0; V1 = (env->CP0_EntryLo1 & 2) != 0; D1 = (env->CP0_EntryLo1 & 4) != 0; /* Discard cached TLB entries, unless tlbwi is just upgrading access permissions on the current entry. */ if (tlb->VPN != VPN || tlb->ASID != ASID || tlb->G != G || (tlb->V0 && !V0) || (tlb->D0 && !D0) || (tlb->V1 && !V1) || (tlb->D1 && !D1)) { r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); } r4k_invalidate_tlb(env, idx, 0); r4k_fill_tlb(env, idx); }"} {"target": 0, "idx": 6612, "func": "static void an5206_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; M68kCPU *cpu; CPUM68KState *env; int kernel_size; uint64_t elf_entry; hwaddr entry; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *sram = g_new(MemoryRegion, 1); if (!cpu_model) { cpu_model = \"m5206\"; } cpu = M68K_CPU(cpu_generic_init(TYPE_M68K_CPU, cpu_model)); env = &cpu->env; /* Initialize CPU registers. */ env->vbr = 0; /* TODO: allow changing MBAR and RAMBAR. 
*/ env->mbar = AN5206_MBAR_ADDR | 1; env->rambar0 = AN5206_RAMBAR_ADDR | 1; /* DRAM at address zero */ memory_region_allocate_system_memory(ram, NULL, \"an5206.ram\", ram_size); memory_region_add_subregion(address_space_mem, 0, ram); /* Internal SRAM. */ memory_region_init_ram(sram, NULL, \"an5206.sram\", 512, &error_fatal); memory_region_add_subregion(address_space_mem, AN5206_RAMBAR_ADDR, sram); mcf5206_init(address_space_mem, AN5206_MBAR_ADDR, cpu); /* Load kernel. */ if (!kernel_filename) { if (qtest_enabled()) { return; } fprintf(stderr, \"Kernel image must be specified\\n\"); exit(1); } kernel_size = load_elf(kernel_filename, NULL, NULL, &elf_entry, NULL, NULL, 1, EM_68K, 0, 0); entry = elf_entry; if (kernel_size < 0) { kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL, NULL, NULL); } if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, KERNEL_LOAD_ADDR, ram_size - KERNEL_LOAD_ADDR); entry = KERNEL_LOAD_ADDR; } if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } env->pc = entry; }"} {"target": 0, "idx": 6618, "func": "static inline int ape_decode_value(APEContext *ctx, APERice *rice) { int x, overflow; if (ctx->fileversion < 3990) { int tmpk; overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970); if (overflow == (MODEL_ELEMENTS - 1)) { tmpk = range_decode_bits(ctx, 5); overflow = 0; } else tmpk = (rice->k < 1) ? 0 : rice->k - 1; if (tmpk <= 16) x = range_decode_bits(ctx, tmpk); else { x = range_decode_bits(ctx, 16); x |= (range_decode_bits(ctx, tmpk - 16) << 16); } x += overflow << tmpk; } else { int base, pivot; pivot = rice->ksum >> 5; if (pivot == 0) pivot = 1; overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980); if (overflow == (MODEL_ELEMENTS - 1)) { overflow = range_decode_bits(ctx, 16) << 16; overflow |= range_decode_bits(ctx, 16); } if (pivot < 0x10000) { base = range_decode_culfreq(ctx, pivot); range_decode_update(ctx, 1, base); } else { int base_hi = pivot, base_lo; int bbits = 0; while (base_hi & ~0xFFFF) { base_hi >>= 1; bbits++; } base_hi = range_decode_culfreq(ctx, base_hi + 1); range_decode_update(ctx, 1, base_hi); base_lo = range_decode_culfreq(ctx, 1 << bbits); range_decode_update(ctx, 1, base_lo); base = (base_hi << bbits) + base_lo; } x = base + overflow * pivot; } update_rice(rice, x); /* Convert to signed */ if (x & 1) return (x >> 1) + 1; else return -(x >> 1); }"} {"target": 0, "idx": 6619, "func": "int ffio_ensure_seekback(AVIOContext *s, int buf_size) { uint8_t *buffer; int max_buffer_size = s->max_packet_size ? 
s->max_packet_size : IO_BUFFER_SIZE; buf_size += s->buf_ptr - s->buffer + max_buffer_size; if (buf_size < s->buffer_size || s->seekable) return 0; av_assert0(!s->write_flag); buffer = av_malloc(buf_size); if (!buffer) return AVERROR(ENOMEM); memcpy(buffer, s->buffer, s->buffer_size); av_free(s->buffer); s->buf_ptr = buffer + (s->buf_ptr - s->buffer); s->buf_end = buffer + (s->buf_end - s->buffer); s->buffer = buffer; s->buffer_size = buf_size; return 0; }"} {"target": 1, "idx": 6626, "func": "static int film_probe(AVProbeData *p) { if (AV_RB32(&p->buf[0]) != FILM_TAG) return AVPROBE_SCORE_MAX; }"} {"target": 1, "idx": 6627, "func": "static uint8_t ide_wait_clear(uint8_t flag) { QPCIDevice *dev; QPCIBar bmdma_bar, ide_bar; uint8_t data; time_t st; dev = get_pci_device(&bmdma_bar, &ide_bar); /* Wait with a 5 second timeout */ time(&st); while (true) { data = qpci_io_readb(dev, ide_bar, reg_status); if (!(data & flag)) { return data; } if (difftime(time(NULL), st) > 5.0) { break; } nsleep(400); } g_assert_not_reached(); }"} {"target": 1, "idx": 6630, "func": "static void perf_lifecycle(void) { Coroutine *coroutine; unsigned int i, max; double duration; max = 1000000; g_test_timer_start(); for (i = 0; i < max; i++) { coroutine = qemu_coroutine_create(empty_coroutine); qemu_coroutine_enter(coroutine, NULL); } duration = g_test_timer_elapsed(); g_test_message(\"Lifecycle %u iterations: %f s\\n\", max, duration); }"} {"target": 0, "idx": 6632, "func": "static int plot_cqt(AVFilterLink *inlink) { AVFilterContext *ctx = inlink->dst; ShowCQTContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; int fft_len = 1 << s->fft_bits; FFTSample result[VIDEO_WIDTH][4]; int x, y, ret = 0; int linesize = s->outpicref->linesize[0]; int video_scale = s->fullhd ? 
2 : 1; int video_width = (VIDEO_WIDTH/2) * video_scale; int spectogram_height = (SPECTOGRAM_HEIGHT/2) * video_scale; int spectogram_start = (SPECTOGRAM_START/2) * video_scale; int font_height = (FONT_HEIGHT/2) * video_scale; /* real part contains left samples, imaginary part contains right samples */ memcpy(s->fft_result, s->fft_data, fft_len * sizeof(*s->fft_data)); av_fft_permute(s->fft_context, s->fft_result); av_fft_calc(s->fft_context, s->fft_result); s->fft_result[fft_len] = s->fft_result[0]; /* calculating cqt */ for (x = 0; x < VIDEO_WIDTH; x++) { int u; FFTComplex v = {0,0}; FFTComplex w = {0,0}; FFTComplex l, r; for (u = 0; u < s->coeffs[x].len; u++) { FFTSample value = s->coeffs[x].values[u]; int index = s->coeffs[x].start + u; v.re += value * s->fft_result[index].re; v.im += value * s->fft_result[index].im; w.re += value * s->fft_result[fft_len - index].re; w.im += value * s->fft_result[fft_len - index].im; } /* separate left and right, (and multiply by 2.0) */ l.re = v.re + w.re; l.im = v.im - w.im; r.re = w.im + v.im; r.im = w.re - v.re; /* result is power, not amplitude */ result[x][0] = l.re * l.re + l.im * l.im; result[x][2] = r.re * r.re + r.im * r.im; result[x][1] = 0.5f * (result[x][0] + result[x][2]); if (s->gamma2 == 1.0f) result[x][3] = result[x][1]; else if (s->gamma2 == 2.0f) result[x][3] = sqrtf(result[x][1]); else if (s->gamma2 == 3.0f) result[x][3] = cbrtf(result[x][1]); else if (s->gamma2 == 4.0f) result[x][3] = sqrtf(sqrtf(result[x][1])); else result[x][3] = expf(logf(result[x][1]) * (1.0f / s->gamma2)); result[x][0] = FFMIN(1.0f, result[x][0]); result[x][1] = FFMIN(1.0f, result[x][1]); result[x][2] = FFMIN(1.0f, result[x][2]); if (s->gamma == 1.0f) { result[x][0] = 255.0f * result[x][0]; result[x][1] = 255.0f * result[x][1]; result[x][2] = 255.0f * result[x][2]; } else if (s->gamma == 2.0f) { result[x][0] = 255.0f * sqrtf(result[x][0]); result[x][1] = 255.0f * sqrtf(result[x][1]); result[x][2] = 255.0f * sqrtf(result[x][2]); } else if (s->gamma == 3.0f) { result[x][0] = 255.0f * cbrtf(result[x][0]); result[x][1] = 255.0f * cbrtf(result[x][1]); result[x][2] = 255.0f * cbrtf(result[x][2]); } else if (s->gamma == 4.0f) { result[x][0] = 255.0f * sqrtf(sqrtf(result[x][0])); result[x][1] = 255.0f * sqrtf(sqrtf(result[x][1])); result[x][2] = 255.0f * sqrtf(sqrtf(result[x][2])); } else { result[x][0] = 255.0f * expf(logf(result[x][0]) * (1.0f / s->gamma)); result[x][1] = 255.0f * expf(logf(result[x][1]) * (1.0f / s->gamma)); result[x][2] = 255.0f * expf(logf(result[x][2]) * (1.0f / s->gamma)); } } if (!s->fullhd) { for (x = 0; x < video_width; x++) { result[x][0] = 0.5f * (result[2*x][0] + result[2*x+1][0]); result[x][1] = 0.5f * (result[2*x][1] + result[2*x+1][1]); result[x][2] = 0.5f * (result[2*x][2] + result[2*x+1][2]); result[x][3] = 0.5f * (result[2*x][3] + result[2*x+1][3]); } } for (x = 0; x < video_width; x++) { s->spectogram[s->spectogram_index*linesize + 3*x] = result[x][0] + 0.5f; s->spectogram[s->spectogram_index*linesize + 3*x + 1] = result[x][1] + 0.5f; s->spectogram[s->spectogram_index*linesize + 3*x + 2] = result[x][2] + 0.5f; } /* drawing */ if (!s->spectogram_count) { uint8_t *data = (uint8_t*) s->outpicref->data[0]; float rcp_result[VIDEO_WIDTH]; int total_length = linesize * spectogram_height; int back_length = linesize * s->spectogram_index; for (x = 0; x < video_width; x++) rcp_result[x] = 1.0f / (result[x][3]+0.0001f); /* drawing bar */ for (y = 0; y < spectogram_height; y++) { float height = (spectogram_height - y) * (1.0f/spectogram_height); 
uint8_t *lineptr = data + y * linesize; for (x = 0; x < video_width; x++) { float mul; if (result[x][3] <= height) { *lineptr++ = 0; *lineptr++ = 0; *lineptr++ = 0; } else { mul = (result[x][3] - height) * rcp_result[x]; *lineptr++ = mul * result[x][0] + 0.5f; *lineptr++ = mul * result[x][1] + 0.5f; *lineptr++ = mul * result[x][2] + 0.5f; } } } /* drawing font */ if (s->font_alpha && s->draw_text) { for (y = 0; y < font_height; y++) { uint8_t *lineptr = data + (spectogram_height + y) * linesize; uint8_t *spectogram_src = s->spectogram + s->spectogram_index * linesize; uint8_t *fontcolor_value = s->fontcolor_value; for (x = 0; x < video_width; x++) { uint8_t alpha = s->font_alpha[y*video_width+x]; lineptr[3*x] = (spectogram_src[3*x] * (255-alpha) + fontcolor_value[0] * alpha + 255) >> 8; lineptr[3*x+1] = (spectogram_src[3*x+1] * (255-alpha) + fontcolor_value[1] * alpha + 255) >> 8; lineptr[3*x+2] = (spectogram_src[3*x+2] * (255-alpha) + fontcolor_value[2] * alpha + 255) >> 8; fontcolor_value += 3; } } } else if (s->draw_text) { for (y = 0; y < font_height; y++) { uint8_t *lineptr = data + (spectogram_height + y) * linesize; memcpy(lineptr, s->spectogram + s->spectogram_index * linesize, video_width*3); } for (x = 0; x < video_width; x += video_width/10) { int u; static const char str[] = \"EF G A BC D \"; uint8_t *startptr = data + spectogram_height * linesize + x * 3; for (u = 0; str[u]; u++) { int v; for (v = 0; v < 16; v++) { uint8_t *p = startptr + v * linesize * video_scale + 8 * 3 * u * video_scale; int ux = x + 8 * u * video_scale; int mask; for (mask = 0x80; mask; mask >>= 1) { if (mask & avpriv_vga16_font[str[u] * 16 + v]) { p[0] = s->fontcolor_value[3*ux]; p[1] = s->fontcolor_value[3*ux+1]; p[2] = s->fontcolor_value[3*ux+2]; if (video_scale == 2) { p[linesize] = p[0]; p[linesize+1] = p[1]; p[linesize+2] = p[2]; p[3] = p[linesize+3] = s->fontcolor_value[3*ux+3]; p[4] = p[linesize+4] = s->fontcolor_value[3*ux+4]; p[5] = p[linesize+5] = s->fontcolor_value[3*ux+5]; } } p += 3 * video_scale; ux += video_scale; } } } } } else { for (y = 0; y < font_height; y++) { uint8_t *lineptr = data + (spectogram_height + y) * linesize; uint8_t *spectogram_src = s->spectogram + s->spectogram_index * linesize; for (x = 0; x < video_width; x++) { lineptr[3*x] = spectogram_src[3*x]; lineptr[3*x+1] = spectogram_src[3*x+1]; lineptr[3*x+2] = spectogram_src[3*x+2]; } } } /* drawing spectogram/sonogram */ data += spectogram_start * linesize; memcpy(data, s->spectogram + s->spectogram_index*linesize, total_length - back_length); data += total_length - back_length; if (back_length) memcpy(data, s->spectogram, back_length); s->outpicref->pts = s->frame_count; ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref)); s->req_fullfilled = 1; s->frame_count++; } s->spectogram_count = (s->spectogram_count + 1) % s->count; s->spectogram_index = (s->spectogram_index + spectogram_height - 1) % spectogram_height; return ret; }"} {"target": 1, "idx": 6644, "func": "void do_4xx_tlbsx_ (void) { int tmp = xer_ov; T0 = ppcemb_tlb_search(env, T0, env->spr[SPR_40x_PID]); if (T0 != -1) tmp |= 0x02; env->crf[0] = tmp; }"} {"target": 1, "idx": 6654, "func": "static void decode_bol_opc(CPUTriCoreState *env, DisasContext *ctx, int32_t op1) { int r1, r2; int32_t address; TCGv temp; r1 = MASK_OP_BOL_S1D(ctx->opcode); r2 = MASK_OP_BOL_S2(ctx->opcode); address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode); switch (op1) { case OPC1_32_BOL_LD_A_LONGOFF: temp = tcg_temp_new(); tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address); 
tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL); tcg_temp_free(temp); break; case OPC1_32_BOL_LD_W_LONGOFF: temp = tcg_temp_new(); tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address); tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL); tcg_temp_free(temp); break; case OPC1_32_BOL_LEA_LONGOFF: tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address); break; case OPC1_32_BOL_ST_A_LONGOFF: if (tricore_feature(env, TRICORE_FEATURE_16)) { gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], address, MO_LEUL); } else { /* raise illegal opcode trap */ } break; case OPC1_32_BOL_ST_W_LONGOFF: gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUL); break; case OPC1_32_BOL_LD_B_LONGOFF: if (tricore_feature(env, TRICORE_FEATURE_16)) { gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB); } else { /* raise illegal opcode trap */ } break; case OPC1_32_BOL_LD_BU_LONGOFF: if (tricore_feature(env, TRICORE_FEATURE_16)) { gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_UB); } else { /* raise illegal opcode trap */ } break; case OPC1_32_BOL_LD_H_LONGOFF: if (tricore_feature(env, TRICORE_FEATURE_16)) { gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW); } else { /* raise illegal opcode trap */ } break; case OPC1_32_BOL_LD_HU_LONGOFF: if (tricore_feature(env, TRICORE_FEATURE_16)) { gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUW); } else { /* raise illegal opcode trap */ } break; case OPC1_32_BOL_ST_B_LONGOFF: if (tricore_feature(env, TRICORE_FEATURE_16)) { gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB); } else { /* raise illegal opcode trap */ } break; case OPC1_32_BOL_ST_H_LONGOFF: if (tricore_feature(env, TRICORE_FEATURE_16)) { gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW); } else { /* raise illegal opcode trap */ } break; } }"} {"target": 1, "idx": 6661, "func": "static void virtconsole_realize(DeviceState *dev, Error **errp) { VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev); VirtConsole *vcon = VIRTIO_CONSOLE(dev); VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_GET_CLASS(dev); if (port->id == 0 && !k->is_console) { error_setg(errp, \"Port number 0 on virtio-serial devices reserved \" \"for virtconsole devices for backward compatibility.\"); return; } if (vcon->chr) { vcon->chr->explicit_fe_open = 1; qemu_chr_add_handlers(vcon->chr, chr_can_read, chr_read, chr_event, vcon); } }"} {"target": 1, "idx": 6673, "func": "static void pc_q35_init(MachineState *machine) { PCMachineState *pc_machine = PC_MACHINE(machine); ram_addr_t below_4g_mem_size, above_4g_mem_size; Q35PCIHost *q35_host; PCIHostState *phb; PCIBus *host_bus; PCIDevice *lpc; BusState *idebus[MAX_SATA_PORTS]; ISADevice *rtc_state; ISADevice *floppy; MemoryRegion *pci_memory; MemoryRegion *rom_memory; MemoryRegion *ram_memory; GSIState *gsi_state; ISABus *isa_bus; int pci_enabled = 1; qemu_irq *cpu_irq; qemu_irq *gsi; qemu_irq *i8259; int i; ICH9LPCState *ich9_lpc; PCIDevice *ahci; DeviceState *icc_bridge; PcGuestInfo *guest_info; ram_addr_t lowmem; /* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory * and 256 Mbytes for PCI Express Enhanced Configuration Access Mapping * also known as MMCFG). * If it doesn't, we need to split it in chunks below and above 4G. * In any case, try to make sure that guest addresses aligned at * 1G boundaries get mapped to host addresses aligned at 1G boundaries. * For old machine types, use whatever split we used historically to avoid * breaking migration. 
if (machine->ram_size >= 0xb0000000) { lowmem = gigabyte_align ? 0x80000000 : 0xb0000000; } else { lowmem = 0xb0000000; } /* Handle the machine opt max-ram-below-4g. It is basically doing * min(qemu limit, user limit). if (lowmem > pc_machine->max_ram_below_4g) { lowmem = pc_machine->max_ram_below_4g; if (machine->ram_size - lowmem > lowmem && lowmem & ((1ULL << 30) - 1)) { error_report(\"Warning: Large machine and max_ram_below_4g(%\"PRIu64 \") not a multiple of 1G; possible bad performance.\", pc_machine->max_ram_below_4g); } } if (machine->ram_size >= lowmem) { above_4g_mem_size = machine->ram_size - lowmem; below_4g_mem_size = lowmem; } else { above_4g_mem_size = 0; below_4g_mem_size = machine->ram_size; } if (xen_enabled() && xen_hvm_init(&below_4g_mem_size, &above_4g_mem_size, &ram_memory) != 0) { fprintf(stderr, \"xen hardware virtual machine initialisation failed\\n\"); exit(1); } icc_bridge = qdev_create(NULL, TYPE_ICC_BRIDGE); object_property_add_child(qdev_get_machine(), \"icc-bridge\", OBJECT(icc_bridge), NULL); pc_cpus_init(machine->cpu_model, icc_bridge); pc_acpi_init(\"q35-acpi-dsdt.aml\"); kvmclock_create(); /* pci enabled */ if (pci_enabled) { pci_memory = g_new(MemoryRegion, 1); memory_region_init(pci_memory, NULL, \"pci\", UINT64_MAX); rom_memory = pci_memory; } else { pci_memory = NULL; rom_memory = get_system_memory(); } guest_info = pc_guest_info_init(below_4g_mem_size, above_4g_mem_size); guest_info->has_pci_info = has_pci_info; guest_info->isapc_ram_fw = false; guest_info->has_acpi_build = has_acpi_build; guest_info->has_reserved_memory = has_reserved_memory; if (smbios_defaults) { MachineClass *mc = MACHINE_GET_CLASS(machine); /* These values are guest ABI, do not change */ smbios_set_defaults(\"QEMU\", \"Standard PC (Q35 + ICH9, 2009)\", mc->name, smbios_legacy_mode); } /* allocate ram and load rom/bios */ if (!xen_enabled()) { pc_memory_init(machine, get_system_memory(), below_4g_mem_size, above_4g_mem_size, rom_memory, &ram_memory, guest_info); } /* irq lines */ gsi_state = g_malloc0(sizeof(*gsi_state)); if (kvm_irqchip_in_kernel()) { kvm_pc_setup_irq_routing(pci_enabled); gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state, GSI_NUM_PINS); } else { gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS); } /* create pci host bus */ q35_host = Q35_HOST_DEVICE(qdev_create(NULL, TYPE_Q35_HOST_DEVICE)); object_property_add_child(qdev_get_machine(), \"q35\", OBJECT(q35_host), NULL); q35_host->mch.ram_memory = ram_memory; q35_host->mch.pci_address_space = pci_memory; q35_host->mch.system_memory = get_system_memory(); q35_host->mch.address_space_io = get_system_io(); q35_host->mch.below_4g_mem_size = below_4g_mem_size; q35_host->mch.above_4g_mem_size = above_4g_mem_size; q35_host->mch.guest_info = guest_info; /* pci */ qdev_init_nofail(DEVICE(q35_host)); phb = PCI_HOST_BRIDGE(q35_host); host_bus = phb->bus; /* create ISA bus */ lpc = pci_create_simple_multifunction(host_bus, PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC), true, TYPE_ICH9_LPC_DEVICE); object_property_add_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP, TYPE_HOTPLUG_HANDLER, (Object **)&pc_machine->acpi_dev, object_property_allow_set_link, OBJ_PROP_LINK_UNREF_ON_RELEASE, &error_abort); object_property_set_link(OBJECT(machine), OBJECT(lpc), PC_MACHINE_ACPI_DEVICE_PROP, &error_abort); ich9_lpc = ICH9_LPC_DEVICE(lpc); ich9_lpc->pic = gsi; ich9_lpc->ioapic = gsi_state->ioapic_irq; pci_bus_irqs(host_bus, ich9_lpc_set_irq, ich9_lpc_map_irq, ich9_lpc, ICH9_LPC_NB_PIRQS); pci_bus_set_route_irq_fn(host_bus, 
ich9_route_intx_pin_to_irq); isa_bus = ich9_lpc->isa_bus; /*end early*/ isa_bus_irqs(isa_bus, gsi); if (kvm_irqchip_in_kernel()) { i8259 = kvm_i8259_init(isa_bus); } else if (xen_enabled()) { i8259 = xen_interrupt_controller_init(); } else { cpu_irq = pc_allocate_cpu_irq(); i8259 = i8259_init(isa_bus, cpu_irq[0]); } for (i = 0; i < ISA_NUM_IRQS; i++) { gsi_state->i8259_irq[i] = i8259[i]; } if (pci_enabled) { ioapic_init_gsi(gsi_state, NULL); } qdev_init_nofail(icc_bridge); pc_register_ferr_irq(gsi[13]); /* init basic PC hardware */ pc_basic_device_init(isa_bus, gsi, &rtc_state, &floppy, false, 0xff0104); /* connect pm stuff to lpc */ ich9_lpc_pm_init(lpc); /* ahci and SATA device, for q35 1 ahci controller is built-in */ ahci = pci_create_simple_multifunction(host_bus, PCI_DEVFN(ICH9_SATA1_DEV, ICH9_SATA1_FUNC), true, \"ich9-ahci\"); idebus[0] = qdev_get_child_bus(&ahci->qdev, \"ide.0\"); idebus[1] = qdev_get_child_bus(&ahci->qdev, \"ide.1\"); if (usb_enabled(false)) { /* Should we create 6 UHCI according to ich9 spec? */ ehci_create_ich9_with_companions(host_bus, 0x1d); } /* TODO: Populate SPD eeprom data. */ smbus_eeprom_init(ich9_smb_init(host_bus, PCI_DEVFN(ICH9_SMB_DEV, ICH9_SMB_FUNC), 0xb100), 8, NULL, 0); pc_cmos_init(below_4g_mem_size, above_4g_mem_size, machine->boot_order, floppy, idebus[0], idebus[1], rtc_state); /* the rest devices to which pci devfn is automatically assigned */ pc_vga_init(isa_bus, host_bus); pc_nic_init(isa_bus, host_bus); if (pci_enabled) { pc_pci_device_init(host_bus); } }"} {"target": 1, "idx": 6679, "func": "static int vpc_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVVPCState *s = bs->opaque; int i; VHDFooter *footer; VHDDynDiskHeader *dyndisk_header; uint8_t buf[HEADER_SIZE]; uint32_t checksum; int disk_type = VHD_DYNAMIC; int ret; ret = bdrv_pread(bs->file, 0, s->footer_buf, HEADER_SIZE); if (ret < 0) { goto fail; } footer = (VHDFooter *) s->footer_buf; if (strncmp(footer->creator, \"conectix\", 8)) { int64_t offset = bdrv_getlength(bs->file); if (offset < 0) { ret = offset; goto fail; } else if (offset < HEADER_SIZE) { ret = -EINVAL; goto fail; } /* If a fixed disk, the footer is found only at the end of the file */ ret = bdrv_pread(bs->file, offset-HEADER_SIZE, s->footer_buf, HEADER_SIZE); if (ret < 0) { goto fail; } if (strncmp(footer->creator, \"conectix\", 8)) { error_setg(errp, \"invalid VPC image\"); ret = -EINVAL; goto fail; } disk_type = VHD_FIXED; } checksum = be32_to_cpu(footer->checksum); footer->checksum = 0; if (vpc_checksum(s->footer_buf, HEADER_SIZE) != checksum) fprintf(stderr, \"block-vpc: The header checksum of '%s' is \" \"incorrect.\\n\", bs->filename); /* Write 'checksum' back to footer, or else will leave it with zero. */ footer->checksum = be32_to_cpu(checksum); // The visible size of a image in Virtual PC depends on the geometry // rather than on the size stored in the footer (the size in the footer // is too large usually) bs->total_sectors = (int64_t) be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl; /* images created with disk2vhd report a far higher virtual size * than expected with the cyls * heads * sectors_per_cyl formula. * use the footer->size instead if the image was created with * disk2vhd. 
*/ if (!strncmp(footer->creator_app, \"d2v\", 4)) { bs->total_sectors = be64_to_cpu(footer->size) / BDRV_SECTOR_SIZE; } /* Allow a maximum disk size of approximately 2 TB */ if (bs->total_sectors >= 65535LL * 255 * 255) { ret = -EFBIG; goto fail; } if (disk_type == VHD_DYNAMIC) { ret = bdrv_pread(bs->file, be64_to_cpu(footer->data_offset), buf, HEADER_SIZE); if (ret < 0) { goto fail; } dyndisk_header = (VHDDynDiskHeader *) buf; if (strncmp(dyndisk_header->magic, \"cxsparse\", 8)) { ret = -EINVAL; goto fail; } s->block_size = be32_to_cpu(dyndisk_header->block_size); s->bitmap_size = ((s->block_size / (8 * 512)) + 511) & ~511; s->max_table_entries = be32_to_cpu(dyndisk_header->max_table_entries); s->pagetable = g_malloc(s->max_table_entries * 4); s->bat_offset = be64_to_cpu(dyndisk_header->table_offset); ret = bdrv_pread(bs->file, s->bat_offset, s->pagetable, s->max_table_entries * 4); if (ret < 0) { goto fail; } s->free_data_block_offset = (s->bat_offset + (s->max_table_entries * 4) + 511) & ~511; for (i = 0; i < s->max_table_entries; i++) { be32_to_cpus(&s->pagetable[i]); if (s->pagetable[i] != 0xFFFFFFFF) { int64_t next = (512 * (int64_t) s->pagetable[i]) + s->bitmap_size + s->block_size; if (next > s->free_data_block_offset) { s->free_data_block_offset = next; } } } if (s->free_data_block_offset > bdrv_getlength(bs->file)) { error_setg(errp, \"block-vpc: free_data_block_offset points after \" \"the end of file. The image has been truncated.\"); ret = -EINVAL; goto fail; } s->last_bitmap_offset = (int64_t) -1; #ifdef CACHE s->pageentry_u8 = g_malloc(512); s->pageentry_u32 = s->pageentry_u8; s->pageentry_u16 = s->pageentry_u8; s->last_pagetable = -1; #endif } qemu_co_mutex_init(&s->lock); /* Disable migration when VHD images are used */ error_set(&s->migration_blocker, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, \"vpc\", bs->device_name, \"live migration\"); migrate_add_blocker(s->migration_blocker); return 0; fail: g_free(s->pagetable); #ifdef CACHE g_free(s->pageentry_u8); #endif return ret; }"} {"target": 1, "idx": 6682, "func": "void helper_done(CPUSPARCState *env) { trap_state *tsptr = cpu_tsptr(env); env->pc = tsptr->tnpc; env->npc = tsptr->tnpc + 4; cpu_put_ccr(env, tsptr->tstate >> 32); env->asi = (tsptr->tstate >> 24) & 0xff; cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f); cpu_put_cwp64(env, tsptr->tstate & 0xff); if (cpu_has_hypervisor(env)) { uint32_t new_gl = (tsptr->tstate >> 40) & 7; env->hpstate = env->htstate[env->tl]; cpu_gl_switch_gregs(env, new_gl); env->gl = new_gl; } env->tl--; trace_win_helper_done(env->tl); #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { cpu_check_irqs(env); } #endif }"} {"target": 1, "idx": 6686, "func": "void CALLBACK host_alarm_handler(UINT uTimerID, UINT uMsg, DWORD_PTR dwUser, DWORD_PTR dw1, DWORD_PTR dw2) #else static void host_alarm_handler(int host_signum) #endif { #if 0 #define DISP_FREQ 1000 { static int64_t delta_min = INT64_MAX; static int64_t delta_max, delta_cum, last_clock, delta, ti; static int count; ti = qemu_get_clock(vm_clock); if (last_clock != 0) { delta = ti - last_clock; if (delta < delta_min) delta_min = delta; if (delta > delta_max) delta_max = delta; delta_cum += delta; if (++count == DISP_FREQ) { printf(\"timer: min=%\" PRId64 \" us max=%\" PRId64 \" us avg=%\" PRId64 \" us avg_freq=%0.3f Hz\\n\", muldiv64(delta_min, 1000000, ticks_per_sec), muldiv64(delta_max, 1000000, ticks_per_sec), muldiv64(delta_cum, 1000000 / DISP_FREQ, ticks_per_sec), (double)ticks_per_sec / ((double)delta_cum / DISP_FREQ)); 
count = 0; delta_min = INT64_MAX; delta_max = 0; delta_cum = 0; } } last_clock = ti; } #endif if (alarm_has_dynticks(alarm_timer) || qemu_timer_expired(active_timers[QEMU_TIMER_VIRTUAL], qemu_get_clock(vm_clock)) || qemu_timer_expired(active_timers[QEMU_TIMER_REALTIME], qemu_get_clock(rt_clock))) { #ifdef _WIN32 struct qemu_alarm_win32 *data = ((struct qemu_alarm_timer*)dwUser)->priv; SetEvent(data->host_alarm); #endif CPUState *env = next_cpu; /* stop the currently executing cpu because a timer occured */ cpu_interrupt(env, CPU_INTERRUPT_EXIT); #ifdef USE_KQEMU if (env->kqemu_enabled) { kqemu_cpu_interrupt(env); } #endif event_pending = 1; } }"} {"target": 1, "idx": 6697, "func": "static void tqi_calculate_qtable(TqiContext *t, int quant) { const int qscale = (215 - 2*quant)*5; int i; t->intra_matrix[0] = (ff_inv_aanscales[0] * ff_mpeg1_default_intra_matrix[0]) >> 11; for(i=1; i<64; i++) t->intra_matrix[i] = (ff_inv_aanscales[i] * ff_mpeg1_default_intra_matrix[i] * qscale + 32) >> 14; }"} {"target": 0, "idx": 6706, "func": "static void free_input_threads(void) { int i; if (nb_input_files == 1) return; transcoding_finished = 1; for (i = 0; i < nb_input_files; i++) { InputFile *f = input_files[i]; AVPacket pkt; if (f->joined) continue; pthread_mutex_lock(&f->fifo_lock); while (av_fifo_size(f->fifo)) { av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL); av_free_packet(&pkt); } pthread_cond_signal(&f->fifo_cond); pthread_mutex_unlock(&f->fifo_lock); pthread_join(f->thread, NULL); f->joined = 1; while (av_fifo_size(f->fifo)) { av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL); av_free_packet(&pkt); } av_fifo_free(f->fifo); } }"} {"target": 1, "idx": 6710, "func": "static void tcp_chr_disconnect(CharDriverState *chr) { TCPCharDriver *s = chr->opaque; if (!s->connected) { return; } s->connected = 0; if (s->listen_ioc) { s->listen_tag = qio_channel_add_watch( QIO_CHANNEL(s->listen_ioc), G_IO_IN, tcp_chr_accept, chr, NULL); } tcp_set_msgfds(chr, NULL, 0); remove_fd_in_watch(chr); object_unref(OBJECT(s->sioc)); s->sioc = NULL; object_unref(OBJECT(s->ioc)); s->ioc = NULL; g_free(chr->filename); chr->filename = SocketAddress_to_str(\"disconnected:\", s->addr, s->is_listen, s->is_telnet); qemu_chr_be_event(chr, CHR_EVENT_CLOSED); if (s->reconnect_time) { qemu_chr_socket_restart_timer(chr); } }"} {"target": 0, "idx": 6740, "func": "static inline void menelaus_rtc_stop(MenelausState *s) { qemu_del_timer(s->rtc.hz_tm); s->rtc.next -= qemu_get_clock(rt_clock); if (s->rtc.next < 1) s->rtc.next = 1; }"} {"target": 0, "idx": 6748, "func": "int ide_get_geometry(BusState *bus, int unit, int16_t *cyls, int8_t *heads, int8_t *secs) { IDEState *s = &DO_UPCAST(IDEBus, qbus, bus)->ifs[unit]; if (s->drive_kind != IDE_HD || !s->bs) { return -1; } *cyls = s->cylinders; *heads = s->heads; *secs = s->sectors; return 0; }"} {"target": 0, "idx": 6802, "func": "static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block, uint8_t *dst, int linesize, int skip_block, int apply_filter, int cbp_top, int cbp_left) { MpegEncContext *s = &v->s; GetBitContext *gb = &s->gb; int i, j; int subblkpat = 0; int scale, off, idx, last, skip, value; int ttblk = ttmb & 7; int pat = 0; if(ttmb == -1) { ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)]; } if(ttblk == TT_4X4) { subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1); } if((ttblk != TT_8X8 && ttblk != TT_4X4) && 
(v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) { subblkpat = decode012(gb); if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4; if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8; } scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0); // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) { subblkpat = 2 - (ttblk == TT_8X4_TOP); ttblk = TT_8X4; } if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) { subblkpat = 2 - (ttblk == TT_4X8_LEFT); ttblk = TT_4X8; } switch(ttblk) { case TT_8X8: pat = 0xF; i = 0; last = 0; while (!last) { vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); i += skip; if(i > 63) break; idx = wmv1_scantable[0][i++]; block[idx] = value * scale; if(!v->pquantizer) block[idx] += (block[idx] < 0) ? -mquant : mquant; } if(!skip_block){ s->dsp.vc1_inv_trans_8x8(block); s->dsp.add_pixels_clamped(block, dst, linesize); if(apply_filter && cbp_top & 0xC) vc1_loop_filter(dst, 1, linesize, 8, mquant); if(apply_filter && cbp_left & 0xA) vc1_loop_filter(dst, linesize, 1, 8, mquant); } break; case TT_4X4: pat = ~subblkpat & 0xF; for(j = 0; j < 4; j++) { last = subblkpat & (1 << (3 - j)); i = 0; off = (j & 1) * 4 + (j & 2) * 16; while (!last) { vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); i += skip; if(i > 15) break; idx = ff_vc1_simple_progressive_4x4_zz[i++]; block[idx + off] = value * scale; if(!v->pquantizer) block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant; } if(!(subblkpat & (1 << (3 - j))) && !skip_block){ s->dsp.vc1_inv_trans_4x4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, block + off); if(apply_filter && (j&2 ? pat & (1<<(j-2)) : (cbp_top & (1 << (j + 2))))) vc1_loop_filter(dst + (j&1)*4 + (j&2)*2*linesize, 1, linesize, 4, mquant); if(apply_filter && (j&1 ? pat & (1<<(j-1)) : (cbp_left & (1 << (j + 1))))) vc1_loop_filter(dst + (j&1)*4 + (j&2)*2*linesize, linesize, 1, 4, mquant); } } break; case TT_8X4: pat = ~((subblkpat & 2)*6 + (subblkpat & 1)*3) & 0xF; for(j = 0; j < 2; j++) { last = subblkpat & (1 << (1 - j)); i = 0; off = j * 32; while (!last) { vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); i += skip; if(i > 31) break; idx = v->zz_8x4[i++]+off; block[idx] = value * scale; if(!v->pquantizer) block[idx] += (block[idx] < 0) ? -mquant : mquant; } if(!(subblkpat & (1 << (1 - j))) && !skip_block){ s->dsp.vc1_inv_trans_8x4(dst + j*4*linesize, linesize, block + off); if(apply_filter && j ? pat & 0x3 : (cbp_top & 0xC)) vc1_loop_filter(dst + j*4*linesize, 1, linesize, 8, mquant); if(apply_filter && cbp_left & (2 << j)) vc1_loop_filter(dst + j*4*linesize, linesize, 1, 4, mquant); } } break; case TT_4X8: pat = ~(subblkpat*5) & 0xF; for(j = 0; j < 2; j++) { last = subblkpat & (1 << (1 - j)); i = 0; off = j * 4; while (!last) { vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); i += skip; if(i > 31) break; idx = v->zz_4x8[i++]+off; block[idx] = value * scale; if(!v->pquantizer) block[idx] += (block[idx] < 0) ? -mquant : mquant; } if(!(subblkpat & (1 << (1 - j))) && !skip_block){ s->dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off); if(apply_filter && cbp_top & (2 << j)) vc1_loop_filter(dst + j*4, 1, linesize, 4, mquant); if(apply_filter && j ? 
pat & 0x5 : (cbp_left & 0xA)) vc1_loop_filter(dst + j*4, linesize, 1, 8, mquant); } } break; } return pat; }"} {"target": 1, "idx": 6809, "func": "static void gen_dcread(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else TCGv EA, val; if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } gen_set_access_type(ctx, ACCESS_CACHE); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); val = tcg_temp_new(); gen_qemu_ld32u(ctx, val, EA); tcg_temp_free(val); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA); tcg_temp_free(EA); #endif }"} {"target": 1, "idx": 6810, "func": "uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset, int compressed_size) { BDRVQcowState *s = bs->opaque; int l2_index, ret; uint64_t *l2_table; int64_t cluster_offset; int nb_csectors; ret = get_cluster_table(bs, offset, &l2_table, &l2_index); if (ret < 0) { return 0; } cluster_offset = be64_to_cpu(l2_table[l2_index]); if (cluster_offset & QCOW_OFLAG_COPIED) { qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); return 0; } if (cluster_offset) qcow2_free_any_clusters(bs, cluster_offset, 1); cluster_offset = qcow2_alloc_bytes(bs, compressed_size); if (cluster_offset < 0) { qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); return 0; } nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) - (cluster_offset >> 9); cluster_offset |= QCOW_OFLAG_COMPRESSED | ((uint64_t)nb_csectors << s->csize_shift); /* update L2 table */ /* compressed clusters never have the copied flag */ BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED); qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table); l2_table[l2_index] = cpu_to_be64(cluster_offset); ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); if (ret < 0) { return 0; } return cluster_offset; }"} {"target": 1, "idx": 6811, "func": "static int ac3_eac3_probe(AVProbeData *p, enum AVCodecID expected_codec_id) { int max_frames, first_frames = 0, frames; uint8_t *buf, *buf2, *end; AC3HeaderInfo hdr; GetBitContext gbc; enum AVCodecID codec_id = AV_CODEC_ID_AC3; max_frames = 0; buf = p->buf; end = buf + p->buf_size; for(; buf < end; buf++) { if(buf > p->buf && !(buf[0] == 0x0B && buf[1] == 0x77) && !(buf[0] == 0x77 && buf[1] == 0x0B) ) continue; buf2 = buf; for(frames = 0; buf2 < end; frames++) { uint8_t buf3[4096]; int i; if(!memcmp(buf2, \"\\x1\\x10\\0\\0\\0\\0\\0\\0\", 8)) buf2+=16; if (buf[0] == 0x77 && buf[1] == 0x0B) { for(i=0; i<8; i+=2) { buf3[i ] = buf[i+1]; buf3[i+1] = buf[i ]; } init_get_bits(&gbc, buf3, 54); }else init_get_bits(&gbc, buf2, 54); if(avpriv_ac3_parse_header(&gbc, &hdr) < 0) break; if(buf2 + hdr.frame_size > end) break; if (buf[0] == 0x77 && buf[1] == 0x0B) { av_assert0(hdr.frame_size <= sizeof(buf3)); for(; i<hdr.frame_size; i+=2) { buf3[i ] = buf[i+1]; buf3[i+1] = buf[i ]; } } if(hdr.bitstream_id>10) codec_id = AV_CODEC_ID_EAC3; buf2 += hdr.frame_size; } max_frames = FFMAX(max_frames, frames); if(buf == p->buf) first_frames = frames; } if(codec_id != expected_codec_id) return 0; // keep this in sync with mp3 probe, both need to avoid // issues with MPEG-files!
if (first_frames>=4) return AVPROBE_SCORE_MAX/2+1; else if(max_frames>200)return AVPROBE_SCORE_MAX/2; else if(max_frames>=4) return AVPROBE_SCORE_MAX/4; else if(max_frames>=1) return 1; else return 0; }"} {"target": 1, "idx": 6812, "func": "static int asf_read_frame_header(AVFormatContext *s, AVIOContext *pb){ ASFContext *asf = s->priv_data; int rsize = 1; int num = avio_r8(pb); int64_t ts0, ts1; asf->packet_segments--; asf->packet_key_frame = num >> 7; asf->stream_index = asf->asfid2avid[num & 0x7f]; // sequence should be ignored! DO_2BITS(asf->packet_property >> 4, asf->packet_seq, 0); DO_2BITS(asf->packet_property >> 2, asf->packet_frag_offset, 0); DO_2BITS(asf->packet_property, asf->packet_replic_size, 0); //printf(\"key:%d stream:%d seq:%d offset:%d replic_size:%d\\n\", asf->packet_key_frame, asf->stream_index, asf->packet_seq, //asf->packet_frag_offset, asf->packet_replic_size); if (asf->packet_replic_size >= 8) { asf->packet_obj_size = avio_rl32(pb); if(asf->packet_obj_size >= (1<<24) || asf->packet_obj_size <= 0){ av_log(s, AV_LOG_ERROR, \"packet_obj_size invalid\\n\"); asf->packet_frag_timestamp = avio_rl32(pb); // timestamp if(asf->packet_replic_size >= 8+38+4){ // for(i=0; ipacket_replic_size-8; i++) // av_log(s, AV_LOG_DEBUG, \"%02X \",avio_r8(pb)); // av_log(s, AV_LOG_DEBUG, \"\\n\"); avio_skip(pb, 10); ts0= avio_rl64(pb); ts1= avio_rl64(pb); avio_skip(pb, 12); avio_rl32(pb); avio_skip(pb, asf->packet_replic_size - 8 - 38 - 4); if(ts0!= -1) asf->packet_frag_timestamp= ts0/10000; else asf->packet_frag_timestamp= AV_NOPTS_VALUE; }else avio_skip(pb, asf->packet_replic_size - 8); rsize += asf->packet_replic_size; // FIXME - check validity } else if (asf->packet_replic_size==1){ // multipacket - frag_offset is beginning timestamp asf->packet_time_start = asf->packet_frag_offset; asf->packet_frag_offset = 0; asf->packet_frag_timestamp = asf->packet_timestamp; asf->packet_time_delta = avio_r8(pb); rsize++; }else if(asf->packet_replic_size!=0){ av_log(s, AV_LOG_ERROR, \"unexpected packet_replic_size of %d\\n\", asf->packet_replic_size); if (asf->packet_flags & 0x01) { DO_2BITS(asf->packet_segsizetype >> 6, asf->packet_frag_size, 0); // 0 is illegal if(asf->packet_frag_size > asf->packet_size_left - rsize){ if (asf->packet_frag_size > asf->packet_size_left - rsize + asf->packet_padsize) { av_log(s, AV_LOG_ERROR, \"packet_frag_size is invalid (%d-%d)\\n\", asf->packet_size_left, rsize); } else { int diff = asf->packet_frag_size - (asf->packet_size_left - rsize); asf->packet_size_left += diff; asf->packet_padsize -= diff; //printf(\"Fragsize %d\\n\", asf->packet_frag_size); } else { asf->packet_frag_size = asf->packet_size_left - rsize; //printf(\"Using rest %d %d %d\\n\", asf->packet_frag_size, asf->packet_size_left, rsize); if (asf->packet_replic_size == 1) { asf->packet_multi_size = asf->packet_frag_size; if (asf->packet_multi_size > asf->packet_size_left) asf->packet_size_left -= rsize; //printf(\"___objsize____ %d %d rs:%d\\n\", asf->packet_obj_size, asf->packet_frag_offset, rsize); return 0;"} {"target": 1, "idx": 6814, "func": "static void mp_decode_line(MotionPixelsContext *mp, GetBitContext *gb, int y) { YuvPixel p; const int y0 = y * mp->avctx->width; int w, i, x = 0; p = mp->vpt[y]; if (mp->changes_map[y0 + x] == 0) { memset(mp->gradient_scale, 1, sizeof(mp->gradient_scale)); ++x; } while (x < mp->avctx->width) { w = mp->changes_map[y0 + x]; if (w != 0) { if ((y & 3) == 0) { if (mp->changes_map[y0 + x + mp->avctx->width] < w || mp->changes_map[y0 + x + mp->avctx->width * 2] < 
w || mp->changes_map[y0 + x + mp->avctx->width * 3] < w) { for (i = (x + 3) & ~3; i < x + w; i += 4) { mp->hpt[((y / 4) * mp->avctx->width + i) / 4] = mp_get_yuv_from_rgb(mp, i, y); } } } x += w; memset(mp->gradient_scale, 1, sizeof(mp->gradient_scale)); p = mp_get_yuv_from_rgb(mp, x - 1, y); } else { p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb)); if ((x & 3) == 0) { if ((y & 3) == 0) { p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb)); p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb)); mp->hpt[((y / 4) * mp->avctx->width + x) / 4] = p; } else { p.v = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].v; p.u = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].u; } } mp_set_rgb_from_yuv(mp, x, y, &p); ++x; } } }"} {"target": 1, "idx": 6815, "func": "int fw_cfg_add_i64(FWCfgState *s, uint16_t key, uint64_t value) { uint64_t *copy; copy = g_malloc(sizeof(value)); *copy = cpu_to_le64(value); return fw_cfg_add_bytes(s, key, (uint8_t *)copy, sizeof(value)); }"} {"target": 1, "idx": 6818, "func": "void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) { const char *codec_type; const char *codec_name; const char *profile = NULL; const AVCodec *p; int64_t bitrate; int new_line = 0; AVRational display_aspect_ratio; const char *separator = enc->dump_separator ? (const char *)enc->dump_separator : \", \"; if (!buf || buf_size <= 0) return; codec_type = av_get_media_type_string(enc->codec_type); codec_name = avcodec_get_name(enc->codec_id); if (enc->profile != FF_PROFILE_UNKNOWN) { if (enc->codec) p = enc->codec; else p = encode ? avcodec_find_encoder(enc->codec_id) : avcodec_find_decoder(enc->codec_id); if (p) profile = av_get_profile_name(p, enc->profile); } snprintf(buf, buf_size, \"%s: %s\", codec_type ? codec_type : \"unknown\", codec_name); buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */ if (enc->codec && strcmp(enc->codec->name, codec_name)) snprintf(buf + strlen(buf), buf_size - strlen(buf), \" (%s)\", enc->codec->name); if (profile) snprintf(buf + strlen(buf), buf_size - strlen(buf), \" (%s)\", profile); if ( enc->codec_type == AVMEDIA_TYPE_VIDEO && av_log_get_level() >= AV_LOG_VERBOSE && enc->refs) snprintf(buf + strlen(buf), buf_size - strlen(buf), \", %d reference frame%s\", enc->refs, enc->refs > 1 ? \"s\" : \"\"); if (enc->codec_tag) { char tag_buf[32]; av_get_codec_tag_string(tag_buf, sizeof(tag_buf), enc->codec_tag); snprintf(buf + strlen(buf), buf_size - strlen(buf), \" (%s / 0x%04X)\", tag_buf, enc->codec_tag); } switch (enc->codec_type) { case AVMEDIA_TYPE_VIDEO: { char detail[256] = \"(\"; av_strlcat(buf, separator, buf_size); snprintf(buf + strlen(buf), buf_size - strlen(buf), \"%s\", enc->pix_fmt == AV_PIX_FMT_NONE ? 
\"none\" : av_get_pix_fmt_name(enc->pix_fmt)); if (enc->bits_per_raw_sample && enc->pix_fmt != AV_PIX_FMT_NONE && enc->bits_per_raw_sample < av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth) av_strlcatf(detail, sizeof(detail), \"%d bpc, \", enc->bits_per_raw_sample); if (enc->color_range != AVCOL_RANGE_UNSPECIFIED) av_strlcatf(detail, sizeof(detail), \"%s, \", av_color_range_name(enc->color_range)); if (enc->colorspace != AVCOL_SPC_UNSPECIFIED || enc->color_primaries != AVCOL_PRI_UNSPECIFIED || enc->color_trc != AVCOL_TRC_UNSPECIFIED) { if (enc->colorspace != (int)enc->color_primaries || enc->colorspace != (int)enc->color_trc) { new_line = 1; av_strlcatf(detail, sizeof(detail), \"%s/%s/%s, \", av_color_space_name(enc->colorspace), av_color_primaries_name(enc->color_primaries), av_color_transfer_name(enc->color_trc)); } else av_strlcatf(detail, sizeof(detail), \"%s, \", av_get_colorspace_name(enc->colorspace)); } if (av_log_get_level() >= AV_LOG_DEBUG && enc->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED) av_strlcatf(detail, sizeof(detail), \"%s, \", av_chroma_location_name(enc->chroma_sample_location)); if (strlen(detail) > 1) { detail[strlen(detail) - 2] = 0; av_strlcatf(buf, buf_size, \"%s)\", detail); } } if (enc->width) { av_strlcat(buf, new_line ? separator : \", \", buf_size); snprintf(buf + strlen(buf), buf_size - strlen(buf), \"%dx%d\", enc->width, enc->height); if (av_log_get_level() >= AV_LOG_VERBOSE && (enc->width != enc->coded_width || enc->height != enc->coded_height)) snprintf(buf + strlen(buf), buf_size - strlen(buf), \" (%dx%d)\", enc->coded_width, enc->coded_height); if (enc->sample_aspect_ratio.num) { av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, enc->width * enc->sample_aspect_ratio.num, enc->height * enc->sample_aspect_ratio.den, 1024 * 1024); snprintf(buf + strlen(buf), buf_size - strlen(buf), \" [SAR %d:%d DAR %d:%d]\", enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den, display_aspect_ratio.num, display_aspect_ratio.den); } if (av_log_get_level() >= AV_LOG_DEBUG) { int g = av_gcd(enc->time_base.num, enc->time_base.den); snprintf(buf + strlen(buf), buf_size - strlen(buf), \", %d/%d\", enc->time_base.num / g, enc->time_base.den / g); } } if (encode) { snprintf(buf + strlen(buf), buf_size - strlen(buf), \", q=%d-%d\", enc->qmin, enc->qmax); } else { if (enc->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS) snprintf(buf + strlen(buf), buf_size - strlen(buf), \", Closed Captions\"); if (enc->properties & FF_CODEC_PROPERTY_LOSSLESS) snprintf(buf + strlen(buf), buf_size - strlen(buf), \", lossless\"); } break; case AVMEDIA_TYPE_AUDIO: av_strlcat(buf, separator, buf_size); if (enc->sample_rate) { snprintf(buf + strlen(buf), buf_size - strlen(buf), \"%d Hz, \", enc->sample_rate); } av_get_channel_layout_string(buf + strlen(buf), buf_size - strlen(buf), enc->channels, enc->channel_layout); if (enc->sample_fmt != AV_SAMPLE_FMT_NONE) { snprintf(buf + strlen(buf), buf_size - strlen(buf), \", %s\", av_get_sample_fmt_name(enc->sample_fmt)); } if ( enc->bits_per_raw_sample > 0 && enc->bits_per_raw_sample != av_get_bytes_per_sample(enc->sample_fmt) * 8) snprintf(buf + strlen(buf), buf_size - strlen(buf), \" (%d bit)\", enc->bits_per_raw_sample); break; case AVMEDIA_TYPE_DATA: if (av_log_get_level() >= AV_LOG_DEBUG) { int g = av_gcd(enc->time_base.num, enc->time_base.den); if (g) snprintf(buf + strlen(buf), buf_size - strlen(buf), \", %d/%d\", enc->time_base.num / g, enc->time_base.den / g); } break; case AVMEDIA_TYPE_SUBTITLE: if (enc->width) 
snprintf(buf + strlen(buf), buf_size - strlen(buf), \", %dx%d\", enc->width, enc->height); break; default: return; } if (encode) { if (enc->flags & AV_CODEC_FLAG_PASS1) snprintf(buf + strlen(buf), buf_size - strlen(buf), \", pass 1\"); if (enc->flags & AV_CODEC_FLAG_PASS2) snprintf(buf + strlen(buf), buf_size - strlen(buf), \", pass 2\"); } bitrate = get_bit_rate(enc); if (bitrate != 0) { snprintf(buf + strlen(buf), buf_size - strlen(buf), \", %\"PRId64\" kb/s\", bitrate / 1000); } else if (enc->rc_max_rate > 0) { snprintf(buf + strlen(buf), buf_size - strlen(buf), \", max. %\"PRId64\" kb/s\", (int64_t)enc->rc_max_rate / 1000); } }"} {"target": 0, "idx": 6826, "func": "static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd) { NvmeDeleteQ *c = (NvmeDeleteQ *)cmd; NvmeRequest *req, *next; NvmeSQueue *sq; NvmeCQueue *cq; uint16_t qid = le16_to_cpu(c->qid); if (!qid || nvme_check_sqid(n, qid)) { return NVME_INVALID_QID | NVME_DNR; } sq = n->sq[qid]; while (!QTAILQ_EMPTY(&sq->out_req_list)) { req = QTAILQ_FIRST(&sq->out_req_list); assert(req->aiocb); bdrv_aio_cancel(req->aiocb); } if (!nvme_check_cqid(n, sq->cqid)) { cq = n->cq[sq->cqid]; QTAILQ_REMOVE(&cq->sq_list, sq, entry); nvme_post_cqes(cq); QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) { if (req->sq == sq) { QTAILQ_REMOVE(&cq->req_list, req, entry); QTAILQ_INSERT_TAIL(&sq->req_list, req, entry); } } } nvme_free_sq(sq, n); return NVME_SUCCESS; }"} {"target": 0, "idx": 6833, "func": "static void qmp_query_auth(VncDisplay *vd, VncInfo2 *info) { switch (vd->auth) { case VNC_AUTH_VNC: info->auth = VNC_PRIMARY_AUTH_VNC; break; case VNC_AUTH_RA2: info->auth = VNC_PRIMARY_AUTH_RA2; break; case VNC_AUTH_RA2NE: info->auth = VNC_PRIMARY_AUTH_RA2NE; break; case VNC_AUTH_TIGHT: info->auth = VNC_PRIMARY_AUTH_TIGHT; break; case VNC_AUTH_ULTRA: info->auth = VNC_PRIMARY_AUTH_ULTRA; break; case VNC_AUTH_TLS: info->auth = VNC_PRIMARY_AUTH_TLS; break; case VNC_AUTH_VENCRYPT: info->auth = VNC_PRIMARY_AUTH_VENCRYPT; info->has_vencrypt = true; switch (vd->subauth) { case VNC_AUTH_VENCRYPT_PLAIN: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_PLAIN; break; case VNC_AUTH_VENCRYPT_TLSNONE: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_TLS_NONE; break; case VNC_AUTH_VENCRYPT_TLSVNC: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_TLS_VNC; break; case VNC_AUTH_VENCRYPT_TLSPLAIN: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_TLS_PLAIN; break; case VNC_AUTH_VENCRYPT_X509NONE: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_X509_NONE; break; case VNC_AUTH_VENCRYPT_X509VNC: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_X509_VNC; break; case VNC_AUTH_VENCRYPT_X509PLAIN: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_X509_PLAIN; break; case VNC_AUTH_VENCRYPT_TLSSASL: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_TLS_SASL; break; case VNC_AUTH_VENCRYPT_X509SASL: info->vencrypt = VNC_VENCRYPT_SUB_AUTH_X509_SASL; break; default: info->has_vencrypt = false; break; } break; case VNC_AUTH_SASL: info->auth = VNC_PRIMARY_AUTH_SASL; break; case VNC_AUTH_NONE: default: info->auth = VNC_PRIMARY_AUTH_NONE; break; } }"} {"target": 0, "idx": 6835, "func": "static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext) { TCGv_i32 r_src1, r_src2; TCGv_i64 r_temp, r_temp2; r_src1 = tcg_temp_new_i32(); r_src2 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(r_src1, src1); tcg_gen_trunc_tl_i32(r_src2, src2); r_temp = tcg_temp_new_i64(); r_temp2 = tcg_temp_new_i64(); if (sign_ext) { tcg_gen_ext_i32_i64(r_temp, r_src2); tcg_gen_ext_i32_i64(r_temp2, r_src1); } else { tcg_gen_extu_i32_i64(r_temp, r_src2); tcg_gen_extu_i32_i64(r_temp2, 
r_src1); } tcg_gen_mul_i64(r_temp2, r_temp, r_temp2); tcg_gen_shri_i64(r_temp, r_temp2, 32); tcg_gen_trunc_i64_tl(cpu_tmp0, r_temp); tcg_temp_free_i64(r_temp); tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff); tcg_gen_trunc_i64_tl(dst, r_temp2); tcg_temp_free_i64(r_temp2); tcg_temp_free_i32(r_src1); tcg_temp_free_i32(r_src2); }"} {"target": 0, "idx": 6839, "func": "AioContext *aio_context_new(void) { return (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext)); }"} {"target": 0, "idx": 6856, "func": "static int check_refcounts_l1(BlockDriverState *bs, BdrvCheckResult *res, uint16_t *refcount_table, int refcount_table_size, int64_t l1_table_offset, int l1_size, int flags) { BDRVQcowState *s = bs->opaque; uint64_t *l1_table, l2_offset, l1_size2; int i, ret; l1_size2 = l1_size * sizeof(uint64_t); /* Mark L1 table as used */ inc_refcounts(bs, res, refcount_table, refcount_table_size, l1_table_offset, l1_size2); /* Read L1 table entries from disk */ if (l1_size2 == 0) { l1_table = NULL; } else { l1_table = g_try_malloc(l1_size2); if (l1_table == NULL) { ret = -ENOMEM; goto fail; } if (bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2) != l1_size2) goto fail; for(i = 0;i < l1_size; i++) be64_to_cpus(&l1_table[i]); } /* Do the actual checks */ for(i = 0; i < l1_size; i++) { l2_offset = l1_table[i]; if (l2_offset) { /* Mark L2 table as used */ l2_offset &= L1E_OFFSET_MASK; inc_refcounts(bs, res, refcount_table, refcount_table_size, l2_offset, s->cluster_size); /* L2 tables are cluster aligned */ if (offset_into_cluster(s, l2_offset)) { fprintf(stderr, \"ERROR l2_offset=%\" PRIx64 \": Table is not \" \"cluster aligned; L1 entry corrupted\\n\", l2_offset); res->corruptions++; } /* Process and check L2 entries */ ret = check_refcounts_l2(bs, res, refcount_table, refcount_table_size, l2_offset, flags); if (ret < 0) { goto fail; } } } g_free(l1_table); return 0; fail: fprintf(stderr, \"ERROR: I/O error in check_refcounts_l1\\n\"); res->check_errors++; g_free(l1_table); return -EIO; }"} {"target": 0, "idx": 6864, "func": "void ppce500_init(QEMUMachineInitArgs *args, PPCE500Params *params) { MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); PCIBus *pci_bus; CPUPPCState *env = NULL; uint64_t elf_entry; uint64_t elf_lowaddr; hwaddr entry=0; hwaddr loadaddr=UIMAGE_LOAD_BASE; target_long kernel_size=0; target_ulong dt_base = 0; target_ulong initrd_base = 0; target_long initrd_size = 0; target_ulong cur_base = 0; int i; unsigned int pci_irq_nrs[4] = {1, 2, 3, 4}; qemu_irq **irqs, *mpic; DeviceState *dev; CPUPPCState *firstenv = NULL; MemoryRegion *ccsr_addr_space; SysBusDevice *s; PPCE500CCSRState *ccsr; /* Setup CPUs */ if (args->cpu_model == NULL) { args->cpu_model = \"e500v2_v30\"; } irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *)); irqs[0] = g_malloc0(smp_cpus * sizeof(qemu_irq) * OPENPIC_OUTPUT_NB); for (i = 0; i < smp_cpus; i++) { PowerPCCPU *cpu; CPUState *cs; qemu_irq *input; cpu = cpu_ppc_init(args->cpu_model); if (cpu == NULL) { fprintf(stderr, \"Unable to initialize CPU!\\n\"); exit(1); } env = &cpu->env; cs = CPU(cpu); if (!firstenv) { firstenv = env; } irqs[i] = irqs[0] + (i * OPENPIC_OUTPUT_NB); input = (qemu_irq *)env->irq_inputs; irqs[i][OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT]; irqs[i][OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT]; env->spr[SPR_BOOKE_PIR] = cs->cpu_index = i; env->mpic_iack = MPC8544_CCSRBAR_BASE + MPC8544_MPIC_REGS_OFFSET + 0xa0; ppc_booke_timers_init(cpu, 400000000, PPC_TIMER_E500); /* Register reset 
handler */ if (!i) { /* Primary CPU */ struct boot_info *boot_info; boot_info = g_malloc0(sizeof(struct boot_info)); qemu_register_reset(ppce500_cpu_reset, cpu); env->load_info = boot_info; } else { /* Secondary CPUs */ qemu_register_reset(ppce500_cpu_reset_sec, cpu); } } env = firstenv; /* Fixup Memory size on a alignment boundary */ ram_size &= ~(RAM_SIZES_ALIGN - 1); args->ram_size = ram_size; /* Register Memory */ memory_region_init_ram(ram, NULL, \"mpc8544ds.ram\", ram_size); vmstate_register_ram_global(ram); memory_region_add_subregion(address_space_mem, 0, ram); dev = qdev_create(NULL, \"e500-ccsr\"); object_property_add_child(qdev_get_machine(), \"e500-ccsr\", OBJECT(dev), NULL); qdev_init_nofail(dev); ccsr = CCSR(dev); ccsr_addr_space = &ccsr->ccsr_space; memory_region_add_subregion(address_space_mem, MPC8544_CCSRBAR_BASE, ccsr_addr_space); mpic = ppce500_init_mpic(params, ccsr_addr_space, irqs); /* Serial */ if (serial_hds[0]) { serial_mm_init(ccsr_addr_space, MPC8544_SERIAL0_REGS_OFFSET, 0, mpic[42], 399193, serial_hds[0], DEVICE_BIG_ENDIAN); } if (serial_hds[1]) { serial_mm_init(ccsr_addr_space, MPC8544_SERIAL1_REGS_OFFSET, 0, mpic[42], 399193, serial_hds[1], DEVICE_BIG_ENDIAN); } /* General Utility device */ dev = qdev_create(NULL, \"mpc8544-guts\"); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); memory_region_add_subregion(ccsr_addr_space, MPC8544_UTIL_OFFSET, sysbus_mmio_get_region(s, 0)); /* PCI */ dev = qdev_create(NULL, \"e500-pcihost\"); qdev_prop_set_uint32(dev, \"first_slot\", params->pci_first_slot); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); sysbus_connect_irq(s, 0, mpic[pci_irq_nrs[0]]); sysbus_connect_irq(s, 1, mpic[pci_irq_nrs[1]]); sysbus_connect_irq(s, 2, mpic[pci_irq_nrs[2]]); sysbus_connect_irq(s, 3, mpic[pci_irq_nrs[3]]); memory_region_add_subregion(ccsr_addr_space, MPC8544_PCI_REGS_OFFSET, sysbus_mmio_get_region(s, 0)); pci_bus = (PCIBus *)qdev_get_child_bus(dev, \"pci.0\"); if (!pci_bus) printf(\"couldn't create PCI controller!\\n\"); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, MPC8544_PCI_IO); if (pci_bus) { /* Register network interfaces. */ for (i = 0; i < nb_nics; i++) { pci_nic_init_nofail(&nd_table[i], pci_bus, \"virtio\", NULL); } } /* Register spinning region */ sysbus_create_simple(\"e500-spin\", MPC8544_SPIN_BASE, NULL); /* Load kernel. */ if (args->kernel_filename) { kernel_size = load_uimage(args->kernel_filename, &entry, &loadaddr, NULL); if (kernel_size < 0) { kernel_size = load_elf(args->kernel_filename, NULL, NULL, &elf_entry, &elf_lowaddr, NULL, 1, ELF_MACHINE, 0); entry = elf_entry; loadaddr = elf_lowaddr; } /* XXX try again as binary */ if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", args->kernel_filename); exit(1); } cur_base = loadaddr + kernel_size; /* Reserve space for dtb */ dt_base = (cur_base + DTC_LOAD_PAD) & ~DTC_PAD_MASK; cur_base += DTB_MAX_SIZE; } /* Load initrd. */ if (args->initrd_filename) { initrd_base = (cur_base + INITRD_LOAD_PAD) & ~INITRD_PAD_MASK; initrd_size = load_image_targphys(args->initrd_filename, initrd_base, ram_size - initrd_base); if (initrd_size < 0) { fprintf(stderr, \"qemu: could not load initial ram disk '%s'\\n\", args->initrd_filename); exit(1); } cur_base = initrd_base + initrd_size; } /* If we're loading a kernel directly, we must load the device tree too. 
*/ if (args->kernel_filename) { struct boot_info *boot_info; int dt_size; dt_size = ppce500_load_device_tree(env, args, params, dt_base, initrd_base, initrd_size); if (dt_size < 0) { fprintf(stderr, \"couldn't load device tree\\n\"); exit(1); } assert(dt_size < DTB_MAX_SIZE); boot_info = env->load_info; boot_info->entry = entry; boot_info->dt_base = dt_base; boot_info->dt_size = dt_size; } if (kvm_enabled()) { kvmppc_init(); } }"} {"target": 0, "idx": 6866, "func": "static int write_f(BlockBackend *blk, int argc, char **argv) { struct timeval t1, t2; int Cflag = 0, pflag = 0, qflag = 0, bflag = 0, Pflag = 0, zflag = 0; int cflag = 0; int c, cnt; char *buf = NULL; int64_t offset; int count; /* Some compilers get confused and warn if this is not initialized. */ int total = 0; int pattern = 0xcd; while ((c = getopt(argc, argv, \"bcCpP:qz\")) != EOF) { switch (c) { case 'b': bflag = 1; break; case 'c': cflag = 1; break; case 'C': Cflag = 1; break; case 'p': pflag = 1; break; case 'P': Pflag = 1; pattern = parse_pattern(optarg); if (pattern < 0) { return 0; } break; case 'q': qflag = 1; break; case 'z': zflag = 1; break; default: return qemuio_command_usage(&write_cmd); } } if (optind != argc - 2) { return qemuio_command_usage(&write_cmd); } if (bflag + pflag + zflag > 1) { printf(\"-b, -p, or -z cannot be specified at the same time\\n\"); return 0; } if (zflag && Pflag) { printf(\"-z and -P cannot be specified at the same time\\n\"); return 0; } offset = cvtnum(argv[optind]); if (offset < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[optind]); return 0; } optind++; count = cvtnum(argv[optind]); if (count < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[optind]); return 0; } if (!pflag) { if (offset & 0x1ff) { printf(\"offset %\" PRId64 \" is not sector aligned\\n\", offset); return 0; } if (count & 0x1ff) { printf(\"count %d is not sector aligned\\n\", count); return 0; } } if (!zflag) { buf = qemu_io_alloc(blk, count, pattern); } gettimeofday(&t1, NULL); if (pflag) { cnt = do_pwrite(blk, buf, offset, count, &total); } else if (bflag) { cnt = do_save_vmstate(blk, buf, offset, count, &total); } else if (zflag) { cnt = do_co_write_zeroes(blk, offset, count, &total); } else if (cflag) { cnt = do_write_compressed(blk, buf, offset, count, &total); } else { cnt = do_write(blk, buf, offset, count, &total); } gettimeofday(&t2, NULL); if (cnt < 0) { printf(\"write failed: %s\\n\", strerror(-cnt)); goto out; } if (qflag) { goto out; } /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, t1); print_report(\"wrote\", &t2, offset, count, total, cnt, Cflag); out: if (!zflag) { qemu_io_free(buf); } return 0; }"} {"target": 1, "idx": 6886, "func": "static int lance_init(SysBusDevice *dev) { SysBusPCNetState *d = FROM_SYSBUS(SysBusPCNetState, dev); PCNetState *s = &d->state; memory_region_init_io(&s->mmio, &lance_mem_ops, s, \"lance-mmio\", 4); qdev_init_gpio_in(&dev->qdev, parent_lance_reset, 1); sysbus_init_mmio_region(dev, &s->mmio); sysbus_init_irq(dev, &s->irq); s->phys_mem_read = ledma_memory_read; s->phys_mem_write = ledma_memory_write; return pcnet_common_init(&dev->qdev, s, &net_lance_info); }"} {"target": 0, "idx": 6917, "func": "static int decode_slice_header(H264Context *h, H264Context *h0) { unsigned int first_mb_in_slice; unsigned int pps_id; int ret; unsigned int slice_type, tmp, i, j; int default_ref_list_done = 0; int last_pic_structure, last_pic_droppable; int needs_reinit = 0; int field_pic_flag, bottom_field_flag; h->me.qpel_put = 
h->h264qpel.put_h264_qpel_pixels_tab; h->me.qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab; first_mb_in_slice = get_ue_golomb(&h->gb); if (first_mb_in_slice == 0) { // FIXME better field boundary detection if (h0->current_slice && h->cur_pic_ptr && FIELD_PICTURE(h)) { field_end(h, 1); } h0->current_slice = 0; if (!h0->first_field) { if (h->cur_pic_ptr && !h->droppable) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); } h->cur_pic_ptr = NULL; } } slice_type = get_ue_golomb_31(&h->gb); if (slice_type > 9) { av_log(h->avctx, AV_LOG_ERROR, \"slice type %d too large at %d %d\\n\", slice_type, h->mb_x, h->mb_y); return AVERROR_INVALIDDATA; } if (slice_type > 4) { slice_type -= 5; h->slice_type_fixed = 1; } else h->slice_type_fixed = 0; slice_type = golomb_to_pict_type[slice_type]; if (slice_type == AV_PICTURE_TYPE_I || (h0->current_slice != 0 && slice_type == h0->last_slice_type)) { default_ref_list_done = 1; } h->slice_type = slice_type; h->slice_type_nos = slice_type & 3; if (h->nal_unit_type == NAL_IDR_SLICE && h->slice_type_nos != AV_PICTURE_TYPE_I) { av_log(h->avctx, AV_LOG_ERROR, \"A non-intra slice in an IDR NAL unit.\\n\"); return AVERROR_INVALIDDATA; } // to make a few old functions happy, it's wrong though h->pict_type = h->slice_type; pps_id = get_ue_golomb(&h->gb); if (pps_id >= MAX_PPS_COUNT) { av_log(h->avctx, AV_LOG_ERROR, \"pps_id %u out of range\\n\", pps_id); return AVERROR_INVALIDDATA; } if (!h0->pps_buffers[pps_id]) { av_log(h->avctx, AV_LOG_ERROR, \"non-existing PPS %u referenced\\n\", pps_id); return AVERROR_INVALIDDATA; } h->pps = *h0->pps_buffers[pps_id]; if (!h0->sps_buffers[h->pps.sps_id]) { av_log(h->avctx, AV_LOG_ERROR, \"non-existing SPS %u referenced\\n\", h->pps.sps_id); return AVERROR_INVALIDDATA; } if (h->pps.sps_id != h->sps.sps_id || h0->sps_buffers[h->pps.sps_id]->new) { h0->sps_buffers[h->pps.sps_id]->new = 0; h->sps = *h0->sps_buffers[h->pps.sps_id]; if (h->bit_depth_luma != h->sps.bit_depth_luma || h->chroma_format_idc != h->sps.chroma_format_idc) { h->bit_depth_luma = h->sps.bit_depth_luma; h->chroma_format_idc = h->sps.chroma_format_idc; needs_reinit = 1; } if ((ret = h264_set_parameter_from_sps(h)) < 0) return ret; } h->avctx->profile = ff_h264_get_profile(&h->sps); h->avctx->level = h->sps.level_idc; h->avctx->refs = h->sps.ref_frame_count; if (h->mb_width != h->sps.mb_width || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)) needs_reinit = 1; h->mb_width = h->sps.mb_width; h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag); h->mb_num = h->mb_width * h->mb_height; h->mb_stride = h->mb_width + 1; h->b_stride = h->mb_width * 4; h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p h->width = 16 * h->mb_width; h->height = 16 * h->mb_height; ret = init_dimensions(h); if (ret < 0) return ret; if (h->sps.video_signal_type_present_flag) { h->avctx->color_range = h->sps.full_range ? 
AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; if (h->sps.colour_description_present_flag) { if (h->avctx->colorspace != h->sps.colorspace) needs_reinit = 1; h->avctx->color_primaries = h->sps.color_primaries; h->avctx->color_trc = h->sps.color_trc; h->avctx->colorspace = h->sps.colorspace; } } if (h->context_initialized && (h->width != h->avctx->coded_width || h->height != h->avctx->coded_height || needs_reinit)) { if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, \"changing width %d -> %d / height %d -> %d on \" \"slice %d\\n\", h->width, h->avctx->coded_width, h->height, h->avctx->coded_height, h0->current_slice + 1); return AVERROR_INVALIDDATA; } flush_change(h); if ((ret = get_pixel_format(h)) < 0) return ret; h->avctx->pix_fmt = ret; av_log(h->avctx, AV_LOG_INFO, \"Reinit context to %dx%d, \" \"pix_fmt: %d\\n\", h->width, h->height, h->avctx->pix_fmt); if ((ret = h264_slice_header_init(h, 1)) < 0) { av_log(h->avctx, AV_LOG_ERROR, \"h264_slice_header_init() failed\\n\"); return ret; } } if (!h->context_initialized) { if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, \"Cannot (re-)initialize context during parallel decoding.\\n\"); return AVERROR_PATCHWELCOME; } if ((ret = get_pixel_format(h)) < 0) return ret; h->avctx->pix_fmt = ret; if ((ret = h264_slice_header_init(h, 0)) < 0) { av_log(h->avctx, AV_LOG_ERROR, \"h264_slice_header_init() failed\\n\"); return ret; } } if (h == h0 && h->dequant_coeff_pps != pps_id) { h->dequant_coeff_pps = pps_id; init_dequant_tables(h); } h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num); h->mb_mbaff = 0; h->mb_aff_frame = 0; last_pic_structure = h0->picture_structure; last_pic_droppable = h0->droppable; h->droppable = h->nal_ref_idc == 0; if (h->sps.frame_mbs_only_flag) { h->picture_structure = PICT_FRAME; } else { field_pic_flag = get_bits1(&h->gb); if (field_pic_flag) { bottom_field_flag = get_bits1(&h->gb); h->picture_structure = PICT_TOP_FIELD + bottom_field_flag; } else { h->picture_structure = PICT_FRAME; h->mb_aff_frame = h->sps.mb_aff; } } h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME; if (h0->current_slice != 0) { if (last_pic_structure != h->picture_structure || last_pic_droppable != h->droppable) { av_log(h->avctx, AV_LOG_ERROR, \"Changing field mode (%d -> %d) between slices is not allowed\\n\", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (!h0->cur_pic_ptr) { av_log(h->avctx, AV_LOG_ERROR, \"unset cur_pic_ptr on slice %d\\n\", h0->current_slice + 1); return AVERROR_INVALIDDATA; } } else { /* Shorten frame num gaps so we don't have to allocate reference * frames just to throw them away */ if (h->frame_num != h->prev_frame_num) { int unwrap_prev_frame_num = h->prev_frame_num; int max_frame_num = 1 << h->sps.log2_max_frame_num; if (unwrap_prev_frame_num > h->frame_num) unwrap_prev_frame_num -= max_frame_num; if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) { unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1; if (unwrap_prev_frame_num < 0) unwrap_prev_frame_num += max_frame_num; h->prev_frame_num = unwrap_prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * Here, we're using that to see if we should mark previously * decode frames as \"finished\". * We have to do that before the \"dummy\" in-between frame allocation, * since that can modify s->current_picture_ptr. 
*/ if (h0->first_field) { assert(h0->cur_pic_ptr); assert(h0->cur_pic_ptr->f.buf[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { if (h0->cur_pic_ptr->frame_num != h->frame_num) { /* This and previous field were reference, but had * different frame_nums. Consider this field first in * pair. Throw away previous field except for reference * purposes. */ if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { /* Second field in complementary pair */ if (!((last_pic_structure == PICT_TOP_FIELD && h->picture_structure == PICT_BOTTOM_FIELD) || (last_pic_structure == PICT_BOTTOM_FIELD && h->picture_structure == PICT_TOP_FIELD))) { av_log(h->avctx, AV_LOG_ERROR, \"Invalid field mode combination %d/%d\\n\", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (last_pic_droppable != h->droppable) { avpriv_request_sample(h->avctx, \"Found reference and non-reference fields in the same frame, which\"); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_PATCHWELCOME; } } } } while (h->frame_num != h->prev_frame_num && h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) { Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL; av_log(h->avctx, AV_LOG_DEBUG, \"Frame num gap %d %d\\n\", h->frame_num, h->prev_frame_num); ret = h264_frame_start(h); if (ret < 0) { h0->first_field = 0; return ret; } h->prev_frame_num++; h->prev_frame_num %= 1 << h->sps.log2_max_frame_num; h->cur_pic_ptr->frame_num = h->prev_frame_num; ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); ret = ff_generate_sliding_window_mmcos(h, 1); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; ret = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; /* Error concealment: If a ref is missing, copy the previous ref * in its place. * FIXME: Avoiding a memcpy would be nice, but ref handling makes * many assumptions about there being no actual duplicates. * FIXME: This does not copy padding for out-of-frame motion * vectors. Given we are concealing a lost frame, this probably * is not noticeable by comparison, but it should be fixed. */ if (h->short_ref_count) { if (prev) { av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize, (const uint8_t **)prev->f.data, prev->f.linesize, h->avctx->pix_fmt, h->mb_width * 16, h->mb_height * 16); h->short_ref[0]->poc = prev->poc + 2; } h->short_ref[0]->frame_num = h->prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * We're using that to see whether to continue decoding in that * frame, or to allocate a new one. 
*/ if (h0->first_field) { assert(h0->cur_pic_ptr); assert(h0->cur_pic_ptr->f.buf[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ h0->cur_pic_ptr = NULL; h0->first_field = FIELD_PICTURE(h); } else { if (h0->cur_pic_ptr->frame_num != h->frame_num) { /* This and the previous field had different frame_nums. * Consider this field first in pair. Throw away previous * one except for reference purposes. */ h0->first_field = 1; h0->cur_pic_ptr = NULL; } else { /* Second field in complementary pair */ h0->first_field = 0; } } } else { /* Frame or first field in a potentially complementary pair */ h0->first_field = FIELD_PICTURE(h); } if (!FIELD_PICTURE(h) || h0->first_field) { if (h264_frame_start(h) < 0) { h0->first_field = 0; return AVERROR_INVALIDDATA; } } else { release_unused_pictures(h, 0); } } if (h != h0 && (ret = clone_slice(h, h0)) < 0) return ret; h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup assert(h->mb_num == h->mb_width * h->mb_height); if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num || first_mb_in_slice >= h->mb_num) { av_log(h->avctx, AV_LOG_ERROR, \"first_mb_in_slice overflow\\n\"); return AVERROR_INVALIDDATA; } h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width; h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE(h); if (h->picture_structure == PICT_BOTTOM_FIELD) h->resync_mb_y = h->mb_y = h->mb_y + 1; assert(h->mb_y < h->mb_height); if (h->picture_structure == PICT_FRAME) { h->curr_pic_num = h->frame_num; h->max_pic_num = 1 << h->sps.log2_max_frame_num; } else { h->curr_pic_num = 2 * h->frame_num + 1; h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1); } if (h->nal_unit_type == NAL_IDR_SLICE) get_ue_golomb(&h->gb); /* idr_pic_id */ if (h->sps.poc_type == 0) { h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb); if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc_bottom = get_se_golomb(&h->gb); } if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) { h->delta_poc[0] = get_se_golomb(&h->gb); if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc[1] = get_se_golomb(&h->gb); } ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc); if (h->pps.redundant_pic_cnt_present) h->redundant_pic_count = get_ue_golomb(&h->gb); ret = ff_set_ref_count(h); if (ret < 0) return ret; else if (ret == 1) default_ref_list_done = 0; if (!default_ref_list_done) ff_h264_fill_default_ref_list(h); if (h->slice_type_nos != AV_PICTURE_TYPE_I) { ret = ff_h264_decode_ref_pic_list_reordering(h); if (ret < 0) { h->ref_count[1] = h->ref_count[0] = 0; return ret; } } if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) || (h->pps.weighted_bipred_idc == 1 && h->slice_type_nos == AV_PICTURE_TYPE_B)) ff_pred_weight_table(h); else if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, -1); } else { h->use_weight = 0; for (i = 0; i < 2; i++) { h->luma_weight_flag[i] = 0; h->chroma_weight_flag[i] = 0; } } // If frame-mt is enabled, only update mmco tables for the first slice // in a field. Subsequent slices can temporarily clobber h->mmco_index // or h->mmco, which will cause ref list mix-ups and decoding errors // further down the line. 
This may break decoding if the first slice is // corrupt, thus we only do this if frame-mt is enabled. if (h->nal_ref_idc) { ret = ff_h264_decode_ref_pic_marking(h0, &h->gb, !(h->avctx->active_thread_type & FF_THREAD_FRAME) || h0->current_slice == 0); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return AVERROR_INVALIDDATA; } if (FRAME_MBAFF(h)) { ff_h264_fill_mbaff_ref_list(h); if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, 0); implicit_weight_table(h, 1); } } if (h->slice_type_nos == AV_PICTURE_TYPE_B && !h->direct_spatial_mv_pred) ff_h264_direct_dist_scale_factor(h); ff_h264_direct_ref_list_init(h); if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) { tmp = get_ue_golomb_31(&h->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, \"cabac_init_idc %u overflow\\n\", tmp); return AVERROR_INVALIDDATA; } h->cabac_init_idc = tmp; } h->last_qscale_diff = 0; tmp = h->pps.init_qp + get_se_golomb(&h->gb); if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) { av_log(h->avctx, AV_LOG_ERROR, \"QP %u out of range\\n\", tmp); return AVERROR_INVALIDDATA; } h->qscale = tmp; h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale); h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale); // FIXME qscale / qp ... stuff if (h->slice_type == AV_PICTURE_TYPE_SP) get_bits1(&h->gb); /* sp_for_switch_flag */ if (h->slice_type == AV_PICTURE_TYPE_SP || h->slice_type == AV_PICTURE_TYPE_SI) get_se_golomb(&h->gb); /* slice_qs_delta */ h->deblocking_filter = 1; h->slice_alpha_c0_offset = 52; h->slice_beta_offset = 52; if (h->pps.deblocking_filter_parameters_present) { tmp = get_ue_golomb_31(&h->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, \"deblocking_filter_idc %u out of range\\n\", tmp); return AVERROR_INVALIDDATA; } h->deblocking_filter = tmp; if (h->deblocking_filter < 2) h->deblocking_filter ^= 1; // 1<->0 if (h->deblocking_filter) { h->slice_alpha_c0_offset += get_se_golomb(&h->gb) << 1; h->slice_beta_offset += get_se_golomb(&h->gb) << 1; if (h->slice_alpha_c0_offset > 104U || h->slice_beta_offset > 104U) { av_log(h->avctx, AV_LOG_ERROR, \"deblocking filter parameters %d %d out of range\\n\", h->slice_alpha_c0_offset, h->slice_beta_offset); return AVERROR_INVALIDDATA; } } } if (h->avctx->skip_loop_filter >= AVDISCARD_ALL || (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != AV_PICTURE_TYPE_I) || (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == AV_PICTURE_TYPE_B) || (h->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0)) h->deblocking_filter = 0; if (h->deblocking_filter == 1 && h0->max_contexts > 1) { if (h->avctx->flags2 & CODEC_FLAG2_FAST) { /* Cheat slightly for speed: * Do not bother to deblock across slices. 
*/ h->deblocking_filter = 2; } else { h0->max_contexts = 1; if (!h0->single_decode_warning) { av_log(h->avctx, AV_LOG_INFO, \"Cannot parallelize deblocking type 1, decoding such frames in sequential order\\n\"); h0->single_decode_warning = 1; } if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, \"Deblocking switched inside frame.\\n\"); return 1; } } } h->qp_thresh = 15 + 52 - FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) - FFMAX3(0, h->pps.chroma_qp_index_offset[0], h->pps.chroma_qp_index_offset[1]) + 6 * (h->sps.bit_depth_luma - 8); h0->last_slice_type = slice_type; h->slice_num = ++h0->current_slice; if (h->slice_num >= MAX_SLICES) { av_log(h->avctx, AV_LOG_ERROR, \"Too many slices, increase MAX_SLICES and recompile\\n\"); } for (j = 0; j < 2; j++) { int id_list[16]; int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j]; for (i = 0; i < 16; i++) { id_list[i] = 60; if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) { int k; AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer; for (k = 0; k < h->short_ref_count; k++) if (h->short_ref[k]->f.buf[0]->buffer == buf) { id_list[i] = k; break; } for (k = 0; k < h->long_ref_count; k++) if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) { id_list[i] = h->short_ref_count + k; break; } } } ref2frm[0] = ref2frm[1] = -1; for (i = 0; i < 16; i++) ref2frm[i + 2] = 4 * id_list[i] + (h->ref_list[j][i].reference & 3); ref2frm[18 + 0] = ref2frm[18 + 1] = -1; for (i = 16; i < 48; i++) ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] + (h->ref_list[j][i].reference & 3); } if (h->avctx->debug & FF_DEBUG_PICT_INFO) { av_log(h->avctx, AV_LOG_DEBUG, \"slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\\n\", h->slice_num, (h->picture_structure == PICT_FRAME ? \"F\" : h->picture_structure == PICT_TOP_FIELD ? \"T\" : \"B\"), first_mb_in_slice, av_get_picture_type_char(h->slice_type), h->slice_type_fixed ? \" fix\" : \"\", h->nal_unit_type == NAL_IDR_SLICE ? \" IDR\" : \"\", pps_id, h->frame_num, h->cur_pic_ptr->field_poc[0], h->cur_pic_ptr->field_poc[1], h->ref_count[0], h->ref_count[1], h->qscale, h->deblocking_filter, h->slice_alpha_c0_offset / 2 - 26, h->slice_beta_offset / 2 - 26, h->use_weight, h->use_weight == 1 && h->use_weight_chroma ? \"c\" : \"\", h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? 
\"SPAT\" : \"TEMP\") : \"\"); } return 0; }"} {"target": 0, "idx": 6921, "func": "void bdrv_drained_begin(BlockDriverState *bs) { if (!bs->quiesce_counter++) { aio_disable_external(bdrv_get_aio_context(bs)); bdrv_parent_drained_begin(bs); } bdrv_io_unplugged_begin(bs); bdrv_drain_recurse(bs); if (qemu_in_coroutine()) { bdrv_co_yield_to_drain(bs); } else { bdrv_drain_poll(bs); } bdrv_io_unplugged_end(bs); }"} {"target": 0, "idx": 6931, "func": "static ssize_t block_crypto_init_func(QCryptoBlock *block, void *opaque, size_t headerlen, Error **errp) { struct BlockCryptoCreateData *data = opaque; int ret; /* User provided size should reflect amount of space made * available to the guest, so we must take account of that * which will be used by the crypto header */ data->size += headerlen; qemu_opt_set_number(data->opts, BLOCK_OPT_SIZE, data->size, &error_abort); ret = bdrv_create_file(data->filename, data->opts, errp); if (ret < 0) { return -1; } data->blk = blk_new_open(data->filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, errp); if (!data->blk) { return -1; } return 0; }"} {"target": 0, "idx": 6933, "func": "ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size) { RAMBlock *new_block, *block; size = TARGET_PAGE_ALIGN(size); new_block = qemu_mallocz(sizeof(*new_block)); if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { char *id = dev->parent_bus->info->get_dev_path(dev); if (id) { snprintf(new_block->idstr, sizeof(new_block->idstr), \"%s/\", id); qemu_free(id); } } pstrcat(new_block->idstr, sizeof(new_block->idstr), name); QLIST_FOREACH(block, &ram_list.blocks, next) { if (!strcmp(block->idstr, new_block->idstr)) { fprintf(stderr, \"RAMBlock \\\"%s\\\" already registered, abort!\\n\", new_block->idstr); abort(); } } if (mem_path) { #if defined (__linux__) && !defined(TARGET_S390X) new_block->host = file_ram_alloc(new_block, size, mem_path); if (!new_block->host) { new_block->host = qemu_vmalloc(size); #ifdef MADV_MERGEABLE madvise(new_block->host, size, MADV_MERGEABLE); #endif } #else fprintf(stderr, \"-mem-path option unsupported\\n\"); exit(1); #endif } else { #if defined(TARGET_S390X) && defined(CONFIG_KVM) /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */ new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); #else new_block->host = qemu_vmalloc(size); #endif #ifdef MADV_MERGEABLE madvise(new_block->host, size, MADV_MERGEABLE); #endif } new_block->offset = find_ram_offset(size); new_block->length = size; QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty, last_ram_offset() >> TARGET_PAGE_BITS); memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS), 0xff, size >> TARGET_PAGE_BITS); if (kvm_enabled()) kvm_setup_guest_memory(new_block->host, size); return new_block->offset; }"} {"target": 0, "idx": 6941, "func": "void HELPER(ucf64_set_fpscr)(CPUUniCore32State *env, uint32_t val) { int i; uint32_t changed; changed = env->ucf64.xregs[UC32_UCF64_FPSCR]; env->ucf64.xregs[UC32_UCF64_FPSCR] = (val & UCF64_FPSCR_MASK); changed ^= val; if (changed & (UCF64_FPSCR_RND_MASK)) { i = UCF64_FPSCR_RND(val); switch (i) { case 0: i = float_round_nearest_even; break; case 1: i = float_round_to_zero; break; case 2: i = float_round_up; break; case 3: i = float_round_down; break; default: /* 100 and 101 not implement */ cpu_abort(env, \"Unsupported UniCore-F64 round mode\"); } set_float_rounding_mode(i, 
&env->ucf64.fp_status); } i = ucf64_exceptbits_to_host(UCF64_FPSCR_TRAPEN(val)); set_float_exception_flags(i, &env->ucf64.fp_status); }"} {"target": 1, "idx": 6960, "func": "static void qemu_rbd_close(BlockDriverState *bs) { BDRVRBDState *s = bs->opaque; close(s->fds[0]); close(s->fds[1]); qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], NULL, NULL, NULL); rbd_close(s->image); rados_ioctx_destroy(s->io_ctx); g_free(s->snap); rados_shutdown(s->cluster); }"} {"target": 1, "idx": 6967, "func": "static av_cold int channelmap_init(AVFilterContext *ctx) { ChannelMapContext *s = ctx->priv; int ret; char *mapping, separator = '|'; int map_entries = 0; char buf[256]; enum MappingMode mode; uint64_t out_ch_mask = 0; int i; mapping = s->mapping_str; if (!mapping) { mode = MAP_NONE; } else { char *dash = strchr(mapping, '-'); if (!dash) { // short mapping if (av_isdigit(*mapping)) mode = MAP_ONE_INT; else mode = MAP_ONE_STR; } else if (av_isdigit(*mapping)) { if (av_isdigit(*(dash+1))) mode = MAP_PAIR_INT_INT; else mode = MAP_PAIR_INT_STR; } else { if (av_isdigit(*(dash+1))) mode = MAP_PAIR_STR_INT; else mode = MAP_PAIR_STR_STR; } #if FF_API_OLD_FILTER_OPTS if (strchr(mapping, ',')) { av_log(ctx, AV_LOG_WARNING, \"This syntax is deprecated, use \" \"'|' to separate the mappings.\\n\"); separator = ','; } #endif } if (mode != MAP_NONE) { char *sep = mapping; map_entries = 1; while ((sep = strchr(sep, separator))) { if (*++sep) // Allow trailing comma map_entries++; } } if (map_entries > MAX_CH) { av_log(ctx, AV_LOG_ERROR, \"Too many channels mapped: '%d'.\\n\", map_entries); ret = AVERROR(EINVAL); goto fail; } for (i = 0; i < map_entries; i++) { int in_ch_idx = -1, out_ch_idx = -1; uint64_t in_ch = 0, out_ch = 0; static const char err[] = \"Failed to parse channel map\\n\"; switch (mode) { case MAP_ONE_INT: if (get_channel_idx(&mapping, &in_ch_idx, separator, MAX_CH) < 0) { ret = AVERROR(EINVAL); av_log(ctx, AV_LOG_ERROR, err); goto fail; } s->map[i].in_channel_idx = in_ch_idx; s->map[i].out_channel_idx = i; break; case MAP_ONE_STR: if (!get_channel(&mapping, &in_ch, separator)) { av_log(ctx, AV_LOG_ERROR, err); ret = AVERROR(EINVAL); goto fail; } s->map[i].in_channel = in_ch; s->map[i].out_channel_idx = i; break; case MAP_PAIR_INT_INT: if (get_channel_idx(&mapping, &in_ch_idx, '-', MAX_CH) < 0 || get_channel_idx(&mapping, &out_ch_idx, separator, MAX_CH) < 0) { av_log(ctx, AV_LOG_ERROR, err); ret = AVERROR(EINVAL); goto fail; } s->map[i].in_channel_idx = in_ch_idx; s->map[i].out_channel_idx = out_ch_idx; break; case MAP_PAIR_INT_STR: if (get_channel_idx(&mapping, &in_ch_idx, '-', MAX_CH) < 0 || get_channel(&mapping, &out_ch, separator) < 0 || out_ch & out_ch_mask) { av_log(ctx, AV_LOG_ERROR, err); ret = AVERROR(EINVAL); goto fail; } s->map[i].in_channel_idx = in_ch_idx; s->map[i].out_channel = out_ch; out_ch_mask |= out_ch; break; case MAP_PAIR_STR_INT: if (get_channel(&mapping, &in_ch, '-') < 0 || get_channel_idx(&mapping, &out_ch_idx, separator, MAX_CH) < 0) { av_log(ctx, AV_LOG_ERROR, err); ret = AVERROR(EINVAL); goto fail; } s->map[i].in_channel = in_ch; s->map[i].out_channel_idx = out_ch_idx; break; case MAP_PAIR_STR_STR: if (get_channel(&mapping, &in_ch, '-') < 0 || get_channel(&mapping, &out_ch, separator) < 0 || out_ch & out_ch_mask) { av_log(ctx, AV_LOG_ERROR, err); ret = AVERROR(EINVAL); goto fail; } s->map[i].in_channel = in_ch; s->map[i].out_channel = out_ch; out_ch_mask |= out_ch; break; } } s->mode = mode; s->nch = map_entries; s->output_layout = out_ch_mask ? 
out_ch_mask : av_get_default_channel_layout(map_entries); if (s->channel_layout_str) { uint64_t fmt; if ((fmt = av_get_channel_layout(s->channel_layout_str)) == 0) { av_log(ctx, AV_LOG_ERROR, \"Error parsing channel layout: '%s'.\\n\", s->channel_layout_str); ret = AVERROR(EINVAL); goto fail; } if (mode == MAP_NONE) { int i; s->nch = av_get_channel_layout_nb_channels(fmt); for (i = 0; i < s->nch; i++) { s->map[i].in_channel_idx = i; s->map[i].out_channel_idx = i; } } else if (out_ch_mask && out_ch_mask != fmt) { av_get_channel_layout_string(buf, sizeof(buf), 0, out_ch_mask); av_log(ctx, AV_LOG_ERROR, \"Output channel layout '%s' does not match the list of channel mapped: '%s'.\\n\", s->channel_layout_str, buf); ret = AVERROR(EINVAL); goto fail; } else if (s->nch != av_get_channel_layout_nb_channels(fmt)) { av_log(ctx, AV_LOG_ERROR, \"Output channel layout %s does not match the number of channels mapped %d.\\n\", s->channel_layout_str, s->nch); ret = AVERROR(EINVAL); goto fail; } s->output_layout = fmt; } ff_add_channel_layout(&s->channel_layouts, s->output_layout); if (mode == MAP_PAIR_INT_STR || mode == MAP_PAIR_STR_STR) { for (i = 0; i < s->nch; i++) { s->map[i].out_channel_idx = av_get_channel_layout_channel_index( s->output_layout, s->map[i].out_channel); } } fail: av_opt_free(s); return ret; }"} {"target": 1, "idx": 6981, "func": "static void pred_weight_table(HEVCContext *s, GetBitContext *gb) { int i = 0; int j = 0; uint8_t luma_weight_l0_flag[16]; uint8_t chroma_weight_l0_flag[16]; uint8_t luma_weight_l1_flag[16]; uint8_t chroma_weight_l1_flag[16]; int luma_log2_weight_denom; luma_log2_weight_denom = get_ue_golomb_long(gb); if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) av_log(s->avctx, AV_LOG_ERROR, \"luma_log2_weight_denom %d is invalid\\n\", luma_log2_weight_denom); s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3); if (s->ps.sps->chroma_format_idc != 0) { int delta = get_se_golomb(gb); s->sh.chroma_log2_weight_denom = av_clip_uintp2(s->sh.luma_log2_weight_denom + delta, 3); } for (i = 0; i < s->sh.nb_refs[L0]; i++) { luma_weight_l0_flag[i] = get_bits1(gb); if (!luma_weight_l0_flag[i]) { s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom; s->sh.luma_offset_l0[i] = 0; } } if (s->ps.sps->chroma_format_idc != 0) { for (i = 0; i < s->sh.nb_refs[L0]; i++) chroma_weight_l0_flag[i] = get_bits1(gb); } else { for (i = 0; i < s->sh.nb_refs[L0]; i++) chroma_weight_l0_flag[i] = 0; } for (i = 0; i < s->sh.nb_refs[L0]; i++) { if (luma_weight_l0_flag[i]) { int delta_luma_weight_l0 = get_se_golomb(gb); s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0; s->sh.luma_offset_l0[i] = get_se_golomb(gb); } if (chroma_weight_l0_flag[i]) { for (j = 0; j < 2; j++) { int delta_chroma_weight_l0 = get_se_golomb(gb); int delta_chroma_offset_l0 = get_se_golomb(gb); s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0; s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j]) >> s->sh.chroma_log2_weight_denom) + 128), -128, 127); } } else { s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l0[i][0] = 0; s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l0[i][1] = 0; } } if (s->sh.slice_type == HEVC_SLICE_B) { for (i = 0; i < s->sh.nb_refs[L1]; i++) { luma_weight_l1_flag[i] = get_bits1(gb); if (!luma_weight_l1_flag[i]) { s->sh.luma_weight_l1[i] = 1 << 
s->sh.luma_log2_weight_denom; s->sh.luma_offset_l1[i] = 0; } } if (s->ps.sps->chroma_format_idc != 0) { for (i = 0; i < s->sh.nb_refs[L1]; i++) chroma_weight_l1_flag[i] = get_bits1(gb); } else { for (i = 0; i < s->sh.nb_refs[L1]; i++) chroma_weight_l1_flag[i] = 0; } for (i = 0; i < s->sh.nb_refs[L1]; i++) { if (luma_weight_l1_flag[i]) { int delta_luma_weight_l1 = get_se_golomb(gb); s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1; s->sh.luma_offset_l1[i] = get_se_golomb(gb); } if (chroma_weight_l1_flag[i]) { for (j = 0; j < 2; j++) { int delta_chroma_weight_l1 = get_se_golomb(gb); int delta_chroma_offset_l1 = get_se_golomb(gb); s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1; s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j]) >> s->sh.chroma_log2_weight_denom) + 128), -128, 127); } } else { s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l1[i][0] = 0; s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l1[i][1] = 0; } } } }"} {"target": 0, "idx": 7021, "func": "void slirp_output(const uint8_t *pkt, int pkt_len) { #ifdef DEBUG_SLIRP printf(\"slirp output:\\n\"); hex_dump(stdout, pkt, pkt_len); #endif if (!slirp_vc) return; qemu_send_packet(slirp_vc, pkt, pkt_len); }"} {"target": 0, "idx": 7026, "func": "static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[8], int qp[2] ) { int i; for( i = 0; i < 16; i++, pix += stride) { int index_a; int alpha; int beta; int qp_index; int bS_index = (i >> 1); if (!MB_FIELD) { bS_index &= ~1; bS_index |= (i & 1); } if( bS[bS_index] == 0 ) { continue; } qp_index = MB_FIELD ? (i >> 3) : (i & 1); index_a = qp[qp_index] + h->slice_alpha_c0_offset; alpha = (alpha_table+52)[index_a]; beta = (beta_table+52)[qp[qp_index] + h->slice_beta_offset]; if( bS[bS_index] < 4 ) { const int tc0 = (tc0_table+52)[index_a][bS[bS_index] - 1]; const int p0 = pix[-1]; const int p1 = pix[-2]; const int p2 = pix[-3]; const int q0 = pix[0]; const int q1 = pix[1]; const int q2 = pix[2]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { int tc = tc0; int i_delta; if( FFABS( p2 - p0 ) < beta ) { pix[-2] = p1 + av_clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 ); tc++; } if( FFABS( q2 - q0 ) < beta ) { pix[1] = q1 + av_clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 ); tc++; } i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-1] = av_clip_uint8( p0 + i_delta ); /* p0' */ pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */ tprintf(h->s.avctx, \"filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\\n\", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1); } }else{ const int p0 = pix[-1]; const int p1 = pix[-2]; const int p2 = pix[-3]; const int q0 = pix[0]; const int q1 = pix[1]; const int q2 = pix[2]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){ if( FFABS( p2 - p0 ) < beta) { const int p3 = pix[-4]; /* p0', p1', p2' */ pix[-1] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3; pix[-2] = ( p2 + p1 + p0 + q0 + 2 ) >> 2; pix[-3] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3; } else { /* p0' */ pix[-1] = ( 2*p1 + p0 + 
q1 + 2 ) >> 2; } if( FFABS( q2 - q0 ) < beta) { const int q3 = pix[3]; /* q0', q1', q2' */ pix[0] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3; pix[1] = ( p0 + q0 + q1 + q2 + 2 ) >> 2; pix[2] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3; } else { /* q0' */ pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } }else{ /* p0', q0' */ pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; pix[ 0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } tprintf(h->s.avctx, \"filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d\\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\\n\", i, qp[qp_index], index_a, alpha, beta, p2, p1, p0, q0, q1, q2, pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]); } } } }"} {"target": 0, "idx": 7037, "func": "static void do_audio_out(AVFormatContext *s, AVOutputStream *ost, AVInputStream *ist, unsigned char *buf, int size) { uint8_t *buftmp; static uint8_t *audio_buf = NULL; static uint8_t *audio_out = NULL; const int audio_out_size= 4*MAX_AUDIO_PACKET_SIZE; int size_out, frame_bytes, ret; AVCodecContext *enc; /* SC: dynamic allocation of buffers */ if (!audio_buf) audio_buf = av_malloc(2*MAX_AUDIO_PACKET_SIZE); if (!audio_out) audio_out = av_malloc(audio_out_size); if (!audio_buf || !audio_out) return; /* Should signal an error ! */ enc = &ost->st->codec; if (ost->audio_resample) { buftmp = audio_buf; size_out = audio_resample(ost->resample, (short *)buftmp, (short *)buf, size / (ist->st->codec.channels * 2)); size_out = size_out * enc->channels * 2; } else { buftmp = buf; size_out = size; } /* now encode as many frames as possible */ if (enc->frame_size > 1) { /* output resampled raw samples */ fifo_write(&ost->fifo, buftmp, size_out, &ost->fifo.wptr); frame_bytes = enc->frame_size * 2 * enc->channels; while (fifo_read(&ost->fifo, audio_buf, frame_bytes, &ost->fifo.rptr) == 0) { AVPacket pkt; av_init_packet(&pkt); ret = avcodec_encode_audio(enc, audio_out, audio_out_size, (short *)audio_buf); audio_size += ret; pkt.stream_index= ost->index; pkt.data= audio_out; pkt.size= ret; if(enc->coded_frame) pkt.pts= enc->coded_frame->pts; pkt.flags |= PKT_FLAG_KEY; av_write_frame(s, &pkt); } } else { AVPacket pkt; av_init_packet(&pkt); /* output a pcm frame */ /* XXX: change encoding codec API to avoid this ? */ switch(enc->codec->id) { case CODEC_ID_PCM_S16LE: case CODEC_ID_PCM_S16BE: case CODEC_ID_PCM_U16LE: case CODEC_ID_PCM_U16BE: break; default: size_out = size_out >> 1; break; } ret = avcodec_encode_audio(enc, audio_out, size_out, (short *)buftmp); audio_size += ret; pkt.stream_index= ost->index; pkt.data= audio_out; pkt.size= ret; if(enc->coded_frame) pkt.pts= enc->coded_frame->pts; pkt.flags |= PKT_FLAG_KEY; av_write_frame(s, &pkt); } }"} {"target": 0, "idx": 7055, "func": "static int libopus_encode(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { LibopusEncContext *opus = avctx->priv_data; const int sample_size = avctx->channels * av_get_bytes_per_sample(avctx->sample_fmt); uint8_t *audio; int ret; int discard_padding; if (frame) { ret = ff_af_queue_add(&opus->afq, frame); if (ret < 0) return ret; if (frame->nb_samples < opus->opts.packet_size) { audio = opus->samples; memcpy(audio, frame->data[0], frame->nb_samples * sample_size); } else audio = frame->data[0]; } else { if (!opus->afq.remaining_samples) return 0; audio = opus->samples; memset(audio, 0, opus->opts.packet_size * sample_size); } /* Maximum packet size taken from opusenc in opus-tools. 60ms packets * consist of 3 frames in one packet. 
The maximum frame size is 1275 * bytes along with the largest possible packet header of 7 bytes. */ if ((ret = ff_alloc_packet2(avctx, avpkt, (1275 * 3 + 7) * opus->stream_count, 0)) < 0) return ret; if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT) ret = opus_multistream_encode_float(opus->enc, (float *)audio, opus->opts.packet_size, avpkt->data, avpkt->size); else ret = opus_multistream_encode(opus->enc, (opus_int16 *)audio, opus->opts.packet_size, avpkt->data, avpkt->size); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Error encoding frame: %s\\n\", opus_strerror(ret)); return ff_opus_error_to_averror(ret); } av_shrink_packet(avpkt, ret); ff_af_queue_remove(&opus->afq, opus->opts.packet_size, &avpkt->pts, &avpkt->duration); discard_padding = opus->opts.packet_size - avpkt->duration; // Check if subtraction resulted in an overflow if ((discard_padding < opus->opts.packet_size) != (avpkt->duration > 0)) { av_free_packet(avpkt); av_free(avpkt); return AVERROR(EINVAL); } if (discard_padding > 0) { uint8_t* side_data = av_packet_new_side_data(avpkt, AV_PKT_DATA_SKIP_SAMPLES, 10); if(!side_data) { av_free_packet(avpkt); av_free(avpkt); return AVERROR(ENOMEM); } AV_WL32(side_data + 4, discard_padding); } *got_packet_ptr = 1; return 0; }"} {"target": 1, "idx": 7056, "func": "static void vnc_connect(VncDisplay *vd, int csock, int skipauth, bool websocket) { VncState *vs = g_malloc0(sizeof(VncState)); int i; vs->csock = csock; if (skipauth) { vs->auth = VNC_AUTH_NONE; #ifdef CONFIG_VNC_TLS vs->subauth = VNC_AUTH_INVALID; #endif } else { vs->auth = vd->auth; #ifdef CONFIG_VNC_TLS vs->subauth = vd->subauth; #endif } vs->lossy_rect = g_malloc0(VNC_STAT_ROWS * sizeof (*vs->lossy_rect)); for (i = 0; i < VNC_STAT_ROWS; ++i) { vs->lossy_rect[i] = g_malloc0(VNC_STAT_COLS * sizeof (uint8_t)); } VNC_DEBUG(\"New client on socket %d\\n\", csock); dcl->idle = 0; socket_set_nonblock(vs->csock); #ifdef CONFIG_VNC_WS if (websocket) { vs->websocket = 1; qemu_set_fd_handler2(vs->csock, NULL, vncws_handshake_read, NULL, vs); } else #endif /* CONFIG_VNC_WS */ { qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, NULL, vs); } vnc_client_cache_addr(vs); vnc_qmp_event(vs, QEVENT_VNC_CONNECTED); vnc_set_share_mode(vs, VNC_SHARE_MODE_CONNECTING); vs->vd = vd; #ifdef CONFIG_VNC_WS if (!vs->websocket) #endif { vnc_init_state(vs); } }"} {"target": 1, "idx": 7059, "func": "static void v9fs_open(void *opaque) { int flags; int iounit; int32_t fid; int32_t mode; V9fsQID qid; ssize_t err = 0; size_t offset = 7; struct stat stbuf; V9fsFidState *fidp; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; if (s->proto_version == V9FS_PROTO_2000L) { pdu_unmarshal(pdu, offset, \"dd\", &fid, &mode); } else { pdu_unmarshal(pdu, offset, \"db\", &fid, &mode); } trace_v9fs_open(pdu->tag, pdu->id, fid, mode); fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } BUG_ON(fidp->fid_type != P9_FID_NONE); err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); if (err < 0) { goto out; } stat_to_qid(&stbuf, &qid); if (S_ISDIR(stbuf.st_mode)) { err = v9fs_co_opendir(pdu, fidp); if (err < 0) { goto out; } fidp->fid_type = P9_FID_DIR; offset += pdu_marshal(pdu, offset, \"Qd\", &qid, 0); err = offset; } else { if (s->proto_version == V9FS_PROTO_2000L) { flags = get_dotl_openflags(s, mode); } else { flags = omode_to_uflags(mode); } err = v9fs_co_open(pdu, fidp, flags); if (err < 0) { goto out; } fidp->fid_type = P9_FID_FILE; fidp->open_flags = flags; if (flags & O_EXCL) { /* * We let the host file system do O_EXCL check * We should not reclaim 
such fd */ fidp->flags |= FID_NON_RECLAIMABLE; } iounit = get_iounit(pdu, &fidp->path); offset += pdu_marshal(pdu, offset, \"Qd\", &qid, iounit); err = offset; } out: put_fid(pdu, fidp); out_nofid: complete_pdu(s, pdu, err); }"} {"target": 0, "idx": 7070, "func": "static void tcp_chr_disconnect(CharDriverState *chr) { TCPCharDriver *s = chr->opaque; s->connected = 0; if (s->listen_chan) { s->listen_tag = g_io_add_watch(s->listen_chan, G_IO_IN, tcp_chr_accept, chr); } remove_fd_in_watch(chr); g_io_channel_unref(s->chan); s->chan = NULL; closesocket(s->fd); s->fd = -1; SocketAddress_to_str(chr->filename, CHR_MAX_FILENAME_SIZE, \"disconnected:\", s->addr, s->is_listen, s->is_telnet); qemu_chr_be_event(chr, CHR_EVENT_CLOSED); if (s->reconnect_time) { qemu_chr_socket_restart_timer(chr); } }"} {"target": 0, "idx": 7073, "func": "float64 HELPER(ucf64_si2df)(float32 x, CPUUniCore32State *env) { return int32_to_float64(ucf64_stoi(x), &env->ucf64.fp_status); }"} {"target": 0, "idx": 7076, "func": "static QObject *parse_literal(JSONParserContext *ctxt) { QObject *token; token = parser_context_pop_token(ctxt); assert(token); switch (token_get_type(token)) { case JSON_STRING: return QOBJECT(qstring_from_escaped_str(ctxt, token)); case JSON_INTEGER: { /* A possibility exists that this is a whole-valued float where the * fractional part was left out due to being 0 (.0). It's not a big * deal to treat these as ints in the parser, so long as users of the * resulting QObject know to expect a QInt in place of a QFloat in * cases like these. * * However, in some cases these values will overflow/underflow a * QInt/int64 container, thus we should assume these are to be handled * as QFloats/doubles rather than silently changing their values. * * strtoll() indicates these instances by setting errno to ERANGE */ int64_t value; errno = 0; /* strtoll doesn't set errno on success */ value = strtoll(token_get_value(token), NULL, 10); if (errno != ERANGE) { return QOBJECT(qint_from_int(value)); } /* fall through to JSON_FLOAT */ } case JSON_FLOAT: /* FIXME dependent on locale */ return QOBJECT(qfloat_from_double(strtod(token_get_value(token), NULL))); default: abort(); } }"} {"target": 0, "idx": 7078, "func": "uint64_t esp_reg_read(ESPState *s, uint32_t saddr) { uint32_t old_val; trace_esp_mem_readb(saddr, s->rregs[saddr]); switch (saddr) { case ESP_FIFO: if (s->ti_size > 0) { s->ti_size--; if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) { /* Data out. */ qemu_log_mask(LOG_UNIMP, \"esp: PIO data read not implemented\\n\"); s->rregs[ESP_FIFO] = 0; } else { s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++]; } esp_raise_irq(s); } if (s->ti_size == 0) { s->ti_rptr = 0; s->ti_wptr = 0; } break; case ESP_RINTR: /* Clear sequence step, interrupt register and all status bits except TC */ old_val = s->rregs[ESP_RINTR]; s->rregs[ESP_RINTR] = 0; s->rregs[ESP_RSTAT] &= ~STAT_TC; s->rregs[ESP_RSEQ] = SEQ_CD; esp_lower_irq(s); return old_val; case ESP_TCHI: /* Return the unique id if the value has never been written */ if (!s->tchi_written) { return s->chip_id; } default: break; } return s->rregs[saddr]; }"} {"target": 0, "idx": 7083, "func": "static void co_test_cb(void *opaque) { WorkerTestData *data = opaque; active = 1; data->n = 0; data->ret = -EINPROGRESS; thread_pool_submit_co(worker_cb, data); /* The test continues in test_submit_co, after qemu_coroutine_enter... */ g_assert_cmpint(data->n, ==, 1); data->ret = 0; active--; /* The test continues in test_submit_co, after qemu_aio_wait_all... 
*/ }"} {"target": 0, "idx": 7089, "func": "print_insn_microblaze (bfd_vma memaddr, struct disassemble_info * info) { fprintf_ftype fprintf = info->fprintf_func; void * stream = info->stream; unsigned long inst, prev_inst; struct op_code_struct * op, *pop; int immval = 0; bfd_boolean immfound = FALSE; static bfd_vma prev_insn_addr = -1; /*init the prev insn addr */ static int prev_insn_vma = -1; /*init the prev insn vma */ int curr_insn_vma = info->buffer_vma; info->bytes_per_chunk = 4; inst = read_insn_microblaze (memaddr, info, &op); if (inst == 0) { return -1; } if (prev_insn_vma == curr_insn_vma) { if (memaddr-(info->bytes_per_chunk) == prev_insn_addr) { prev_inst = read_insn_microblaze (prev_insn_addr, info, &pop); if (prev_inst == 0) return -1; if (pop->instr == imm) { immval = (get_int_field_imm(prev_inst) << 16) & 0xffff0000; immfound = TRUE; } else { immval = 0; immfound = FALSE; } } } /* make curr insn as prev insn */ prev_insn_addr = memaddr; prev_insn_vma = curr_insn_vma; if (op->name == 0) { fprintf (stream, \".short 0x%04lx\", inst); } else { fprintf (stream, \"%s\", op->name); switch (op->inst_type) { case INST_TYPE_RD_R1_R2: fprintf(stream, \"\\t%s, %s, %s\", get_field_rd(inst), get_field_r1(inst), get_field_r2(inst)); break; case INST_TYPE_RD_R1_IMM: fprintf(stream, \"\\t%s, %s, %s\", get_field_rd(inst), get_field_r1(inst), get_field_imm(inst)); if (info->print_address_func && get_int_field_r1(inst) == 0 && info->symbol_at_address_func) { if (immfound) immval |= (get_int_field_imm(inst) & 0x0000ffff); else { immval = get_int_field_imm(inst); if (immval & 0x8000) immval |= 0xFFFF0000; } if (immval > 0 && info->symbol_at_address_func(immval, info)) { fprintf (stream, \"\\t// \"); info->print_address_func (immval, info); } } break; case INST_TYPE_RD_R1_IMM5: fprintf(stream, \"\\t%s, %s, %s\", get_field_rd(inst), get_field_r1(inst), get_field_imm5(inst)); break; case INST_TYPE_RD_RFSL: fprintf(stream, \"\\t%s, %s\", get_field_rd(inst), get_field_rfsl(inst)); break; case INST_TYPE_R1_RFSL: fprintf(stream, \"\\t%s, %s\", get_field_r1(inst), get_field_rfsl(inst)); break; case INST_TYPE_RD_SPECIAL: fprintf(stream, \"\\t%s, %s\", get_field_rd(inst), get_field_special(inst, op)); break; case INST_TYPE_SPECIAL_R1: fprintf(stream, \"\\t%s, %s\", get_field_special(inst, op), get_field_r1(inst)); break; case INST_TYPE_RD_R1: fprintf(stream, \"\\t%s, %s\", get_field_rd(inst), get_field_r1(inst)); break; case INST_TYPE_R1_R2: fprintf(stream, \"\\t%s, %s\", get_field_r1(inst), get_field_r2(inst)); break; case INST_TYPE_R1_IMM: fprintf(stream, \"\\t%s, %s\", get_field_r1(inst), get_field_imm(inst)); /* The non-pc relative instructions are returns, which shouldn't have a label printed */ if (info->print_address_func && op->inst_offset_type == INST_PC_OFFSET && info->symbol_at_address_func) { if (immfound) immval |= (get_int_field_imm(inst) & 0x0000ffff); else { immval = get_int_field_imm(inst); if (immval & 0x8000) immval |= 0xFFFF0000; } immval += memaddr; if (immval > 0 && info->symbol_at_address_func(immval, info)) { fprintf (stream, \"\\t// \"); info->print_address_func (immval, info); } else { fprintf (stream, \"\\t\\t// \"); fprintf (stream, \"%x\", immval); } } break; case INST_TYPE_RD_IMM: fprintf(stream, \"\\t%s, %s\", get_field_rd(inst), get_field_imm(inst)); if (info->print_address_func && info->symbol_at_address_func) { if (immfound) immval |= (get_int_field_imm(inst) & 0x0000ffff); else { immval = get_int_field_imm(inst); if (immval & 0x8000) immval |= 0xFFFF0000; } if 
(op->inst_offset_type == INST_PC_OFFSET) immval += (int) memaddr; if (info->symbol_at_address_func(immval, info)) { fprintf (stream, \"\\t// \"); info->print_address_func (immval, info); } } break; case INST_TYPE_IMM: fprintf(stream, \"\\t%s\", get_field_imm(inst)); if (info->print_address_func && info->symbol_at_address_func && op->instr != imm) { if (immfound) immval |= (get_int_field_imm(inst) & 0x0000ffff); else { immval = get_int_field_imm(inst); if (immval & 0x8000) immval |= 0xFFFF0000; } if (op->inst_offset_type == INST_PC_OFFSET) immval += (int) memaddr; if (immval > 0 && info->symbol_at_address_func(immval, info)) { fprintf (stream, \"\\t// \"); info->print_address_func (immval, info); } else if (op->inst_offset_type == INST_PC_OFFSET) { fprintf (stream, \"\\t\\t// \"); fprintf (stream, \"%x\", immval); } } break; case INST_TYPE_RD_R2: fprintf(stream, \"\\t%s, %s\", get_field_rd(inst), get_field_r2(inst)); break; case INST_TYPE_R2: fprintf(stream, \"\\t%s\", get_field_r2(inst)); break; case INST_TYPE_R1: fprintf(stream, \"\\t%s\", get_field_r1(inst)); break; case INST_TYPE_RD_R1_SPECIAL: fprintf(stream, \"\\t%s, %s\", get_field_rd(inst), get_field_r2(inst)); break; case INST_TYPE_RD_IMM15: fprintf(stream, \"\\t%s, %s\", get_field_rd(inst), get_field_imm15(inst)); break; /* For tuqula instruction */ case INST_TYPE_RD: fprintf(stream, \"\\t%s\", get_field_rd(inst)); break; case INST_TYPE_RFSL: fprintf(stream, \"\\t%s\", get_field_rfsl(inst)); break; default: /* if the disassembler lags the instruction set */ fprintf (stream, \"\\tundecoded operands, inst is 0x%04lx\", inst); break; } } /* Say how many bytes we consumed? */ return 4; }"} {"target": 0, "idx": 7090, "func": "int nbd_client_session_co_flush(NbdClientSession *client) { struct nbd_request request = { .type = NBD_CMD_FLUSH }; struct nbd_reply reply; ssize_t ret; if (!(client->nbdflags & NBD_FLAG_SEND_FLUSH)) { return 0; } if (client->nbdflags & NBD_FLAG_SEND_FUA) { request.type |= NBD_CMD_FLAG_FUA; } request.from = 0; request.len = 0; nbd_coroutine_start(client, &request); ret = nbd_co_send_request(client, &request, NULL, 0); if (ret < 0) { reply.error = -ret; } else { nbd_co_receive_reply(client, &request, &reply, NULL, 0); } nbd_coroutine_end(client, &request); return -reply.error; }"} {"target": 0, "idx": 7115, "func": "static void i440fx_pcihost_initfn(Object *obj) { PCIHostState *s = PCI_HOST_BRIDGE(obj); I440FXState *d = I440FX_PCI_HOST_BRIDGE(obj); memory_region_init_io(&s->conf_mem, obj, &pci_host_conf_le_ops, s, \"pci-conf-idx\", 4); memory_region_init_io(&s->data_mem, obj, &pci_host_data_le_ops, s, \"pci-conf-data\", 4); object_property_add(obj, PCI_HOST_PROP_PCI_HOLE_START, \"int\", i440fx_pcihost_get_pci_hole_start, NULL, NULL, NULL, NULL); object_property_add(obj, PCI_HOST_PROP_PCI_HOLE_END, \"int\", i440fx_pcihost_get_pci_hole_end, NULL, NULL, NULL, NULL); object_property_add(obj, PCI_HOST_PROP_PCI_HOLE64_START, \"int\", i440fx_pcihost_get_pci_hole64_start, NULL, NULL, NULL, NULL); object_property_add(obj, PCI_HOST_PROP_PCI_HOLE64_END, \"int\", i440fx_pcihost_get_pci_hole64_end, NULL, NULL, NULL, NULL); d->pci_info.w32.end = IO_APIC_DEFAULT_ADDRESS; }"} {"target": 1, "idx": 7138, "func": "int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options) { AVIOInternal *internal; const URLProtocol **protocols; URLContext *h; int err; protocols = ffurl_get_protocols(NULL, NULL); if (!protocols) return AVERROR(ENOMEM); err = ffurl_open(&h, filename, flags, 
int_cb, options, protocols); if (err < 0) { av_freep(&protocols); return err; } err = ffio_fdopen(s, h); if (err < 0) { ffurl_close(h); av_freep(&protocols); return err; } internal = (*s)->opaque; internal->protocols = protocols; return 0; }"} {"target": 1, "idx": 7139, "func": "static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; int time_incr, time_increment; int64_t pts; s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0 , P = 1 */ if (s->pict_type == AV_PICTURE_TYPE_B && s->low_delay && s->vol_control_parameters == 0 && !(s->flags & CODEC_FLAG_LOW_DELAY)) { av_log(s->avctx, AV_LOG_ERROR, \"low_delay flag incorrectly, clearing it\\n\"); s->low_delay = 0; } s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B; if (s->partitioned_frame) s->decode_mb = mpeg4_decode_partitioned_mb; else s->decode_mb = mpeg4_decode_mb; time_incr = 0; while (get_bits1(gb) != 0) time_incr++; check_marker(gb, \"before time_increment\"); if (ctx->time_increment_bits == 0 || !(show_bits(gb, ctx->time_increment_bits + 1) & 1)) { av_log(s->avctx, AV_LOG_ERROR, \"hmm, seems the headers are not complete, trying to guess time_increment_bits\\n\"); for (ctx->time_increment_bits = 1; ctx->time_increment_bits < 16; ctx->time_increment_bits++) { if (s->pict_type == AV_PICTURE_TYPE_P || (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE)) { if ((show_bits(gb, ctx->time_increment_bits + 6) & 0x37) == 0x30) break; } else if ((show_bits(gb, ctx->time_increment_bits + 5) & 0x1F) == 0x18) break; } av_log(s->avctx, AV_LOG_ERROR, \"my guess is %d bits ;)\\n\", ctx->time_increment_bits); if (s->avctx->time_base.den && 4*s->avctx->time_base.den < 1<time_increment_bits) { s->avctx->time_base.den = 1<time_increment_bits; } } if (IS_3IV1) time_increment = get_bits1(gb); // FIXME investigate further else time_increment = get_bits(gb, ctx->time_increment_bits); if (s->pict_type != AV_PICTURE_TYPE_B) { s->last_time_base = s->time_base; s->time_base += time_incr; s->time = s->time_base * s->avctx->time_base.den + time_increment; if (s->workaround_bugs & FF_BUG_UMP4) { if (s->time < s->last_non_b_time) { /* header is not mpeg-4-compatible, broken encoder, * trying to workaround */ s->time_base++; s->time += s->avctx->time_base.den; } } s->pp_time = s->time - s->last_non_b_time; s->last_non_b_time = s->time; } else { s->time = (s->last_time_base + time_incr) * s->avctx->time_base.den + time_increment; s->pb_time = s->pp_time - (s->last_non_b_time - s->time); if (s->pp_time <= s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time <= 0) { /* messed up order, maybe after seeking? 
skipping current b-frame */ return FRAME_SKIPPED; } ff_mpeg4_init_direct_mv(s); if (ctx->t_frame == 0) ctx->t_frame = s->pb_time; if (ctx->t_frame == 0) ctx->t_frame = 1; // 1/0 protection s->pp_field_time = (ROUNDED_DIV(s->last_non_b_time, ctx->t_frame) - ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2; s->pb_field_time = (ROUNDED_DIV(s->time, ctx->t_frame) - ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2; if (!s->progressive_sequence) { if (s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1) return FRAME_SKIPPED; } } if (s->avctx->time_base.num) pts = ROUNDED_DIV(s->time, s->avctx->time_base.num); else pts = AV_NOPTS_VALUE; if (s->avctx->debug&FF_DEBUG_PTS) av_log(s->avctx, AV_LOG_DEBUG, \"MPEG4 PTS: %\"PRId64\"\\n\", pts); check_marker(gb, \"before vop_coded\"); /* vop coded */ if (get_bits1(gb) != 1) { if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_ERROR, \"vop not coded\\n\"); return FRAME_SKIPPED; } if (ctx->new_pred) decode_new_pred(ctx, gb); if (ctx->shape != BIN_ONLY_SHAPE && (s->pict_type == AV_PICTURE_TYPE_P || (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE))) { /* rounding type for motion estimation */ s->no_rounding = get_bits1(gb); } else { s->no_rounding = 0; } // FIXME reduced res stuff if (ctx->shape != RECT_SHAPE) { if (ctx->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) { skip_bits(gb, 13); /* width */ skip_bits1(gb); /* marker */ skip_bits(gb, 13); /* height */ skip_bits1(gb); /* marker */ skip_bits(gb, 13); /* hor_spat_ref */ skip_bits1(gb); /* marker */ skip_bits(gb, 13); /* ver_spat_ref */ } skip_bits1(gb); /* change_CR_disable */ if (get_bits1(gb) != 0) skip_bits(gb, 8); /* constant_alpha_value */ } // FIXME complexity estimation stuff if (ctx->shape != BIN_ONLY_SHAPE) { skip_bits_long(gb, ctx->cplx_estimation_trash_i); if (s->pict_type != AV_PICTURE_TYPE_I) skip_bits_long(gb, ctx->cplx_estimation_trash_p); if (s->pict_type == AV_PICTURE_TYPE_B) skip_bits_long(gb, ctx->cplx_estimation_trash_b); if (get_bits_left(gb) < 3) { av_log(s->avctx, AV_LOG_ERROR, \"Header truncated\\n\"); return -1; } ctx->intra_dc_threshold = ff_mpeg4_dc_threshold[get_bits(gb, 3)]; if (!s->progressive_sequence) { s->top_field_first = get_bits1(gb); s->alternate_scan = get_bits1(gb); } else s->alternate_scan = 0; } if (s->alternate_scan) { ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } else { ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } if (s->pict_type == AV_PICTURE_TYPE_S && (ctx->vol_sprite_usage == STATIC_SPRITE || ctx->vol_sprite_usage == GMC_SPRITE)) { if (mpeg4_decode_sprite_trajectory(ctx, gb) < 0) return AVERROR_INVALIDDATA; if (ctx->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, \"sprite_brightness_change not supported\\n\"); if (ctx->vol_sprite_usage == STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, \"static sprite not supported\\n\"); } 
if (ctx->shape != BIN_ONLY_SHAPE) { s->chroma_qscale = s->qscale = get_bits(gb, s->quant_precision); if (s->qscale == 0) { av_log(s->avctx, AV_LOG_ERROR, \"Error, header damaged or not MPEG4 header (qscale=0)\\n\"); return -1; // makes no sense to continue, as there is nothing left from the image then } if (s->pict_type != AV_PICTURE_TYPE_I) { s->f_code = get_bits(gb, 3); /* fcode_for */ if (s->f_code == 0) { av_log(s->avctx, AV_LOG_ERROR, \"Error, header damaged or not MPEG4 header (f_code=0)\\n\"); s->f_code = 1; return -1; // makes no sense to continue, as there is nothing left from the image then } } else s->f_code = 1; if (s->pict_type == AV_PICTURE_TYPE_B) { s->b_code = get_bits(gb, 3); if (s->b_code == 0) { av_log(s->avctx, AV_LOG_ERROR, \"Error, header damaged or not MPEG4 header (b_code=0)\\n\"); s->b_code=1; return -1; // makes no sense to continue, as the MV decoding will break very quickly } } else s->b_code = 1; if (s->avctx->debug & FF_DEBUG_PICT_INFO) { av_log(s->avctx, AV_LOG_DEBUG, \"qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d time:%\"PRId64\" tincr:%d\\n\", s->qscale, s->f_code, s->b_code, s->pict_type == AV_PICTURE_TYPE_I ? \"I\" : (s->pict_type == AV_PICTURE_TYPE_P ? \"P\" : (s->pict_type == AV_PICTURE_TYPE_B ? \"B\" : \"S\")), gb->size_in_bits,s->progressive_sequence, s->alternate_scan, s->top_field_first, s->quarter_sample ? \"q\" : \"h\", s->data_partitioning, ctx->resync_marker, ctx->num_sprite_warping_points, s->sprite_warping_accuracy, 1 - s->no_rounding, s->vo_type, s->vol_control_parameters ? \" VOLC\" : \" \", ctx->intra_dc_threshold, ctx->cplx_estimation_trash_i, ctx->cplx_estimation_trash_p, ctx->cplx_estimation_trash_b, s->time, time_increment ); } if (!ctx->scalability) { if (ctx->shape != RECT_SHAPE && s->pict_type != AV_PICTURE_TYPE_I) skip_bits1(gb); // vop shape coding type } else { if (ctx->enhancement_type) { int load_backward_shape = get_bits1(gb); if (load_backward_shape) av_log(s->avctx, AV_LOG_ERROR, \"load backward shape isn't supported\\n\"); } skip_bits(gb, 2); // ref_select_code } } /* detect buggy encoders which don't set the low_delay flag * (divx4/xvid/opendivx). Note we cannot detect divx5 without b-frames * easily (although it's buggy too) */ if (s->vo_type == 0 && s->vol_control_parameters == 0 && ctx->divx_version == -1 && s->picture_number == 0) { av_log(s->avctx, AV_LOG_WARNING, \"looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\\n\"); s->low_delay = 1; } s->picture_number++; // better than pic number==0 always ;) // FIXME add short header support s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table; s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table; if (s->workaround_bugs & FF_BUG_EDGE) { s->h_edge_pos = s->width; s->v_edge_pos = s->height; } return 0; }"} {"target": 1, "idx": 7152, "func": "void qemu_mutex_unlock_iothread(void) {}"} {"target": 1, "idx": 7157, "func": "static void parallel_isa_realizefn(DeviceState *dev, Error **errp) { static int index; ISADevice *isadev = ISA_DEVICE(dev); ISAParallelState *isa = ISA_PARALLEL(dev); ParallelState *s = &isa->state; int base; uint8_t dummy; if (!s->chr) { error_setg(errp, \"Can't create parallel device, empty char device\"); return; } if (isa->index == -1) { isa->index = index; } if (isa->index >= MAX_PARALLEL_PORTS) { error_setg(errp, \"Max. 
supported number of parallel ports is %d.\", MAX_PARALLEL_PORTS); return; } if (isa->iobase == -1) { isa->iobase = isa_parallel_io[isa->index]; } index++; base = isa->iobase; isa_init_irq(isadev, &s->irq, isa->isairq); qemu_register_reset(parallel_reset, s); if (qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_READ_STATUS, &dummy) == 0) { s->hw_driver = 1; s->status = dummy; } isa_register_portio_list(isadev, base, (s->hw_driver ? &isa_parallel_portio_hw_list[0] : &isa_parallel_portio_sw_list[0]), s, \"parallel\"); }"} {"target": 1, "idx": 7164, "func": "static void test_bmdma_long_prdt(void) { QPCIDevice *dev; QPCIBar bmdma_bar, ide_bar; uint8_t status; PrdtEntry prdt[] = { { .addr = 0, .size = cpu_to_le32(0x1000 | PRDT_EOT), }, }; dev = get_pci_device(&bmdma_bar, &ide_bar); /* Normal request */ status = send_dma_request(CMD_READ_DMA, 0, 1, prdt, ARRAY_SIZE(prdt), NULL); g_assert_cmphex(status, ==, BM_STS_ACTIVE | BM_STS_INTR); assert_bit_clear(qpci_io_readb(dev, ide_bar, reg_status), DF | ERR); /* Abort the request before it completes */ status = send_dma_request(CMD_READ_DMA | CMDF_ABORT, 0, 1, prdt, ARRAY_SIZE(prdt), NULL); g_assert_cmphex(status, ==, BM_STS_INTR); assert_bit_clear(qpci_io_readb(dev, ide_bar, reg_status), DF | ERR); }"} {"target": 1, "idx": 7169, "func": "static void memory_region_update_coalesced_range(MemoryRegion *mr) { FlatRange *fr; CoalescedMemoryRange *cmr; AddrRange tmp; FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) { if (fr->mr == mr) { qemu_unregister_coalesced_mmio(int128_get64(fr->addr.start), int128_get64(fr->addr.size)); QTAILQ_FOREACH(cmr, &mr->coalesced, link) { tmp = addrrange_shift(cmr->addr, int128_sub(fr->addr.start, int128_make64(fr->offset_in_region))); if (!addrrange_intersects(tmp, fr->addr)) { continue; } tmp = addrrange_intersection(tmp, fr->addr); qemu_register_coalesced_mmio(int128_get64(tmp.start), int128_get64(tmp.size)); } } } }"} {"target": 1, "idx": 7171, "func": "static int qxl_init_secondary(PCIDevice *dev) { static int device_id = 1; PCIQXLDevice *qxl = DO_UPCAST(PCIQXLDevice, pci, dev); qxl->id = device_id++; qxl_init_ramsize(qxl, 16); memory_region_init_ram(&qxl->vga.vram, \"qxl.vgavram\", qxl->vga.vram_size); vmstate_register_ram(&qxl->vga.vram, &qxl->pci.qdev); qxl->vga.vram_ptr = memory_region_get_ram_ptr(&qxl->vga.vram); return qxl_init_common(qxl); }"} {"target": 1, "idx": 7180, "func": "static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { TCGMemOp opc = l->opc; TCGMemOp s_bits = opc & MO_SIZE; uint8_t **label_ptr = &l->label_ptr[0]; TCGReg retaddr; /* resolve label address */ *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4); if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4); } if (TCG_TARGET_REG_BITS == 32) { int ofs = 0; tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs); ofs += 4; tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs); ofs += 4; if (TARGET_LONG_BITS == 64) { tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs); ofs += 4; } tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs); ofs += 4; if (s_bits == MO_64) { tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs); ofs += 4; } tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index); ofs += 4; retaddr = TCG_REG_EAX; tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr); tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs); } else { tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); 
/* The second argument is already loaded with addrlo. */ tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), tcg_target_call_iarg_regs[2], l->datalo_reg); tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], l->mem_index); if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) { retaddr = tcg_target_call_iarg_regs[4]; tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); } else { retaddr = TCG_REG_RAX; tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0); } } /* \"Tail call\" to the helper, with the return address back inline. */ tcg_out_push(s, retaddr); tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[opc]); }"} {"target": 0, "idx": 7187, "func": "static int vorbis_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { vorbis_enc_context *venc = avctx->priv_data; int i, ret, need_more; int samples = 0, frame_size = 1 << (venc->log2_blocksize[1] - 1); vorbis_enc_mode *mode; vorbis_enc_mapping *mapping; PutBitContext pb; if (frame) { if ((ret = ff_af_queue_add(&venc->afq, frame)) < 0) return ret; ff_bufqueue_add(avctx, &venc->bufqueue, av_frame_clone(frame)); } else if (!venc->afq.remaining_samples) return 0; need_more = venc->bufqueue.available * avctx->frame_size < frame_size; need_more = frame && need_more; if (need_more) return 0; /* Pad the bufqueue with empty frames for encoding the last packet. */ if (!frame) { if (venc->bufqueue.available * avctx->frame_size < frame_size) { int frames_needed = (frame_size/avctx->frame_size) - venc->bufqueue.available; for (int i = 0; i < frames_needed; i++) { AVFrame *empty = spawn_empty_frame(avctx, venc->channels); if (!empty) return AVERROR(ENOMEM); ff_bufqueue_add(avctx, &venc->bufqueue, empty); } } } move_audio(venc, venc->scratch, &samples, avctx->frame_size); if (!apply_window_and_mdct(venc, venc->scratch, samples)) return 0; if ((ret = ff_alloc_packet2(avctx, avpkt, 8192, 0)) < 0) return ret; init_put_bits(&pb, avpkt->data, avpkt->size); if (pb.size_in_bits - put_bits_count(&pb) < 1 + ilog(venc->nmodes - 1)) { av_log(avctx, AV_LOG_ERROR, \"output buffer is too small\\n\"); return AVERROR(EINVAL); } put_bits(&pb, 1, 0); // magic bit put_bits(&pb, ilog(venc->nmodes - 1), 0); // 0 bits, the mode mode = &venc->modes[0]; mapping = &venc->mappings[mode->mapping]; if (mode->blockflag) { put_bits(&pb, 1, 0); put_bits(&pb, 1, 0); } for (i = 0; i < venc->channels; i++) { vorbis_enc_floor *fc = &venc->floors[mapping->floor[mapping->mux[i]]]; uint16_t posts[MAX_FLOOR_VALUES]; floor_fit(venc, fc, &venc->coeffs[i * samples], posts, samples); if (floor_encode(venc, fc, &pb, posts, &venc->floor[i * samples], samples)) { av_log(avctx, AV_LOG_ERROR, \"output buffer is too small\\n\"); return AVERROR(EINVAL); } } for (i = 0; i < venc->channels * samples; i++) venc->coeffs[i] /= venc->floor[i]; for (i = 0; i < mapping->coupling_steps; i++) { float *mag = venc->coeffs + mapping->magnitude[i] * samples; float *ang = venc->coeffs + mapping->angle[i] * samples; int j; for (j = 0; j < samples; j++) { float a = ang[j]; ang[j] -= mag[j]; if (mag[j] > 0) ang[j] = -ang[j]; if (ang[j] < 0) mag[j] = a; } } if (residue_encode(venc, &venc->residues[mapping->residue[mapping->mux[0]]], &pb, venc->coeffs, samples, venc->channels)) { av_log(avctx, AV_LOG_ERROR, \"output buffer is too small\\n\"); return AVERROR(EINVAL); } flush_put_bits(&pb); avpkt->size = put_bits_count(&pb) >> 3; ff_af_queue_remove(&venc->afq, frame_size, &avpkt->pts, &avpkt->duration); if 
(frame_size > avpkt->duration) { uint8_t *side = av_packet_new_side_data(avpkt, AV_PKT_DATA_SKIP_SAMPLES, 10); if (!side) return AVERROR(ENOMEM); AV_WL32(&side[4], frame_size - avpkt->duration); } *got_packet_ptr = 1; return 0; }"} {"target": 1, "idx": 7206, "func": "static void gd_set_keycode_type(GtkDisplayState *s) { #ifdef GDK_WINDOWING_X11 GdkDisplay *display = gtk_widget_get_display(s->window); if (GDK_IS_X11_DISPLAY(display)) { Display *x11_display = gdk_x11_display_get_xdisplay(display); XkbDescPtr desc = XkbGetKeyboard(x11_display, XkbGBN_AllComponentsMask, XkbUseCoreKbd); char *keycodes = NULL; if (desc && desc->names) { keycodes = XGetAtomName(x11_display, desc->names->keycodes); if (keycodes == NULL) { fprintf(stderr, \"could not lookup keycode name\\n\"); } else if (strstart(keycodes, \"evdev\", NULL)) { s->has_evdev = true; } else if (!strstart(keycodes, \"xfree86\", NULL)) { fprintf(stderr, \"unknown keycodes `%s', please report to \" \"qemu-devel@nongnu.org\\n\", keycodes); #endif"} {"target": 0, "idx": 7211, "func": "static void kvm_client_set_memory(struct CPUPhysMemoryClient *client, target_phys_addr_t start_addr, ram_addr_t size, ram_addr_t phys_offset) { kvm_set_phys_mem(start_addr, size, phys_offset); }"} {"target": 0, "idx": 7218, "func": "int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset, int count) { BdrvTrackedRequest req; int max_pdiscard, ret; int head, align; if (!bs->drv) { return -ENOMEDIUM; } ret = bdrv_check_byte_request(bs, offset, count); if (ret < 0) { return ret; } else if (bs->read_only) { return -EPERM; } assert(!(bs->open_flags & BDRV_O_INACTIVE)); /* Do nothing if disabled. */ if (!(bs->open_flags & BDRV_O_UNMAP)) { return 0; } if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) { return 0; } /* Discard is advisory, so ignore any unaligned head or tail */ align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment); assert(align % bs->bl.request_alignment == 0); head = offset % align; if (head) { head = MIN(count, align - head); count -= head; offset += head; } count = QEMU_ALIGN_DOWN(count, align); if (!count) { return 0; } bdrv_inc_in_flight(bs); tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD); ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req); if (ret < 0) { goto out; } max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX), align); assert(max_pdiscard); while (count > 0) { int ret; int num = MIN(count, max_pdiscard); if (bs->drv->bdrv_co_pdiscard) { ret = bs->drv->bdrv_co_pdiscard(bs, offset, num); } else { BlockAIOCB *acb; CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self(), }; acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num, bdrv_co_io_em_complete, &co); if (acb == NULL) { ret = -EIO; goto out; } else { qemu_coroutine_yield(); ret = co.ret; } } if (ret && ret != -ENOTSUP) { goto out; } offset += num; count -= num; } ret = 0; out: ++bs->write_gen; bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS, req.bytes >> BDRV_SECTOR_BITS); tracked_request_end(&req); bdrv_dec_in_flight(bs); return ret; }"} {"target": 0, "idx": 7231, "func": "static inline int fp_reg_offset(int regno, TCGMemOp size) { int offs = offsetof(CPUARMState, vfp.regs[regno * 2]); #ifdef HOST_WORDS_BIGENDIAN offs += (8 - (1 << size)); #endif return offs; }"} {"target": 1, "idx": 7250, "func": "static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, unsigned int src_size) { const uint8_t *s = src; const uint8_t *end; #ifdef HAVE_MMX const uint8_t *mm_end; 
#endif uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX __asm __volatile(PREFETCH\" %0\"::\"m\"(*src):\"memory\"); __asm __volatile( \"movq %0, %%mm7\\n\\t\" \"movq %1, %%mm6\\n\\t\" ::\"m\"(red_16mask),\"m\"(green_16mask)); mm_end = end - 15; while(s < mm_end) { __asm __volatile( PREFETCH\" 32%1\\n\\t\" \"movd %1, %%mm0\\n\\t\" \"movd 3%1, %%mm3\\n\\t\" \"punpckldq 6%1, %%mm0\\n\\t\" \"punpckldq 9%1, %%mm3\\n\\t\" \"movq %%mm0, %%mm1\\n\\t\" \"movq %%mm0, %%mm2\\n\\t\" \"movq %%mm3, %%mm4\\n\\t\" \"movq %%mm3, %%mm5\\n\\t\" \"psllq $8, %%mm0\\n\\t\" \"psllq $8, %%mm3\\n\\t\" \"pand %%mm7, %%mm0\\n\\t\" \"pand %%mm7, %%mm3\\n\\t\" \"psrlq $5, %%mm1\\n\\t\" \"psrlq $5, %%mm4\\n\\t\" \"pand %%mm6, %%mm1\\n\\t\" \"pand %%mm6, %%mm4\\n\\t\" \"psrlq $19, %%mm2\\n\\t\" \"psrlq $19, %%mm5\\n\\t\" \"pand %2, %%mm2\\n\\t\" \"pand %2, %%mm5\\n\\t\" \"por %%mm1, %%mm0\\n\\t\" \"por %%mm4, %%mm3\\n\\t\" \"por %%mm2, %%mm0\\n\\t\" \"por %%mm5, %%mm3\\n\\t\" \"psllq $16, %%mm3\\n\\t\" \"por %%mm3, %%mm0\\n\\t\" MOVNTQ\" %%mm0, %0\\n\\t\" :\"=m\"(*d):\"m\"(*s),\"m\"(blue_16mask):\"memory\"); d += 4; s += 12; } __asm __volatile(SFENCE:::\"memory\"); __asm __volatile(EMMS:::\"memory\"); #endif while(s < end) { const int r= *s++; const int g= *s++; const int b= *s++; *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); } }"} {"target": 0, "idx": 7256, "func": "static int probe_file(WriterContext *wctx, const char *filename) { AVFormatContext *fmt_ctx; int ret, i; int section_id; do_read_frames = do_show_frames || do_count_frames; do_read_packets = do_show_packets || do_count_packets; ret = open_input_file(&fmt_ctx, filename); if (ret < 0) return ret; nb_streams_frames = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_frames)); nb_streams_packets = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_packets)); selected_streams = av_calloc(fmt_ctx->nb_streams, sizeof(*selected_streams)); for (i = 0; i < fmt_ctx->nb_streams; i++) { if (stream_specifier) { ret = avformat_match_stream_specifier(fmt_ctx, fmt_ctx->streams[i], stream_specifier); if (ret < 0) goto end; else selected_streams[i] = ret; ret = 0; } else { selected_streams[i] = 1; } } if (do_read_frames || do_read_packets) { if (do_show_frames && do_show_packets && wctx->writer->flags & WRITER_FLAG_PUT_PACKETS_AND_FRAMES_IN_SAME_CHAPTER) section_id = SECTION_ID_PACKETS_AND_FRAMES; else if (do_show_packets && !do_show_frames) section_id = SECTION_ID_PACKETS; else // (!do_show_packets && do_show_frames) section_id = SECTION_ID_FRAMES; if (do_show_frames || do_show_packets) writer_print_section_header(wctx, section_id); read_packets(wctx, fmt_ctx); if (do_show_frames || do_show_packets) writer_print_section_footer(wctx); } if (do_show_programs) show_programs(wctx, fmt_ctx); if (do_show_streams) show_streams(wctx, fmt_ctx); if (do_show_chapters) show_chapters(wctx, fmt_ctx); if (do_show_format) show_format(wctx, fmt_ctx); end: close_input_file(&fmt_ctx); av_freep(&nb_streams_frames); av_freep(&nb_streams_packets); av_freep(&selected_streams); return ret; }"} {"target": 1, "idx": 7263, "func": "static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) { BDRVQcowState *s = bs->opaque; int64_t total_sectors = bs->total_sectors; int growable = bs->growable; int ret; BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); bs->growable = 1; ret = bdrv_pwritev(bs, qcow2_vm_state_offset(s) + pos, qiov); bs->growable = growable; /* bdrv_co_do_writev will have increased the total_sectors value to include * the VM state - the VM state is 
however not an actual part of the block * device, therefore, we need to restore the old value. */ bs->total_sectors = total_sectors; return ret; }"} {"target": 1, "idx": 7288, "func": "static int decode_frame_header(AVCodecContext *ctx, const uint8_t *data, int size, int *ref) { VP9Context *s = ctx->priv_data; int c, i, j, k, l, m, n, w, h, max, size2, res, sharp; int last_invisible; const uint8_t *data2; /* general header */ if ((res = init_get_bits8(&s->gb, data, size)) < 0) { av_log(ctx, AV_LOG_ERROR, \"Failed to initialize bitstream reader\\n\"); return res; } if (get_bits(&s->gb, 2) != 0x2) { // frame marker av_log(ctx, AV_LOG_ERROR, \"Invalid frame marker\\n\"); return AVERROR_INVALIDDATA; } s->profile = get_bits1(&s->gb); if (get_bits1(&s->gb)) { // reserved bit av_log(ctx, AV_LOG_ERROR, \"Reserved bit should be zero\\n\"); return AVERROR_INVALIDDATA; } if (get_bits1(&s->gb)) { *ref = get_bits(&s->gb, 3); return 0; } s->last_uses_2pass = s->uses_2pass; s->last_keyframe = s->keyframe; s->keyframe = !get_bits1(&s->gb); last_invisible = s->invisible; s->invisible = !get_bits1(&s->gb); s->errorres = get_bits1(&s->gb); s->use_last_frame_mvs = !s->errorres && !last_invisible; if (s->keyframe) { if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode av_log(ctx, AV_LOG_ERROR, \"Invalid sync code\\n\"); return AVERROR_INVALIDDATA; } s->colorspace = get_bits(&s->gb, 3); if (s->colorspace == 7) { // RGB = profile 1 av_log(ctx, AV_LOG_ERROR, \"RGB not supported in profile 0\\n\"); return AVERROR_INVALIDDATA; } s->fullrange = get_bits1(&s->gb); // for profile 1, here follows the subsampling bits s->refreshrefmask = 0xff; w = get_bits(&s->gb, 16) + 1; h = get_bits(&s->gb, 16) + 1; if (get_bits1(&s->gb)) // display size skip_bits(&s->gb, 32); } else { s->intraonly = s->invisible ? get_bits1(&s->gb) : 0; s->resetctx = s->errorres ? 0 : get_bits(&s->gb, 2); if (s->intraonly) { if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode av_log(ctx, AV_LOG_ERROR, \"Invalid sync code\\n\"); return AVERROR_INVALIDDATA; } s->refreshrefmask = get_bits(&s->gb, 8); w = get_bits(&s->gb, 16) + 1; h = get_bits(&s->gb, 16) + 1; if (get_bits1(&s->gb)) // display size skip_bits(&s->gb, 32); } else { s->refreshrefmask = get_bits(&s->gb, 8); s->refidx[0] = get_bits(&s->gb, 3); s->signbias[0] = get_bits1(&s->gb); s->refidx[1] = get_bits(&s->gb, 3); s->signbias[1] = get_bits1(&s->gb); s->refidx[2] = get_bits(&s->gb, 3); s->signbias[2] = get_bits1(&s->gb); if (!s->refs[s->refidx[0]].f->data[0] || !s->refs[s->refidx[1]].f->data[0] || !s->refs[s->refidx[2]].f->data[0]) { av_log(ctx, AV_LOG_ERROR, \"Not all references are available\\n\"); return AVERROR_INVALIDDATA; } if (get_bits1(&s->gb)) { w = s->refs[s->refidx[0]].f->width; h = s->refs[s->refidx[0]].f->height; } else if (get_bits1(&s->gb)) { w = s->refs[s->refidx[1]].f->width; h = s->refs[s->refidx[1]].f->height; } else if (get_bits1(&s->gb)) { w = s->refs[s->refidx[2]].f->width; h = s->refs[s->refidx[2]].f->height; } else { w = get_bits(&s->gb, 16) + 1; h = get_bits(&s->gb, 16) + 1; } // Note that in this code, \"CUR_FRAME\" is actually before we // have formally allocated a frame, and thus actually represents // the _last_ frame s->use_last_frame_mvs &= s->frames[CUR_FRAME].tf.f->width == w && s->frames[CUR_FRAME].tf.f->height == h; if (get_bits1(&s->gb)) // display size skip_bits(&s->gb, 32); s->highprecisionmvs = get_bits1(&s->gb); s->filtermode = get_bits1(&s->gb) ? 
FILTER_SWITCHABLE : get_bits(&s->gb, 2); s->allowcompinter = s->signbias[0] != s->signbias[1] || s->signbias[0] != s->signbias[2]; if (s->allowcompinter) { if (s->signbias[0] == s->signbias[1]) { s->fixcompref = 2; s->varcompref[0] = 0; s->varcompref[1] = 1; } else if (s->signbias[0] == s->signbias[2]) { s->fixcompref = 1; s->varcompref[0] = 0; s->varcompref[1] = 2; } else { s->fixcompref = 0; s->varcompref[0] = 1; s->varcompref[1] = 2; } } } } s->refreshctx = s->errorres ? 0 : get_bits1(&s->gb); s->parallelmode = s->errorres ? 1 : get_bits1(&s->gb); s->framectxid = c = get_bits(&s->gb, 2); /* loopfilter header data */ s->filter.level = get_bits(&s->gb, 6); sharp = get_bits(&s->gb, 3); // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep // the old cache values since they are still valid if (s->filter.sharpness != sharp) memset(s->filter.lim_lut, 0, sizeof(s->filter.lim_lut)); s->filter.sharpness = sharp; if ((s->lf_delta.enabled = get_bits1(&s->gb))) { if (get_bits1(&s->gb)) { for (i = 0; i < 4; i++) if (get_bits1(&s->gb)) s->lf_delta.ref[i] = get_sbits_inv(&s->gb, 6); for (i = 0; i < 2; i++) if (get_bits1(&s->gb)) s->lf_delta.mode[i] = get_sbits_inv(&s->gb, 6); } } else { memset(&s->lf_delta, 0, sizeof(s->lf_delta)); } /* quantization header data */ s->yac_qi = get_bits(&s->gb, 8); s->ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0; s->uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0; s->uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0; s->lossless = s->yac_qi == 0 && s->ydc_qdelta == 0 && s->uvdc_qdelta == 0 && s->uvac_qdelta == 0; /* segmentation header info */ if ((s->segmentation.enabled = get_bits1(&s->gb))) { if ((s->segmentation.update_map = get_bits1(&s->gb))) { for (i = 0; i < 7; i++) s->prob.seg[i] = get_bits1(&s->gb) ? get_bits(&s->gb, 8) : 255; if ((s->segmentation.temporal = get_bits1(&s->gb))) for (i = 0; i < 3; i++) s->prob.segpred[i] = get_bits1(&s->gb) ? get_bits(&s->gb, 8) : 255; } else { s->use_last_frame_segmap = !s->keyframe && !s->intraonly && s->frames[CUR_FRAME].tf.f->width == w && s->frames[CUR_FRAME].tf.f->height == h; } if (get_bits1(&s->gb)) { s->segmentation.absolute_vals = get_bits1(&s->gb); for (i = 0; i < 8; i++) { if ((s->segmentation.feat[i].q_enabled = get_bits1(&s->gb))) s->segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8); if ((s->segmentation.feat[i].lf_enabled = get_bits1(&s->gb))) s->segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6); if ((s->segmentation.feat[i].ref_enabled = get_bits1(&s->gb))) s->segmentation.feat[i].ref_val = get_bits(&s->gb, 2); s->segmentation.feat[i].skip_enabled = get_bits1(&s->gb); } } } else { s->segmentation.feat[0].q_enabled = 0; s->segmentation.feat[0].lf_enabled = 0; s->segmentation.feat[0].skip_enabled = 0; s->segmentation.feat[0].ref_enabled = 0; } // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas for (i = 0; i < (s->segmentation.enabled ? 
8 : 1); i++) { int qyac, qydc, quvac, quvdc, lflvl, sh; if (s->segmentation.feat[i].q_enabled) { if (s->segmentation.absolute_vals) qyac = s->segmentation.feat[i].q_val; else qyac = s->yac_qi + s->segmentation.feat[i].q_val; } else { qyac = s->yac_qi; } qydc = av_clip_uintp2(qyac + s->ydc_qdelta, 8); quvdc = av_clip_uintp2(qyac + s->uvdc_qdelta, 8); quvac = av_clip_uintp2(qyac + s->uvac_qdelta, 8); qyac = av_clip_uintp2(qyac, 8); s->segmentation.feat[i].qmul[0][0] = vp9_dc_qlookup[qydc]; s->segmentation.feat[i].qmul[0][1] = vp9_ac_qlookup[qyac]; s->segmentation.feat[i].qmul[1][0] = vp9_dc_qlookup[quvdc]; s->segmentation.feat[i].qmul[1][1] = vp9_ac_qlookup[quvac]; sh = s->filter.level >= 32; if (s->segmentation.feat[i].lf_enabled) { if (s->segmentation.absolute_vals) lflvl = s->segmentation.feat[i].lf_val; else lflvl = s->filter.level + s->segmentation.feat[i].lf_val; } else { lflvl = s->filter.level; } s->segmentation.feat[i].lflvl[0][0] = s->segmentation.feat[i].lflvl[0][1] = av_clip_uintp2(lflvl + (s->lf_delta.ref[0] << sh), 6); for (j = 1; j < 4; j++) { s->segmentation.feat[i].lflvl[j][0] = av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] + s->lf_delta.mode[0]) << sh), 6); s->segmentation.feat[i].lflvl[j][1] = av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] + s->lf_delta.mode[1]) << sh), 6); } } /* tiling info */ if ((res = update_size(ctx, w, h)) < 0) { av_log(ctx, AV_LOG_ERROR, \"Failed to initialize decoder for %dx%d\\n\", w, h); return res; } for (s->tiling.log2_tile_cols = 0; (s->sb_cols >> s->tiling.log2_tile_cols) > 64; s->tiling.log2_tile_cols++) ; for (max = 0; (s->sb_cols >> max) >= 4; max++) ; max = FFMAX(0, max - 1); while (max > s->tiling.log2_tile_cols) { if (get_bits1(&s->gb)) s->tiling.log2_tile_cols++; else break; } s->tiling.log2_tile_rows = decode012(&s->gb); s->tiling.tile_rows = 1 << s->tiling.log2_tile_rows; if (s->tiling.tile_cols != (1 << s->tiling.log2_tile_cols)) { s->tiling.tile_cols = 1 << s->tiling.log2_tile_cols; s->c_b = av_fast_realloc(s->c_b, &s->c_b_size, sizeof(VP56RangeCoder) * s->tiling.tile_cols); if (!s->c_b) { av_log(ctx, AV_LOG_ERROR, \"Ran out of memory during range coder init\\n\"); return AVERROR(ENOMEM); } } if (s->keyframe || s->errorres || s->intraonly) { s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p = s->prob_ctx[3].p = vp9_default_probs; memcpy(s->prob_ctx[0].coef, vp9_default_coef_probs, sizeof(vp9_default_coef_probs)); memcpy(s->prob_ctx[1].coef, vp9_default_coef_probs, sizeof(vp9_default_coef_probs)); memcpy(s->prob_ctx[2].coef, vp9_default_coef_probs, sizeof(vp9_default_coef_probs)); memcpy(s->prob_ctx[3].coef, vp9_default_coef_probs, sizeof(vp9_default_coef_probs)); } // next 16 bits is size of the rest of the header (arith-coded) size2 = get_bits(&s->gb, 16); data2 = align_get_bits(&s->gb); if (size2 > size - (data2 - data)) { av_log(ctx, AV_LOG_ERROR, \"Invalid compressed header size\\n\"); return AVERROR_INVALIDDATA; } ff_vp56_init_range_decoder(&s->c, data2, size2); if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit av_log(ctx, AV_LOG_ERROR, \"Marker bit was set\\n\"); return AVERROR_INVALIDDATA; } if (s->keyframe || s->intraonly) { memset(s->counts.coef, 0, sizeof(s->counts.coef) + sizeof(s->counts.eob)); } else { memset(&s->counts, 0, sizeof(s->counts)); } // FIXME is it faster to not copy here, but do it down in the fw updates // as explicit copies if the fw update is missing (and skip the copy upon // fw update)? 
s->prob.p = s->prob_ctx[c].p; // txfm updates if (s->lossless) { s->txfmmode = TX_4X4; } else { s->txfmmode = vp8_rac_get_uint(&s->c, 2); if (s->txfmmode == 3) s->txfmmode += vp8_rac_get(&s->c); if (s->txfmmode == TX_SWITCHABLE) { for (i = 0; i < 2; i++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]); for (i = 0; i < 2; i++) for (j = 0; j < 2; j++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.tx16p[i][j] = update_prob(&s->c, s->prob.p.tx16p[i][j]); for (i = 0; i < 2; i++) for (j = 0; j < 3; j++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.tx32p[i][j] = update_prob(&s->c, s->prob.p.tx32p[i][j]); } } // coef updates for (i = 0; i < 4; i++) { uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i]; if (vp8_rac_get(&s->c)) { for (j = 0; j < 2; j++) for (k = 0; k < 2; k++) for (l = 0; l < 6; l++) for (m = 0; m < 6; m++) { uint8_t *p = s->prob.coef[i][j][k][l][m]; uint8_t *r = ref[j][k][l][m]; if (m >= 3 && l == 0) // dc only has 3 pt break; for (n = 0; n < 3; n++) { if (vp56_rac_get_prob_branchy(&s->c, 252)) { p[n] = update_prob(&s->c, r[n]); } else { p[n] = r[n]; } } p[3] = 0; } } else { for (j = 0; j < 2; j++) for (k = 0; k < 2; k++) for (l = 0; l < 6; l++) for (m = 0; m < 6; m++) { uint8_t *p = s->prob.coef[i][j][k][l][m]; uint8_t *r = ref[j][k][l][m]; if (m > 3 && l == 0) // dc only has 3 pt break; memcpy(p, r, 3); p[3] = 0; } } if (s->txfmmode == i) break; } // mode updates for (i = 0; i < 3; i++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]); if (!s->keyframe && !s->intraonly) { for (i = 0; i < 7; i++) for (j = 0; j < 3; j++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_mode[i][j] = update_prob(&s->c, s->prob.p.mv_mode[i][j]); if (s->filtermode == FILTER_SWITCHABLE) for (i = 0; i < 4; i++) for (j = 0; j < 2; j++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.filter[i][j] = update_prob(&s->c, s->prob.p.filter[i][j]); for (i = 0; i < 4; i++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]); if (s->allowcompinter) { s->comppredmode = vp8_rac_get(&s->c); if (s->comppredmode) s->comppredmode += vp8_rac_get(&s->c); if (s->comppredmode == PRED_SWITCHABLE) for (i = 0; i < 5; i++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.comp[i] = update_prob(&s->c, s->prob.p.comp[i]); } else { s->comppredmode = PRED_SINGLEREF; } if (s->comppredmode != PRED_COMPREF) { for (i = 0; i < 5; i++) { if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.single_ref[i][0] = update_prob(&s->c, s->prob.p.single_ref[i][0]); if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.single_ref[i][1] = update_prob(&s->c, s->prob.p.single_ref[i][1]); } } if (s->comppredmode != PRED_SINGLEREF) { for (i = 0; i < 5; i++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.comp_ref[i] = update_prob(&s->c, s->prob.p.comp_ref[i]); } for (i = 0; i < 4; i++) for (j = 0; j < 9; j++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.y_mode[i][j] = update_prob(&s->c, s->prob.p.y_mode[i][j]); for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) for (k = 0; k < 3; k++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.partition[3 - i][j][k] = update_prob(&s->c, s->prob.p.partition[3 - i][j][k]); // mv fields don't use the update_prob subexp model for some reason for (i = 0; i < 3; i++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1; for (i = 0; i < 2; i++) { if 
(vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_comp[i].sign = (vp8_rac_get_uint(&s->c, 7) << 1) | 1; for (j = 0; j < 10; j++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_comp[i].classes[j] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1; if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_comp[i].class0 = (vp8_rac_get_uint(&s->c, 7) << 1) | 1; for (j = 0; j < 10; j++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_comp[i].bits[j] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1; } for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) for (k = 0; k < 3; k++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_comp[i].class0_fp[j][k] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1; for (j = 0; j < 3; j++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_comp[i].fp[j] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1; } if (s->highprecisionmvs) { for (i = 0; i < 2; i++) { if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_comp[i].class0_hp = (vp8_rac_get_uint(&s->c, 7) << 1) | 1; if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_comp[i].hp = (vp8_rac_get_uint(&s->c, 7) << 1) | 1; } } } return (data2 - data) + size2; }"} {"target": 1, "idx": 7295, "func": "yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum PixelFormat target) { const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; int i; if (uvalpha < 2048) { for (i = 0; i < (dstW >> 1); i++) { int Y1 = buf0[i * 2] >> 7; int Y2 = buf0[i * 2 + 1] >> 7; int U = ubuf1[i] >> 7; int V = vbuf1[i] >> 7; output_pixels(i * 4, Y1, U, Y2, V); } } else { for (i = 0; i < (dstW >> 1); i++) { int Y1 = buf0[i * 2] >> 7; int Y2 = buf0[i * 2 + 1] >> 7; int U = (ubuf0[i] + ubuf1[i]) >> 8; int V = (vbuf0[i] + vbuf1[i]) >> 8; output_pixels(i * 4, Y1, U, Y2, V); } } }"} {"target": 1, "idx": 7308, "func": "static void local_mapped_file_attr(int dirfd, const char *name, struct stat *stbuf) { FILE *fp; char buf[ATTR_MAX]; int map_dirfd; map_dirfd = openat(dirfd, VIRTFS_META_DIR, O_RDONLY | O_DIRECTORY | O_NOFOLLOW); if (map_dirfd == -1) { return; } fp = local_fopenat(map_dirfd, name, \"r\"); close_preserve_errno(map_dirfd); if (!fp) { return; } memset(buf, 0, ATTR_MAX); while (fgets(buf, ATTR_MAX, fp)) { if (!strncmp(buf, \"virtfs.uid\", 10)) { stbuf->st_uid = atoi(buf+11); } else if (!strncmp(buf, \"virtfs.gid\", 10)) { stbuf->st_gid = atoi(buf+11); } else if (!strncmp(buf, \"virtfs.mode\", 11)) { stbuf->st_mode = atoi(buf+12); } else if (!strncmp(buf, \"virtfs.rdev\", 11)) { stbuf->st_rdev = atoi(buf+12); } memset(buf, 0, ATTR_MAX); } fclose(fp); }"} {"target": 1, "idx": 7309, "func": "pci_e1000_init(PCIBus *bus, NICInfo *nd, int devfn) { E1000State *d; uint8_t *pci_conf; uint16_t checksum = 0; static const char info_str[] = \"e1000\"; int i; d = (E1000State *)pci_register_device(bus, \"e1000\", sizeof(E1000State), devfn, NULL, NULL); pci_conf = d->dev.config; memset(pci_conf, 0, 256); pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL); pci_config_set_device_id(pci_conf, E1000_DEVID); *(uint16_t *)(pci_conf+0x04) = cpu_to_le16(0x0407); *(uint16_t *)(pci_conf+0x06) = cpu_to_le16(0x0010); pci_conf[0x08] = 0x03; pci_config_set_class(pci_conf, PCI_CLASS_NETWORK_ETHERNET); pci_conf[0x0c] = 0x10; pci_conf[0x3d] = 1; // interrupt pin 0 d->mmio_index = cpu_register_io_memory(0, e1000_mmio_read, e1000_mmio_write, d); pci_register_io_region((PCIDevice *)d, 0, PNPMMIO_SIZE, PCI_ADDRESS_SPACE_MEM, 
e1000_mmio_map); pci_register_io_region((PCIDevice *)d, 1, IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, ioport_map); memmove(d->eeprom_data, e1000_eeprom_template, sizeof e1000_eeprom_template); for (i = 0; i < 3; i++) d->eeprom_data[i] = (nd->macaddr[2*i+1]<<8) | nd->macaddr[2*i]; for (i = 0; i < EEPROM_CHECKSUM_REG; i++) checksum += d->eeprom_data[i]; checksum = (uint16_t) EEPROM_SUM - checksum; d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum; memset(d->phy_reg, 0, sizeof d->phy_reg); memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init); memset(d->mac_reg, 0, sizeof d->mac_reg); memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init); d->rxbuf_min_shift = 1; memset(&d->tx, 0, sizeof d->tx); d->vc = qemu_new_vlan_client(nd->vlan, nd->model, nd->name, e1000_receive, e1000_can_receive, d); d->vc->link_status_changed = e1000_set_link_status; qemu_format_nic_info_str(d->vc, nd->macaddr); register_savevm(info_str, -1, 2, nic_save, nic_load, d); d->dev.unregister = pci_e1000_uninit; return (PCIDevice *)d; }"} {"target": 0, "idx": 7316, "func": "static void legacy_kbd_event(DeviceState *dev, QemuConsole *src, InputEvent *evt) { QEMUPutKbdEntry *entry = (QEMUPutKbdEntry *)dev; int scancodes[3], i, count; if (!entry || !entry->put_kbd) { return; } count = qemu_input_key_value_to_scancode(evt->key->key, evt->key->down, scancodes); for (i = 0; i < count; i++) { entry->put_kbd(entry->opaque, scancodes[i]); } }"} {"target": 0, "idx": 7322, "func": "PITState *pit_init(int base, qemu_irq irq) { PITState *pit = &pit_state; PITChannelState *s; s = &pit->channels[0]; /* the timer 0 is connected to an IRQ */ s->irq_timer = qemu_new_timer(vm_clock, pit_irq_timer, s); s->irq = irq; vmstate_register(base, &vmstate_pit, pit); qemu_register_reset(pit_reset, pit); register_ioport_write(base, 4, 1, pit_ioport_write, pit); register_ioport_read(base, 3, 1, pit_ioport_read, pit); pit_reset(pit); return pit; }"} {"target": 0, "idx": 7325, "func": "static int qxl_init_common(PCIQXLDevice *qxl) { uint8_t* config = qxl->pci.config; uint32_t pci_device_id; uint32_t pci_device_rev; uint32_t io_size; qxl->mode = QXL_MODE_UNDEFINED; qxl->generation = 1; qxl->num_memslots = NUM_MEMSLOTS; qxl->num_surfaces = NUM_SURFACES; switch (qxl->revision) { case 1: /* spice 0.4 -- qxl-1 */ pci_device_id = QXL_DEVICE_ID_STABLE; pci_device_rev = QXL_REVISION_STABLE_V04; break; case 2: /* spice 0.6 -- qxl-2 */ pci_device_id = QXL_DEVICE_ID_STABLE; pci_device_rev = QXL_REVISION_STABLE_V06; break; default: /* experimental */ pci_device_id = QXL_DEVICE_ID_DEVEL; pci_device_rev = 1; break; } pci_config_set_vendor_id(config, REDHAT_PCI_VENDOR_ID); pci_config_set_device_id(config, pci_device_id); pci_set_byte(&config[PCI_REVISION_ID], pci_device_rev); pci_set_byte(&config[PCI_INTERRUPT_PIN], 1); qxl->rom_size = qxl_rom_size(); qxl->rom_offset = qemu_ram_alloc(&qxl->pci.qdev, \"qxl.vrom\", qxl->rom_size); init_qxl_rom(qxl); init_qxl_ram(qxl); if (qxl->vram_size < 16 * 1024 * 1024) { qxl->vram_size = 16 * 1024 * 1024; } if (qxl->revision == 1) { qxl->vram_size = 4096; } qxl->vram_size = msb_mask(qxl->vram_size * 2 - 1); qxl->vram_offset = qemu_ram_alloc(&qxl->pci.qdev, \"qxl.vram\", qxl->vram_size); io_size = msb_mask(QXL_IO_RANGE_SIZE * 2 - 1); if (qxl->revision == 1) { io_size = 8; } pci_register_bar(&qxl->pci, QXL_IO_RANGE_INDEX, io_size, PCI_BASE_ADDRESS_SPACE_IO, qxl_map); pci_register_bar(&qxl->pci, QXL_ROM_RANGE_INDEX, qxl->rom_size, PCI_BASE_ADDRESS_SPACE_MEMORY, qxl_map); pci_register_bar(&qxl->pci, QXL_RAM_RANGE_INDEX, qxl->vga.vram_size, 
PCI_BASE_ADDRESS_SPACE_MEMORY, qxl_map); pci_register_bar(&qxl->pci, QXL_VRAM_RANGE_INDEX, qxl->vram_size, PCI_BASE_ADDRESS_SPACE_MEMORY, qxl_map); qxl->ssd.qxl.base.sif = &qxl_interface.base; qxl->ssd.qxl.id = qxl->id; qemu_spice_add_interface(&qxl->ssd.qxl.base); qemu_add_vm_change_state_handler(qxl_vm_change_state_handler, qxl); init_pipe_signaling(qxl); qxl_reset_state(qxl); return 0; }"} {"target": 0, "idx": 7331, "func": "static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc, int mem_index, bool is_ld) { int s_mask = (1 << (opc & MO_SIZE)) - 1; int ofs, a_off; uint64_t tlb_mask; /* For aligned accesses, we check the first byte and include the alignment bits within the address. For unaligned access, we check that we don't cross pages using the address of the last byte of the access. */ if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) { a_off = 0; tlb_mask = TARGET_PAGE_MASK | s_mask; } else { a_off = s_mask; tlb_mask = TARGET_PAGE_MASK; } if (facilities & FACILITY_GEN_INST_EXT) { tcg_out_risbg(s, TCG_REG_R2, addr_reg, 64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS, 63 - CPU_TLB_ENTRY_BITS, 64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1); if (a_off) { tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off); tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask); } else { tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask); } } else { tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off); tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask); } if (is_ld) { ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read); } else { ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write); } if (TARGET_LONG_BITS == 32) { tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs); } else { tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs); } ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend); tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs); if (TARGET_LONG_BITS == 32) { tgen_ext32u(s, TCG_REG_R3, addr_reg); return TCG_REG_R3; } return addr_reg; }"} {"target": 1, "idx": 7349, "func": "static void unterminated_literal(void) { QObject *obj = qobject_from_json(\"nul\", NULL); g_assert(obj == NULL); }"} {"target": 1, "idx": 7352, "func": "static void decode_sublayer_hrd(HEVCContext *s, int nb_cpb, int subpic_params_present) { GetBitContext *gb = &s->HEVClc.gb; int i; for (i = 0; i < nb_cpb; i++) { get_ue_golomb_long(gb); // bit_rate_value_minus1 get_ue_golomb_long(gb); // cpb_size_value_minus1 if (subpic_params_present) { get_ue_golomb_long(gb); // cpb_size_du_value_minus1 get_ue_golomb_long(gb); // bit_rate_du_value_minus1 } skip_bits1(gb); // cbr_flag } }"} {"target": 1, "idx": 7353, "func": "static inline int onenand_load_main(OneNANDState *s, int sec, int secn, void *dest) { if (s->blk_cur) { return blk_read(s->blk_cur, sec, dest, secn) < 0; } else if (sec + secn > s->secs_cur) { return 1; } memcpy(dest, s->current + (sec << 9), secn << 9); return 0; }"} {"target": 1, "idx": 7355, "func": "static void flush_trace_file(void) { /* If the trace file is not open yet, open it now */ if (!trace_fp) { trace_fp = fopen(trace_file_name, \"w\"); if (!trace_fp) { /* Avoid repeatedly trying to open file on failure */ trace_file_enabled = false; return; } write_header(trace_fp); } if (trace_fp) { size_t unused; /* for when fwrite(3) is 
declared warn_unused_result */ unused = fwrite(trace_buf, trace_idx * sizeof(trace_buf[0]), 1, trace_fp); } }"} {"target": 0, "idx": 7359, "func": "void helper_fxtoq(CPUSPARCState *env, int64_t src) { /* No possible exceptions converting long long to long double. */ QT0 = int64_to_float128(src, &env->fp_status); }"} {"target": 0, "idx": 7371, "func": "static void v9fs_open(void *opaque) { int flags; int32_t fid; int32_t mode; V9fsQID qid; int iounit = 0; ssize_t err = 0; size_t offset = 7; struct stat stbuf; V9fsFidState *fidp; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; if (s->proto_version == V9FS_PROTO_2000L) { pdu_unmarshal(pdu, offset, \"dd\", &fid, &mode); } else { pdu_unmarshal(pdu, offset, \"db\", &fid, &mode); } trace_v9fs_open(pdu->tag, pdu->id, fid, mode); fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } BUG_ON(fidp->fid_type != P9_FID_NONE); err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); if (err < 0) { goto out; } stat_to_qid(&stbuf, &qid); if (S_ISDIR(stbuf.st_mode)) { err = v9fs_co_opendir(pdu, fidp); if (err < 0) { goto out; } fidp->fid_type = P9_FID_DIR; offset += pdu_marshal(pdu, offset, \"Qd\", &qid, 0); err = offset; } else { if (s->proto_version == V9FS_PROTO_2000L) { flags = get_dotl_openflags(s, mode); } else { flags = omode_to_uflags(mode); } if (is_ro_export(&s->ctx)) { if (mode & O_WRONLY || mode & O_RDWR || mode & O_APPEND || mode & O_TRUNC) { err = -EROFS; goto out; } flags |= O_NOATIME; } err = v9fs_co_open(pdu, fidp, flags); if (err < 0) { goto out; } fidp->fid_type = P9_FID_FILE; fidp->open_flags = flags; if (flags & O_EXCL) { /* * We let the host file system do O_EXCL check * We should not reclaim such fd */ fidp->flags |= FID_NON_RECLAIMABLE; } iounit = get_iounit(pdu, &fidp->path); offset += pdu_marshal(pdu, offset, \"Qd\", &qid, iounit); err = offset; } trace_v9fs_open_return(pdu->tag, pdu->id, qid.type, qid.version, qid.path, iounit); out: put_fid(pdu, fidp); out_nofid: complete_pdu(s, pdu, err); }"} {"target": 0, "idx": 7377, "func": "static int pci_piix3_ide_initfn(PCIDevice *dev) { PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev); pci_config_set_vendor_id(d->dev.config, PCI_VENDOR_ID_INTEL); pci_config_set_device_id(d->dev.config, PCI_DEVICE_ID_INTEL_82371SB_1); return pci_piix_ide_initfn(d); }"} {"target": 0, "idx": 7378, "func": "static abi_long do_getpeername(int fd, abi_ulong target_addr, abi_ulong target_addrlen_addr) { socklen_t addrlen; void *addr; abi_long ret; if (get_user_u32(addrlen, target_addrlen_addr)) return -TARGET_EFAULT; if (addrlen < 0 || addrlen > MAX_SOCK_ADDR) return -TARGET_EINVAL; addr = alloca(addrlen); ret = get_errno(getpeername(fd, addr, &addrlen)); if (!is_error(ret)) { host_to_target_sockaddr(target_addr, addr, addrlen); if (put_user_u32(addrlen, target_addrlen_addr)) ret = -TARGET_EFAULT; } return ret; }"} {"target": 0, "idx": 7383, "func": "void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift, Error **errp) { CPUPPCState *env = &cpu->env; Error *local_err = NULL; cpu_synchronize_state(CPU(cpu)); env->external_htab = hpt; ppc_hash64_set_sdr1(cpu, (target_ulong)(uintptr_t)hpt | (shift - 18), &local_err); if (local_err) { error_propagate(errp, local_err); return; } /* Not strictly necessary, but makes it clearer that an external * htab is in use when debugging */ env->htab_base = -1; if (kvm_enabled()) { if (kvmppc_put_books_sregs(cpu) < 0) { error_setg(errp, \"Unable to update SDR1 in KVM\"); } } }"} {"target": 1, "idx": 7394, "func": "void numa_set_mem_node_id(ram_addr_t 
addr, uint64_t size, uint32_t node) { struct numa_addr_range *range = g_malloc0(sizeof(*range)); /* * Memory-less nodes can come here with 0 size in which case, * there is nothing to do. */ if (!size) { return; } range->mem_start = addr; range->mem_end = addr + size - 1; QLIST_INSERT_HEAD(&numa_info[node].addr, range, entry); }"} {"target": 1, "idx": 7400, "func": "static inline int cris_bound_d(int v, int b) { int r = v; asm (\"bound.d\\t%1, %0\\n\" : \"+r\" (r) : \"ri\" (b)); return r; }"} {"target": 0, "idx": 7406, "func": "static void mpeg4_encode_visual_object_header(MpegEncContext * s){ int profile_and_level_indication; int vo_ver_id; if(s->max_b_frames || s->quarter_sample){ profile_and_level_indication= 0xF1; // adv simple level 1 vo_ver_id= 5; }else{ profile_and_level_indication= 0x01; // simple level 1 vo_ver_id= 1; } //FIXME levels put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, VOS_STARTCODE); put_bits(&s->pb, 8, profile_and_level_indication); put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, VISUAL_OBJ_STARTCODE); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 4, vo_ver_id); put_bits(&s->pb, 3, 1); //priority put_bits(&s->pb, 4, 1); //visual obj type== video obj put_bits(&s->pb, 1, 0); //video signal type == no clue //FIXME ff_mpeg4_stuffing(&s->pb); }"} {"target": 0, "idx": 7434, "func": "static void add_entry1(TiffEncoderContext *s, enum TiffTags tag, enum TiffTypes type, int val) { uint16_t w = val; uint32_t dw = val; add_entry(s, tag, type, 1, type == TIFF_SHORT ? (void *)&w : (void *)&dw); }"} {"target": 0, "idx": 7446, "func": "static const char *pxb_host_root_bus_path(PCIHostState *host_bridge, PCIBus *rootbus) { PXBBus *bus = PXB_BUS(rootbus); snprintf(bus->bus_path, 8, \"0000:%02x\", pxb_bus_num(rootbus)); return bus->bus_path; }"} {"target": 0, "idx": 7450, "func": "int qemu_strtou64(const char *nptr, const char **endptr, int base, uint64_t *result) { char *p; int err = 0; if (!nptr) { if (endptr) { *endptr = nptr; } err = -EINVAL; } else { errno = 0; /* FIXME This assumes uint64_t is unsigned long long */ *result = strtoull(nptr, &p, base); /* Windows returns 1 for negative out-of-range values. 
*/ if (errno == ERANGE) { *result = -1; } err = check_strtox_error(nptr, p, endptr, errno); } return err; }"} {"target": 0, "idx": 7469, "func": "int ff_hevc_decode_nal_sps(HEVCContext *s) { const AVPixFmtDescriptor *desc; GetBitContext *gb = &s->HEVClc->gb; int ret = 0; int sps_id = 0; int log2_diff_max_min_transform_block_size; int bit_depth_chroma, start, vui_present, sublayer_ordering_info; int i; HEVCSPS *sps; AVBufferRef *sps_buf = av_buffer_allocz(sizeof(*sps)); if (!sps_buf) return AVERROR(ENOMEM); sps = (HEVCSPS*)sps_buf->data; av_log(s->avctx, AV_LOG_DEBUG, \"Decoding SPS\\n\"); // Coded parameters sps->vps_id = get_bits(gb, 4); if (sps->vps_id >= MAX_VPS_COUNT) { av_log(s->avctx, AV_LOG_ERROR, \"VPS id out of range: %d\\n\", sps->vps_id); ret = AVERROR_INVALIDDATA; goto err; } if (!s->vps_list[sps->vps_id]) { av_log(s->avctx, AV_LOG_ERROR, \"VPS does not exist \\n\"); ret = AVERROR_INVALIDDATA; goto err; } sps->max_sub_layers = get_bits(gb, 3) + 1; if (sps->max_sub_layers > MAX_SUB_LAYERS) { av_log(s->avctx, AV_LOG_ERROR, \"sps_max_sub_layers out of range: %d\\n\", sps->max_sub_layers); ret = AVERROR_INVALIDDATA; goto err; } skip_bits1(gb); // temporal_id_nesting_flag parse_ptl(s, &sps->ptl, sps->max_sub_layers); sps_id = get_ue_golomb_long(gb); if (sps_id >= MAX_SPS_COUNT) { av_log(s->avctx, AV_LOG_ERROR, \"SPS id out of range: %d\\n\", sps_id); ret = AVERROR_INVALIDDATA; goto err; } sps->chroma_format_idc = get_ue_golomb_long(gb); if (sps->chroma_format_idc != 1) { avpriv_report_missing_feature(s->avctx, \"chroma_format_idc != 1\\n\"); ret = AVERROR_PATCHWELCOME; goto err; } if (sps->chroma_format_idc == 3) sps->separate_colour_plane_flag = get_bits1(gb); sps->width = get_ue_golomb_long(gb); sps->height = get_ue_golomb_long(gb); if ((ret = av_image_check_size(sps->width, sps->height, 0, s->avctx)) < 0) goto err; if (get_bits1(gb)) { // pic_conformance_flag //TODO: * 2 is only valid for 420 sps->pic_conf_win.left_offset = get_ue_golomb_long(gb) * 2; sps->pic_conf_win.right_offset = get_ue_golomb_long(gb) * 2; sps->pic_conf_win.top_offset = get_ue_golomb_long(gb) * 2; sps->pic_conf_win.bottom_offset = get_ue_golomb_long(gb) * 2; if (s->avctx->flags2 & CODEC_FLAG2_IGNORE_CROP) { av_log(s->avctx, AV_LOG_DEBUG, \"discarding sps conformance window, \" \"original values are l:%u r:%u t:%u b:%u\\n\", sps->pic_conf_win.left_offset, sps->pic_conf_win.right_offset, sps->pic_conf_win.top_offset, sps->pic_conf_win.bottom_offset); sps->pic_conf_win.left_offset = sps->pic_conf_win.right_offset = sps->pic_conf_win.top_offset = sps->pic_conf_win.bottom_offset = 0; } sps->output_window = sps->pic_conf_win; } sps->bit_depth = get_ue_golomb_long(gb) + 8; bit_depth_chroma = get_ue_golomb_long(gb) + 8; if (bit_depth_chroma != sps->bit_depth) { av_log(s->avctx, AV_LOG_ERROR, \"Luma bit depth (%d) is different from chroma bit depth (%d), \" \"this is unsupported.\\n\", sps->bit_depth, bit_depth_chroma); ret = AVERROR_INVALIDDATA; goto err; } if (sps->chroma_format_idc == 1) { switch (sps->bit_depth) { case 8: sps->pix_fmt = AV_PIX_FMT_YUV420P; break; case 9: sps->pix_fmt = AV_PIX_FMT_YUV420P9; break; case 10: sps->pix_fmt = AV_PIX_FMT_YUV420P10; break; default: av_log(s->avctx, AV_LOG_ERROR, \"Unsupported bit depth: %d\\n\", sps->bit_depth); ret = AVERROR_PATCHWELCOME; goto err; } } else { av_log(s->avctx, AV_LOG_ERROR, \"non-4:2:0 support is currently unspecified.\\n\"); return AVERROR_PATCHWELCOME; } desc = av_pix_fmt_desc_get(sps->pix_fmt); if (!desc) { ret = AVERROR(EINVAL); goto err; } 
sps->hshift[0] = sps->vshift[0] = 0; sps->hshift[2] = sps->hshift[1] = desc->log2_chroma_w; sps->vshift[2] = sps->vshift[1] = desc->log2_chroma_h; sps->pixel_shift = sps->bit_depth > 8; sps->log2_max_poc_lsb = get_ue_golomb_long(gb) + 4; if (sps->log2_max_poc_lsb > 16) { av_log(s->avctx, AV_LOG_ERROR, \"log2_max_pic_order_cnt_lsb_minus4 out range: %d\\n\", sps->log2_max_poc_lsb - 4); ret = AVERROR_INVALIDDATA; goto err; } sublayer_ordering_info = get_bits1(gb); start = sublayer_ordering_info ? 0 : sps->max_sub_layers - 1; for (i = start; i < sps->max_sub_layers; i++) { sps->temporal_layer[i].max_dec_pic_buffering = get_ue_golomb_long(gb) + 1; sps->temporal_layer[i].num_reorder_pics = get_ue_golomb_long(gb); sps->temporal_layer[i].max_latency_increase = get_ue_golomb_long(gb) - 1; if (sps->temporal_layer[i].max_dec_pic_buffering > MAX_DPB_SIZE) { av_log(s->avctx, AV_LOG_ERROR, \"sps_max_dec_pic_buffering_minus1 out of range: %d\\n\", sps->temporal_layer[i].max_dec_pic_buffering - 1); ret = AVERROR_INVALIDDATA; goto err; } if (sps->temporal_layer[i].num_reorder_pics > sps->temporal_layer[i].max_dec_pic_buffering - 1) { av_log(s->avctx, AV_LOG_ERROR, \"sps_max_num_reorder_pics out of range: %d\\n\", sps->temporal_layer[i].num_reorder_pics); ret = AVERROR_INVALIDDATA; goto err; } } if (!sublayer_ordering_info) { for (i = 0; i < start; i++) { sps->temporal_layer[i].max_dec_pic_buffering = sps->temporal_layer[start].max_dec_pic_buffering; sps->temporal_layer[i].num_reorder_pics = sps->temporal_layer[start].num_reorder_pics; sps->temporal_layer[i].max_latency_increase = sps->temporal_layer[start].max_latency_increase; } } sps->log2_min_cb_size = get_ue_golomb_long(gb) + 3; sps->log2_diff_max_min_coding_block_size = get_ue_golomb_long(gb); sps->log2_min_tb_size = get_ue_golomb_long(gb) + 2; log2_diff_max_min_transform_block_size = get_ue_golomb_long(gb); sps->log2_max_trafo_size = log2_diff_max_min_transform_block_size + sps->log2_min_tb_size; if (sps->log2_min_tb_size >= sps->log2_min_cb_size) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid value for log2_min_tb_size\"); ret = AVERROR_INVALIDDATA; goto err; } sps->max_transform_hierarchy_depth_inter = get_ue_golomb_long(gb); sps->max_transform_hierarchy_depth_intra = get_ue_golomb_long(gb); sps->scaling_list_enable_flag = get_bits1(gb); if (sps->scaling_list_enable_flag) { set_default_scaling_list_data(&sps->scaling_list); if (get_bits1(gb)) { ret = scaling_list_data(s, &sps->scaling_list); if (ret < 0) goto err; } } sps->amp_enabled_flag = get_bits1(gb); sps->sao_enabled = get_bits1(gb); sps->pcm_enabled_flag = get_bits1(gb); if (sps->pcm_enabled_flag) { sps->pcm.bit_depth = get_bits(gb, 4) + 1; sps->pcm.bit_depth_chroma = get_bits(gb, 4) + 1; sps->pcm.log2_min_pcm_cb_size = get_ue_golomb_long(gb) + 3; sps->pcm.log2_max_pcm_cb_size = sps->pcm.log2_min_pcm_cb_size + get_ue_golomb_long(gb); if (sps->pcm.bit_depth > sps->bit_depth) { av_log(s->avctx, AV_LOG_ERROR, \"PCM bit depth (%d) is greater than normal bit depth (%d)\\n\", sps->pcm.bit_depth, sps->bit_depth); ret = AVERROR_INVALIDDATA; goto err; } sps->pcm.loop_filter_disable_flag = get_bits1(gb); } sps->nb_st_rps = get_ue_golomb_long(gb); if (sps->nb_st_rps > MAX_SHORT_TERM_RPS_COUNT) { av_log(s->avctx, AV_LOG_ERROR, \"Too many short term RPS: %d.\\n\", sps->nb_st_rps); ret = AVERROR_INVALIDDATA; goto err; } for (i = 0; i < sps->nb_st_rps; i++) { if ((ret = ff_hevc_decode_short_term_rps(s, &sps->st_rps[i], sps, 0)) < 0) goto err; } sps->long_term_ref_pics_present_flag = get_bits1(gb); if 
(sps->long_term_ref_pics_present_flag) { sps->num_long_term_ref_pics_sps = get_ue_golomb_long(gb); for (i = 0; i < sps->num_long_term_ref_pics_sps; i++) { sps->lt_ref_pic_poc_lsb_sps[i] = get_bits(gb, sps->log2_max_poc_lsb); sps->used_by_curr_pic_lt_sps_flag[i] = get_bits1(gb); } } sps->sps_temporal_mvp_enabled_flag = get_bits1(gb); sps->sps_strong_intra_smoothing_enable_flag = get_bits1(gb); sps->vui.sar = (AVRational){0, 1}; vui_present = get_bits1(gb); if (vui_present) decode_vui(s, sps); skip_bits1(gb); // sps_extension_flag if (s->apply_defdispwin) { sps->output_window.left_offset += sps->vui.def_disp_win.left_offset; sps->output_window.right_offset += sps->vui.def_disp_win.right_offset; sps->output_window.top_offset += sps->vui.def_disp_win.top_offset; sps->output_window.bottom_offset += sps->vui.def_disp_win.bottom_offset; } if (sps->output_window.left_offset & (0x1F >> (sps->pixel_shift)) && !(s->avctx->flags & CODEC_FLAG_UNALIGNED)) { sps->output_window.left_offset &= ~(0x1F >> (sps->pixel_shift)); av_log(s->avctx, AV_LOG_WARNING, \"Reducing left output window to %d \" \"chroma samples to preserve alignment.\\n\", sps->output_window.left_offset); } sps->output_width = sps->width - (sps->output_window.left_offset + sps->output_window.right_offset); sps->output_height = sps->height - (sps->output_window.top_offset + sps->output_window.bottom_offset); if (sps->output_width <= 0 || sps->output_height <= 0) { av_log(s->avctx, AV_LOG_WARNING, \"Invalid visible frame dimensions: %dx%d.\\n\", sps->output_width, sps->output_height); if (s->avctx->err_recognition & AV_EF_EXPLODE) { ret = AVERROR_INVALIDDATA; goto err; } av_log(s->avctx, AV_LOG_WARNING, \"Displaying the whole video surface.\\n\"); sps->pic_conf_win.left_offset = sps->pic_conf_win.right_offset = sps->pic_conf_win.top_offset = sps->pic_conf_win.bottom_offset = 0; sps->output_width = sps->width; sps->output_height = sps->height; } // Inferred parameters sps->log2_ctb_size = sps->log2_min_cb_size + sps->log2_diff_max_min_coding_block_size; sps->log2_min_pu_size = sps->log2_min_cb_size - 1; sps->ctb_width = (sps->width + (1 << sps->log2_ctb_size) - 1) >> sps->log2_ctb_size; sps->ctb_height = (sps->height + (1 << sps->log2_ctb_size) - 1) >> sps->log2_ctb_size; sps->ctb_size = sps->ctb_width * sps->ctb_height; sps->min_cb_width = sps->width >> sps->log2_min_cb_size; sps->min_cb_height = sps->height >> sps->log2_min_cb_size; sps->min_tb_width = sps->width >> sps->log2_min_tb_size; sps->min_tb_height = sps->height >> sps->log2_min_tb_size; sps->min_pu_width = sps->width >> sps->log2_min_pu_size; sps->min_pu_height = sps->height >> sps->log2_min_pu_size; sps->qp_bd_offset = 6 * (sps->bit_depth - 8); if (sps->width & ((1 << sps->log2_min_cb_size) - 1) || sps->height & ((1 << sps->log2_min_cb_size) - 1)) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid coded frame dimensions.\\n\"); goto err; } if (sps->log2_ctb_size > MAX_LOG2_CTB_SIZE) { av_log(s->avctx, AV_LOG_ERROR, \"CTB size out of range: 2^%d\\n\", sps->log2_ctb_size); goto err; } if (sps->max_transform_hierarchy_depth_inter > sps->log2_ctb_size - sps->log2_min_tb_size) { av_log(s->avctx, AV_LOG_ERROR, \"max_transform_hierarchy_depth_inter out of range: %d\\n\", sps->max_transform_hierarchy_depth_inter); goto err; } if (sps->max_transform_hierarchy_depth_intra > sps->log2_ctb_size - sps->log2_min_tb_size) { av_log(s->avctx, AV_LOG_ERROR, \"max_transform_hierarchy_depth_intra out of range: %d\\n\", sps->max_transform_hierarchy_depth_intra); goto err; } if (sps->log2_max_trafo_size > 
FFMIN(sps->log2_ctb_size, 5)) { av_log(s->avctx, AV_LOG_ERROR, \"max transform block size out of range: %d\\n\", sps->log2_max_trafo_size); goto err; } if (s->avctx->debug & FF_DEBUG_BITSTREAM) { av_log(s->avctx, AV_LOG_DEBUG, \"Parsed SPS: id %d; coded wxh: %dx%d; \" \"cropped wxh: %dx%d; pix_fmt: %s.\\n\", sps_id, sps->width, sps->height, sps->output_width, sps->output_height, av_get_pix_fmt_name(sps->pix_fmt)); } /* check if this is a repeat of an already parsed SPS, then keep the * original one. * otherwise drop all PPSes that depend on it */ if (s->sps_list[sps_id] && !memcmp(s->sps_list[sps_id]->data, sps_buf->data, sps_buf->size)) { av_buffer_unref(&sps_buf); } else { for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++) { if (s->pps_list[i] && ((HEVCPPS*)s->pps_list[i]->data)->sps_id == sps_id) av_buffer_unref(&s->pps_list[i]); } av_buffer_unref(&s->sps_list[sps_id]); s->sps_list[sps_id] = sps_buf; } return 0; err: av_buffer_unref(&sps_buf); return ret; }"} {"target": 0, "idx": 7473, "func": "static void do_audio_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVFrame *decoded_frame) { uint8_t *buftmp; int size_out, frame_bytes, resample_changed; AVCodecContext *enc = ost->st->codec; AVCodecContext *dec = ist->st->codec; int osize = av_get_bytes_per_sample(enc->sample_fmt); int isize = av_get_bytes_per_sample(dec->sample_fmt); uint8_t *buf = decoded_frame->data[0]; int size = decoded_frame->nb_samples * dec->channels * isize; if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples) < 0) { av_log(NULL, AV_LOG_FATAL, \"Error allocating audio buffer\\n\"); exit_program(1); } if (enc->channels != dec->channels || enc->sample_rate != dec->sample_rate) ost->audio_resample = 1; resample_changed = ost->resample_sample_fmt != dec->sample_fmt || ost->resample_channels != dec->channels || ost->resample_sample_rate != dec->sample_rate; if ((ost->audio_resample && !ost->resample) || resample_changed) { if (resample_changed) { av_log(NULL, AV_LOG_INFO, \"Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d to rate:%d fmt:%s ch:%d\\n\", ist->file_index, ist->st->index, ost->resample_sample_rate, av_get_sample_fmt_name(ost->resample_sample_fmt), ost->resample_channels, dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt), dec->channels); ost->resample_sample_fmt = dec->sample_fmt; ost->resample_channels = dec->channels; ost->resample_sample_rate = dec->sample_rate; if (ost->resample) audio_resample_close(ost->resample); } /* if audio_sync_method is >1 the resampler is needed for audio drift compensation */ if (audio_sync_method <= 1 && ost->resample_sample_fmt == enc->sample_fmt && ost->resample_channels == enc->channels && ost->resample_sample_rate == enc->sample_rate) { ost->resample = NULL; ost->audio_resample = 0; } else if (ost->audio_resample) { if (dec->sample_fmt != AV_SAMPLE_FMT_S16) av_log(NULL, AV_LOG_WARNING, \"Using s16 intermediate sample format for resampling\\n\"); ost->resample = av_audio_resample_init(enc->channels, dec->channels, enc->sample_rate, dec->sample_rate, enc->sample_fmt, dec->sample_fmt, 16, 10, 0, 0.8); if (!ost->resample) { av_log(NULL, AV_LOG_FATAL, \"Can not resample %d channels @ %d Hz to %d channels @ %d Hz\\n\", dec->channels, dec->sample_rate, enc->channels, enc->sample_rate); exit_program(1); } } } #define MAKE_SFMT_PAIR(a,b) ((a)+AV_SAMPLE_FMT_NB*(b)) if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt && MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt) != ost->reformat_pair) { if (ost->reformat_ctx) 
av_audio_convert_free(ost->reformat_ctx); ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1, dec->sample_fmt, 1, NULL, 0); if (!ost->reformat_ctx) { av_log(NULL, AV_LOG_FATAL, \"Cannot convert %s sample format to %s sample format\\n\", av_get_sample_fmt_name(dec->sample_fmt), av_get_sample_fmt_name(enc->sample_fmt)); exit_program(1); } ost->reformat_pair = MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt); } if (audio_sync_method) { double delta = get_sync_ipts(ost, ist->last_dts) * enc->sample_rate - ost->sync_opts - av_fifo_size(ost->fifo) / (enc->channels * osize); int idelta = delta * dec->sample_rate / enc->sample_rate; int byte_delta = idelta * isize * dec->channels; // FIXME resample delay if (fabs(delta) > 50) { if (ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate) { if (byte_delta < 0) { byte_delta = FFMAX(byte_delta, -size); size += byte_delta; buf -= byte_delta; av_log(NULL, AV_LOG_VERBOSE, \"discarding %d audio samples\\n\", -byte_delta / (isize * dec->channels)); if (!size) return; ist->is_start = 0; } else { av_fast_malloc(&async_buf, &allocated_async_buf_size, byte_delta + size); if (!async_buf) { av_log(NULL, AV_LOG_FATAL, \"Out of memory in do_audio_out\\n\"); exit_program(1); } if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples + idelta) < 0) { av_log(NULL, AV_LOG_FATAL, \"Error allocating audio buffer\\n\"); exit_program(1); } ist->is_start = 0; generate_silence(async_buf, dec->sample_fmt, byte_delta); memcpy(async_buf + byte_delta, buf, size); buf = async_buf; size += byte_delta; av_log(NULL, AV_LOG_VERBOSE, \"adding %d audio samples of silence\\n\", idelta); } } else if (audio_sync_method > 1) { int comp = av_clip(delta, -audio_sync_method, audio_sync_method); av_assert0(ost->audio_resample); av_log(NULL, AV_LOG_VERBOSE, \"compensating audio timestamp drift:%f compensation:%d in:%d\\n\", delta, comp, enc->sample_rate); // fprintf(stderr, \"drift:%f len:%d opts:%\"PRId64\" ipts:%\"PRId64\" fifo:%d\\n\", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2)); av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate); } } } else ost->sync_opts = lrintf(get_sync_ipts(ost, ist->last_dts) * enc->sample_rate) - av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong if (ost->audio_resample) { buftmp = audio_buf; size_out = audio_resample(ost->resample, (short *)buftmp, (short *)buf, size / (dec->channels * isize)); size_out = size_out * enc->channels * osize; } else { buftmp = buf; size_out = size; } if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt) { const void *ibuf[6] = { buftmp }; void *obuf[6] = { audio_buf }; int istride[6] = { isize }; int ostride[6] = { osize }; int len = size_out / istride[0]; if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len) < 0) { printf(\"av_audio_convert() failed\\n\"); if (exit_on_error) exit_program(1); return; } buftmp = audio_buf; size_out = len * osize; } /* now encode as many frames as possible */ if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) { /* output resampled raw samples */ if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) { av_log(NULL, AV_LOG_FATAL, \"av_fifo_realloc2() failed\\n\"); exit_program(1); } av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL); frame_bytes = enc->frame_size * osize * enc->channels; while (av_fifo_size(ost->fifo) >= frame_bytes) { av_fifo_generic_read(ost->fifo, 
audio_buf, frame_bytes, NULL); encode_audio_frame(s, ost, audio_buf, frame_bytes); } } else { encode_audio_frame(s, ost, buftmp, size_out); } }"} {"target": 0, "idx": 7484, "func": "void v9fs_string_init(V9fsString *str) { str->data = NULL; str->size = 0; }"} {"target": 0, "idx": 7487, "func": "static void do_spawn_thread(ThreadPool *pool) { QemuThread t; /* Runs with lock taken. */ if (!pool->new_threads) { return; } pool->new_threads--; pool->pending_threads++; qemu_thread_create(&t, \"worker\", worker_thread, pool, QEMU_THREAD_DETACHED); }"} {"target": 0, "idx": 7496, "func": "icmp_input(struct mbuf *m, int hlen) { register struct icmp *icp; register struct ip *ip=mtod(m, struct ip *); int icmplen=ip->ip_len; Slirp *slirp = m->slirp; DEBUG_CALL(\"icmp_input\"); DEBUG_ARG(\"m = %p\", m); DEBUG_ARG(\"m_len = %d\", m->m_len); /* * Locate icmp structure in mbuf, and check * that its not corrupted and of at least minimum length. */ if (icmplen < ICMP_MINLEN) { /* min 8 bytes payload */ freeit: m_free(m); goto end_error; } m->m_len -= hlen; m->m_data += hlen; icp = mtod(m, struct icmp *); if (cksum(m, icmplen)) { goto freeit; } m->m_len += hlen; m->m_data -= hlen; DEBUG_ARG(\"icmp_type = %d\", icp->icmp_type); switch (icp->icmp_type) { case ICMP_ECHO: ip->ip_len += hlen; /* since ip_input subtracts this */ if (ip->ip_dst.s_addr == slirp->vhost_addr.s_addr) { icmp_reflect(m); } else if (slirp->restricted) { goto freeit; } else { struct socket *so; struct sockaddr_in addr; if ((so = socreate(slirp)) == NULL) goto freeit; if (icmp_send(so, m, hlen) == 0) { return; } if(udp_attach(so) == -1) { DEBUG_MISC((dfd,\"icmp_input udp_attach errno = %d-%s\\n\", errno,strerror(errno))); sofree(so); m_free(m); goto end_error; } so->so_m = m; so->so_ffamily = AF_INET; so->so_faddr = ip->ip_dst; so->so_fport = htons(7); so->so_lfamily = AF_INET; so->so_laddr = ip->ip_src; so->so_lport = htons(9); so->so_iptos = ip->ip_tos; so->so_type = IPPROTO_ICMP; so->so_state = SS_ISFCONNECTED; /* Send the packet */ addr.sin_family = AF_INET; if ((so->so_faddr.s_addr & slirp->vnetwork_mask.s_addr) == slirp->vnetwork_addr.s_addr) { /* It's an alias */ if (so->so_faddr.s_addr == slirp->vnameserver_addr.s_addr) { if (get_dns_addr(&addr.sin_addr) < 0) addr.sin_addr = loopback_addr; } else { addr.sin_addr = loopback_addr; } } else { addr.sin_addr = so->so_faddr; } addr.sin_port = so->so_fport; if(sendto(so->s, icmp_ping_msg, strlen(icmp_ping_msg), 0, (struct sockaddr *)&addr, sizeof(addr)) == -1) { DEBUG_MISC((dfd,\"icmp_input udp sendto tx errno = %d-%s\\n\", errno,strerror(errno))); icmp_error(m, ICMP_UNREACH,ICMP_UNREACH_NET, 0,strerror(errno)); udp_detach(so); } } /* if ip->ip_dst.s_addr == alias_addr.s_addr */ break; case ICMP_UNREACH: /* XXX? report error? close socket? 
*/ case ICMP_TIMXCEED: case ICMP_PARAMPROB: case ICMP_SOURCEQUENCH: case ICMP_TSTAMP: case ICMP_MASKREQ: case ICMP_REDIRECT: m_free(m); break; default: m_free(m); } /* swith */ end_error: /* m is m_free()'d xor put in a socket xor or given to ip_send */ return; }"} {"target": 0, "idx": 7502, "func": "static void virtio_net_tx_complete(NetClientState *nc, ssize_t len) { VirtIONet *n = qemu_get_nic_opaque(nc); VirtIONetQueue *q = virtio_net_get_subqueue(nc); VirtIODevice *vdev = VIRTIO_DEVICE(n); virtqueue_push(q->tx_vq, &q->async_tx.elem, 0); virtio_notify(vdev, q->tx_vq); q->async_tx.elem.out_num = 0; virtio_queue_set_notification(q->tx_vq, 1); virtio_net_flush_tx(q); }"} {"target": 0, "idx": 7511, "func": "static int decode_cell(Indeo3DecodeContext *ctx, AVCodecContext *avctx, Plane *plane, Cell *cell, const uint8_t *data_ptr, const uint8_t *last_ptr) { int x, mv_x, mv_y, mode, vq_index, prim_indx, second_indx; int zoom_fac; int offset, error = 0, swap_quads[2]; uint8_t code, *block, *ref_block = 0; const vqEntry *delta[2]; const uint8_t *data_start = data_ptr; /* get coding mode and VQ table index from the VQ descriptor byte */ code = *data_ptr++; mode = code >> 4; vq_index = code & 0xF; /* setup output and reference pointers */ offset = (cell->ypos << 2) * plane->pitch + (cell->xpos << 2); block = plane->pixels[ctx->buf_sel] + offset; if (!cell->mv_ptr) { /* use previous line as reference for INTRA cells */ ref_block = block - plane->pitch; } else if (mode >= 10) { /* for mode 10 and 11 INTER first copy the predicted cell into the current one */ /* so we don't need to do data copying for each RLE code later */ copy_cell(ctx, plane, cell); } else { /* set the pointer to the reference pixels for modes 0-4 INTER */ mv_y = cell->mv_ptr[0]; mv_x = cell->mv_ptr[1]; if ( mv_x + 4*cell->xpos < 0 || mv_y + 4*cell->ypos < 0 || mv_x + 4*cell->xpos + 4*cell->width > plane->width || mv_y + 4*cell->ypos + 4*cell->height > plane->height) { av_log(avctx, AV_LOG_ERROR, \"motion vector %d %d outside reference\\n\", mv_x + 4*cell->xpos, mv_y + 4*cell->ypos); return AVERROR_INVALIDDATA; } offset += mv_y * plane->pitch + mv_x; ref_block = plane->pixels[ctx->buf_sel ^ 1] + offset; } /* select VQ tables as follows: */ /* modes 0 and 3 use only the primary table for all lines in a block */ /* while modes 1 and 4 switch between primary and secondary tables on alternate lines */ if (mode == 1 || mode == 4) { code = ctx->alt_quant[vq_index]; prim_indx = (code >> 4) + ctx->cb_offset; second_indx = (code & 0xF) + ctx->cb_offset; } else { vq_index += ctx->cb_offset; prim_indx = second_indx = vq_index; } if (prim_indx >= 24 || second_indx >= 24) { av_log(avctx, AV_LOG_ERROR, \"Invalid VQ table indexes! Primary: %d, secondary: %d!\\n\", prim_indx, second_indx); return AVERROR_INVALIDDATA; } delta[0] = &vq_tab[second_indx]; delta[1] = &vq_tab[prim_indx]; swap_quads[0] = second_indx >= 16; swap_quads[1] = prim_indx >= 16; /* requantize the prediction if VQ index of this cell differs from VQ index */ /* of the predicted cell in order to avoid overflows. 
*/ if (vq_index >= 8 && ref_block) { for (x = 0; x < cell->width << 2; x++) ref_block[x] = requant_tab[vq_index & 7][ref_block[x] & 127]; } error = IV3_NOERR; switch (mode) { case 0: /*------------------ MODES 0 & 1 (4x4 block processing) --------------------*/ case 1: case 3: /*------------------ MODES 3 & 4 (4x8 block processing) --------------------*/ case 4: if (mode >= 3 && cell->mv_ptr) { av_log(avctx, AV_LOG_ERROR, \"Attempt to apply Mode 3/4 to an INTER cell!\\n\"); return AVERROR_INVALIDDATA; } zoom_fac = mode >= 3; error = decode_cell_data(cell, block, ref_block, plane->pitch, 0, zoom_fac, mode, delta, swap_quads, &data_ptr, last_ptr); break; case 10: /*-------------------- MODE 10 (8x8 block processing) ---------------------*/ case 11: /*----------------- MODE 11 (4x8 INTER block processing) ------------------*/ if (mode == 10 && !cell->mv_ptr) { /* MODE 10 INTRA processing */ error = decode_cell_data(cell, block, ref_block, plane->pitch, 1, 1, mode, delta, swap_quads, &data_ptr, last_ptr); } else { /* mode 10 and 11 INTER processing */ if (mode == 11 && !cell->mv_ptr) { av_log(avctx, AV_LOG_ERROR, \"Attempt to use Mode 11 for an INTRA cell!\\n\"); return AVERROR_INVALIDDATA; } zoom_fac = mode == 10; error = decode_cell_data(cell, block, ref_block, plane->pitch, zoom_fac, 1, mode, delta, swap_quads, &data_ptr, last_ptr); } break; default: av_log(avctx, AV_LOG_ERROR, \"Unsupported coding mode: %d\\n\", mode); return AVERROR_INVALIDDATA; }//switch mode switch (error) { case IV3_BAD_RLE: av_log(avctx, AV_LOG_ERROR, \"Mode %d: RLE code %X is not allowed at the current line\\n\", mode, data_ptr[-1]); return AVERROR_INVALIDDATA; case IV3_BAD_DATA: av_log(avctx, AV_LOG_ERROR, \"Mode %d: invalid VQ data\\n\", mode); return AVERROR_INVALIDDATA; case IV3_BAD_COUNTER: av_log(avctx, AV_LOG_ERROR, \"Mode %d: RLE-FB invalid counter: %d\\n\", mode, code); return AVERROR_INVALIDDATA; case IV3_UNSUPPORTED: av_log(avctx, AV_LOG_ERROR, \"Mode %d: unsupported RLE code: %X\\n\", mode, data_ptr[-1]); return AVERROR_INVALIDDATA; case IV3_OUT_OF_DATA: av_log(avctx, AV_LOG_ERROR, \"Mode %d: attempt to read past end of buffer\\n\", mode); return AVERROR_INVALIDDATA; } return data_ptr - data_start; /* report number of bytes consumed from the input buffer */ }"} {"target": 1, "idx": 7522, "func": "static int net_socket_mcast_init(NetClientState *peer, const char *model, const char *name, const char *host_str, const char *localaddr_str) { NetSocketState *s; int fd; struct sockaddr_in saddr; struct in_addr localaddr, *param_localaddr; if (parse_host_port(&saddr, host_str) < 0) return -1; if (localaddr_str != NULL) { if (inet_aton(localaddr_str, &localaddr) == 0) return -1; param_localaddr = &localaddr; } else { param_localaddr = NULL; } fd = net_socket_mcast_create(&saddr, param_localaddr); if (fd < 0) return -1; s = net_socket_fd_init(peer, model, name, fd, 0); if (!s) return -1; s->dgram_dst = saddr; snprintf(s->nc.info_str, sizeof(s->nc.info_str), \"socket: mcast=%s:%d\", inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); return 0; }"} {"target": 1, "idx": 7528, "func": "static void pl041_device_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); k->init = pl041_init; set_bit(DEVICE_CATEGORY_SOUND, dc->categories); dc->no_user = 1; dc->reset = pl041_device_reset; dc->vmsd = &vmstate_pl041; dc->props = pl041_device_properties; }"} {"target": 1, "idx": 7530, "func": "static void reduce_matrix(AudioMix *am, const double 
*matrix, int stride) { int i, o; memset(am->output_zero, 0, sizeof(am->output_zero)); memset(am->input_skip, 0, sizeof(am->input_skip)); memset(am->output_skip, 0, sizeof(am->output_skip)); /* exclude output channels if they can be zeroed instead of mixed */ for (o = 0; o < am->out_channels; o++) { int zero = 1; /* check if the output is always silent */ for (i = 0; i < am->in_channels; i++) { if (matrix[o * stride + i] != 0.0) { zero = 0; break; } } /* check if the corresponding input channel makes a contribution to any output channel */ if (o < am->in_channels) { for (i = 0; i < am->out_channels; i++) { if (matrix[i * stride + o] != 0.0) { zero = 0; break; } } } if (zero) { am->output_zero[o] = 1; am->out_matrix_channels--; } } if (am->out_matrix_channels == 0) { am->in_matrix_channels = 0; return; } /* skip input channels that contribute fully only to the corresponding output channel */ for (i = 0; i < FFMIN(am->in_channels, am->out_channels); i++) { int skip = 1; for (o = 0; o < am->out_channels; o++) { int i0; if ((o != i && matrix[o * stride + i] != 0.0) || (o == i && matrix[o * stride + i] != 1.0)) { skip = 0; break; } /* if the input contributes fully to the output, also check that no other inputs contribute to this output */ if (o == i) { for (i0 = 0; i0 < am->in_channels; i0++) { if (i0 != i && matrix[o * stride + i0] != 0.0) { skip = 0; break; } } } } if (skip) { am->input_skip[i] = 1; am->in_matrix_channels--; } } /* skip input channels that do not contribute to any output channel */ for (; i < am->in_channels; i++) { int contrib = 0; for (o = 0; o < am->out_channels; o++) { if (matrix[o * stride + i] != 0.0) { contrib = 1; break; } } if (!contrib) { am->input_skip[i] = 1; am->in_matrix_channels--; } } if (am->in_matrix_channels == 0) { am->out_matrix_channels = 0; return; } /* skip output channels that only get full contribution from the corresponding input channel */ for (o = 0; o < FFMIN(am->in_channels, am->out_channels); o++) { int skip = 1; int o0; for (i = 0; i < am->in_channels; i++) { if ((o != i && matrix[o * stride + i] != 0.0) || (o == i && matrix[o * stride + i] != 1.0)) { skip = 0; break; } } /* check if the corresponding input channel makes a contribution to any other output channel */ i = o; for (o0 = 0; o0 < am->out_channels; o0++) { if (o0 != i && matrix[o0 * stride + i] != 0.0) { skip = 0; break; } } if (skip) { am->output_skip[o] = 1; am->out_matrix_channels--; } } if (am->out_matrix_channels == 0) { am->in_matrix_channels = 0; return; } }"} {"target": 1, "idx": 7531, "func": "static SpiceChannelList *qmp_query_spice_channels(void) { SpiceChannelList *cur_item = NULL, *head = NULL; ChannelList *item; QTAILQ_FOREACH(item, &channel_list, link) { SpiceChannelList *chan; char host[NI_MAXHOST], port[NI_MAXSERV]; struct sockaddr *paddr; socklen_t plen; if (!(item->info->flags & SPICE_CHANNEL_EVENT_FLAG_ADDR_EXT)) { error_report(\"invalid channel event\"); return NULL; } chan = g_malloc0(sizeof(*chan)); chan->value = g_malloc0(sizeof(*chan->value)); chan->value->base = g_malloc0(sizeof(*chan->value->base)); paddr = (struct sockaddr *)&item->info->paddr_ext; plen = item->info->plen_ext; getnameinfo(paddr, plen, host, sizeof(host), port, sizeof(port), NI_NUMERICHOST | NI_NUMERICSERV); chan->value->base->host = g_strdup(host); chan->value->base->port = g_strdup(port); chan->value->base->family = inet_netfamily(paddr->sa_family); chan->value->connection_id = item->info->connection_id; chan->value->channel_type = item->info->type; chan->value->channel_id = 
item->info->id; chan->value->tls = item->info->flags & SPICE_CHANNEL_EVENT_FLAG_TLS; /* XXX: waiting for the qapi to support GSList */ if (!cur_item) { head = cur_item = chan; } else { cur_item->next = chan; cur_item = chan; } } return head; }"} {"target": 1, "idx": 7538, "func": "static int cd_read_sector(IDEState *s, int lba, uint8_t *buf, int sector_size) { int ret; switch(sector_size) { case 2048: block_acct_start(blk_get_stats(s->blk), &s->acct, 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); ret = blk_read(s->blk, (int64_t)lba << 2, buf, 4); block_acct_done(blk_get_stats(s->blk), &s->acct); break; case 2352: block_acct_start(blk_get_stats(s->blk), &s->acct, 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); ret = blk_read(s->blk, (int64_t)lba << 2, buf + 16, 4); block_acct_done(blk_get_stats(s->blk), &s->acct); if (ret < 0) return ret; cd_data_to_raw(buf, lba); break; default: ret = -EIO; break; } return ret; }"} {"target": 1, "idx": 7540, "func": "static int cow_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors) { BDRVCowState *s = bs->opaque; int ret, n; while (nb_sectors > 0) { if (cow_is_allocated(bs, sector_num, nb_sectors, &n)) { ret = bdrv_pread(bs->file, s->cow_sectors_offset + sector_num * 512, buf, n * 512); if (ret != n * 512) return -1; } else { if (bs->backing_hd) { /* read from the base image */ ret = bdrv_read(bs->backing_hd, sector_num, buf, n); if (ret < 0) return -1; } else { memset(buf, 0, n * 512); } } nb_sectors -= n; sector_num += n; buf += n * 512; } return 0; }"} {"target": 1, "idx": 7543, "func": "static int decode_slice_header(H264Context *h, H264Context *h0){ MpegEncContext * const s = &h->s; MpegEncContext * const s0 = &h0->s; unsigned int first_mb_in_slice; unsigned int pps_id; int num_ref_idx_active_override_flag; unsigned int slice_type, tmp, i, j; int default_ref_list_done = 0; int last_pic_structure; s->dropable= h->nal_ref_idc == 0; if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !h->nal_ref_idc){ s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab; }else{ s->me.qpel_put= s->dsp.put_h264_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_h264_qpel_pixels_tab; } first_mb_in_slice= get_ue_golomb(&s->gb); if((s->flags2 & CODEC_FLAG2_CHUNKS) && first_mb_in_slice == 0){ h0->current_slice = 0; if (!s0->first_field) s->current_picture_ptr= NULL; } slice_type= get_ue_golomb_31(&s->gb); if(slice_type > 9){ av_log(h->s.avctx, AV_LOG_ERROR, \"slice type too large (%d) at %d %d\\n\", h->slice_type, s->mb_x, s->mb_y); return -1; } if(slice_type > 4){ slice_type -= 5; h->slice_type_fixed=1; }else h->slice_type_fixed=0; slice_type= golomb_to_pict_type[ slice_type ]; if (slice_type == FF_I_TYPE || (h0->current_slice != 0 && slice_type == h0->last_slice_type) ) { default_ref_list_done = 1; } h->slice_type= slice_type; h->slice_type_nos= slice_type & 3; s->pict_type= h->slice_type; // to make a few old functions happy, it's wrong though if (s->pict_type == FF_B_TYPE && s0->last_picture_ptr == NULL) { av_log(h->s.avctx, AV_LOG_ERROR, \"B picture before any references, skipping\\n\"); return -1; } pps_id= get_ue_golomb(&s->gb); if(pps_id>=MAX_PPS_COUNT){ av_log(h->s.avctx, AV_LOG_ERROR, \"pps_id out of range\\n\"); return -1; } if(!h0->pps_buffers[pps_id]) { av_log(h->s.avctx, AV_LOG_ERROR, \"non-existing PPS referenced\\n\"); return -1; } h->pps= *h0->pps_buffers[pps_id]; if(!h0->sps_buffers[h->pps.sps_id]) { av_log(h->s.avctx, AV_LOG_ERROR, \"non-existing SPS referenced\\n\"); return -1; } h->sps = 
*h0->sps_buffers[h->pps.sps_id]; if(h == h0 && h->dequant_coeff_pps != pps_id){ h->dequant_coeff_pps = pps_id; init_dequant_tables(h); } s->mb_width= h->sps.mb_width; s->mb_height= h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag); h->b_stride= s->mb_width*4; h->b8_stride= s->mb_width*2; s->width = 16*s->mb_width - 2*FFMIN(h->sps.crop_right, 7); if(h->sps.frame_mbs_only_flag) s->height= 16*s->mb_height - 2*FFMIN(h->sps.crop_bottom, 7); else s->height= 16*s->mb_height - 4*FFMIN(h->sps.crop_bottom, 3); if (s->context_initialized && ( s->width != s->avctx->width || s->height != s->avctx->height)) { if(h != h0) return -1; // width / height changed during parallelized decoding free_tables(h); flush_dpb(s->avctx); MPV_common_end(s); } if (!s->context_initialized) { if(h != h0) return -1; // we cant (re-)initialize context during parallel decoding if (MPV_common_init(s) < 0) return -1; s->first_field = 0; init_scan_tables(h); alloc_tables(h); for(i = 1; i < s->avctx->thread_count; i++) { H264Context *c; c = h->thread_context[i] = av_malloc(sizeof(H264Context)); memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext)); memset(&c->s + 1, 0, sizeof(H264Context) - sizeof(MpegEncContext)); c->sps = h->sps; c->pps = h->pps; init_scan_tables(c); clone_tables(c, h); } for(i = 0; i < s->avctx->thread_count; i++) if(context_init(h->thread_context[i]) < 0) return -1; s->avctx->width = s->width; s->avctx->height = s->height; s->avctx->sample_aspect_ratio= h->sps.sar; if(!s->avctx->sample_aspect_ratio.den) s->avctx->sample_aspect_ratio.den = 1; if(h->sps.timing_info_present_flag){ s->avctx->time_base= (AVRational){h->sps.num_units_in_tick * 2, h->sps.time_scale}; if(h->x264_build > 0 && h->x264_build < 44) s->avctx->time_base.den *= 2; av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den, s->avctx->time_base.num, s->avctx->time_base.den, 1<<30); } } h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num); h->mb_mbaff = 0; h->mb_aff_frame = 0; last_pic_structure = s0->picture_structure; if(h->sps.frame_mbs_only_flag){ s->picture_structure= PICT_FRAME; }else{ if(get_bits1(&s->gb)) { //field_pic_flag s->picture_structure= PICT_TOP_FIELD + get_bits1(&s->gb); //bottom_field_flag } else { s->picture_structure= PICT_FRAME; h->mb_aff_frame = h->sps.mb_aff; } } h->mb_field_decoding_flag= s->picture_structure != PICT_FRAME; if(h0->current_slice == 0){ while(h->frame_num != h->prev_frame_num && h->frame_num != (h->prev_frame_num+1)%(1<<h->sps.log2_max_frame_num)){ av_log(NULL, AV_LOG_DEBUG, \"Frame num gap %d %d\\n\", h->frame_num, h->prev_frame_num); frame_start(h); h->prev_frame_num++; h->prev_frame_num %= 1<<h->sps.log2_max_frame_num; s->current_picture_ptr->frame_num= h->prev_frame_num; execute_ref_pic_marking(h, NULL, 0); } /* See if we have a decoded first field looking for a pair... */ if (s0->first_field) { assert(s0->current_picture_ptr); assert(s0->current_picture_ptr->data[0]); assert(s0->current_picture_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) { /* * Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ s0->current_picture_ptr = NULL; s0->first_field = FIELD_PICTURE; } else { if (h->nal_ref_idc && s0->current_picture_ptr->reference && s0->current_picture_ptr->frame_num != h->frame_num) { /* * This and previous field were reference, but had * different frame_nums. Consider this field first in * pair.
Throw away previous field except for reference * purposes. */ s0->first_field = 1; s0->current_picture_ptr = NULL; } else { /* Second field in complementary pair */ s0->first_field = 0; } } } else { /* Frame or first field in a potentially complementary pair */ assert(!s0->current_picture_ptr); s0->first_field = FIELD_PICTURE; } if((!FIELD_PICTURE || s0->first_field) && frame_start(h) < 0) { s0->first_field = 0; return -1; } } if(h != h0) clone_slice(h, h0); s->current_picture_ptr->frame_num= h->frame_num; //FIXME frame_num cleanup assert(s->mb_num == s->mb_width * s->mb_height); if(first_mb_in_slice << FIELD_OR_MBAFF_PICTURE >= s->mb_num || first_mb_in_slice >= s->mb_num){ av_log(h->s.avctx, AV_LOG_ERROR, \"first_mb_in_slice overflow\\n\"); return -1; } s->resync_mb_x = s->mb_x = first_mb_in_slice % s->mb_width; s->resync_mb_y = s->mb_y = (first_mb_in_slice / s->mb_width) << FIELD_OR_MBAFF_PICTURE; if (s->picture_structure == PICT_BOTTOM_FIELD) s->resync_mb_y = s->mb_y = s->mb_y + 1; assert(s->mb_y < s->mb_height); if(s->picture_structure==PICT_FRAME){ h->curr_pic_num= h->frame_num; h->max_pic_num= 1<< h->sps.log2_max_frame_num; }else{ h->curr_pic_num= 2*h->frame_num + 1; h->max_pic_num= 1<<(h->sps.log2_max_frame_num + 1); } if(h->nal_unit_type == NAL_IDR_SLICE){ get_ue_golomb(&s->gb); /* idr_pic_id */ } if(h->sps.poc_type==0){ h->poc_lsb= get_bits(&s->gb, h->sps.log2_max_poc_lsb); if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME){ h->delta_poc_bottom= get_se_golomb(&s->gb); } } if(h->sps.poc_type==1 && !h->sps.delta_pic_order_always_zero_flag){ h->delta_poc[0]= get_se_golomb(&s->gb); if(h->pps.pic_order_present==1 && s->picture_structure==PICT_FRAME) h->delta_poc[1]= get_se_golomb(&s->gb); } init_poc(h); if(h->pps.redundant_pic_cnt_present){ h->redundant_pic_count= get_ue_golomb(&s->gb); } //set defaults, might be overridden a few lines later h->ref_count[0]= h->pps.ref_count[0]; h->ref_count[1]= h->pps.ref_count[1]; if(h->slice_type_nos != FF_I_TYPE){ if(h->slice_type_nos == FF_B_TYPE){ h->direct_spatial_mv_pred= get_bits1(&s->gb); } num_ref_idx_active_override_flag= get_bits1(&s->gb); if(num_ref_idx_active_override_flag){ h->ref_count[0]= get_ue_golomb(&s->gb) + 1; if(h->slice_type_nos==FF_B_TYPE) h->ref_count[1]= get_ue_golomb(&s->gb) + 1; if(h->ref_count[0]-1 > 32-1 || h->ref_count[1]-1 > 32-1){ av_log(h->s.avctx, AV_LOG_ERROR, \"reference overflow\\n\"); h->ref_count[0]= h->ref_count[1]= 1; return -1; } } if(h->slice_type_nos == FF_B_TYPE) h->list_count= 2; else h->list_count= 1; }else h->list_count= 0; if(!default_ref_list_done){ fill_default_ref_list(h); } if(h->slice_type_nos!=FF_I_TYPE && decode_ref_pic_list_reordering(h) < 0) return -1; if(h->slice_type_nos!=FF_I_TYPE){ s->last_picture_ptr= &h->ref_list[0][0]; ff_copy_picture(&s->last_picture, s->last_picture_ptr); } if(h->slice_type_nos==FF_B_TYPE){ s->next_picture_ptr= &h->ref_list[1][0]; ff_copy_picture(&s->next_picture, s->next_picture_ptr); } if( (h->pps.weighted_pred && h->slice_type_nos == FF_P_TYPE ) || (h->pps.weighted_bipred_idc==1 && h->slice_type_nos== FF_B_TYPE ) ) pred_weight_table(h); else if(h->pps.weighted_bipred_idc==2 && h->slice_type_nos== FF_B_TYPE) implicit_weight_table(h); else { h->use_weight = 0; for (i = 0; i < 2; i++) { h->luma_weight_flag[i] = 0; h->chroma_weight_flag[i] = 0; } } if(h->nal_ref_idc) decode_ref_pic_marking(h0, &s->gb); if(FRAME_MBAFF) fill_mbaff_ref_list(h); if(h->slice_type_nos==FF_B_TYPE && !h->direct_spatial_mv_pred) direct_dist_scale_factor(h); 
direct_ref_list_init(h); if( h->slice_type_nos != FF_I_TYPE && h->pps.cabac ){ tmp = get_ue_golomb_31(&s->gb); if(tmp > 2){ av_log(s->avctx, AV_LOG_ERROR, \"cabac_init_idc overflow\\n\"); return -1; } h->cabac_init_idc= tmp; } h->last_qscale_diff = 0; tmp = h->pps.init_qp + get_se_golomb(&s->gb); if(tmp>51){ av_log(s->avctx, AV_LOG_ERROR, \"QP %u out of range\\n\", tmp); return -1; } s->qscale= tmp; h->chroma_qp[0] = get_chroma_qp(h, 0, s->qscale); h->chroma_qp[1] = get_chroma_qp(h, 1, s->qscale); //FIXME qscale / qp ... stuff if(h->slice_type == FF_SP_TYPE){ get_bits1(&s->gb); /* sp_for_switch_flag */ } if(h->slice_type==FF_SP_TYPE || h->slice_type == FF_SI_TYPE){ get_se_golomb(&s->gb); /* slice_qs_delta */ } h->deblocking_filter = 1; h->slice_alpha_c0_offset = 0; h->slice_beta_offset = 0; if( h->pps.deblocking_filter_parameters_present ) { tmp= get_ue_golomb_31(&s->gb); if(tmp > 2){ av_log(s->avctx, AV_LOG_ERROR, \"deblocking_filter_idc %u out of range\\n\", tmp); return -1; } h->deblocking_filter= tmp; if(h->deblocking_filter < 2) h->deblocking_filter^= 1; // 1<->0 if( h->deblocking_filter ) { h->slice_alpha_c0_offset = get_se_golomb(&s->gb) << 1; h->slice_beta_offset = get_se_golomb(&s->gb) << 1; } } if( s->avctx->skip_loop_filter >= AVDISCARD_ALL ||(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != FF_I_TYPE) ||(s->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == FF_B_TYPE) ||(s->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0)) h->deblocking_filter= 0; if(h->deblocking_filter == 1 && h0->max_contexts > 1) { if(s->avctx->flags2 & CODEC_FLAG2_FAST) { /* Cheat slightly for speed: Do not bother to deblock across slices. */ h->deblocking_filter = 2; } else { h0->max_contexts = 1; if(!h0->single_decode_warning) { av_log(s->avctx, AV_LOG_INFO, \"Cannot parallelize deblocking type 1, decoding such frames in sequential order\\n\"); h0->single_decode_warning = 1; } if(h != h0) return 1; // deblocking switched inside frame } } #if 0 //FMO if( h->pps.num_slice_groups > 1 && h->pps.mb_slice_group_map_type >= 3 && h->pps.mb_slice_group_map_type <= 5) slice_group_change_cycle= get_bits(&s->gb, ?); #endif h0->last_slice_type = slice_type; h->slice_num = ++h0->current_slice; if(h->slice_num >= MAX_SLICES){ av_log(s->avctx, AV_LOG_ERROR, \"Too many slices, increase MAX_SLICES and recompile\\n\"); } for(j=0; j<2; j++){ int *ref2frm= h->ref2frm[h->slice_num&(MAX_SLICES-1)][j]; ref2frm[0]= ref2frm[1]= -1; for(i=0; i<16; i++) ref2frm[i+2]= 4*h->ref_list[j][i].frame_num +(h->ref_list[j][i].reference&3); ref2frm[18+0]= ref2frm[18+1]= -1; for(i=16; i<48; i++) ref2frm[i+4]= 4*h->ref_list[j][i].frame_num +(h->ref_list[j][i].reference&3); } h->emu_edge_width= (s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16; h->emu_edge_height= (FRAME_MBAFF || FIELD_PICTURE) ? 0 : h->emu_edge_width; s->avctx->refs= h->sps.ref_frame_count; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ av_log(h->s.avctx, AV_LOG_DEBUG, \"slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\\n\", h->slice_num, (s->picture_structure==PICT_FRAME ? \"F\" : s->picture_structure==PICT_TOP_FIELD ? \"T\" : \"B\"), first_mb_in_slice, av_get_pict_type_char(h->slice_type), h->slice_type_fixed ? \" fix\" : \"\", h->nal_unit_type == NAL_IDR_SLICE ? 
\" IDR\" : \"\", pps_id, h->frame_num, s->current_picture_ptr->field_poc[0], s->current_picture_ptr->field_poc[1], h->ref_count[0], h->ref_count[1], s->qscale, h->deblocking_filter, h->slice_alpha_c0_offset/2, h->slice_beta_offset/2, h->use_weight, h->use_weight==1 && h->use_weight_chroma ? \"c\" : \"\", h->slice_type == FF_B_TYPE ? (h->direct_spatial_mv_pred ? \"SPAT\" : \"TEMP\") : \"\" ); } return 0; }"} {"target": 0, "idx": 7580, "func": "static HotplugHandler *spapr_get_hotpug_handler(MachineState *machine, DeviceState *dev) { if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) { return HOTPLUG_HANDLER(machine); } return NULL; }"} {"target": 0, "idx": 7610, "func": "static void yuvj444p_to_rgb24(AVPicture *dst, AVPicture *src, int width, int height) { uint8_t *y1_ptr, *cb_ptr, *cr_ptr, *d, *d1; int w, y, cb, cr, r_add, g_add, b_add; uint8_t *cm = cropTbl + MAX_NEG_CROP; unsigned int r, g, b; d = dst->data[0]; y1_ptr = src->data[0]; cb_ptr = src->data[1]; cr_ptr = src->data[2]; for(;height > 0; height --) { d1 = d; for(w = width; w > 0; w--) { YUV_TO_RGB1(cb_ptr[0], cr_ptr[0]); YUV_TO_RGB2(r, g, b, y1_ptr[0]); RGB_OUT(d1, r, g, b); d1 += BPP; y1_ptr++; cb_ptr++; cr_ptr++; } d += dst->linesize[0]; y1_ptr += src->linesize[0] - width; cb_ptr += src->linesize[1] - width; cr_ptr += src->linesize[2] - width; } }"} {"target": 1, "idx": 7665, "func": "void kvm_init_cpu_signals(CPUState *cpu) { int r; sigset_t set; struct sigaction sigact; memset(&sigact, 0, sizeof(sigact)); sigact.sa_handler = dummy_signal; sigaction(SIG_IPI, &sigact, NULL); pthread_sigmask(SIG_BLOCK, NULL, &set); #if defined KVM_HAVE_MCE_INJECTION sigdelset(&set, SIGBUS); pthread_sigmask(SIG_SETMASK, &set, NULL); #endif sigdelset(&set, SIG_IPI); r = kvm_set_signal_mask(cpu, &set); if (r) { fprintf(stderr, \"kvm_set_signal_mask: %s\\n\", strerror(-r)); exit(1); } }"} {"target": 1, "idx": 7678, "func": "static void vfio_disable_msix(VFIODevice *vdev) { msix_unset_vector_notifiers(&vdev->pdev); if (vdev->nr_vectors) { vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX); vfio_disable_msi_common(vdev); DPRINTF(\"%s(%04x:%02x:%02x.%x)\\n\", __func__, vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function);"} {"target": 1, "idx": 7689, "func": "static int rtcp_parse_packet(RTPDemuxContext *s, const unsigned char *buf, int len) { int payload_len; while (len >= 2) { switch (buf[1]) { case RTCP_SR: if (len < 16) { av_log(NULL, AV_LOG_ERROR, \"Invalid length for RTCP SR packet\\n\"); return AVERROR_INVALIDDATA; } payload_len = (AV_RB16(buf + 2) + 1) * 4; s->last_rtcp_ntp_time = AV_RB64(buf + 8); s->last_rtcp_timestamp = AV_RB32(buf + 16); if (s->first_rtcp_ntp_time == AV_NOPTS_VALUE) { s->first_rtcp_ntp_time = s->last_rtcp_ntp_time; if (!s->base_timestamp) s->base_timestamp = s->last_rtcp_timestamp; s->rtcp_ts_offset = s->last_rtcp_timestamp - s->base_timestamp; } buf += payload_len; len -= payload_len; break; case RTCP_BYE: return -RTCP_BYE; default: return -1; } } return -1; }"} {"target": 1, "idx": 7690, "func": "void virtqueue_map_sg(struct iovec *sg, hwaddr *addr, size_t num_sg, int is_write) { unsigned int i; hwaddr len; for (i = 0; i < num_sg; i++) { len = sg[i].iov_len; sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write); if (sg[i].iov_base == NULL || len != sg[i].iov_len) { error_report(\"virtio: trying to map MMIO memory\");"} {"target": 1, "idx": 7706, "func": "static void ehci_async_complete_packet(USBPort *port, USBPacket *packet) { 
EHCIQueue *q; EHCIState *s = port->opaque; uint32_t portsc = s->portsc[port->index]; if (portsc & PORTSC_POWNER) { USBPort *companion = s->companion_ports[port->index]; companion->ops->complete(companion, packet); return; } q = container_of(packet, EHCIQueue, packet); trace_usb_ehci_queue_action(q, \"wakeup\"); assert(q->async == EHCI_ASYNC_INFLIGHT); q->async = EHCI_ASYNC_FINISHED; q->usb_status = packet->len; }"} {"target": 1, "idx": 7709, "func": "static int mov_read_header(AVFormatContext *s) { MOVContext *mov = s->priv_data; AVIOContext *pb = s->pb; int j, err; MOVAtom atom = { AV_RL32(\"root\") }; int i; if (mov->decryption_key_len != 0 && mov->decryption_key_len != AES_CTR_KEY_SIZE) { av_log(s, AV_LOG_ERROR, \"Invalid decryption key len %d expected %d\\n\", mov->decryption_key_len, AES_CTR_KEY_SIZE); return AVERROR(EINVAL); } mov->fc = s; mov->trak_index = -1; /* .mov and .mp4 aren't streamable anyway (only progressive download if moov is before mdat) */ if (pb->seekable) atom.size = avio_size(pb); else atom.size = INT64_MAX; /* check MOV header */ do { if (mov->moov_retry) avio_seek(pb, 0, SEEK_SET); if ((err = mov_read_default(mov, pb, atom)) < 0) { av_log(s, AV_LOG_ERROR, \"error reading header\\n\"); mov_read_close(s); return err; } } while (pb->seekable && !mov->found_moov && !mov->moov_retry++); if (!mov->found_moov) { av_log(s, AV_LOG_ERROR, \"moov atom not found\\n\"); mov_read_close(s); return AVERROR_INVALIDDATA; } av_log(mov->fc, AV_LOG_TRACE, \"on_parse_exit_offset=%\"PRId64\"\\n\", avio_tell(pb)); if (pb->seekable) { if (mov->nb_chapter_tracks > 0 && !mov->ignore_chapters) mov_read_chapters(s); for (i = 0; i < s->nb_streams; i++) if (s->streams[i]->codecpar->codec_tag == AV_RL32(\"tmcd\")) { mov_read_timecode_track(s, s->streams[i]); } else if (s->streams[i]->codecpar->codec_tag == AV_RL32(\"rtmd\")) { mov_read_rtmd_track(s, s->streams[i]); } } /* copy timecode metadata from tmcd tracks to the related video streams */ for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; MOVStreamContext *sc = st->priv_data; if (sc->timecode_track > 0) { AVDictionaryEntry *tcr; int tmcd_st_id = -1; for (j = 0; j < s->nb_streams; j++) if (s->streams[j]->id == sc->timecode_track) tmcd_st_id = j; if (tmcd_st_id < 0 || tmcd_st_id == i) continue; tcr = av_dict_get(s->streams[tmcd_st_id]->metadata, \"timecode\", NULL, 0); if (tcr) av_dict_set(&st->metadata, \"timecode\", tcr->value, 0); } } export_orphan_timecode(s); for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; MOVStreamContext *sc = st->priv_data; fix_timescale(mov, sc); if(st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && st->codecpar->codec_id == AV_CODEC_ID_AAC) { st->skip_samples = sc->start_pad; } if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && sc->nb_frames_for_fps > 0 && sc->duration_for_fps > 0) av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, sc->time_scale*(int64_t)sc->nb_frames_for_fps, sc->duration_for_fps, INT_MAX); if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) { if (st->codecpar->width <= 0 || st->codecpar->height <= 0) { st->codecpar->width = sc->width; st->codecpar->height = sc->height; } if (st->codecpar->codec_id == AV_CODEC_ID_DVD_SUBTITLE) { if ((err = mov_rewrite_dvd_sub_extradata(st)) < 0) return err; } } if (mov->handbrake_version && mov->handbrake_version <= 1000000*0 + 1000*10 + 2 && // 0.10.2 st->codecpar->codec_id == AV_CODEC_ID_MP3 ) { av_log(s, AV_LOG_VERBOSE, \"Forcing full parsing for mp3 stream\\n\"); st->need_parsing = AVSTREAM_PARSE_FULL; } } if 
(mov->trex_data) { for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; MOVStreamContext *sc = st->priv_data; if (st->duration > 0) st->codecpar->bit_rate = sc->data_size * 8 * sc->time_scale / st->duration; } } if (mov->use_mfra_for > 0) { for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; MOVStreamContext *sc = st->priv_data; if (sc->duration_for_fps > 0) { st->codecpar->bit_rate = sc->data_size * 8 * sc->time_scale / sc->duration_for_fps; } } } for (i = 0; i < mov->bitrates_count && i < s->nb_streams; i++) { if (mov->bitrates[i]) { s->streams[i]->codecpar->bit_rate = mov->bitrates[i]; } } ff_rfps_calculate(s); for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; MOVStreamContext *sc = st->priv_data; switch (st->codecpar->codec_type) { case AVMEDIA_TYPE_AUDIO: err = ff_replaygain_export(st, s->metadata); if (err < 0) { mov_read_close(s); return err; } break; case AVMEDIA_TYPE_VIDEO: if (sc->display_matrix) { err = av_stream_add_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, (uint8_t*)sc->display_matrix, sizeof(int32_t) * 9); if (err < 0) return err; sc->display_matrix = NULL; } if (sc->stereo3d) { err = av_stream_add_side_data(st, AV_PKT_DATA_STEREO3D, (uint8_t *)sc->stereo3d, sizeof(*sc->stereo3d)); if (err < 0) return err; sc->stereo3d = NULL; } if (sc->spherical) { err = av_stream_add_side_data(st, AV_PKT_DATA_SPHERICAL, (uint8_t *)sc->spherical, sc->spherical_size); if (err < 0) return err; sc->spherical = NULL; } break; } } ff_configure_buffers_for_index(s, AV_TIME_BASE); return 0; }"} {"target": 1, "idx": 7712, "func": "static int thread_execute(AVCodecContext *avctx, action_func* func, void *arg, int *ret, int job_count, int job_size) { SliceThreadContext *c = avctx->internal->thread_ctx; if (!(avctx->active_thread_type&FF_THREAD_SLICE) || avctx->thread_count <= 1) return avcodec_default_execute(avctx, func, arg, ret, job_count, job_size); if (job_count <= 0) return 0; pthread_mutex_lock(&c->current_job_lock); c->current_job = avctx->thread_count; c->job_count = job_count; c->job_size = job_size; c->args = arg; c->func = func; c->rets = ret; c->current_execute++; pthread_cond_broadcast(&c->current_job_cond); thread_park_workers(c, avctx->thread_count); return 0; }"} {"target": 0, "idx": 7715, "func": "static void gain_compensate(COOKContext *q, cook_gains *gains_ptr, float* previous_buffer) { const float fc = q->pow2tab[gains_ptr->previous[0] + 63]; float *buffer = q->mono_mdct_output; int i; /* Overlap with the previous block. */ for(i=0 ; isamples_per_channel ; i++) { buffer[i] *= fc; buffer[i] += previous_buffer[i]; } /* Apply gain profile */ for (i = 0; i < 8; i++) { if (gains_ptr->now[i] || gains_ptr->now[i + 1]) interpolate(q, &buffer[q->gain_size_factor * i], gains_ptr->now[i], gains_ptr->now[i + 1]); } /* Save away the current to be previous block. 
*/ memcpy(previous_buffer, buffer+q->samples_per_channel, sizeof(float)*q->samples_per_channel); }"} {"target": 1, "idx": 7719, "func": "int ff_mov_init_hinting(AVFormatContext *s, int index, int src_index) { MOVMuxContext *mov = s->priv_data; MOVTrack *track = &mov->tracks[index]; MOVTrack *src_track = &mov->tracks[src_index]; AVStream *src_st = s->streams[src_index]; int ret = AVERROR(ENOMEM); AVOutputFormat *rtp_format = av_guess_format(\"rtp\", NULL, NULL); track->tag = MKTAG('r','t','p',' '); track->src_track = src_index; if (!rtp_format) { ret = AVERROR(ENOENT); goto fail; } track->enc = avcodec_alloc_context(); if (!track->enc) goto fail; track->enc->codec_type = AVMEDIA_TYPE_DATA; track->enc->codec_tag = track->tag; track->rtp_ctx = avformat_alloc_context(); if (!track->rtp_ctx) goto fail; track->rtp_ctx->oformat = rtp_format; if (!av_new_stream(track->rtp_ctx, 0)) goto fail; /* Copy stream parameters */ track->rtp_ctx->streams[0]->sample_aspect_ratio = src_st->sample_aspect_ratio; /* Remove the allocated codec context, link to the original one * instead, to give the rtp muxer access to codec parameters. */ av_free(track->rtp_ctx->streams[0]->codec); track->rtp_ctx->streams[0]->codec = src_st->codec; if ((ret = url_open_dyn_packet_buf(&track->rtp_ctx->pb, RTP_MAX_PACKET_SIZE)) < 0) goto fail; ret = av_write_header(track->rtp_ctx); if (ret) goto fail; /* Copy the RTP AVStream timebase back to the hint AVStream */ track->timescale = track->rtp_ctx->streams[0]->time_base.den; /* Mark the hinted track that packets written to it should be * sent to this track for hinting. */ src_track->hint_track = index; return 0; fail: av_log(s, AV_LOG_WARNING, \"Unable to initialize hinting of stream %d\\n\", src_index); if (track->rtp_ctx && track->rtp_ctx->pb) { uint8_t *buf; url_close_dyn_buf(track->rtp_ctx->pb, &buf); av_free(buf); } if (track->rtp_ctx && track->rtp_ctx->streams[0]) { av_metadata_free(&track->rtp_ctx->streams[0]->metadata); av_free(track->rtp_ctx->streams[0]); } if (track->rtp_ctx) { av_metadata_free(&track->rtp_ctx->metadata); av_free(track->rtp_ctx->priv_data); av_freep(&track->rtp_ctx); } av_freep(&track->enc); /* Set a default timescale, to avoid crashes in dump_format */ track->timescale = 90000; return ret; }"} {"target": 1, "idx": 7735, "func": "static inline int l3_unscale(int value, int exponent) { unsigned int m; int e; e = table_4_3_exp [4 * value + (exponent & 3)]; m = table_4_3_value[4 * value + (exponent & 3)]; e -= exponent >> 2; #ifdef DEBUG if(e < 1) av_log(NULL, AV_LOG_WARNING, \"l3_unscale: e is %d\\n\", e); #endif if (e > (SUINT)31) return 0; m = (m + (1 << (e - 1))) >> e; return m; }"} {"target": 1, "idx": 7745, "func": "static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table) { BDRVQcow2State *s = bs->opaque; uint64_t old_l2_offset; uint64_t *l2_table = NULL; int64_t l2_offset; int ret; old_l2_offset = s->l1_table[l1_index]; trace_qcow2_l2_allocate(bs, l1_index); /* allocate a new l2 entry */ l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t)); if (l2_offset < 0) { ret = l2_offset; ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { /* allocate a new entry in the l2 cache */ trace_qcow2_l2_allocate_get_empty(bs, l1_index); ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table); if (ret < 0) { l2_table = *table; if ((old_l2_offset & L1E_OFFSET_MASK) == 0) { /* if there was no old l2 table, clear the new table */ memset(l2_table, 0, s->l2_size * sizeof(uint64_t)); } else { uint64_t* 
old_table; /* if there was an old l2 table, read it from the disk */ BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ); ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_offset & L1E_OFFSET_MASK, (void**) &old_table); if (ret < 0) { memcpy(l2_table, old_table, s->cluster_size); qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table); /* write the l2 table to the file */ BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE); trace_qcow2_l2_allocate_write_l2(bs, l1_index); qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table); ret = qcow2_cache_flush(bs, s->l2_table_cache); if (ret < 0) { /* update the L1 entry */ trace_qcow2_l2_allocate_write_l1(bs, l1_index); s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED; ret = qcow2_write_l1_entry(bs, l1_index); if (ret < 0) { *table = l2_table; trace_qcow2_l2_allocate_done(bs, l1_index, 0); return 0; fail: trace_qcow2_l2_allocate_done(bs, l1_index, ret); if (l2_table != NULL) { qcow2_cache_put(bs, s->l2_table_cache, (void**) table); s->l1_table[l1_index] = old_l2_offset; if (l2_offset > 0) { qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t), QCOW2_DISCARD_ALWAYS); return ret;"} {"target": 1, "idx": 7770, "func": "void get_tmp_filename(char *filename, int size) { char temp_dir[MAX_PATH]; GetTempPath(MAX_PATH, temp_dir); GetTempFileName(temp_dir, \"qem\", 0, filename); }"} {"target": 1, "idx": 7772, "func": "static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p) { size_t x, y; uint8_t *buffer = av_malloc(s->image_linesize * s->height); if (!buffer) return AVERROR(ENOMEM); if (s->blend_op == APNG_BLEND_OP_OVER && avctx->pix_fmt != AV_PIX_FMT_RGBA && avctx->pix_fmt != AV_PIX_FMT_GRAY8A && avctx->pix_fmt != AV_PIX_FMT_PAL8) { avpriv_request_sample(avctx, \"Blending with pixel format %s\", av_get_pix_fmt_name(avctx->pix_fmt)); return AVERROR_PATCHWELCOME; } // Do the disposal operation specified by the last frame on the frame if (s->last_dispose_op != APNG_DISPOSE_OP_PREVIOUS) { ff_thread_await_progress(&s->last_picture, INT_MAX, 0); memcpy(buffer, s->last_picture.f->data[0], s->image_linesize * s->height); if (s->last_dispose_op == APNG_DISPOSE_OP_BACKGROUND) for (y = s->last_y_offset; y < s->last_y_offset + s->last_h; ++y) memset(buffer + s->image_linesize * y + s->bpp * s->last_x_offset, 0, s->bpp * s->last_w); memcpy(s->previous_picture.f->data[0], buffer, s->image_linesize * s->height); ff_thread_report_progress(&s->previous_picture, INT_MAX, 0); } else { ff_thread_await_progress(&s->previous_picture, INT_MAX, 0); memcpy(buffer, s->previous_picture.f->data[0], s->image_linesize * s->height); } // Perform blending if (s->blend_op == APNG_BLEND_OP_SOURCE) { for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) { size_t row_start = s->image_linesize * y + s->bpp * s->x_offset; memcpy(buffer + row_start, p->data[0] + row_start, s->bpp * s->cur_w); } } else { // APNG_BLEND_OP_OVER for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) { uint8_t *foreground = p->data[0] + s->image_linesize * y + s->bpp * s->x_offset; uint8_t *background = buffer + s->image_linesize * y + s->bpp * s->x_offset; for (x = s->x_offset; x < s->x_offset + s->cur_w; ++x, foreground += s->bpp, background += s->bpp) { size_t b; uint8_t foreground_alpha, background_alpha, output_alpha; uint8_t output[4]; // Since we might be blending alpha onto alpha, we use the following equations: // output_alpha = foreground_alpha + (1 - foreground_alpha) * background_alpha // output = (foreground_alpha * foreground + (1 - foreground_alpha) * 
background_alpha * background) / output_alpha switch (avctx->pix_fmt) { case AV_PIX_FMT_RGBA: foreground_alpha = foreground[3]; background_alpha = background[3]; break; case AV_PIX_FMT_GRAY8A: foreground_alpha = foreground[1]; background_alpha = background[1]; break; case AV_PIX_FMT_PAL8: foreground_alpha = s->palette[foreground[0]] >> 24; background_alpha = s->palette[background[0]] >> 24; break; } if (foreground_alpha == 0) continue; if (foreground_alpha == 255) { memcpy(background, foreground, s->bpp); continue; } if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { // TODO: Alpha blending with PAL8 will likely need the entire image converted over to RGBA first avpriv_request_sample(avctx, \"Alpha blending palette samples\"); background[0] = foreground[0]; continue; } output_alpha = foreground_alpha + FAST_DIV255((255 - foreground_alpha) * background_alpha); for (b = 0; b < s->bpp - 1; ++b) { if (output_alpha == 0) { output[b] = 0; } else if (background_alpha == 255) { output[b] = FAST_DIV255(foreground_alpha * foreground[b] + (255 - foreground_alpha) * background[b]); } else { output[b] = (255 * foreground_alpha * foreground[b] + (255 - foreground_alpha) * background_alpha * background[b]) / (255 * output_alpha); } } output[b] = output_alpha; memcpy(background, output, s->bpp); } } } // Copy blended buffer into the frame and free memcpy(p->data[0], buffer, s->image_linesize * s->height); av_free(buffer); return 0; }"} {"target": 0, "idx": 7786, "func": "void microblaze_load_kernel(MicroBlazeCPU *cpu, target_phys_addr_t ddr_base, uint32_t ramsize, const char *dtb_filename, void (*machine_cpu_reset)(MicroBlazeCPU *)) { QemuOpts *machine_opts; const char *kernel_filename = NULL; const char *kernel_cmdline = NULL; machine_opts = qemu_opts_find(qemu_find_opts(\"machine\"), 0); if (machine_opts) { const char *dtb_arg; kernel_filename = qemu_opt_get(machine_opts, \"kernel\"); kernel_cmdline = qemu_opt_get(machine_opts, \"append\"); dtb_arg = qemu_opt_get(machine_opts, \"dtb\"); if (dtb_arg) { /* Preference a -dtb argument */ dtb_filename = dtb_arg; } else { /* default to pcbios dtb as passed by machine_init */ dtb_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_filename); } } boot_info.machine_cpu_reset = machine_cpu_reset; qemu_register_reset(main_cpu_reset, cpu); if (kernel_filename) { int kernel_size; uint64_t entry, low, high; uint32_t base32; int big_endian = 0; #ifdef TARGET_WORDS_BIGENDIAN big_endian = 1; #endif /* Boots a kernel elf binary. */ kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, &low, &high, big_endian, ELF_MACHINE, 0); base32 = entry; if (base32 == 0xc0000000) { kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, &entry, NULL, NULL, big_endian, ELF_MACHINE, 0); } /* Always boot into physical ram. */ boot_info.bootstrap_pc = ddr_base + (entry & 0x0fffffff); /* If it wasn't an ELF image, try an u-boot image. */ if (kernel_size < 0) { target_phys_addr_t uentry, loadaddr; kernel_size = load_uimage(kernel_filename, &uentry, &loadaddr, 0); boot_info.bootstrap_pc = uentry; high = (loadaddr + kernel_size + 3) & ~3; } /* Not an ELF image nor an u-boot image, try a RAW image. */ if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, ddr_base, ram_size); boot_info.bootstrap_pc = ddr_base; high = (ddr_base + kernel_size + 3) & ~3; } boot_info.cmdline = high + 4096; if (kernel_cmdline && strlen(kernel_cmdline)) { pstrcpy_targphys(\"cmdline\", boot_info.cmdline, 256, kernel_cmdline); } /* Provide a device-tree. 
*/ boot_info.fdt = boot_info.cmdline + 4096; microblaze_load_dtb(boot_info.fdt, ram_size, kernel_cmdline, dtb_filename); } }"} {"target": 0, "idx": 7791, "func": "void qemu_clock_enable(QEMUClockType type, bool enabled) { QEMUClock *clock = qemu_clock_ptr(type); QEMUTimerList *tl; bool old = clock->enabled; clock->enabled = enabled; if (enabled && !old) { qemu_clock_notify(type); } else if (!enabled && old) { QLIST_FOREACH(tl, &clock->timerlists, list) { qemu_event_wait(&tl->timers_done_ev); } } }"} {"target": 0, "idx": 7792, "func": "static uint64_t grlib_gptimer_read(void *opaque, target_phys_addr_t addr, unsigned size) { GPTimerUnit *unit = opaque; target_phys_addr_t timer_addr; int id; uint32_t value = 0; addr &= 0xff; /* Unit registers */ switch (addr) { case SCALER_OFFSET: trace_grlib_gptimer_readl(-1, addr, unit->scaler); return unit->scaler; case SCALER_RELOAD_OFFSET: trace_grlib_gptimer_readl(-1, addr, unit->reload); return unit->reload; case CONFIG_OFFSET: trace_grlib_gptimer_readl(-1, addr, unit->config); return unit->config; default: break; } timer_addr = (addr % TIMER_BASE); id = (addr - TIMER_BASE) / TIMER_BASE; if (id >= 0 && id < unit->nr_timers) { /* GPTimer registers */ switch (timer_addr) { case COUNTER_OFFSET: value = ptimer_get_count(unit->timers[id].ptimer); trace_grlib_gptimer_readl(id, addr, value); return value; case COUNTER_RELOAD_OFFSET: value = unit->timers[id].reload; trace_grlib_gptimer_readl(id, addr, value); return value; case CONFIG_OFFSET: trace_grlib_gptimer_readl(id, addr, unit->timers[id].config); return unit->timers[id].config; default: break; } } trace_grlib_gptimer_readl(-1, addr, 0); return 0; }"} {"target": 0, "idx": 7797, "func": "static int get_cpsr(QEMUFile *f, void *opaque, size_t size, VMStateField *field) { ARMCPU *cpu = opaque; CPUARMState *env = &cpu->env; uint32_t val = qemu_get_be32(f); if (arm_feature(env, ARM_FEATURE_M)) { if (val & XPSR_EXCP) { /* This is a CPSR format value from an older QEMU. (We can tell * because values transferred in XPSR format always have zero * for the EXCP field, and CPSR format will always have bit 4 * set in CPSR_M.) Rearrange it into XPSR format. The significant * differences are that the T bit is not in the same place, the * primask/faultmask info may be in the CPSR I and F bits, and * we do not want the mode bits. */ uint32_t newval = val; newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE); if (val & CPSR_T) { newval |= XPSR_T; } /* If the I or F bits are set then this is a migration from * an old QEMU which still stored the M profile FAULTMASK * and PRIMASK in env->daif. For a new QEMU, the data is * transferred using the vmstate_m_faultmask_primask subsection. */ if (val & CPSR_F) { env->v7m.faultmask = 1; } if (val & CPSR_I) { env->v7m.primask = 1; } val = newval; } /* Ignore the low bits, they are handled by vmstate_m. 
*/ xpsr_write(env, val, ~XPSR_EXCP); return 0; } env->aarch64 = ((val & PSTATE_nRW) == 0); if (is_a64(env)) { pstate_write(env, val); return 0; } cpsr_write(env, val, 0xffffffff, CPSRWriteRaw); return 0; }"} {"target": 0, "idx": 7817, "func": "static void pcnet_receive(void *opaque, const uint8_t *buf, int size) { PCNetState *s = opaque; int is_padr = 0, is_bcast = 0, is_ladr = 0; uint8_t buf1[60]; if (CSR_DRX(s) || CSR_STOP(s) || CSR_SPND(s) || !size) return; #ifdef PCNET_DEBUG printf(\"pcnet_receive size=%d\\n\", size); #endif /* if too small buffer, then expand it */ if (size < MIN_BUF_SIZE) { memcpy(buf1, buf, size); memset(buf1 + size, 0, MIN_BUF_SIZE - size); buf = buf1; size = MIN_BUF_SIZE; } if (CSR_PROM(s) || (is_padr=padr_match(s, buf, size)) || (is_bcast=padr_bcast(s, buf, size)) || (is_ladr=ladr_match(s, buf, size))) { pcnet_rdte_poll(s); if (!(CSR_CRST(s) & 0x8000) && s->rdra) { struct pcnet_RMD rmd; int rcvrc = CSR_RCVRC(s)-1,i; target_phys_addr_t nrda; for (i = CSR_RCVRL(s)-1; i > 0; i--, rcvrc--) { if (rcvrc <= 1) rcvrc = CSR_RCVRL(s); nrda = s->rdra + (CSR_RCVRL(s) - rcvrc) * (BCR_SWSTYLE(s) ? 16 : 8 ); RMDLOAD(&rmd, PHYSADDR(s,nrda)); if (GET_FIELD(rmd.status, RMDS, OWN)) { #ifdef PCNET_DEBUG_RMD printf(\"pcnet - scan buffer: RCVRC=%d PREV_RCVRC=%d\\n\", rcvrc, CSR_RCVRC(s)); #endif CSR_RCVRC(s) = rcvrc; pcnet_rdte_poll(s); break; } } } if (!(CSR_CRST(s) & 0x8000)) { #ifdef PCNET_DEBUG_RMD printf(\"pcnet - no buffer: RCVRC=%d\\n\", CSR_RCVRC(s)); #endif s->csr[0] |= 0x1000; /* Set MISS flag */ CSR_MISSC(s)++; } else { uint8_t *src = &s->buffer[8]; target_phys_addr_t crda = CSR_CRDA(s); struct pcnet_RMD rmd; int pktcount = 0; memcpy(src, buf, size); #if 1 /* no need to compute the CRC */ src[size] = 0; src[size + 1] = 0; src[size + 2] = 0; src[size + 3] = 0; size += 4; #else /* XXX: avoid CRC generation */ if (!CSR_ASTRP_RCV(s)) { uint32_t fcs = ~0; uint8_t *p = src; while (size < 46) { src[size++] = 0; } while (p != &src[size]) { CRC(fcs, *p++); } ((uint32_t *)&src[size])[0] = htonl(fcs); size += 4; /* FCS at end of packet */ } else size += 4; #endif #ifdef PCNET_DEBUG_MATCH PRINT_PKTHDR(buf); #endif RMDLOAD(&rmd, PHYSADDR(s,crda)); /*if (!CSR_LAPPEN(s))*/ SET_FIELD(&rmd.status, RMDS, STP, 1); #define PCNET_RECV_STORE() do { \\ int count = MIN(4096 - GET_FIELD(rmd.buf_length, RMDL, BCNT),size); \\ target_phys_addr_t rbadr = PHYSADDR(s, rmd.rbadr); \\ s->phys_mem_write(s->dma_opaque, rbadr, src, count, CSR_BSWP(s)); \\ src += count; size -= count; \\ SET_FIELD(&rmd.msg_length, RMDM, MCNT, count); \\ SET_FIELD(&rmd.status, RMDS, OWN, 0); \\ RMDSTORE(&rmd, PHYSADDR(s,crda)); \\ pktcount++; \\ } while (0) PCNET_RECV_STORE(); if ((size > 0) && CSR_NRDA(s)) { target_phys_addr_t nrda = CSR_NRDA(s); RMDLOAD(&rmd, PHYSADDR(s,nrda)); if (GET_FIELD(rmd.status, RMDS, OWN)) { crda = nrda; PCNET_RECV_STORE(); if ((size > 0) && (nrda=CSR_NNRD(s))) { RMDLOAD(&rmd, PHYSADDR(s,nrda)); if (GET_FIELD(rmd.status, RMDS, OWN)) { crda = nrda; PCNET_RECV_STORE(); } } } } #undef PCNET_RECV_STORE RMDLOAD(&rmd, PHYSADDR(s,crda)); if (size == 0) { SET_FIELD(&rmd.status, RMDS, ENP, 1); SET_FIELD(&rmd.status, RMDS, PAM, !CSR_PROM(s) && is_padr); SET_FIELD(&rmd.status, RMDS, LFAM, !CSR_PROM(s) && is_ladr); SET_FIELD(&rmd.status, RMDS, BAM, !CSR_PROM(s) && is_bcast); } else { SET_FIELD(&rmd.status, RMDS, OFLO, 1); SET_FIELD(&rmd.status, RMDS, BUFF, 1); SET_FIELD(&rmd.status, RMDS, ERR, 1); } RMDSTORE(&rmd, PHYSADDR(s,crda)); s->csr[0] |= 0x0400; #ifdef PCNET_DEBUG printf(\"RCVRC=%d CRDA=0x%08x 
BLKS=%d\\n\", CSR_RCVRC(s), PHYSADDR(s,CSR_CRDA(s)), pktcount); #endif #ifdef PCNET_DEBUG_RMD PRINT_RMD(&rmd); #endif while (pktcount--) { if (CSR_RCVRC(s) <= 1) CSR_RCVRC(s) = CSR_RCVRL(s); else CSR_RCVRC(s)--; } pcnet_rdte_poll(s); } } pcnet_poll(s); pcnet_update_irq(s); }"} {"target": 0, "idx": 7819, "func": "static uint32_t m5206_mbar_readl(void *opaque, target_phys_addr_t offset) { m5206_mbar_state *s = (m5206_mbar_state *)opaque; int width; offset &= 0x3ff; if (offset >= 0x200) { hw_error(\"Bad MBAR read offset 0x%x\", (int)offset); } width = m5206_mbar_width[offset >> 2]; if (width < 4) { uint32_t val; val = m5206_mbar_readw(opaque, offset) << 16; val |= m5206_mbar_readw(opaque, offset + 2); return val; } return m5206_mbar_read(s, offset, 4); }"} {"target": 0, "idx": 7826, "func": "uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset, int compressed_size) { BDRVQcowState *s = bs->opaque; int l2_index, ret; uint64_t *l2_table; int64_t cluster_offset; int nb_csectors; ret = get_cluster_table(bs, offset, &l2_table, &l2_index); if (ret < 0) { return 0; } /* Compression can't overwrite anything. Fail if the cluster was already * allocated. */ cluster_offset = be64_to_cpu(l2_table[l2_index]); if (cluster_offset & L2E_OFFSET_MASK) { qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); return 0; } cluster_offset = qcow2_alloc_bytes(bs, compressed_size); if (cluster_offset < 0) { qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); return 0; } nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) - (cluster_offset >> 9); cluster_offset |= QCOW_OFLAG_COMPRESSED | ((uint64_t)nb_csectors << s->csize_shift); /* update L2 table */ /* compressed clusters never have the copied flag */ BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED); qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table); l2_table[l2_index] = cpu_to_be64(cluster_offset); ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); if (ret < 0) { return 0; } return cluster_offset; }"} {"target": 0, "idx": 7837, "func": "void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { int index; sPAPRDRConnector *drc; sPAPRDRConnectorClass *drck; Error *local_err = NULL; CPUCore *cc = CPU_CORE(dev); int smt = kvmppc_smt_threads(); if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) { error_setg(errp, \"Unable to find CPU core with core-id: %d\", cc->core_id); return; } if (index == 0) { error_setg(errp, \"Boot CPU core may not be unplugged\"); return; } drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt); g_assert(drc); drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); drck->detach(drc, dev, spapr_core_release, NULL, &local_err); if (local_err) { error_propagate(errp, local_err); return; } spapr_hotplug_req_remove_by_index(drc); }"} {"target": 0, "idx": 7858, "func": "void commit_active_start(BlockDriverState *bs, BlockDriverState *base, int64_t speed, BlockdevOnError on_error, BlockDriverCompletionFunc *cb, void *opaque, Error **errp) { int64_t length, base_length; int orig_base_flags; int ret; Error *local_err = NULL; orig_base_flags = bdrv_get_flags(base); if (bdrv_reopen(base, bs->open_flags, errp)) { return; } length = bdrv_getlength(bs); if (length < 0) { error_setg_errno(errp, -length, \"Unable to determine length of %s\", bs->filename); goto error_restore_flags; } base_length = bdrv_getlength(base); if (base_length < 0) { error_setg_errno(errp, -base_length, \"Unable to determine length of %s\", 
base->filename); goto error_restore_flags; } if (length > base_length) { ret = bdrv_truncate(base, length); if (ret < 0) { error_setg_errno(errp, -ret, \"Top image %s is larger than base image %s, and \" \"resize of base image failed\", bs->filename, base->filename); goto error_restore_flags; } } bdrv_ref(base); mirror_start_job(bs, base, speed, 0, 0, on_error, on_error, cb, opaque, &local_err, &commit_active_job_driver, false, base); if (error_is_set(&local_err)) { error_propagate(errp, local_err); goto error_restore_flags; } return; error_restore_flags: /* ignore error and errp for bdrv_reopen, because we want to propagate * the original error */ bdrv_reopen(base, orig_base_flags, NULL); return; }"} {"target": 0, "idx": 7877, "func": "static void term_up_char(void) { int idx; if (term_hist_entry == 0) return; if (term_hist_entry == -1) { /* Find latest entry */ for (idx = 0; idx < TERM_MAX_CMDS; idx++) { if (term_history[idx] == NULL) break; } term_hist_entry = idx; } term_hist_entry--; if (term_hist_entry >= 0) { pstrcpy(term_cmd_buf, sizeof(term_cmd_buf), term_history[term_hist_entry]); term_cmd_buf_index = term_cmd_buf_size = strlen(term_cmd_buf); } }"} {"target": 1, "idx": 7894, "func": "static int kalman_smoothen(WMAVoiceContext *s, int pitch, const float *in, float *out, int size) { int n; float optimal_gain = 0, dot; const float *ptr = &in[-FFMAX(s->min_pitch_val, pitch - 3)], *end = &in[-FFMIN(s->max_pitch_val, pitch + 3)], *best_hist_ptr; /* find best fitting point in history */ do { dot = ff_scalarproduct_float_c(in, ptr, size); if (dot > optimal_gain) { optimal_gain = dot; best_hist_ptr = ptr; } } while (--ptr >= end); if (optimal_gain <= 0) return -1; dot = ff_scalarproduct_float_c(best_hist_ptr, best_hist_ptr, size); if (dot <= 0) // would be 1.0 return -1; if (optimal_gain <= dot) { dot = dot / (dot + 0.6 * optimal_gain); // 0.625-1.000 } else dot = 0.625; /* actual smoothing */ for (n = 0; n < size; n++) out[n] = best_hist_ptr[n] + dot * (in[n] - best_hist_ptr[n]); return 0; }"} {"target": 1, "idx": 7903, "func": "static inline void gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, int search_pc) { DisasContext dc1, *dc = &dc1; CPUBreakpoint *bp; uint16_t *gen_opc_end; int j, lj; target_ulong pc_start; uint32_t next_page_start; int num_insns; int max_insns; /* generate intermediate code */ pc_start = tb->pc; dc->tb = tb; gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; dc->is_jmp = DISAS_NEXT; dc->pc = pc_start; dc->singlestep_enabled = env->singlestep_enabled; dc->condjmp = 0; dc->thumb = ARM_TBFLAG_THUMB(tb->flags); dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1; dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4; #if !defined(CONFIG_USER_ONLY) dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0); #endif dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags); dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags); dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags); cpu_F0s = tcg_temp_new_i32(); cpu_F1s = tcg_temp_new_i32(); cpu_F0d = tcg_temp_new_i64(); cpu_F1d = tcg_temp_new_i64(); cpu_V0 = cpu_F0d; cpu_V1 = cpu_F1d; /* FIXME: cpu_M0 can probably be the same as cpu_V0. 
*/ cpu_M0 = tcg_temp_new_i64(); next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; lj = -1; num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) max_insns = CF_COUNT_MASK; gen_icount_start(); tcg_clear_temp_count(); /* A note on handling of the condexec (IT) bits: * * We want to avoid the overhead of having to write the updated condexec * bits back to the CPUState for every instruction in an IT block. So: * (1) if the condexec bits are not already zero then we write * zero back into the CPUState now. This avoids complications trying * to do it at the end of the block. (For example if we don't do this * it's hard to identify whether we can safely skip writing condexec * at the end of the TB, which we definitely want to do for the case * where a TB doesn't do anything with the IT state at all.) * (2) if we are going to leave the TB then we call gen_set_condexec() * which will write the correct value into CPUState if zero is wrong. * This is done both for leaving the TB at the end, and for leaving * it because of an exception we know will happen, which is done in * gen_exception_insn(). The latter is necessary because we need to * leave the TB with the PC/IT state just prior to execution of the * instruction which caused the exception. * (3) if we leave the TB unexpectedly (eg a data abort on a load) * then the CPUState will be wrong and we need to reset it. * This is handled in the same way as restoration of the * PC in these situations: we will be called again with search_pc=1 * and generate a mapping of the condexec bits for each PC in * gen_opc_condexec_bits[]. gen_pc_load[] then uses this to restore * the condexec bits. * * Note that there are no instructions which can read the condexec * bits, and none which can write non-static values to them, so * we don't need to care about whether CPUState is correct in the * middle of a TB. */ /* Reset the conditional execution bits immediately. This avoids complications trying to do it at the end of the block. */ if (dc->condexec_mask || dc->condexec_cond) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); store_cpu_field(tmp, condexec_bits); do { #ifdef CONFIG_USER_ONLY /* Intercept jump to the magic kernel page. */ if (dc->pc >= 0xffff0000) { /* We always get here via a jump, so know we are not in a conditional execution block. */ gen_exception(EXCP_KERNEL_TRAP); dc->is_jmp = DISAS_UPDATE; break; #else if (dc->pc >= 0xfffffff0 && IS_M(env)) { /* We always get here via a jump, so know we are not in a conditional execution block. */ gen_exception(EXCP_EXCEPTION_EXIT); dc->is_jmp = DISAS_UPDATE; break; #endif if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { QTAILQ_FOREACH(bp, &env->breakpoints, entry) { if (bp->pc == dc->pc) { gen_exception_insn(dc, 0, EXCP_DEBUG); /* Advance PC so that clearing the breakpoint will invalidate this TB. 
*/ dc->pc += 2; goto done_generating; break; if (search_pc) { j = gen_opc_ptr - gen_opc_buf; if (lj < j) { lj++; while (lj < j) gen_opc_instr_start[lj++] = 0; gen_opc_pc[lj] = dc->pc; gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1); gen_opc_instr_start[lj] = 1; gen_opc_icount[lj] = num_insns; if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) gen_io_start(); if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { tcg_gen_debug_insn_start(dc->pc); if (dc->thumb) { disas_thumb_insn(env, dc); if (dc->condexec_mask) { dc->condexec_cond = (dc->condexec_cond & 0xe) | ((dc->condexec_mask >> 4) & 1); dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; if (dc->condexec_mask == 0) { dc->condexec_cond = 0; } else { disas_arm_insn(env, dc); if (dc->condjmp && !dc->is_jmp) { gen_set_label(dc->condlabel); dc->condjmp = 0; /* Translation stops when a conditional branch is encountered. * Otherwise the subsequent code could get translated several times. * Also stop translation when a page boundary is reached. This * ensures prefetch aborts occur at the right place. */ num_insns ++; } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && !env->singlestep_enabled && !singlestep && dc->pc < next_page_start && num_insns < max_insns); if (tb->cflags & CF_LAST_IO) { if (dc->condjmp) { /* FIXME: This can theoretically happen with self-modifying code. */ cpu_abort(env, \"IO on conditional branch instruction\"); gen_io_end(); /* At this stage dc->condjmp will only be set when the skipped instruction was a conditional branch or trap, and the PC has already been written. */ if (unlikely(env->singlestep_enabled)) { /* Make sure the pc is updated, and raise a debug exception. */ if (dc->condjmp) { gen_set_condexec(dc); if (dc->is_jmp == DISAS_SWI) { gen_exception(EXCP_SWI); } else { gen_exception(EXCP_DEBUG); gen_set_label(dc->condlabel); if (dc->condjmp || !dc->is_jmp) { gen_set_pc_im(dc->pc); dc->condjmp = 0; gen_set_condexec(dc); if (dc->is_jmp == DISAS_SWI && !dc->condjmp) { gen_exception(EXCP_SWI); } else { /* FIXME: Single stepping a WFI insn will not halt the CPU. */ gen_exception(EXCP_DEBUG); } else { /* While branches must always occur at the end of an IT block, there are a few other things that can cause us to terminate the TB in the middel of an IT block: - Exception generating instructions (bkpt, swi, undefined). - Page boundaries. - Hardware watchpoints. Hardware breakpoints have already been handled and skip this code. 
*/ gen_set_condexec(dc); switch(dc->is_jmp) { case DISAS_NEXT: gen_goto_tb(dc, 1, dc->pc); break; default: case DISAS_JUMP: case DISAS_UPDATE: /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; case DISAS_TB_JUMP: /* nothing more to generate */ break; case DISAS_WFI: gen_helper_wfi(); break; case DISAS_SWI: gen_exception(EXCP_SWI); break; if (dc->condjmp) { gen_set_label(dc->condlabel); gen_set_condexec(dc); gen_goto_tb(dc, 1, dc->pc); dc->condjmp = 0; done_generating: gen_icount_end(tb, num_insns); *gen_opc_ptr = INDEX_op_end; #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log(\"----------------\\n\"); qemu_log(\"IN: %s\\n\", lookup_symbol(pc_start)); log_target_disas(pc_start, dc->pc - pc_start, dc->thumb); qemu_log(\"\\n\"); #endif if (search_pc) { j = gen_opc_ptr - gen_opc_buf; lj++; while (lj <= j) gen_opc_instr_start[lj++] = 0; } else { tb->size = dc->pc - pc_start; tb->icount = num_insns;"} {"target": 1, "idx": 7905, "func": "static av_always_inline void thread_park_workers(SliceThreadContext *c, int thread_count) { while (c->current_job != thread_count + c->job_count) pthread_cond_wait(&c->last_job_cond, &c->current_job_lock); pthread_mutex_unlock(&c->current_job_lock); }"} {"target": 0, "idx": 7906, "func": "int av_image_get_linesize(enum PixelFormat pix_fmt, int width, int plane) { const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; int max_step [4]; /* max pixel step for each plane */ int max_step_comp[4]; /* the component for each plane which has the max pixel step */ int s, linesize; if ((unsigned)pix_fmt >= PIX_FMT_NB || desc->flags & PIX_FMT_HWACCEL) return AVERROR(EINVAL); av_image_fill_max_pixsteps(max_step, max_step_comp, desc); s = (max_step_comp[plane] == 1 || max_step_comp[plane] == 2) ? desc->log2_chroma_w : 0; linesize = max_step[plane] * (((width + (1 << s) - 1)) >> s); if (desc->flags & PIX_FMT_BITSTREAM) linesize = (linesize + 7) >> 3; return linesize; }"} {"target": 0, "idx": 7907, "func": "static int field_end(H264Context *h, int in_setup) { MpegEncContext *const s = &h->s; AVCodecContext *const avctx = s->avctx; int err = 0; s->mb_y = 0; if (!in_setup && !s->droppable) ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, s->picture_structure == PICT_BOTTOM_FIELD); if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) ff_vdpau_h264_set_reference_frames(s); if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) { if (!s->droppable) { err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); h->prev_poc_msb = h->poc_msb; h->prev_poc_lsb = h->poc_lsb; } h->prev_frame_num_offset = h->frame_num_offset; h->prev_frame_num = h->frame_num; h->outputed_poc = h->next_outputed_poc; } if (avctx->hwaccel) { if (avctx->hwaccel->end_frame(avctx) < 0) av_log(avctx, AV_LOG_ERROR, \"hardware accelerator failed to decode picture\\n\"); } if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) ff_vdpau_h264_picture_complete(s); /* * FIXME: Error handling code does not seem to support interlaced * when slices span multiple rows * The ff_er_add_slice calls don't work right for bottom * fields; they cause massive erroneous error concealing * Error marking covers both fields (top and bottom). * This causes a mismatched s->error_count * and a bad error table. 
Further, the error count goes to * INT_MAX when called for bottom field, because mb_y is * past end by one (callers fault) and resync_mb_y != 0 * causes problems for the first MB line, too. */ if (!FIELD_PICTURE && h->current_slice) ff_er_frame_end(s); ff_MPV_frame_end(s); h->current_slice = 0; return err; }"} {"target": 0, "idx": 7908, "func": "static int vmd_decode(VmdVideoContext *s, AVFrame *frame) { int i; unsigned int *palette32; unsigned char r, g, b; GetByteContext gb; unsigned char meth; unsigned char *dp; /* pointer to current frame */ unsigned char *pp; /* pointer to previous frame */ unsigned char len; int ofs; int frame_x, frame_y; int frame_width, frame_height; frame_x = AV_RL16(&s->buf[6]); frame_y = AV_RL16(&s->buf[8]); frame_width = AV_RL16(&s->buf[10]) - frame_x + 1; frame_height = AV_RL16(&s->buf[12]) - frame_y + 1; if (frame_x < 0 || frame_width < 0 || frame_x >= s->avctx->width || frame_width > s->avctx->width || frame_x + frame_width > s->avctx->width) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid horizontal range %d-%d\\n\", frame_x, frame_width); return AVERROR_INVALIDDATA; } if (frame_y < 0 || frame_height < 0 || frame_y >= s->avctx->height || frame_height > s->avctx->height || frame_y + frame_height > s->avctx->height) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid vertical range %d-%d\\n\", frame_x, frame_width); return AVERROR_INVALIDDATA; } if ((frame_width == s->avctx->width && frame_height == s->avctx->height) && (frame_x || frame_y)) { s->x_off = frame_x; s->y_off = frame_y; } frame_x -= s->x_off; frame_y -= s->y_off; /* if only a certain region will be updated, copy the entire previous * frame before the decode */ if (s->prev_frame.data[0] && (frame_x || frame_y || (frame_width != s->avctx->width) || (frame_height != s->avctx->height))) { memcpy(frame->data[0], s->prev_frame.data[0], s->avctx->height * frame->linesize[0]); } /* check if there is a new palette */ bytestream2_init(&gb, s->buf + 16, s->size - 16); if (s->buf[15] & 0x02) { bytestream2_skip(&gb, 2); palette32 = (unsigned int *)s->palette; if (bytestream2_get_bytes_left(&gb) >= PALETTE_COUNT * 3) { for (i = 0; i < PALETTE_COUNT; i++) { r = bytestream2_get_byteu(&gb) * 4; g = bytestream2_get_byteu(&gb) * 4; b = bytestream2_get_byteu(&gb) * 4; palette32[i] = (r << 16) | (g << 8) | (b); } } else { av_log(s->avctx, AV_LOG_ERROR, \"Incomplete palette\\n\"); return AVERROR_INVALIDDATA; } s->size -= PALETTE_COUNT * 3 + 2; } if (s->size > 0) { /* originally UnpackFrame in VAG's code */ bytestream2_init(&gb, gb.buffer, s->buf + s->size - gb.buffer); if (bytestream2_get_bytes_left(&gb) < 1) return AVERROR_INVALIDDATA; meth = bytestream2_get_byteu(&gb); if (meth & 0x80) { lz_unpack(gb.buffer, bytestream2_get_bytes_left(&gb), s->unpack_buffer, s->unpack_buffer_size); meth &= 0x7F; bytestream2_init(&gb, s->unpack_buffer, s->unpack_buffer_size); } dp = &frame->data[0][frame_y * frame->linesize[0] + frame_x]; pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x]; switch (meth) { case 1: for (i = 0; i < frame_height; i++) { ofs = 0; do { len = bytestream2_get_byte(&gb); if (len & 0x80) { len = (len & 0x7F) + 1; if (ofs + len > frame_width || bytestream2_get_bytes_left(&gb) < len) return AVERROR_INVALIDDATA; bytestream2_get_buffer(&gb, &dp[ofs], len); ofs += len; } else { /* interframe pixel copy */ if (ofs + len + 1 > frame_width || !s->prev_frame.data[0]) return AVERROR_INVALIDDATA; memcpy(&dp[ofs], &pp[ofs], len + 1); ofs += len + 1; } } while (ofs < frame_width); if (ofs > frame_width) { 
av_log(s->avctx, AV_LOG_ERROR, \"VMD video: offset > width (%d > %d)\\n\", ofs, frame_width); return AVERROR_INVALIDDATA; } dp += frame->linesize[0]; pp += s->prev_frame.linesize[0]; } break; case 2: for (i = 0; i < frame_height; i++) { bytestream2_get_buffer(&gb, dp, frame_width); dp += frame->linesize[0]; pp += s->prev_frame.linesize[0]; } break; case 3: for (i = 0; i < frame_height; i++) { ofs = 0; do { len = bytestream2_get_byte(&gb); if (len & 0x80) { len = (len & 0x7F) + 1; if (bytestream2_get_byte(&gb) == 0xFF) len = rle_unpack(gb.buffer, &dp[ofs], len, bytestream2_get_bytes_left(&gb), frame_width - ofs); else bytestream2_get_buffer(&gb, &dp[ofs], len); bytestream2_skip(&gb, len); } else { /* interframe pixel copy */ if (ofs + len + 1 > frame_width || !s->prev_frame.data[0]) return AVERROR_INVALIDDATA; memcpy(&dp[ofs], &pp[ofs], len + 1); ofs += len + 1; } } while (ofs < frame_width); if (ofs > frame_width) { av_log(s->avctx, AV_LOG_ERROR, \"VMD video: offset > width (%d > %d)\\n\", ofs, frame_width); return AVERROR_INVALIDDATA; } dp += frame->linesize[0]; pp += s->prev_frame.linesize[0]; } break; } } return 0; }"} {"target": 0, "idx": 7926, "func": "static void joint_decode(COOKContext *q, float* mlt_buffer1, float* mlt_buffer2) { int i,j; int decouple_tab[SUBBAND_SIZE]; float decode_buffer[2048]; //Only 1060 might be needed. int idx, cpl_tmp,tmp_idx; float f1,f2; float* cplscale; memset(decouple_tab, 0, sizeof(decouple_tab)); memset(decode_buffer, 0, sizeof(decode_buffer)); /* Make sure the buffers are zeroed out. */ memset(mlt_buffer1,0, 1024*sizeof(float)); memset(mlt_buffer2,0, 1024*sizeof(float)); decouple_info(q, decouple_tab); mono_decode(q, decode_buffer); /* The two channels are stored interleaved in decode_buffer. */ for (i=0 ; ijs_subband_start ; i++) { for (j=0 ; jjs_vlc_bits) - 1; if (q->js_subband_start < q->subbands) { for (i=0 ; isubbands ; i++) { cpl_tmp = cplband[i + q->js_subband_start]; idx -=decouple_tab[cpl_tmp]; cplscale = (float*)cplscales[q->js_vlc_bits-2]; //choose decoupler table f1 = cplscale[decouple_tab[cpl_tmp]]; f2 = cplscale[idx-1]; for (j=0 ; jjs_subband_start + i)*20)+j; mlt_buffer1[20*(i+q->js_subband_start) + j] = f1 * decode_buffer[tmp_idx]; mlt_buffer2[20*(i+q->js_subband_start) + j] = f2 * decode_buffer[tmp_idx]; } idx = (1 << q->js_vlc_bits) - 1; } } }"} {"target": 0, "idx": 7929, "func": "static inline int nvic_exec_prio(NVICState *s) { CPUARMState *env = &s->cpu->env; int running; if (env->daif & PSTATE_F) { /* FAULTMASK */ running = -1; } else if (env->daif & PSTATE_I) { /* PRIMASK */ running = 0; } else if (env->v7m.basepri > 0) { running = env->v7m.basepri & nvic_gprio_mask(s); } else { running = NVIC_NOEXC_PRIO; /* lower than any possible priority */ } /* consider priority of active handler */ return MIN(running, s->exception_prio); }"} {"target": 1, "idx": 7949, "func": "static int get_uint16(QEMUFile *f, void *pv, size_t size) { uint16_t *v = pv; qemu_get_be16s(f, v); return 0; }"} {"target": 1, "idx": 7955, "func": "void *qemu_malloc(size_t size) { if (!size && !allow_zero_malloc()) { abort(); } return oom_check(malloc(size ? 
size : 1)); }"} {"target": 0, "idx": 7960, "func": "int nbd_receive_negotiate(int csock, const char *name, uint32_t *flags, off_t *size, size_t *blocksize) { char buf[256]; uint64_t magic, s; uint16_t tmp; TRACE(\"Receiving negotiation.\"); if (read_sync(csock, buf, 8) != 8) { LOG(\"read failed\"); errno = EINVAL; return -1; } buf[8] = '\\0'; if (strlen(buf) == 0) { LOG(\"server connection closed\"); errno = EINVAL; return -1; } TRACE(\"Magic is %c%c%c%c%c%c%c%c\", qemu_isprint(buf[0]) ? buf[0] : '.', qemu_isprint(buf[1]) ? buf[1] : '.', qemu_isprint(buf[2]) ? buf[2] : '.', qemu_isprint(buf[3]) ? buf[3] : '.', qemu_isprint(buf[4]) ? buf[4] : '.', qemu_isprint(buf[5]) ? buf[5] : '.', qemu_isprint(buf[6]) ? buf[6] : '.', qemu_isprint(buf[7]) ? buf[7] : '.'); if (memcmp(buf, \"NBDMAGIC\", 8) != 0) { LOG(\"Invalid magic received\"); errno = EINVAL; return -1; } if (read_sync(csock, &magic, sizeof(magic)) != sizeof(magic)) { LOG(\"read failed\"); errno = EINVAL; return -1; } magic = be64_to_cpu(magic); TRACE(\"Magic is 0x%\" PRIx64, magic); if (name) { uint32_t reserved = 0; uint32_t opt; uint32_t namesize; TRACE(\"Checking magic (opts_magic)\"); if (magic != 0x49484156454F5054LL) { LOG(\"Bad magic received\"); errno = EINVAL; return -1; } if (read_sync(csock, &tmp, sizeof(tmp)) != sizeof(tmp)) { LOG(\"flags read failed\"); errno = EINVAL; return -1; } *flags = be16_to_cpu(tmp) << 16; /* reserved for future use */ if (write_sync(csock, &reserved, sizeof(reserved)) != sizeof(reserved)) { LOG(\"write failed (reserved)\"); errno = EINVAL; return -1; } /* write the export name */ magic = cpu_to_be64(magic); if (write_sync(csock, &magic, sizeof(magic)) != sizeof(magic)) { LOG(\"write failed (magic)\"); errno = EINVAL; return -1; } opt = cpu_to_be32(NBD_OPT_EXPORT_NAME); if (write_sync(csock, &opt, sizeof(opt)) != sizeof(opt)) { LOG(\"write failed (opt)\"); errno = EINVAL; return -1; } namesize = cpu_to_be32(strlen(name)); if (write_sync(csock, &namesize, sizeof(namesize)) != sizeof(namesize)) { LOG(\"write failed (namesize)\"); errno = EINVAL; return -1; } if (write_sync(csock, (char*)name, strlen(name)) != strlen(name)) { LOG(\"write failed (name)\"); errno = EINVAL; return -1; } } else { TRACE(\"Checking magic (cli_magic)\"); if (magic != 0x00420281861253LL) { LOG(\"Bad magic received\"); errno = EINVAL; return -1; } } if (read_sync(csock, &s, sizeof(s)) != sizeof(s)) { LOG(\"read failed\"); errno = EINVAL; return -1; } *size = be64_to_cpu(s); *blocksize = 1024; TRACE(\"Size is %\" PRIu64, *size); if (!name) { if (read_sync(csock, flags, sizeof(*flags)) != sizeof(*flags)) { LOG(\"read failed (flags)\"); errno = EINVAL; return -1; } *flags = be32_to_cpup(flags); } else { if (read_sync(csock, &tmp, sizeof(tmp)) != sizeof(tmp)) { LOG(\"read failed (tmp)\"); errno = EINVAL; return -1; } *flags |= be32_to_cpu(tmp); } if (read_sync(csock, &buf, 124) != 124) { LOG(\"read failed (buf)\"); errno = EINVAL; return -1; } return 0; }"} {"target": 0, "idx": 7963, "func": "static bool timer_mod_ns_locked(QEMUTimerList *timer_list, QEMUTimer *ts, int64_t expire_time) { QEMUTimer **pt, *t; /* add the timer in the sorted list */ pt = &timer_list->active_timers; for (;;) { t = *pt; if (!timer_expired_ns(t, expire_time)) { break; } pt = &t->next; } ts->expire_time = MAX(expire_time, 0); ts->next = *pt; *pt = ts; return pt == &timer_list->active_timers; }"} {"target": 0, "idx": 7969, "func": "static void qvirtio_9p_start(void) { char *args; test_share = g_strdup(\"/tmp/qtest.XXXXXX\"); 
g_assert_nonnull(mkdtemp(test_share)); args = g_strdup_printf(\"-fsdev local,id=fsdev0,security_model=none,path=%s \" \"-device virtio-9p-pci,fsdev=fsdev0,mount_tag=%s\", test_share, mount_tag); qtest_start(args); g_free(args); }"} {"target": 1, "idx": 7989, "func": "static void build_fs_mount_list(FsMountList *mounts, Error **errp) { FsMount *mount; char const *mountinfo = \"/proc/self/mountinfo\"; FILE *fp; char *line = NULL, *dash; size_t n; char check; unsigned int devmajor, devminor; int ret, dir_s, dir_e, type_s, type_e, dev_s, dev_e; fp = fopen(mountinfo, \"r\"); if (!fp) { build_fs_mount_list_from_mtab(mounts, errp); return; } while (getline(&line, &n, fp) != -1) { ret = sscanf(line, \"%*u %*u %u:%u %*s %n%*s%n%c\", &devmajor, &devminor, &dir_s, &dir_e, &check); if (ret < 3) { continue; } dash = strstr(line + dir_e, \" - \"); if (!dash) { continue; } ret = sscanf(dash, \" - %n%*s%n %n%*s%n%c\", &type_s, &type_e, &dev_s, &dev_e, &check); if (ret < 1) { continue; } line[dir_e] = 0; dash[type_e] = 0; dash[dev_e] = 0; decode_mntname(line + dir_s, dir_e - dir_s); decode_mntname(dash + dev_s, dev_e - dev_s); if (devmajor == 0) { /* btrfs reports major number = 0 */ if (strcmp(\"btrfs\", dash + type_s) != 0 || dev_major_minor(dash + dev_s, &devmajor, &devminor) < 0) { continue; } } mount = g_malloc0(sizeof(FsMount)); mount->dirname = g_strdup(line + dir_s); mount->devtype = g_strdup(dash + type_s); mount->devmajor = devmajor; mount->devminor = devminor; QTAILQ_INSERT_TAIL(mounts, mount, next); } free(line); fclose(fp); }"} {"target": 0, "idx": 7995, "func": "float64 int32_to_float64( int32 a STATUS_PARAM ) { flag zSign; uint32 absA; int8 shiftCount; bits64 zSig; if ( a == 0 ) return 0; zSign = ( a < 0 ); absA = zSign ? - a : a; shiftCount = countLeadingZeros32( absA ) + 21; zSig = absA; return packFloat64( zSign, 0x432 - shiftCount, zSig<bs); if (*dinfo->serial) { *serial = g_strdup(dinfo->serial); } } }"} {"target": 0, "idx": 8006, "func": "int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level) { int data; if (level != 0 && level != 1) { abort(); } if (line == BITBANG_I2C_SDA) { if (level == i2c->last_data) { return bitbang_i2c_nop(i2c); } i2c->last_data = level; if (i2c->last_clock == 0) { return bitbang_i2c_nop(i2c); } if (level == 0) { DPRINTF(\"START\\n\"); /* START condition. */ i2c->state = SENDING_BIT7; i2c->current_addr = -1; } else { /* STOP condition. */ bitbang_i2c_enter_stop(i2c); } return bitbang_i2c_ret(i2c, 1); } data = i2c->last_data; if (i2c->last_clock == level) { return bitbang_i2c_nop(i2c); } i2c->last_clock = level; if (level == 0) { /* State is set/read at the start of the clock pulse. release the data line at the end. */ return bitbang_i2c_ret(i2c, 1); } switch (i2c->state) { case STOPPED: case SENT_NACK: return bitbang_i2c_ret(i2c, 1); case SENDING_BIT7 ... SENDING_BIT0: i2c->buffer = (i2c->buffer << 1) | data; /* will end up in WAITING_FOR_ACK */ i2c->state++; return bitbang_i2c_ret(i2c, 1); case WAITING_FOR_ACK: if (i2c->current_addr < 0) { i2c->current_addr = i2c->buffer; DPRINTF(\"Address 0x%02x\\n\", i2c->current_addr); i2c_start_transfer(i2c->bus, i2c->current_addr >> 1, i2c->current_addr & 1); } else { DPRINTF(\"Sent 0x%02x\\n\", i2c->buffer); i2c_send(i2c->bus, i2c->buffer); } if (i2c->current_addr & 1) { i2c->state = RECEIVING_BIT7; } else { i2c->state = SENDING_BIT7; } return bitbang_i2c_ret(i2c, 0); case RECEIVING_BIT7: i2c->buffer = i2c_recv(i2c->bus); DPRINTF(\"RX byte 0x%02x\\n\", i2c->buffer); /* Fall through... 
*/ case RECEIVING_BIT6 ... RECEIVING_BIT0: data = i2c->buffer >> 7; /* will end up in SENDING_ACK */ i2c->state++; i2c->buffer <<= 1; return bitbang_i2c_ret(i2c, data); case SENDING_ACK: i2c->state = RECEIVING_BIT7; if (data != 0) { DPRINTF(\"NACKED\\n\"); i2c->state = SENT_NACK; i2c_nack(i2c->bus); } else { DPRINTF(\"ACKED\\n\"); } return bitbang_i2c_ret(i2c, 1); } abort(); }"} {"target": 1, "idx": 8024, "func": "static void peripheral_device_del_completion(ReadLineState *rs, const char *str, size_t len) { Object *peripheral; GSList *list = NULL, *item; peripheral = object_resolve_path(\"/machine/peripheral/\", NULL); if (peripheral == NULL) { return; } object_child_foreach(peripheral, qdev_build_hotpluggable_device_list, &list); for (item = list; item; item = g_slist_next(item)) { DeviceState *dev = item->data; if (dev->id && !strncmp(str, dev->id, len)) { readline_add_completion(rs, dev->id); } } g_slist_free(list); }"} {"target": 0, "idx": 8030, "func": "av_cold void ff_synth_filter_init(SynthFilterContext *c) { c->synth_filter_float = synth_filter_float; if (ARCH_ARM) ff_synth_filter_init_arm(c); if (ARCH_X86) ff_synth_filter_init_x86(c); }"} {"target": 0, "idx": 8044, "func": "static void tcp_accept_incoming_migration(void *opaque) { struct sockaddr_in addr; socklen_t addrlen = sizeof(addr); int s = (unsigned long)opaque; QEMUFile *f; int c, ret; do { c = qemu_accept(s, (struct sockaddr *)&addr, &addrlen); } while (c == -1 && socket_error() == EINTR); DPRINTF(\"accepted migration\\n\"); if (c == -1) { fprintf(stderr, \"could not accept migration connection\\n\"); return; } f = qemu_fopen_socket(c); if (f == NULL) { fprintf(stderr, \"could not qemu_fopen socket\\n\"); goto out; } ret = qemu_loadvm_state(f); if (ret < 0) { fprintf(stderr, \"load of migration failed\\n\"); goto out_fopen; } qemu_announce_self(); DPRINTF(\"successfully loaded vm state\\n\"); /* we've successfully migrated, close the server socket */ qemu_set_fd_handler2(s, NULL, NULL, NULL, NULL); close(s); if (autostart) vm_start(); out_fopen: qemu_fclose(f); out: close(c); }"} {"target": 0, "idx": 8065, "func": "int main(int argc, char** argv) { FILE *f= fopen(argv[1], \"rb+\"); int count= atoi(argv[2]); int maxburst= atoi(argv[3]); int length; srand (time (0)); fseek(f, 0, SEEK_END); length= ftell(f); fseek(f, 0, SEEK_SET); while(count--){ int burst= 1 + random() * (uint64_t) (abs(maxburst)-1) / RAND_MAX; int pos= random() * (uint64_t) length / RAND_MAX; fseek(f, pos, SEEK_SET); if(maxburst<0) burst= -maxburst; if(pos + burst > length) continue; while(burst--){ int val= random() * 256ULL / RAND_MAX; if(maxburst<0) val=0; fwrite(&val, 1, 1, f); } } return 0; }"} {"target": 0, "idx": 8073, "func": "static void do_info_network(int argc, const char **argv) { int i, j; NetDriverState *nd; for(i = 0; i < nb_nics; i++) { nd = &nd_table[i]; term_printf(\"%d: ifname=%s macaddr=\", i, nd->ifname); for(j = 0; j < 6; j++) { if (j > 0) term_printf(\":\"); term_printf(\"%02x\", nd->macaddr[j]); } term_printf(\"\\n\"); } }"} {"target": 0, "idx": 8078, "func": "static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque) { RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult), .type = RDMA_CONTROL_REGISTER_RESULT, .repeat = 0, }; RDMAControlHeader unreg_resp = { .len = 0, .type = RDMA_CONTROL_UNREGISTER_FINISHED, .repeat = 0, }; RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT, .repeat = 1 }; QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque); RDMAContext *rdma = rioc->rdma; RDMALocalBlocks *local = 
&rdma->local_ram_blocks; RDMAControlHeader head; RDMARegister *reg, *registers; RDMACompress *comp; RDMARegisterResult *reg_result; static RDMARegisterResult results[RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE]; RDMALocalBlock *block; void *host_addr; int ret = 0; int idx = 0; int count = 0; int i = 0; CHECK_ERROR_STATE(); do { trace_qemu_rdma_registration_handle_wait(); ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE); if (ret < 0) { break; } if (head.repeat > RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE) { error_report(\"rdma: Too many requests in this message (%d).\" \"Bailing.\", head.repeat); ret = -EIO; break; } switch (head.type) { case RDMA_CONTROL_COMPRESS: comp = (RDMACompress *) rdma->wr_data[idx].control_curr; network_to_compress(comp); trace_qemu_rdma_registration_handle_compress(comp->length, comp->block_idx, comp->offset); if (comp->block_idx >= rdma->local_ram_blocks.nb_blocks) { error_report(\"rdma: 'compress' bad block index %u (vs %d)\", (unsigned int)comp->block_idx, rdma->local_ram_blocks.nb_blocks); ret = -EIO; goto out; } block = &(rdma->local_ram_blocks.block[comp->block_idx]); host_addr = block->local_host_addr + (comp->offset - block->offset); ram_handle_compressed(host_addr, comp->value, comp->length); break; case RDMA_CONTROL_REGISTER_FINISHED: trace_qemu_rdma_registration_handle_finished(); goto out; case RDMA_CONTROL_RAM_BLOCKS_REQUEST: trace_qemu_rdma_registration_handle_ram_blocks(); /* Sort our local RAM Block list so it's the same as the source, * we can do this since we've filled in a src_index in the list * as we received the RAMBlock list earlier. */ qsort(rdma->local_ram_blocks.block, rdma->local_ram_blocks.nb_blocks, sizeof(RDMALocalBlock), dest_ram_sort_func); if (rdma->pin_all) { ret = qemu_rdma_reg_whole_ram_blocks(rdma); if (ret) { error_report(\"rdma migration: error dest \" \"registering ram blocks\"); goto out; } } /* * Dest uses this to prepare to transmit the RAMBlock descriptions * to the source VM after connection setup. * Both sides use the \"remote\" structure to communicate and update * their \"local\" descriptions with what was sent. 
*/ for (i = 0; i < local->nb_blocks; i++) { rdma->dest_blocks[i].remote_host_addr = (uintptr_t)(local->block[i].local_host_addr); if (rdma->pin_all) { rdma->dest_blocks[i].remote_rkey = local->block[i].mr->rkey; } rdma->dest_blocks[i].offset = local->block[i].offset; rdma->dest_blocks[i].length = local->block[i].length; dest_block_to_network(&rdma->dest_blocks[i]); trace_qemu_rdma_registration_handle_ram_blocks_loop( local->block[i].block_name, local->block[i].offset, local->block[i].length, local->block[i].local_host_addr, local->block[i].src_index); } blocks.len = rdma->local_ram_blocks.nb_blocks * sizeof(RDMADestBlock); ret = qemu_rdma_post_send_control(rdma, (uint8_t *) rdma->dest_blocks, &blocks); if (ret < 0) { error_report(\"rdma migration: error sending remote info\"); goto out; } break; case RDMA_CONTROL_REGISTER_REQUEST: trace_qemu_rdma_registration_handle_register(head.repeat); reg_resp.repeat = head.repeat; registers = (RDMARegister *) rdma->wr_data[idx].control_curr; for (count = 0; count < head.repeat; count++) { uint64_t chunk; uint8_t *chunk_start, *chunk_end; reg = ®isters[count]; network_to_register(reg); reg_result = &results[count]; trace_qemu_rdma_registration_handle_register_loop(count, reg->current_index, reg->key.current_addr, reg->chunks); if (reg->current_index >= rdma->local_ram_blocks.nb_blocks) { error_report(\"rdma: 'register' bad block index %u (vs %d)\", (unsigned int)reg->current_index, rdma->local_ram_blocks.nb_blocks); ret = -ENOENT; goto out; } block = &(rdma->local_ram_blocks.block[reg->current_index]); if (block->is_ram_block) { if (block->offset > reg->key.current_addr) { error_report(\"rdma: bad register address for block %s\" \" offset: %\" PRIx64 \" current_addr: %\" PRIx64, block->block_name, block->offset, reg->key.current_addr); ret = -ERANGE; goto out; } host_addr = (block->local_host_addr + (reg->key.current_addr - block->offset)); chunk = ram_chunk_index(block->local_host_addr, (uint8_t *) host_addr); } else { chunk = reg->key.chunk; host_addr = block->local_host_addr + (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT)); /* Check for particularly bad chunk value */ if (host_addr < (void *)block->local_host_addr) { error_report(\"rdma: bad chunk for block %s\" \" chunk: %\" PRIx64, block->block_name, reg->key.chunk); ret = -ERANGE; goto out; } } chunk_start = ram_chunk_start(block, chunk); chunk_end = ram_chunk_end(block, chunk + reg->chunks); if (qemu_rdma_register_and_get_keys(rdma, block, (uintptr_t)host_addr, NULL, ®_result->rkey, chunk, chunk_start, chunk_end)) { error_report(\"cannot get rkey\"); ret = -EINVAL; goto out; } reg_result->host_addr = (uintptr_t)block->local_host_addr; trace_qemu_rdma_registration_handle_register_rkey( reg_result->rkey); result_to_network(reg_result); } ret = qemu_rdma_post_send_control(rdma, (uint8_t *) results, ®_resp); if (ret < 0) { error_report(\"Failed to send control buffer\"); goto out; } break; case RDMA_CONTROL_UNREGISTER_REQUEST: trace_qemu_rdma_registration_handle_unregister(head.repeat); unreg_resp.repeat = head.repeat; registers = (RDMARegister *) rdma->wr_data[idx].control_curr; for (count = 0; count < head.repeat; count++) { reg = ®isters[count]; network_to_register(reg); trace_qemu_rdma_registration_handle_unregister_loop(count, reg->current_index, reg->key.chunk); block = &(rdma->local_ram_blocks.block[reg->current_index]); ret = ibv_dereg_mr(block->pmr[reg->key.chunk]); block->pmr[reg->key.chunk] = NULL; if (ret != 0) { perror(\"rdma unregistration chunk failed\"); ret = -ret; goto out; } 
rdma->total_registrations--; trace_qemu_rdma_registration_handle_unregister_success( reg->key.chunk); } ret = qemu_rdma_post_send_control(rdma, NULL, &unreg_resp); if (ret < 0) { error_report(\"Failed to send control buffer\"); goto out; } break; case RDMA_CONTROL_REGISTER_RESULT: error_report(\"Invalid RESULT message at dest.\"); ret = -EIO; goto out; default: error_report(\"Unknown control message %s\", control_desc[head.type]); ret = -EIO; goto out; } } while (1); out: if (ret < 0) { rdma->error_state = ret; } return ret; }"} {"target": 0, "idx": 8081, "func": "void xen_invalidate_map_cache(void) { unsigned long i; MapCacheRev *reventry; /* Flush pending AIO before destroying the mapcache */ bdrv_drain_all(); QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { DPRINTF(\"There should be no locked mappings at this time, \" \"but \"TARGET_FMT_plx\" -> %p is present\\n\", reventry->paddr_index, reventry->vaddr_req); } mapcache_lock(); for (i = 0; i < mapcache->nr_buckets; i++) { MapCacheEntry *entry = &mapcache->entry[i]; if (entry->vaddr_base == NULL) { continue; } if (entry->lock > 0) { continue; } if (munmap(entry->vaddr_base, entry->size) != 0) { perror(\"unmap fails\"); exit(-1); } entry->paddr_index = 0; entry->vaddr_base = NULL; entry->size = 0; g_free(entry->valid_mapping); entry->valid_mapping = NULL; } mapcache->last_entry = NULL; mapcache_unlock(); }"} {"target": 0, "idx": 8086, "func": "static uint64_t omap_clkdsp_read(void *opaque, target_phys_addr_t addr, unsigned size) { struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; if (size != 2) { return omap_badwidth_read16(opaque, addr); } switch (addr) { case 0x04: /* DSP_IDLECT1 */ return s->clkm.dsp_idlect1; case 0x08: /* DSP_IDLECT2 */ return s->clkm.dsp_idlect2; case 0x14: /* DSP_RSTCT2 */ return s->clkm.dsp_rstct2; case 0x18: /* DSP_SYSST */ return (s->clkm.clocking_scheme << 11) | s->clkm.cold_start | (s->cpu->env.halted << 6); /* Quite useless... 
*/ } OMAP_BAD_REG(addr); return 0; }"} {"target": 1, "idx": 8088, "func": "static int fd_close(MigrationState *s) { DPRINTF(\"fd_close\\n\"); if (s->fd != -1) { close(s->fd); s->fd = -1; } return 0; }"} {"target": 0, "idx": 8093, "func": "static av_cold int oggvorbis_encode_init(AVCodecContext *avccontext) { OggVorbisContext *context = avccontext->priv_data; ogg_packet header, header_comm, header_code; uint8_t *p; unsigned int offset; vorbis_info_init(&context->vi); if (oggvorbis_init_encoder(&context->vi, avccontext) < 0) { av_log(avccontext, AV_LOG_ERROR, \"oggvorbis_encode_init: init_encoder failed\\n\"); return -1; } vorbis_analysis_init(&context->vd, &context->vi); vorbis_block_init(&context->vd, &context->vb); vorbis_comment_init(&context->vc); vorbis_comment_add_tag(&context->vc, \"encoder\", LIBAVCODEC_IDENT); vorbis_analysis_headerout(&context->vd, &context->vc, &header, &header_comm, &header_code); avccontext->extradata_size = 1 + xiph_len(header.bytes) + xiph_len(header_comm.bytes) + header_code.bytes; p = avccontext->extradata = av_malloc(avccontext->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); p[0] = 2; offset = 1; offset += av_xiphlacing(&p[offset], header.bytes); offset += av_xiphlacing(&p[offset], header_comm.bytes); memcpy(&p[offset], header.packet, header.bytes); offset += header.bytes; memcpy(&p[offset], header_comm.packet, header_comm.bytes); offset += header_comm.bytes; memcpy(&p[offset], header_code.packet, header_code.bytes); offset += header_code.bytes; assert(offset == avccontext->extradata_size); #if 0 vorbis_block_clear(&context->vb); vorbis_dsp_clear(&context->vd); vorbis_info_clear(&context->vi); #endif vorbis_comment_clear(&context->vc); avccontext->frame_size = OGGVORBIS_FRAME_SIZE; avccontext->coded_frame = avcodec_alloc_frame(); return 0; }"} {"target": 0, "idx": 8095, "func": "static int iff_read_header(AVFormatContext *s) { IffDemuxContext *iff = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; uint8_t *buf; uint32_t chunk_id, data_size; uint32_t screenmode = 0, num, den; unsigned transparency = 0; unsigned masking = 0; // no mask uint8_t fmt[16]; int fmt_size; st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->codec->channels = 1; st->codec->channel_layout = AV_CH_LAYOUT_MONO; avio_skip(pb, 8); // codec_tag used by ByteRun1 decoder to distinguish progressive (PBM) and interlaced (ILBM) content st->codec->codec_tag = avio_rl32(pb); while(!url_feof(pb)) { uint64_t orig_pos; int res; const char *metadata_tag = NULL; chunk_id = avio_rl32(pb); data_size = avio_rb32(pb); orig_pos = avio_tell(pb); switch(chunk_id) { case ID_VHDR: st->codec->codec_type = AVMEDIA_TYPE_AUDIO; if (data_size < 14) return AVERROR_INVALIDDATA; avio_skip(pb, 12); st->codec->sample_rate = avio_rb16(pb); if (data_size >= 16) { avio_skip(pb, 1); iff->svx8_compression = avio_r8(pb); } break; case ID_MHDR: st->codec->codec_type = AVMEDIA_TYPE_AUDIO; iff->maud_bits = -1; iff->maud_compression = -1; if (data_size < 32) return AVERROR_INVALIDDATA; avio_skip(pb, 4); iff->maud_bits = avio_rb16(pb); avio_skip(pb, 2); num = avio_rb32(pb); den = avio_rb16(pb); if (!den) return AVERROR_INVALIDDATA; avio_skip(pb, 2); st->codec->sample_rate = num / den; st->codec->channels = avio_rb16(pb); iff->maud_compression = avio_rb16(pb); if (st->codec->channels == 1) st->codec->channel_layout = AV_CH_LAYOUT_MONO; else if (st->codec->channels == 2) st->codec->channel_layout = AV_CH_LAYOUT_STEREO; break; case ID_ABIT: case ID_BODY: case ID_DBOD: case ID_MDAT: iff->body_pos = 
avio_tell(pb); iff->body_size = data_size; break; case ID_CHAN: if (data_size < 4) return AVERROR_INVALIDDATA; if (avio_rb32(pb) < 6) { st->codec->channels = 1; st->codec->channel_layout = AV_CH_LAYOUT_MONO; } else { st->codec->channels = 2; st->codec->channel_layout = AV_CH_LAYOUT_STEREO; } break; case ID_CAMG: if (data_size < 4) return AVERROR_INVALIDDATA; screenmode = avio_rb32(pb); break; case ID_CMAP: st->codec->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE; st->codec->extradata = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); if (avio_read(pb, st->codec->extradata + IFF_EXTRA_VIDEO_SIZE, data_size) < 0) return AVERROR(EIO); break; case ID_BMHD: iff->bitmap_compression = -1; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; if (data_size <= 8) return AVERROR_INVALIDDATA; st->codec->width = avio_rb16(pb); st->codec->height = avio_rb16(pb); avio_skip(pb, 4); // x, y offset st->codec->bits_per_coded_sample = avio_r8(pb); if (data_size >= 10) masking = avio_r8(pb); if (data_size >= 11) iff->bitmap_compression = avio_r8(pb); if (data_size >= 14) { avio_skip(pb, 1); // padding transparency = avio_rb16(pb); } if (data_size >= 16) { st->sample_aspect_ratio.num = avio_r8(pb); st->sample_aspect_ratio.den = avio_r8(pb); } break; case ID_DPEL: if (data_size < 4 || (data_size & 3)) return AVERROR_INVALIDDATA; if ((fmt_size = avio_read(pb, fmt, sizeof(fmt))) < 0) return fmt_size; if (fmt_size == sizeof(deep_rgb24) && !memcmp(fmt, deep_rgb24, sizeof(deep_rgb24))) st->codec->pix_fmt = AV_PIX_FMT_RGB24; else if (fmt_size == sizeof(deep_rgba) && !memcmp(fmt, deep_rgba, sizeof(deep_rgba))) st->codec->pix_fmt = AV_PIX_FMT_RGBA; else if (fmt_size == sizeof(deep_bgra) && !memcmp(fmt, deep_bgra, sizeof(deep_bgra))) st->codec->pix_fmt = AV_PIX_FMT_BGRA; else if (fmt_size == sizeof(deep_argb) && !memcmp(fmt, deep_argb, sizeof(deep_argb))) st->codec->pix_fmt = AV_PIX_FMT_ARGB; else if (fmt_size == sizeof(deep_abgr) && !memcmp(fmt, deep_abgr, sizeof(deep_abgr))) st->codec->pix_fmt = AV_PIX_FMT_ABGR; else { av_log_ask_for_sample(s, \"unsupported color format\\n\"); return AVERROR_PATCHWELCOME; } break; case ID_DGBL: st->codec->codec_type = AVMEDIA_TYPE_VIDEO; if (data_size < 8) return AVERROR_INVALIDDATA; st->codec->width = avio_rb16(pb); st->codec->height = avio_rb16(pb); iff->bitmap_compression = avio_rb16(pb); st->sample_aspect_ratio.num = avio_r8(pb); st->sample_aspect_ratio.den = avio_r8(pb); st->codec->bits_per_coded_sample = 24; break; case ID_DLOC: if (data_size < 4) return AVERROR_INVALIDDATA; st->codec->width = avio_rb16(pb); st->codec->height = avio_rb16(pb); break; case ID_ANNO: case ID_TEXT: metadata_tag = \"comment\"; break; case ID_AUTH: metadata_tag = \"artist\"; break; case ID_COPYRIGHT: metadata_tag = \"copyright\"; break; case ID_NAME: metadata_tag = \"title\"; break; } if (metadata_tag) { if ((res = get_metadata(s, metadata_tag, data_size)) < 0) { av_log(s, AV_LOG_ERROR, \"cannot allocate metadata tag %s!\\n\", metadata_tag); return res; } } avio_skip(pb, data_size - (avio_tell(pb) - orig_pos) + (data_size & 1)); } avio_seek(pb, iff->body_pos, SEEK_SET); switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate); if (st->codec->codec_tag == ID_16SV) st->codec->codec_id = AV_CODEC_ID_PCM_S16BE_PLANAR; else if (st->codec->codec_tag == ID_MAUD) { if (iff->maud_bits == 8 && !iff->maud_compression) { st->codec->codec_id = AV_CODEC_ID_PCM_U8; } else if (iff->maud_bits 
== 16 && !iff->maud_compression) { st->codec->codec_id = AV_CODEC_ID_PCM_S16BE; } else if (iff->maud_bits == 8 && iff->maud_compression == 2) { st->codec->codec_id = AV_CODEC_ID_PCM_ALAW; } else if (iff->maud_bits == 8 && iff->maud_compression == 3) { st->codec->codec_id = AV_CODEC_ID_PCM_MULAW; } else { av_log_ask_for_sample(s, \"unsupported compression %d and bit depth %d\\n\", iff->maud_compression, iff->maud_bits); return AVERROR_PATCHWELCOME; } st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id); st->codec->block_align = st->codec->bits_per_coded_sample * st->codec->channels / 8; } else { switch (iff->svx8_compression) { case COMP_NONE: st->codec->codec_id = AV_CODEC_ID_PCM_S8_PLANAR; break; case COMP_FIB: st->codec->codec_id = AV_CODEC_ID_8SVX_FIB; break; case COMP_EXP: st->codec->codec_id = AV_CODEC_ID_8SVX_EXP; break; default: av_log(s, AV_LOG_ERROR, \"Unknown SVX8 compression method '%d'\\n\", iff->svx8_compression); return -1; } } st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id); st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample; st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample; break; case AVMEDIA_TYPE_VIDEO: iff->bpp = st->codec->bits_per_coded_sample; if ((screenmode & 0x800 /* Hold And Modify */) && iff->bpp <= 8) { iff->ham = iff->bpp > 6 ? 6 : 4; st->codec->bits_per_coded_sample = 24; } iff->flags = (screenmode & 0x80 /* Extra HalfBrite */) && iff->bpp <= 8; iff->masking = masking; iff->transparency = transparency; if (!st->codec->extradata) { st->codec->extradata_size = IFF_EXTRA_VIDEO_SIZE; st->codec->extradata = av_malloc(IFF_EXTRA_VIDEO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); } buf = st->codec->extradata; bytestream_put_be16(&buf, IFF_EXTRA_VIDEO_SIZE); bytestream_put_byte(&buf, iff->bitmap_compression); bytestream_put_byte(&buf, iff->bpp); bytestream_put_byte(&buf, iff->ham); bytestream_put_byte(&buf, iff->flags); bytestream_put_be16(&buf, iff->transparency); bytestream_put_byte(&buf, iff->masking); st->codec->codec_id = AV_CODEC_ID_IFF_ILBM; break; default: return -1; } return 0; }"} {"target": 1, "idx": 8129, "func": "static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) { int i, j, k; int scheme; int current_macroblock; int current_fragment; int coding_mode; int custom_mode_alphabet[CODING_MODE_COUNT]; if (s->keyframe) { for (i = 0; i < s->fragment_count; i++) s->all_fragments[i].coding_method = MODE_INTRA; } else { /* fetch the mode coding scheme for this frame */ scheme = get_bits(gb, 3); /* is it a custom coding scheme? 
*/ if (scheme == 0) { custom_mode_alphabet[get_bits(gb, 3)] = i; } /* iterate through all of the macroblocks that contain 1 or more * coded fragments */ for (i = 0; i < s->u_superblock_start; i++) { for (j = 0; j < 4; j++) { current_macroblock = s->superblock_macroblocks[i * 4 + j]; if ((current_macroblock == -1) || (s->macroblock_coding[current_macroblock] == MODE_COPY)) continue; if (current_macroblock >= s->macroblock_count) { av_log(s->avctx, AV_LOG_ERROR, \" vp3:unpack_modes(): bad macroblock number (%d >= %d)\\n\", current_macroblock, s->macroblock_count); return 1; } /* mode 7 means get 3 bits for each coding mode */ if (scheme == 7) coding_mode = get_bits(gb, 3); else if(scheme == 0) coding_mode = custom_mode_alphabet [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)]; else coding_mode = ModeAlphabet[scheme-1] [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)]; s->macroblock_coding[current_macroblock] = coding_mode; for (k = 0; k < 6; k++) { current_fragment = s->macroblock_fragments[current_macroblock * 6 + k]; if (current_fragment == -1) continue; if (current_fragment >= s->fragment_count) { av_log(s->avctx, AV_LOG_ERROR, \" vp3:unpack_modes(): bad fragment number (%d >= %d)\\n\", current_fragment, s->fragment_count); return 1; } if (s->all_fragments[current_fragment].coding_method != MODE_COPY) s->all_fragments[current_fragment].coding_method = coding_mode; } } } } return 0; }"} {"target": 0, "idx": 8149, "func": "struct USBEndpoint *usb_ep_get(USBDevice *dev, int pid, int ep) { struct USBEndpoint *eps = pid == USB_TOKEN_IN ? dev->ep_in : dev->ep_out; if (ep == 0) { return &dev->ep_ctl; } assert(pid == USB_TOKEN_IN || pid == USB_TOKEN_OUT); assert(ep > 0 && ep <= USB_MAX_ENDPOINTS); return eps + ep - 1; }"} {"target": 0, "idx": 8160, "func": "static int vpx_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { VPxContext *ctx = avctx->priv_data; AVFrame *picture = data; const void *iter = NULL; const void *iter_alpha = NULL; struct vpx_image *img, *img_alpha; int ret; uint8_t *side_data = NULL; int side_data_size = 0; ret = decode_frame(avctx, &ctx->decoder, avpkt->data, avpkt->size); if (ret) return ret; side_data = av_packet_get_side_data(avpkt, AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, &side_data_size); if (side_data_size > 1) { const uint64_t additional_id = AV_RB64(side_data); side_data += 8; side_data_size -= 8; if (additional_id == 1) { // 1 stands for alpha channel data. if (!ctx->has_alpha_channel) { ctx->has_alpha_channel = 1; ret = vpx_init(avctx, #if CONFIG_LIBVPX_VP8_DECODER && CONFIG_LIBVPX_VP9_DECODER (avctx->codec_id == AV_CODEC_ID_VP8) ? 
&vpx_codec_vp8_dx_algo : &vpx_codec_vp9_dx_algo, #elif CONFIG_LIBVPX_VP8_DECODER &vpx_codec_vp8_dx_algo, #else &vpx_codec_vp9_dx_algo, #endif 1); if (ret) return ret; } ret = decode_frame(avctx, &ctx->decoder_alpha, side_data, side_data_size); if (ret) return ret; } } if ((img = vpx_codec_get_frame(&ctx->decoder, &iter)) && (!ctx->has_alpha_channel || (img_alpha = vpx_codec_get_frame(&ctx->decoder_alpha, &iter_alpha)))) { uint8_t *planes[4]; int linesizes[4]; if (img->d_w > img->w || img->d_h > img->h) { av_log(avctx, AV_LOG_ERROR, \"Display dimensions %dx%d exceed storage %dx%d\\n\", img->d_w, img->d_h, img->w, img->h); return AVERROR_EXTERNAL; } if ((ret = set_pix_fmt(avctx, img, ctx->has_alpha_channel)) < 0) { #ifdef VPX_IMG_FMT_HIGHBITDEPTH av_log(avctx, AV_LOG_ERROR, \"Unsupported output colorspace (%d) / bit_depth (%d)\\n\", img->fmt, img->bit_depth); #else av_log(avctx, AV_LOG_ERROR, \"Unsupported output colorspace (%d) / bit_depth (%d)\\n\", img->fmt, 8); #endif return ret; } if ((int) img->d_w != avctx->width || (int) img->d_h != avctx->height) { av_log(avctx, AV_LOG_INFO, \"dimension change! %dx%d -> %dx%d\\n\", avctx->width, avctx->height, img->d_w, img->d_h); ret = ff_set_dimensions(avctx, img->d_w, img->d_h); if (ret < 0) return ret; } if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) return ret; planes[0] = img->planes[VPX_PLANE_Y]; planes[1] = img->planes[VPX_PLANE_U]; planes[2] = img->planes[VPX_PLANE_V]; planes[3] = ctx->has_alpha_channel ? img_alpha->planes[VPX_PLANE_Y] : NULL; linesizes[0] = img->stride[VPX_PLANE_Y]; linesizes[1] = img->stride[VPX_PLANE_U]; linesizes[2] = img->stride[VPX_PLANE_V]; linesizes[3] = ctx->has_alpha_channel ? img_alpha->stride[VPX_PLANE_Y] : 0; av_image_copy(picture->data, picture->linesize, (const uint8_t**)planes, linesizes, avctx->pix_fmt, img->d_w, img->d_h); *got_frame = 1; } return avpkt->size; }"} {"target": 0, "idx": 8162, "func": "static int ipvideo_decode_block_opcode_0xB(IpvideoContext *s) { int y; /* 64-color encoding (each pixel in block is a different color) */ CHECK_STREAM_PTR(64); for (y = 0; y < 8; y++) { memcpy(s->pixel_ptr, s->stream_ptr, 8); s->stream_ptr += 8; s->pixel_ptr += s->stride; } /* report success */ return 0; }"} {"target": 0, "idx": 8164, "func": "static int mov_read_tkhd(MOVContext *c, AVIOContext *pb, MOVAtom atom) { int i; int width; int height; int64_t disp_transform[2]; int display_matrix[3][3]; AVStream *st; MOVStreamContext *sc; int version; int flags; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; version = avio_r8(pb); flags = avio_rb24(pb); st->disposition |= (flags & MOV_TKHD_FLAG_ENABLED) ? AV_DISPOSITION_DEFAULT : 0; if (version == 1) { avio_rb64(pb); avio_rb64(pb); } else { avio_rb32(pb); /* creation time */ avio_rb32(pb); /* modification time */ } st->id = (int)avio_rb32(pb); /* track id (NOT 0 !)*/ avio_rb32(pb); /* reserved */ /* highlevel (considering edits) duration in movie timebase */ (version == 1) ? 
avio_rb64(pb) : avio_rb32(pb); avio_rb32(pb); /* reserved */ avio_rb32(pb); /* reserved */ avio_rb16(pb); /* layer */ avio_rb16(pb); /* alternate group */ avio_rb16(pb); /* volume */ avio_rb16(pb); /* reserved */ //read in the display matrix (outlined in ISO 14496-12, Section 6.2.2) // they're kept in fixed point format through all calculations // save u,v,z to store the whole matrix in the AV_PKT_DATA_DISPLAYMATRIX // side data, but the scale factor is not needed to calculate aspect ratio for (i = 0; i < 3; i++) { display_matrix[i][0] = avio_rb32(pb); // 16.16 fixed point display_matrix[i][1] = avio_rb32(pb); // 16.16 fixed point display_matrix[i][2] = avio_rb32(pb); // 2.30 fixed point } width = avio_rb32(pb); // 16.16 fixed point track width height = avio_rb32(pb); // 16.16 fixed point track height sc->width = width >> 16; sc->height = height >> 16; // save the matrix when it is not the default identity if (display_matrix[0][0] != (1 << 16) || display_matrix[1][1] != (1 << 16) || display_matrix[2][2] != (1 << 30) || display_matrix[0][1] || display_matrix[0][2] || display_matrix[1][0] || display_matrix[1][2] || display_matrix[2][0] || display_matrix[2][1]) { int i, j; av_freep(&sc->display_matrix); sc->display_matrix = av_malloc(sizeof(int32_t) * 9); if (!sc->display_matrix) return AVERROR(ENOMEM); for (i = 0; i < 3; i++) for (j = 0; j < 3; j++) sc->display_matrix[i * 3 + j] = display_matrix[j][i]; } // transform the display width/height according to the matrix // skip this if the rotation angle is 0 degrees // to keep the same scale, use [width height 1<<16] if (width && height && sc->display_matrix && av_display_rotation_get(sc->display_matrix) != 0.0f) { for (i = 0; i < 2; i++) disp_transform[i] = (int64_t) width * display_matrix[0][i] + (int64_t) height * display_matrix[1][i] + ((int64_t) display_matrix[2][i] << 16); //sample aspect ratio is new width/height divided by old width/height st->sample_aspect_ratio = av_d2q( ((double) disp_transform[0] * height) / ((double) disp_transform[1] * width), INT_MAX); } return 0; }"} {"target": 1, "idx": 8168, "func": "uint8_t *av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size) { int elems = pkt->side_data_elems; if ((unsigned)elems + 1 > INT_MAX / sizeof(*pkt->side_data)) return NULL; if ((unsigned)size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE) return NULL; pkt->side_data = av_realloc(pkt->side_data, (elems + 1) * sizeof(*pkt->side_data)); if (!pkt->side_data) return NULL; pkt->side_data[elems].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); if (!pkt->side_data[elems].data) return NULL; pkt->side_data[elems].size = size; pkt->side_data[elems].type = type; pkt->side_data_elems++; return pkt->side_data[elems].data; }"} {"target": 1, "idx": 8173, "func": "static void pointer_event(VncState *vs, int button_mask, int x, int y) { static uint32_t bmap[INPUT_BUTTON_MAX] = { [INPUT_BUTTON_LEFT] = 0x01, [INPUT_BUTTON_MIDDLE] = 0x02, [INPUT_BUTTON_RIGHT] = 0x04, [INPUT_BUTTON_WHEEL_UP] = 0x08, [INPUT_BUTTON_WHEEL_DOWN] = 0x10, }; QemuConsole *con = vs->vd->dcl.con; int width = surface_width(vs->vd->ds); int height = surface_height(vs->vd->ds); if (vs->last_bmask != button_mask) { qemu_input_update_buttons(con, bmap, vs->last_bmask, button_mask); vs->last_bmask = button_mask; } if (vs->absolute) { qemu_input_queue_abs(con, INPUT_AXIS_X, x, width); qemu_input_queue_abs(con, INPUT_AXIS_Y, y, height); } else if (vnc_has_feature(vs, VNC_FEATURE_POINTER_TYPE_CHANGE)) { qemu_input_queue_rel(con, INPUT_AXIS_X, x - 0x7FFF); 
qemu_input_queue_rel(con, INPUT_AXIS_Y, y - 0x7FFF); } else { if (vs->last_x != -1) { qemu_input_queue_rel(con, INPUT_AXIS_X, x - vs->last_x); qemu_input_queue_rel(con, INPUT_AXIS_Y, y - vs->last_y); } vs->last_x = x; vs->last_y = y; } qemu_input_event_sync(); }"} {"target": 1, "idx": 8177, "func": "void visit_type_uint16(Visitor *v, uint16_t *obj, const char *name, Error **errp) { int64_t value; if (!error_is_set(errp)) { if (v->type_uint16) { v->type_uint16(v, obj, name, errp); } else { value = *obj; v->type_int(v, &value, name, errp); if (value < 0 || value > UINT16_MAX) { error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : \"null\", \"uint16_t\"); return; } *obj = value; } } }"} {"target": 0, "idx": 8182, "func": "static void rtsp_parse_transport(RTSPMessageHeader *reply, const char *p) { char transport_protocol[16]; char profile[16]; char lower_transport[16]; char parameter[16]; RTSPTransportField *th; char buf[256]; reply->nb_transports = 0; for(;;) { skip_spaces(&p); if (*p == '\\0') break; th = &reply->transports[reply->nb_transports]; get_word_sep(transport_protocol, sizeof(transport_protocol), \"/\", &p); if (!strcasecmp (transport_protocol, \"rtp\")) { get_word_sep(profile, sizeof(profile), \"/;,\", &p); lower_transport[0] = '\\0'; /* rtp/avp/ */ if (*p == '/') { get_word_sep(lower_transport, sizeof(lower_transport), \";,\", &p); } th->transport = RTSP_TRANSPORT_RTP; } else if (!strcasecmp (transport_protocol, \"x-pn-tng\") || !strcasecmp (transport_protocol, \"x-real-rdt\")) { /* x-pn-tng/ */ get_word_sep(lower_transport, sizeof(lower_transport), \"/;,\", &p); profile[0] = '\\0'; th->transport = RTSP_TRANSPORT_RDT; } if (!strcasecmp(lower_transport, \"TCP\")) th->lower_transport = RTSP_LOWER_TRANSPORT_TCP; else th->lower_transport = RTSP_LOWER_TRANSPORT_UDP; if (*p == ';') p++; /* get each parameter */ while (*p != '\\0' && *p != ',') { get_word_sep(parameter, sizeof(parameter), \"=;,\", &p); if (!strcmp(parameter, \"port\")) { if (*p == '=') { p++; rtsp_parse_range(&th->port_min, &th->port_max, &p); } } else if (!strcmp(parameter, \"client_port\")) { if (*p == '=') { p++; rtsp_parse_range(&th->client_port_min, &th->client_port_max, &p); } } else if (!strcmp(parameter, \"server_port\")) { if (*p == '=') { p++; rtsp_parse_range(&th->server_port_min, &th->server_port_max, &p); } } else if (!strcmp(parameter, \"interleaved\")) { if (*p == '=') { p++; rtsp_parse_range(&th->interleaved_min, &th->interleaved_max, &p); } } else if (!strcmp(parameter, \"multicast\")) { if (th->lower_transport == RTSP_LOWER_TRANSPORT_UDP) th->lower_transport = RTSP_LOWER_TRANSPORT_UDP_MULTICAST; } else if (!strcmp(parameter, \"ttl\")) { if (*p == '=') { p++; th->ttl = strtol(p, (char **)&p, 10); } } else if (!strcmp(parameter, \"destination\")) { struct in_addr ipaddr; if (*p == '=') { p++; get_word_sep(buf, sizeof(buf), \";,\", &p); if (inet_aton(buf, &ipaddr)) th->destination = ntohl(ipaddr.s_addr); } } while (*p != ';' && *p != '\\0' && *p != ',') p++; if (*p == ';') p++; } if (*p == ',') p++; reply->nb_transports++; } }"} {"target": 0, "idx": 8190, "func": "static int mov_write_avcc_tag(ByteIOContext *pb, MOVTrack *track) { offset_t pos = url_ftell(pb); put_be32(pb, 0); put_tag(pb, \"avcC\"); if (track->vosLen > 6) { /* check for h264 start code */ if (AV_RB32(track->vosData) == 0x00000001) { uint8_t *buf, *end; uint32_t sps_size=0, pps_size=0; uint8_t *sps=0, *pps=0; avc_parse_nal_units(&track->vosData, &track->vosLen); buf = track->vosData; end = track->vosData + track->vosLen; /* look for 
sps and pps */ while (buf < end) { unsigned int size; uint8_t nal_type; size = AV_RB32(buf); nal_type = buf[4] & 0x1f; if (nal_type == 7) { /* SPS */ sps = buf + 4; sps_size = size; } else if (nal_type == 8) { /* PPS */ pps = buf + 4; pps_size = size; } buf += size + 4; } assert(sps); assert(pps); put_byte(pb, 1); /* version */ put_byte(pb, sps[1]); /* profile */ put_byte(pb, sps[2]); /* profile compat */ put_byte(pb, sps[3]); /* level */ put_byte(pb, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 1 (11) */ put_byte(pb, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */ put_be16(pb, sps_size); put_buffer(pb, sps, sps_size); put_byte(pb, 1); /* number of pps */ put_be16(pb, pps_size); put_buffer(pb, pps, pps_size); } else { put_buffer(pb, track->vosData, track->vosLen); } } return updateSize(pb, pos); }"} {"target": 0, "idx": 8196, "func": "static inline int compress_coeffs(int *coef, int order, int c_bits) { int i, res = 0; const int low_idx = c_bits ? 4 : 2; const int shift_val = c_bits ? 8 : 4; const int high_idx = c_bits ? 11 : 5; for (i = 0; i < order; i++) if (coef[i] < low_idx || coef[i] > high_idx) res++; if (res == order) for (i = 0; i < order; i++) coef[i] -= (coef[i] > high_idx) ? shift_val : 0; return res == order; }"} {"target": 1, "idx": 8218, "func": "static av_cold int indeo3_decode_end(AVCodecContext *avctx) { Indeo3DecodeContext *s = avctx->priv_data; iv_free_func(s); return 0; }"} {"target": 1, "idx": 8226, "func": "void qemu_iovec_destroy(QEMUIOVector *qiov) { assert(qiov->nalloc != -1); g_free(qiov->iov); }"} {"target": 1, "idx": 8233, "func": "bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk, VirtIOBlockDataPlane **dataplane) { VirtIOBlockDataPlane *s; int fd; *dataplane = NULL; if (!blk->data_plane) { return true; if (blk->scsi) { error_report(\"device is incompatible with x-data-plane, use scsi=off\"); if (blk->config_wce) { error_report(\"device is incompatible with x-data-plane, \" \"use config-wce=off\"); fd = raw_get_aio_fd(blk->conf.bs); if (fd < 0) { error_report(\"drive is incompatible with x-data-plane, \" \"use format=raw,cache=none,aio=native\"); s = g_new0(VirtIOBlockDataPlane, 1); s->vdev = vdev; s->fd = fd; s->blk = blk; /* Prevent block operations that conflict with data plane thread */ bdrv_set_in_use(blk->conf.bs, 1); *dataplane = s; return true;"} {"target": 0, "idx": 8249, "func": "void helper_sysexit(int dflag) { int cpl; cpl = env->hflags & HF_CPL_MASK; if (env->sysenter_cs == 0 || cpl != 0) { raise_exception_err(EXCP0D_GPF, 0); } cpu_x86_set_cpl(env, 3); #ifdef TARGET_X86_64 if (dflag == 2) { cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK); cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); } else #endif { cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); } ESP = ECX; EIP = EDX; #ifdef CONFIG_KQEMU if (kqemu_is_ok(env)) { 
env->exception_index = -1; cpu_loop_exit(); } #endif }"} {"target": 0, "idx": 8250, "func": "static void ecc_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { printf(\"ECC: Unsupported write 0x\" TARGET_FMT_plx \" %02x\\n\", addr, val & 0xff); }"} {"target": 0, "idx": 8255, "func": "static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { int async_ret; BlockDriverAIOCB *acb; struct iovec iov; QEMUIOVector qiov; async_ret = NOT_DONE; iov.iov_base = (void *)buf; iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE; qemu_iovec_init_external(&qiov, &iov, 1); acb = bs->drv->bdrv_aio_writev(bs, sector_num, &qiov, nb_sectors, bdrv_rw_em_cb, &async_ret); if (acb == NULL) { async_ret = -1; goto fail; } while (async_ret == NOT_DONE) { qemu_aio_wait(); } fail: return async_ret; }"} {"target": 0, "idx": 8262, "func": "static uint32_t virtio_9p_get_features(VirtIODevice *vdev, uint32_t features) { features |= 1 << VIRTIO_9P_MOUNT_TAG; return features; }"} {"target": 0, "idx": 8274, "func": "static void restore_median_il(uint8_t *src, int step, int stride, int width, int height, int slices, int rmode) { int i, j, slice; int A, B, C; uint8_t *bsrc; int slice_start, slice_height; const int cmask = ~(rmode ? 3 : 1); const int stride2 = stride << 1; for (slice = 0; slice < slices; slice++) { slice_start = ((slice * height) / slices) & cmask; slice_height = ((((slice + 1) * height) / slices) & cmask) - slice_start; slice_height >>= 1; bsrc = src + slice_start * stride; // first line - left neighbour prediction bsrc[0] += 0x80; A = bsrc[0]; for (i = step; i < width * step; i += step) { bsrc[i] += A; A = bsrc[i]; } for (i = 0; i < width * step; i += step) { bsrc[stride + i] += A; A = bsrc[stride + i]; } bsrc += stride2; if (slice_height == 1) continue; // second line - first element has top prediction, the rest uses median C = bsrc[-stride2]; bsrc[0] += C; A = bsrc[0]; for (i = step; i < width * step; i += step) { B = bsrc[i - stride2]; bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); C = B; A = bsrc[i]; } for (i = 0; i < width * step; i += step) { B = bsrc[i - stride]; bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C)); C = B; A = bsrc[stride + i]; } bsrc += stride2; // the rest of lines use continuous median prediction for (j = 2; j < slice_height; j++) { for (i = 0; i < width * step; i += step) { B = bsrc[i - stride2]; bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); C = B; A = bsrc[i]; } for (i = 0; i < width * step; i += step) { B = bsrc[i - stride]; bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C)); C = B; A = bsrc[i + stride]; } bsrc += stride2; } } }"} {"target": 0, "idx": 8276, "func": "AVOpenCLExternalEnv *av_opencl_alloc_external_env(void) { AVOpenCLExternalEnv *ext = av_mallocz(sizeof(AVOpenCLExternalEnv)); if (!ext) { av_log(&openclutils, AV_LOG_ERROR, \"Could not malloc external opencl environment data space\\n\"); } return ext; }"} {"target": 0, "idx": 8284, "func": "static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr, unsigned size) { abort(); }"} {"target": 0, "idx": 8297, "func": "static void openpic_set_irq(void *opaque, int n_IRQ, int level) { OpenPICState *opp = opaque; IRQSource *src; if (n_IRQ >= MAX_IRQ) { fprintf(stderr, \"%s: IRQ %d out of range\\n\", __func__, n_IRQ); abort(); } src = &opp->src[n_IRQ]; DPRINTF(\"openpic: set irq %d = %d ivpr=0x%08x\\n\", n_IRQ, level, src->ivpr); if (src->level) { /* level-sensitive irq */ src->pending = level; if (!level) { src->ivpr &= ~IVPR_ACTIVITY_MASK; } } 
else { /* edge-sensitive irq */ if (level) { src->pending = 1; } } openpic_update_irq(opp, n_IRQ); }"} {"target": 0, "idx": 8311, "func": "static int context_init(H264Context *h){ CHECKED_ALLOCZ(h->top_borders[0], h->s.mb_width * (16+8+8) * sizeof(uint8_t)) CHECKED_ALLOCZ(h->top_borders[1], h->s.mb_width * (16+8+8) * sizeof(uint8_t)) return 0; fail: return -1; // free_tables will clean up for us }"} {"target": 1, "idx": 8337, "func": "void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width){ long i = width; while(i & 0x7) { i--; b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS; b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS; b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS; b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS; } asm volatile( \"jmp 2f \\n\\t\" \"1: \\n\\t\" \"mov %6, %%\"REG_a\" \\n\\t\" \"mov %4, %%\"REG_S\" \\n\\t\" snow_vertical_compose_mmx_load(REG_S,\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_add(REG_a,\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_move(\"mm0\",\"mm2\",\"mm4\",\"mm6\",\"mm1\",\"mm3\",\"mm5\",\"mm7\") snow_vertical_compose_mmx_r2r_add(\"mm0\",\"mm2\",\"mm4\",\"mm6\",\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_r2r_add(\"mm1\",\"mm3\",\"mm5\",\"mm7\",\"mm0\",\"mm2\",\"mm4\",\"mm6\") \"pcmpeqd %%mm1, %%mm1 \\n\\t\" \"pslld $31, %%mm1 \\n\\t\" \"psrld $29, %%mm1 \\n\\t\" \"mov %5, %%\"REG_a\" \\n\\t\" snow_vertical_compose_mmx_r2r_add(\"mm1\",\"mm1\",\"mm1\",\"mm1\",\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_sra(\"3\",\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_load(REG_a,\"mm1\",\"mm3\",\"mm5\",\"mm7\") snow_vertical_compose_mmx_sub(\"mm0\",\"mm2\",\"mm4\",\"mm6\",\"mm1\",\"mm3\",\"mm5\",\"mm7\") snow_vertical_compose_mmx_store(REG_a,\"mm1\",\"mm3\",\"mm5\",\"mm7\") \"mov %3, %%\"REG_c\" \\n\\t\" snow_vertical_compose_mmx_load(REG_S,\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_add(REG_c,\"mm1\",\"mm3\",\"mm5\",\"mm7\") snow_vertical_compose_mmx_sub(\"mm1\",\"mm3\",\"mm5\",\"mm7\",\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_store(REG_S,\"mm0\",\"mm2\",\"mm4\",\"mm6\") \"mov %2, %%\"REG_a\" \\n\\t\" snow_vertical_compose_mmx_add(REG_a,\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_sra(\"2\",\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_add(REG_c,\"mm0\",\"mm2\",\"mm4\",\"mm6\") \"pcmpeqd %%mm1, %%mm1 \\n\\t\" \"pslld $31, %%mm1 \\n\\t\" \"psrld $30, %%mm1 \\n\\t\" \"mov %1, %%\"REG_S\" \\n\\t\" snow_vertical_compose_mmx_r2r_add(\"mm1\",\"mm1\",\"mm1\",\"mm1\",\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_sra(\"2\",\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_add(REG_c,\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_store(REG_c,\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_add(REG_S,\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_move(\"mm0\",\"mm2\",\"mm4\",\"mm6\",\"mm1\",\"mm3\",\"mm5\",\"mm7\") snow_vertical_compose_mmx_sra(\"1\",\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_r2r_add(\"mm1\",\"mm3\",\"mm5\",\"mm7\",\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_add(REG_a,\"mm0\",\"mm2\",\"mm4\",\"mm6\") snow_vertical_compose_mmx_store(REG_a,\"mm0\",\"mm2\",\"mm4\",\"mm6\") \"2: \\n\\t\" \"sub $8, %%\"REG_d\" \\n\\t\" \"jge 1b \\n\\t\" :\"+d\"(i) : \"m\"(b0),\"m\"(b1),\"m\"(b2),\"m\"(b3),\"m\"(b4),\"m\"(b5): \"%\"REG_a\"\",\"%\"REG_S\"\",\"%\"REG_c\"\"); }"} {"target": 1, "idx": 8366, "func": 
"static int mxf_read_material_package(void *arg, AVIOContext *pb, int tag, int size, UID uid) { MXFPackage *package = arg; switch(tag) { case 0x4403: package->tracks_count = avio_rb32(pb); if (package->tracks_count >= UINT_MAX / sizeof(UID)) return -1; package->tracks_refs = av_malloc(package->tracks_count * sizeof(UID)); if (!package->tracks_refs) return -1; avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */ avio_read(pb, (uint8_t *)package->tracks_refs, package->tracks_count * sizeof(UID)); break; } return 0; }"} {"target": 0, "idx": 8374, "func": "static void rng_egd_finalize(Object *obj) { RngEgd *s = RNG_EGD(obj); if (s->chr) { qemu_chr_add_handlers(s->chr, NULL, NULL, NULL, NULL); qemu_chr_fe_release(s->chr); } g_free(s->chr_name); rng_egd_free_requests(s); }"} {"target": 1, "idx": 8419, "func": "static int cinepak_decode_strip (CinepakContext *s, cvid_strip_t *strip, uint8_t *data, int size) { uint8_t *eod = (data + size); int chunk_id, chunk_size; /* coordinate sanity checks */ if (strip->x1 >= s->width || strip->x2 > s->width || strip->y1 >= s->height || strip->y2 > s->height || strip->x1 >= strip->x2 || strip->y1 >= strip->y2) while ((data + 4) <= eod) { chunk_id = BE_16 (&data[0]); chunk_size = BE_16 (&data[2]) - 4; data += 4; chunk_size = ((data + chunk_size) > eod) ? (eod - data) : chunk_size; switch (chunk_id) { case 0x2000: case 0x2100: case 0x2400: case 0x2500: cinepak_decode_codebook (strip->v4_codebook, chunk_id, chunk_size, data); break; case 0x2200: case 0x2300: case 0x2600: case 0x2700: cinepak_decode_codebook (strip->v1_codebook, chunk_id, chunk_size, data); break; case 0x3000: case 0x3100: case 0x3200: return cinepak_decode_vectors (s, strip, chunk_id, chunk_size, data); } data += chunk_size; } }"} {"target": 0, "idx": 8424, "func": "void *av_realloc_array(void *ptr, size_t nmemb, size_t size) { if (size <= 0 || nmemb >= INT_MAX / size) return NULL; return av_realloc(ptr, nmemb * size); }"} {"target": 1, "idx": 8428, "func": "static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, uint8_t *dst1, uint8_t *dst2, unsigned width, unsigned height, int srcStride1, int srcStride2, int dstStride1, int dstStride2) { unsigned int y,x,h; int w; w=width/2; h=height/2; #ifdef HAVE_MMX asm volatile( PREFETCH\" %0\\n\\t\" PREFETCH\" %1\\n\\t\" ::\"m\"(*(src1+srcStride1)),\"m\"(*(src2+srcStride2)):\"memory\"); #endif for(y=0;y>1); uint8_t* d=dst1+dstStride1*y; x=0; #ifdef HAVE_MMX for(;x>1); uint8_t* d=dst2+dstStride2*y; x=0; #ifdef HAVE_MMX for(;x= 0) { if (irq == 2) { irq2 = pic_get_irq(slave_pic); if (irq2 >= 0) { pic_intack(slave_pic, irq2); } else { /* spurious IRQ on slave controller */ irq2 = 7; intno = slave_pic->irq_base + irq2; } else { intno = s->irq_base + irq; pic_intack(s, irq); } else { /* spurious IRQ on host controller */ irq = 7; intno = s->irq_base + irq; #if defined(DEBUG_PIC) || defined(DEBUG_IRQ_LATENCY) if (irq == 2) { irq = irq2 + 8; #endif #ifdef DEBUG_IRQ_LATENCY printf(\"IRQ%d latency=%0.3fus\\n\", irq, (double)(qemu_get_clock_ns(vm_clock) - irq_time[irq]) * 1000000.0 / get_ticks_per_sec()); #endif DPRINTF(\"pic_interrupt: irq=%d\\n\", irq); return intno;"} {"target": 1, "idx": 8449, "func": "void memory_region_notify_one(IOMMUNotifier *notifier, IOMMUTLBEntry *entry) { IOMMUNotifierFlag request_flags; /* * Skip the notification if the notification does not overlap * with registered range. 
*/ if (notifier->start > entry->iova + entry->addr_mask + 1 || notifier->end < entry->iova) { return; } if (entry->perm & IOMMU_RW) { request_flags = IOMMU_NOTIFIER_MAP; } else { request_flags = IOMMU_NOTIFIER_UNMAP; } if (notifier->notifier_flags & request_flags) { notifier->notify(notifier, entry); } }"} {"target": 1, "idx": 8451, "func": "static void udp_chr_close(CharDriverState *chr) { NetCharDriver *s = chr->opaque; if (s->tag) { g_source_remove(s->tag); s->tag = 0; } if (s->chan) { g_io_channel_unref(s->chan); closesocket(s->fd); } g_free(s); qemu_chr_be_event(chr, CHR_EVENT_CLOSED); }"} {"target": 1, "idx": 8456, "func": "static void av_estimate_timings_from_pts(AVFormatContext *ic) { AVPacket pkt1, *pkt = &pkt1; AVStream *st; int read_size, i, ret; int64_t end_time; int64_t filesize, offset, duration; /* free previous packet */ if (ic->cur_st && ic->cur_st->parser) av_free_packet(&ic->cur_pkt); ic->cur_st = NULL; /* flush packet queue */ flush_packet_queue(ic); for(i=0;i<ic->nb_streams;i++) { st = ic->streams[i]; if (st->parser) { av_parser_close(st->parser); st->parser= NULL; } } /* we read the first packets to get the first PTS (not fully accurate, but it is enough now) */ url_fseek(&ic->pb, 0, SEEK_SET); read_size = 0; for(;;) { if (read_size >= DURATION_MAX_READ_SIZE) break; /* if all info is available, we can stop */ for(i = 0;i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->start_time == AV_NOPTS_VALUE) break; } if (i == ic->nb_streams) break; ret = av_read_packet(ic, pkt); if (ret != 0) break; read_size += pkt->size; st = ic->streams[pkt->stream_index]; if (pkt->pts != AV_NOPTS_VALUE) { if (st->start_time == AV_NOPTS_VALUE) st->start_time = pkt->pts; } av_free_packet(pkt); } /* estimate the end time (duration) */ /* XXX: may need to support wrapping */ filesize = ic->file_size; offset = filesize - DURATION_MAX_READ_SIZE; if (offset < 0) offset = 0; url_fseek(&ic->pb, offset, SEEK_SET); read_size = 0; for(;;) { if (read_size >= DURATION_MAX_READ_SIZE) break; /* if all info is available, we can stop */ for(i = 0;i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->duration == AV_NOPTS_VALUE) break; } if (i == ic->nb_streams) break; ret = av_read_packet(ic, pkt); if (ret != 0) break; read_size += pkt->size; st = ic->streams[pkt->stream_index]; if (pkt->pts != AV_NOPTS_VALUE) { end_time = pkt->pts; duration = end_time - st->start_time; if (duration > 0) { if (st->duration == AV_NOPTS_VALUE || st->duration < duration) st->duration = duration; } } av_free_packet(pkt); } fill_all_stream_timings(ic); url_fseek(&ic->pb, 0, SEEK_SET); }"} {"target": 1, "idx": 8460, "func": "static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { VmdAudioContext *s = (VmdAudioContext *)avctx->priv_data; unsigned int sound_flags; unsigned char *output_samples = (unsigned char *)data; /* point to the start of the encoded data */ unsigned char *p = buf + 16; unsigned char *p_end = buf + buf_size; if (buf_size < 16) return buf_size; if (buf[6] == 1) { /* the chunk contains audio */ *data_size = vmdaudio_loadsound(s, output_samples, p, 0); } else if (buf[6] == 2) { /* the chunk contains audio and silence mixed together */ sound_flags = LE_32(p); p += 4; /* do something with extrabufs here? 
*/ while (p < p_end) { if (sound_flags & 0x01) /* silence */ *data_size += vmdaudio_loadsound(s, output_samples, p, 1); else { /* audio */ *data_size += vmdaudio_loadsound(s, output_samples, p, 0); p += s->block_align; } output_samples += (s->block_align * s->bits / 8); sound_flags >>= 1; } } else if (buf[6] == 3) { /* silent chunk */ *data_size = vmdaudio_loadsound(s, output_samples, p, 1); } return buf_size; }"} {"target": 1, "idx": 8469, "func": "static void scsi_write_same_complete(void *opaque, int ret) { WriteSameCBData *data = opaque; SCSIDiskReq *r = data->r; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); assert(r->req.aiocb != NULL); r->req.aiocb = NULL; aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); if (scsi_disk_req_check_error(r, ret, true)) { goto done; } block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); data->nb_sectors -= data->iov.iov_len / 512; data->sector += data->iov.iov_len / 512; data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len); if (data->iov.iov_len) { block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, data->iov.iov_len, BLOCK_ACCT_WRITE); /* Reinitialize qiov, to handle unaligned WRITE SAME request * where final qiov may need smaller size */ qemu_iovec_init_external(&data->qiov, &data->iov, 1); r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, data->sector << BDRV_SECTOR_BITS, &data->qiov, 0, scsi_write_same_complete, data); return; } scsi_req_complete(&r->req, GOOD); done: scsi_req_unref(&r->req); qemu_vfree(data->iov.iov_base); g_free(data); }"} {"target": 1, "idx": 8527, "func": "PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size) { PXA2xxState *s; int i; DriveInfo *dinfo; s = (PXA2xxState *) g_malloc0(sizeof(PXA2xxState)); s->cpu = cpu_arm_init(\"pxa255\"); if (s->cpu == NULL) { fprintf(stderr, \"Unable to find CPU definition\\n\"); exit(1); } s->reset = qemu_allocate_irq(pxa2xx_reset, s, 0); /* SDRAM & Internal Memory Storage */ memory_region_init_ram(&s->sdram, NULL, \"pxa255.sdram\", sdram_size, &error_abort); vmstate_register_ram_global(&s->sdram); memory_region_add_subregion(address_space, PXA2XX_SDRAM_BASE, &s->sdram); memory_region_init_ram(&s->internal, NULL, \"pxa255.internal\", PXA2XX_INTERNAL_SIZE, &error_abort); vmstate_register_ram_global(&s->internal); memory_region_add_subregion(address_space, PXA2XX_INTERNAL_BASE, &s->internal); s->pic = pxa2xx_pic_init(0x40d00000, s->cpu); s->dma = pxa255_dma_init(0x40000000, qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA)); sysbus_create_varargs(\"pxa25x-timer\", 0x40a00000, qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 0), qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 1), qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 2), qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 3), NULL); s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 85); dinfo = drive_get(IF_SD, 0, 0); if (!dinfo) { fprintf(stderr, \"qemu: missing SecureDigital device\\n\"); exit(1); } s->mmc = pxa2xx_mmci_init(address_space, 0x41100000, blk_by_legacy_dinfo(dinfo), qdev_get_gpio_in(s->pic, PXA2XX_PIC_MMC), qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_MMCI), qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_MMCI)); for (i = 0; pxa255_serial[i].io_base; i++) { if (serial_hds[i]) { serial_mm_init(address_space, pxa255_serial[i].io_base, 2, qdev_get_gpio_in(s->pic, pxa255_serial[i].irqn), 14745600 / 16, serial_hds[i], DEVICE_NATIVE_ENDIAN); } else { break; } } if (serial_hds[i]) s->fir = pxa2xx_fir_init(address_space, 0x40800000, qdev_get_gpio_in(s->pic, PXA2XX_PIC_ICP), 
qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_ICP), qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_ICP), serial_hds[i]); s->lcd = pxa2xx_lcdc_init(address_space, 0x44000000, qdev_get_gpio_in(s->pic, PXA2XX_PIC_LCD)); s->cm_base = 0x41300000; s->cm_regs[CCCR >> 2] = 0x02000210; /* 416.0 MHz */ s->clkcfg = 0x00000009; /* Turbo mode active */ memory_region_init_io(&s->cm_iomem, NULL, &pxa2xx_cm_ops, s, \"pxa2xx-cm\", 0x1000); memory_region_add_subregion(address_space, s->cm_base, &s->cm_iomem); vmstate_register(NULL, 0, &vmstate_pxa2xx_cm, s); pxa2xx_setup_cp14(s); s->mm_base = 0x48000000; s->mm_regs[MDMRS >> 2] = 0x00020002; s->mm_regs[MDREFR >> 2] = 0x03ca4000; s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */ memory_region_init_io(&s->mm_iomem, NULL, &pxa2xx_mm_ops, s, \"pxa2xx-mm\", 0x1000); memory_region_add_subregion(address_space, s->mm_base, &s->mm_iomem); vmstate_register(NULL, 0, &vmstate_pxa2xx_mm, s); s->pm_base = 0x40f00000; memory_region_init_io(&s->pm_iomem, NULL, &pxa2xx_pm_ops, s, \"pxa2xx-pm\", 0x100); memory_region_add_subregion(address_space, s->pm_base, &s->pm_iomem); vmstate_register(NULL, 0, &vmstate_pxa2xx_pm, s); for (i = 0; pxa255_ssp[i].io_base; i ++); s->ssp = (SSIBus **)g_malloc0(sizeof(SSIBus *) * i); for (i = 0; pxa255_ssp[i].io_base; i ++) { DeviceState *dev; dev = sysbus_create_simple(TYPE_PXA2XX_SSP, pxa255_ssp[i].io_base, qdev_get_gpio_in(s->pic, pxa255_ssp[i].irqn)); s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, \"ssi\"); } if (usb_enabled()) { sysbus_create_simple(\"sysbus-ohci\", 0x4c000000, qdev_get_gpio_in(s->pic, PXA2XX_PIC_USBH1)); } s->pcmcia[0] = pxa2xx_pcmcia_init(address_space, 0x20000000); s->pcmcia[1] = pxa2xx_pcmcia_init(address_space, 0x30000000); sysbus_create_simple(TYPE_PXA2XX_RTC, 0x40900000, qdev_get_gpio_in(s->pic, PXA2XX_PIC_RTCALARM)); s->i2c[0] = pxa2xx_i2c_init(0x40301600, qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2C), 0xffff); s->i2c[1] = pxa2xx_i2c_init(0x40f00100, qdev_get_gpio_in(s->pic, PXA2XX_PIC_PWRI2C), 0xff); s->i2s = pxa2xx_i2s_init(address_space, 0x40400000, qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2S), qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_I2S), qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_I2S)); /* GPIO1 resets the processor */ /* The handler can be overridden by board-specific code */ qdev_connect_gpio_out(s->gpio, 1, s->reset); return s; }"} {"target": 1, "idx": 8538, "func": "static int audio_open(AVFormatContext *s1, int is_output, const char *audio_device) { AudioData *s = s1->priv_data; int audio_fd; int tmp, err; char *flip = getenv(\"AUDIO_FLIP_LEFT\"); if (is_output) audio_fd = avpriv_open(audio_device, O_WRONLY); else audio_fd = avpriv_open(audio_device, O_RDONLY); if (audio_fd < 0) { av_log(s1, AV_LOG_ERROR, \"%s: %s\\n\", audio_device, strerror(errno)); return AVERROR(EIO); } if (flip && *flip == '1') { s->flip_left = 1; } /* non blocking mode */ if (!is_output) { if (fcntl(audio_fd, F_SETFL, O_NONBLOCK) < 0) { av_log(s1, AV_LOG_WARNING, \"%s: Could not enable non block mode (%s)\\n\", audio_device, strerror(errno)); } } s->frame_size = AUDIO_BLOCK_SIZE; /* select format : favour native format */ err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp); #if HAVE_BIGENDIAN if (tmp & AFMT_S16_BE) { tmp = AFMT_S16_BE; } else if (tmp & AFMT_S16_LE) { tmp = AFMT_S16_LE; } else { tmp = 0; } #else if (tmp & AFMT_S16_LE) { tmp = AFMT_S16_LE; } else if (tmp & AFMT_S16_BE) { tmp = AFMT_S16_BE; } else { tmp = 0; } #endif switch(tmp) { case AFMT_S16_LE: s->codec_id = AV_CODEC_ID_PCM_S16LE; break; case AFMT_S16_BE: s->codec_id = AV_CODEC_ID_PCM_S16BE; 
break; default: av_log(s1, AV_LOG_ERROR, \"Soundcard does not support 16 bit sample format\\n\"); close(audio_fd); return AVERROR(EIO); } err=ioctl(audio_fd, SNDCTL_DSP_SETFMT, &tmp); if (err < 0) { av_log(s1, AV_LOG_ERROR, \"SNDCTL_DSP_SETFMT: %s\\n\", strerror(errno)); goto fail; } tmp = (s->channels == 2); err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp); if (err < 0) { av_log(s1, AV_LOG_ERROR, \"SNDCTL_DSP_STEREO: %s\\n\", strerror(errno)); goto fail; } tmp = s->sample_rate; err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp); if (err < 0) { av_log(s1, AV_LOG_ERROR, \"SNDCTL_DSP_SPEED: %s\\n\", strerror(errno)); goto fail; } s->sample_rate = tmp; /* store real sample rate */ s->fd = audio_fd; return 0; fail: close(audio_fd); return AVERROR(EIO); }"} {"target": 0, "idx": 8562, "func": "static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64]) { int i, j, k, cbp, val, mb_type, motion_type; const int mb_block_count = 4 + (1 << s->chroma_format); int ret; av_dlog(s->avctx, \"decode_mb: x=%d y=%d\\n\", s->mb_x, s->mb_y); av_assert2(s->mb_skipped == 0); if (s->mb_skip_run-- != 0) { if (s->pict_type == AV_PICTURE_TYPE_P) { s->mb_skipped = 1; s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16; } else { int mb_type; if (s->mb_x) mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1]; else // FIXME not sure if this is allowed in MPEG at all mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; if (IS_INTRA(mb_type)) { av_log(s->avctx, AV_LOG_ERROR, \"skip with previntra\\n\"); return AVERROR_INVALIDDATA; } s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type | MB_TYPE_SKIP; if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0) s->mb_skipped = 1; } return 0; } switch (s->pict_type) { default: case AV_PICTURE_TYPE_I: if (get_bits1(&s->gb) == 0) { if (get_bits1(&s->gb) == 0) { av_log(s->avctx, AV_LOG_ERROR, \"invalid mb type in I Frame at %d %d\\n\", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA; } else { mb_type = MB_TYPE_INTRA; } break; case AV_PICTURE_TYPE_P: mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1); if (mb_type < 0) { av_log(s->avctx, AV_LOG_ERROR, \"invalid mb type in P Frame at %d %d\\n\", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } mb_type = ptype2mb_type[mb_type]; break; case AV_PICTURE_TYPE_B: mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1); if (mb_type < 0) { av_log(s->avctx, AV_LOG_ERROR, \"invalid mb type in B Frame at %d %d\\n\", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } mb_type = btype2mb_type[mb_type]; break; } av_dlog(s->avctx, \"mb_type=%x\\n\", mb_type); // motion_type = 0; /* avoid warning */ if (IS_INTRA(mb_type)) { s->bdsp.clear_blocks(s->block[0]); if (!s->chroma_y_shift) s->bdsp.clear_blocks(s->block[6]); /* compute DCT type */ // FIXME: add an interlaced_dct coded var? 
if (s->picture_structure == PICT_FRAME && !s->frame_pred_frame_dct) s->interlaced_dct = get_bits1(&s->gb); if (IS_QUANT(mb_type)) s->qscale = get_qscale(s); if (s->concealment_motion_vectors) { /* just parse them */ if (s->picture_structure != PICT_FRAME) skip_bits1(&s->gb); /* field select */ s->mv[0][0][0] = s->last_mv[0][0][0] = s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0], s->last_mv[0][0][0]); s->mv[0][0][1] = s->last_mv[0][0][1] = s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1], s->last_mv[0][0][1]); skip_bits1(&s->gb); /* marker */ } else { /* reset mv prediction */ memset(s->last_mv, 0, sizeof(s->last_mv)); } s->mb_intra = 1; // if 1, we memcpy blocks in xvmcvideo if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks) ff_xvmc_pack_pblocks(s, -1); // inter are always full blocks if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { if (s->flags2 & CODEC_FLAG2_FAST) { for (i = 0; i < 6; i++) mpeg2_fast_decode_block_intra(s, *s->pblocks[i], i); } else { for (i = 0; i < mb_block_count; i++) if ((ret = mpeg2_decode_block_intra(s, *s->pblocks[i], i)) < 0) return ret; } } else { for (i = 0; i < 6; i++) if ((ret = mpeg1_decode_block_intra(s, *s->pblocks[i], i)) < 0) return ret; } } else { if (mb_type & MB_TYPE_ZERO_MV) { av_assert2(mb_type & MB_TYPE_CBP); s->mv_dir = MV_DIR_FORWARD; if (s->picture_structure == PICT_FRAME) { if (s->picture_structure == PICT_FRAME && !s->frame_pred_frame_dct) s->interlaced_dct = get_bits1(&s->gb); s->mv_type = MV_TYPE_16X16; } else { s->mv_type = MV_TYPE_FIELD; mb_type |= MB_TYPE_INTERLACED; s->field_select[0][0] = s->picture_structure - 1; } if (IS_QUANT(mb_type)) s->qscale = get_qscale(s); s->last_mv[0][0][0] = 0; s->last_mv[0][0][1] = 0; s->last_mv[0][1][0] = 0; s->last_mv[0][1][1] = 0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; } else { av_assert2(mb_type & MB_TYPE_L0L1); // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED /* get additional motion vector type */ if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) { motion_type = MT_FRAME; } else { motion_type = get_bits(&s->gb, 2); if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type)) s->interlaced_dct = get_bits1(&s->gb); } if (IS_QUANT(mb_type)) s->qscale = get_qscale(s); /* motion vectors */ s->mv_dir = (mb_type >> 13) & 3; av_dlog(s->avctx, \"motion_type=%d\\n\", motion_type); switch (motion_type) { case MT_FRAME: /* or MT_16X8 */ if (s->picture_structure == PICT_FRAME) { mb_type |= MB_TYPE_16x16; s->mv_type = MV_TYPE_16X16; for (i = 0; i < 2; i++) { if (USES_LIST(mb_type, i)) { /* MT_FRAME */ s->mv[i][0][0] = s->last_mv[i][0][0] = s->last_mv[i][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[i][0], s->last_mv[i][0][0]); s->mv[i][0][1] = s->last_mv[i][0][1] = s->last_mv[i][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][0][1]); /* full_pel: only for MPEG-1 */ if (s->full_pel[i]) { s->mv[i][0][0] <<= 1; s->mv[i][0][1] <<= 1; } } } } else { mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED; s->mv_type = MV_TYPE_16X8; for (i = 0; i < 2; i++) { if (USES_LIST(mb_type, i)) { /* MT_16X8 */ for (j = 0; j < 2; j++) { s->field_select[i][j] = get_bits1(&s->gb); for (k = 0; k < 2; k++) { val = mpeg_decode_motion(s, s->mpeg_f_code[i][k], s->last_mv[i][j][k]); s->last_mv[i][j][k] = val; s->mv[i][j][k] = val; } } } } } break; case MT_FIELD: s->mv_type = MV_TYPE_FIELD; if (s->picture_structure == PICT_FRAME) { mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED; for (i = 0; i < 2; i++) { if (USES_LIST(mb_type, i)) { for (j = 0; j 
< 2; j++) { s->field_select[i][j] = get_bits1(&s->gb); val = mpeg_decode_motion(s, s->mpeg_f_code[i][0], s->last_mv[i][j][0]); s->last_mv[i][j][0] = val; s->mv[i][j][0] = val; av_dlog(s->avctx, \"fmx=%d\\n\", val); val = mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][j][1] >> 1); s->last_mv[i][j][1] = 2 * val; s->mv[i][j][1] = val; av_dlog(s->avctx, \"fmy=%d\\n\", val); } } } } else { av_assert0(!s->progressive_sequence); mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED; for (i = 0; i < 2; i++) { if (USES_LIST(mb_type, i)) { s->field_select[i][0] = get_bits1(&s->gb); for (k = 0; k < 2; k++) { val = mpeg_decode_motion(s, s->mpeg_f_code[i][k], s->last_mv[i][0][k]); s->last_mv[i][0][k] = val; s->last_mv[i][1][k] = val; s->mv[i][0][k] = val; } } } } break; case MT_DMV: if (s->progressive_sequence){ av_log(s->avctx, AV_LOG_ERROR, \"MT_DMV in progressive_sequence\\n\"); return AVERROR_INVALIDDATA; } s->mv_type = MV_TYPE_DMV; for (i = 0; i < 2; i++) { if (USES_LIST(mb_type, i)) { int dmx, dmy, mx, my, m; const int my_shift = s->picture_structure == PICT_FRAME; mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0], s->last_mv[i][0][0]); s->last_mv[i][0][0] = mx; s->last_mv[i][1][0] = mx; dmx = get_dmv(s); my = mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][0][1] >> my_shift); dmy = get_dmv(s); s->last_mv[i][0][1] = my << my_shift; s->last_mv[i][1][1] = my << my_shift; s->mv[i][0][0] = mx; s->mv[i][0][1] = my; s->mv[i][1][0] = mx; // not used s->mv[i][1][1] = my; // not used if (s->picture_structure == PICT_FRAME) { mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED; // m = 1 + 2 * s->top_field_first; m = s->top_field_first ? 1 : 3; /* top -> top pred */ s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx; s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1; m = 4 - m; s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx; s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1; } else { mb_type |= MB_TYPE_16x16; s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx; s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy; if (s->picture_structure == PICT_TOP_FIELD) s->mv[i][2][1]--; else s->mv[i][2][1]++; } } } break; default: av_log(s->avctx, AV_LOG_ERROR, \"00 motion_type at %d %d\\n\", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } } s->mb_intra = 0; if (HAS_CBP(mb_type)) { s->bdsp.clear_blocks(s->block[0]); cbp = get_vlc2(&s->gb, ff_mb_pat_vlc.table, MB_PAT_VLC_BITS, 1); if (mb_block_count > 6) { cbp <<= mb_block_count - 6; cbp |= get_bits(&s->gb, mb_block_count - 6); s->bdsp.clear_blocks(s->block[6]); } if (cbp <= 0) { av_log(s->avctx, AV_LOG_ERROR, \"invalid cbp %d at %d %d\\n\", cbp, s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } // if 1, we memcpy blocks in xvmcvideo if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks) ff_xvmc_pack_pblocks(s, cbp); if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { if (s->flags2 & CODEC_FLAG2_FAST) { for (i = 0; i < 6; i++) { if (cbp & 32) mpeg2_fast_decode_block_non_intra(s, *s->pblocks[i], i); else s->block_last_index[i] = -1; cbp += cbp; } } else { cbp <<= 12 - mb_block_count; for (i = 0; i < mb_block_count; i++) { if (cbp & (1 << 11)) { if ((ret = mpeg2_decode_block_non_intra(s, *s->pblocks[i], i)) < 0) return ret; } else { s->block_last_index[i] = -1; } cbp += cbp; } } } else { if (s->flags2 & CODEC_FLAG2_FAST) { for (i = 0; i < 6; i++) { if (cbp & 32) mpeg1_fast_decode_block_inter(s, *s->pblocks[i], i); else s->block_last_index[i] = -1; cbp += cbp; } } else { for (i = 0; i < 6; i++) { if (cbp & 32) { if ((ret = mpeg1_decode_block_inter(s, 
*s->pblocks[i], i)) < 0) return ret; } else { s->block_last_index[i] = -1; } cbp += cbp; } } } } else { for (i = 0; i < 12; i++) s->block_last_index[i] = -1; } } s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type; return 0; }"} {"target": 0, "idx": 8566, "func": "void xen_be_unbind_evtchn(struct XenDevice *xendev) { if (xendev->local_port == -1) { return; } qemu_set_fd_handler(xc_evtchn_fd(xendev->evtchndev), NULL, NULL, NULL); xc_evtchn_unbind(xendev->evtchndev, xendev->local_port); xen_be_printf(xendev, 2, \"unbind evtchn port %d\\n\", xendev->local_port); xendev->local_port = -1; }"} {"target": 0, "idx": 8573, "func": "int avio_printf(AVIOContext *s, const char *fmt, ...) { va_list ap; char buf[4096]; int ret; va_start(ap, fmt); ret = vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); avio_write(s, buf, strlen(buf)); return ret; }"} {"target": 0, "idx": 8574, "func": "static int ppc_hash64_check_prot(int prot, int rw, int access_type) { int ret; if (access_type == ACCESS_CODE) { if (prot & PAGE_EXEC) { ret = 0; } else { ret = -2; } } else if (rw) { if (prot & PAGE_WRITE) { ret = 0; } else { ret = -2; } } else { if (prot & PAGE_READ) { ret = 0; } else { ret = -2; } } return ret; }"} {"target": 1, "idx": 8581, "func": "static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); int buflen = 0; int start; if (req->cmd.buf[1] & 0x1) { /* Vital product data */ uint8_t page_code = req->cmd.buf[2]; outbuf[buflen++] = s->qdev.type & 0x1f; outbuf[buflen++] = page_code ; // this page outbuf[buflen++] = 0x00; outbuf[buflen++] = 0x00; start = buflen; switch (page_code) { case 0x00: /* Supported page codes, mandatory */ { DPRINTF(\"Inquiry EVPD[Supported pages] \" \"buffer size %zd\\n\", req->cmd.xfer); outbuf[buflen++] = 0x00; // list of supported pages (this page) if (s->serial) { outbuf[buflen++] = 0x80; // unit serial number } outbuf[buflen++] = 0x83; // device identification if (s->qdev.type == TYPE_DISK) { outbuf[buflen++] = 0xb0; // block limits outbuf[buflen++] = 0xb2; // thin provisioning } break; } case 0x80: /* Device serial number, optional */ { int l; if (!s->serial) { DPRINTF(\"Inquiry (EVPD[Serial number] not supported\\n\"); return -1; } l = strlen(s->serial); if (l > 20) { l = 20; } DPRINTF(\"Inquiry EVPD[Serial number] \" \"buffer size %zd\\n\", req->cmd.xfer); memcpy(outbuf+buflen, s->serial, l); buflen += l; break; } case 0x83: /* Device identification page, mandatory */ { const char *str = s->serial ?: bdrv_get_device_name(s->qdev.conf.bs); int max_len = s->serial ? 
20 : 255 - 8; int id_len = strlen(str); if (id_len > max_len) { id_len = max_len; } DPRINTF(\"Inquiry EVPD[Device identification] \" \"buffer size %zd\\n\", req->cmd.xfer); outbuf[buflen++] = 0x2; // ASCII outbuf[buflen++] = 0; // not officially assigned outbuf[buflen++] = 0; // reserved outbuf[buflen++] = id_len; // length of data following memcpy(outbuf+buflen, str, id_len); buflen += id_len; if (s->wwn) { outbuf[buflen++] = 0x1; // Binary outbuf[buflen++] = 0x3; // NAA outbuf[buflen++] = 0; // reserved outbuf[buflen++] = 8; stq_be_p(&outbuf[buflen], s->wwn); buflen += 8; } break; } case 0xb0: /* block limits */ { unsigned int unmap_sectors = s->qdev.conf.discard_granularity / s->qdev.blocksize; unsigned int min_io_size = s->qdev.conf.min_io_size / s->qdev.blocksize; unsigned int opt_io_size = s->qdev.conf.opt_io_size / s->qdev.blocksize; if (s->qdev.type == TYPE_ROM) { DPRINTF(\"Inquiry (EVPD[%02X] not supported for CDROM\\n\", page_code); return -1; } /* required VPD size with unmap support */ buflen = 0x40; memset(outbuf + 4, 0, buflen - 4); /* optimal transfer length granularity */ outbuf[6] = (min_io_size >> 8) & 0xff; outbuf[7] = min_io_size & 0xff; /* optimal transfer length */ outbuf[12] = (opt_io_size >> 24) & 0xff; outbuf[13] = (opt_io_size >> 16) & 0xff; outbuf[14] = (opt_io_size >> 8) & 0xff; outbuf[15] = opt_io_size & 0xff; /* optimal unmap granularity */ outbuf[28] = (unmap_sectors >> 24) & 0xff; outbuf[29] = (unmap_sectors >> 16) & 0xff; outbuf[30] = (unmap_sectors >> 8) & 0xff; outbuf[31] = unmap_sectors & 0xff; break; } case 0xb2: /* thin provisioning */ { buflen = 8; outbuf[4] = 0; outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1; outbuf[7] = 0; break; } default: return -1; } /* done with EVPD */ assert(buflen - start <= 255); outbuf[start - 1] = buflen - start; return buflen; } /* Standard INQUIRY data */ if (req->cmd.buf[2] != 0) { return -1; } /* PAGE CODE == 0 */ buflen = req->cmd.xfer; if (buflen > SCSI_MAX_INQUIRY_LEN) { buflen = SCSI_MAX_INQUIRY_LEN; } memset(outbuf, 0, buflen); outbuf[0] = s->qdev.type & 0x1f; outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0; strpadcpy((char *) &outbuf[16], 16, s->product, ' '); strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); memset(&outbuf[32], 0, 4); memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); /* * We claim conformance to SPC-3, which is required for guests * to ask for modern features like READ CAPACITY(16) or the * block characteristics VPD page by default. Not all of SPC-3 * is actually implemented, but we're good enough. */ outbuf[2] = 5; outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ if (buflen > 36) { outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ } else { /* If the allocation length of CDB is too small, the additional length is not adjusted */ outbuf[4] = 36 - 5; } /* Sync data transfer and TCQ. */ outbuf[7] = 0x10 | (req->bus->info->tcq ? 
0x02 : 0); return buflen; }"} {"target": 1, "idx": 8602, "func": "static void vmd_decode(VmdVideoContext *s) { int i; unsigned int *palette32; unsigned char r, g, b; /* point to the start of the encoded data */ const unsigned char *p = s->buf + 16; const unsigned char *pb; unsigned char meth; unsigned char *dp; /* pointer to current frame */ unsigned char *pp; /* pointer to previous frame */ unsigned char len; int ofs; int frame_x, frame_y; int frame_width, frame_height; frame_x = AV_RL16(&s->buf[6]); frame_y = AV_RL16(&s->buf[8]); frame_width = AV_RL16(&s->buf[10]) - frame_x + 1; frame_height = AV_RL16(&s->buf[12]) - frame_y + 1; if (frame_x < 0 || frame_width < 0 || frame_x >= s->avctx->width || frame_width > s->avctx->width || frame_x + frame_width > s->avctx->width) return; if (frame_y < 0 || frame_height < 0 || frame_y >= s->avctx->height || frame_height > s->avctx->height || frame_y + frame_height > s->avctx->height) return; if ((frame_width == s->avctx->width && frame_height == s->avctx->height) && (frame_x || frame_y)) { s->x_off = frame_x; s->y_off = frame_y; } frame_x -= s->x_off; frame_y -= s->y_off; /* if only a certain region will be updated, copy the entire previous * frame before the decode */ if (s->prev_frame.data[0] && (frame_x || frame_y || (frame_width != s->avctx->width) || (frame_height != s->avctx->height))) { memcpy(s->frame.data[0], s->prev_frame.data[0], s->avctx->height * s->frame.linesize[0]); } /* check if there is a new palette */ if (s->buf[15] & 0x02) { p += 2; palette32 = (unsigned int *)s->palette; for (i = 0; i < PALETTE_COUNT; i++) { r = *p++ * 4; g = *p++ * 4; b = *p++ * 4; palette32[i] = (r << 16) | (g << 8) | (b); } s->size -= (256 * 3 + 2); } if (s->size >= 0) { /* originally UnpackFrame in VAG's code */ pb = p; meth = *pb++; if (meth & 0x80) { lz_unpack(pb, s->unpack_buffer, s->unpack_buffer_size); meth &= 0x7F; pb = s->unpack_buffer; } dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x]; pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x]; switch (meth) { case 1: for (i = 0; i < frame_height; i++) { ofs = 0; do { len = *pb++; if (len & 0x80) { len = (len & 0x7F) + 1; if (ofs + len > frame_width) return; memcpy(&dp[ofs], pb, len); pb += len; ofs += len; } else { /* interframe pixel copy */ if (ofs + len + 1 > frame_width || !s->prev_frame.data[0]) return; memcpy(&dp[ofs], &pp[ofs], len + 1); ofs += len + 1; } } while (ofs < frame_width); if (ofs > frame_width) { av_log(s->avctx, AV_LOG_ERROR, \"VMD video: offset > width (%d > %d)\\n\", ofs, frame_width); break; } dp += s->frame.linesize[0]; pp += s->prev_frame.linesize[0]; } break; case 2: for (i = 0; i < frame_height; i++) { memcpy(dp, pb, frame_width); pb += frame_width; dp += s->frame.linesize[0]; pp += s->prev_frame.linesize[0]; } break; case 3: for (i = 0; i < frame_height; i++) { ofs = 0; do { len = *pb++; if (len & 0x80) { len = (len & 0x7F) + 1; if (*pb++ == 0xFF) len = rle_unpack(pb, &dp[ofs], len, frame_width - ofs); else memcpy(&dp[ofs], pb, len); pb += len; ofs += len; } else { /* interframe pixel copy */ if (ofs + len + 1 > frame_width || !s->prev_frame.data[0]) return; memcpy(&dp[ofs], &pp[ofs], len + 1); ofs += len + 1; } } while (ofs < frame_width); if (ofs > frame_width) { av_log(s->avctx, AV_LOG_ERROR, \"VMD video: offset > width (%d > %d)\\n\", ofs, frame_width); } dp += s->frame.linesize[0]; pp += s->prev_frame.linesize[0]; } break; } } }"} {"target": 0, "idx": 8624, "func": "static int unin_agp_pci_host_init(PCIDevice *d) { 
pci_config_set_vendor_id(d->config, PCI_VENDOR_ID_APPLE); pci_config_set_device_id(d->config, PCI_DEVICE_ID_APPLE_UNI_N_AGP); d->config[0x08] = 0x00; // revision pci_config_set_class(d->config, PCI_CLASS_BRIDGE_HOST); d->config[0x0C] = 0x08; // cache_line_size d->config[0x0D] = 0x10; // latency_timer // d->config[0x34] = 0x80; // capabilities_pointer return 0; }"} {"target": 0, "idx": 8625, "func": "static void float_number(void) { int i; struct { const char *encoded; double decoded; int skip; } test_cases[] = { { \"32.43\", 32.43 }, { \"0.222\", 0.222 }, { \"-32.12313\", -32.12313 }, { \"-32.20e-10\", -32.20e-10, .skip = 1 }, { }, }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QFloat *qfloat; obj = qobject_from_json(test_cases[i].encoded); g_assert(obj != NULL); g_assert(qobject_type(obj) == QTYPE_QFLOAT); qfloat = qobject_to_qfloat(obj); g_assert(qfloat_get_double(qfloat) == test_cases[i].decoded); if (test_cases[i].skip == 0) { QString *str; str = qobject_to_json(obj); g_assert(strcmp(qstring_get_str(str), test_cases[i].encoded) == 0); QDECREF(str); } QDECREF(qfloat); } }"} {"target": 0, "idx": 8626, "func": "static void colo_process_checkpoint(MigrationState *s) { QIOChannelBuffer *bioc; QEMUFile *fb = NULL; int64_t current_time, checkpoint_time = qemu_clock_get_ms(QEMU_CLOCK_HOST); Error *local_err = NULL; int ret; failover_init_state(); s->rp_state.from_dst_file = qemu_file_get_return_path(s->to_dst_file); if (!s->rp_state.from_dst_file) { error_report(\"Open QEMUFile from_dst_file failed\"); goto out; } /* * Wait for Secondary finish loading VM states and enter COLO * restore. */ colo_receive_check_message(s->rp_state.from_dst_file, COLO_MESSAGE_CHECKPOINT_READY, &local_err); if (local_err) { goto out; } bioc = qio_channel_buffer_new(COLO_BUFFER_BASE_SIZE); fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc)); object_unref(OBJECT(bioc)); qemu_mutex_lock_iothread(); vm_start(); qemu_mutex_unlock_iothread(); trace_colo_vm_state_change(\"stop\", \"run\"); while (s->state == MIGRATION_STATUS_COLO) { if (failover_get_state() != FAILOVER_STATUS_NONE) { error_report(\"failover request\"); goto out; } current_time = qemu_clock_get_ms(QEMU_CLOCK_HOST); if (current_time - checkpoint_time < s->parameters.x_checkpoint_delay) { int64_t delay_ms; delay_ms = s->parameters.x_checkpoint_delay - (current_time - checkpoint_time); g_usleep(delay_ms * 1000); } ret = colo_do_checkpoint_transaction(s, bioc, fb); if (ret < 0) { goto out; } checkpoint_time = qemu_clock_get_ms(QEMU_CLOCK_HOST); } out: /* Throw the unreported error message after exited from loop */ if (local_err) { error_report_err(local_err); } if (fb) { qemu_fclose(fb); } if (s->rp_state.from_dst_file) { qemu_fclose(s->rp_state.from_dst_file); } }"} {"target": 0, "idx": 8647, "func": "static void ecc_mem_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { ECCState *s = opaque; switch (addr >> 2) { case ECC_MER: if (s->version == ECC_MCC) s->regs[ECC_MER] = (val & ECC_MER_MASK_0); else if (s->version == ECC_EMC) s->regs[ECC_MER] = s->version | (val & ECC_MER_MASK_1); else if (s->version == ECC_SMC) s->regs[ECC_MER] = s->version | (val & ECC_MER_MASK_2); trace_ecc_mem_writel_mer(val); break; case ECC_MDR: s->regs[ECC_MDR] = val & ECC_MDR_MASK; trace_ecc_mem_writel_mdr(val); break; case ECC_MFSR: s->regs[ECC_MFSR] = val; qemu_irq_lower(s->irq); trace_ecc_mem_writel_mfsr(val); break; case ECC_VCR: s->regs[ECC_VCR] = val; trace_ecc_mem_writel_vcr(val); break; case ECC_DR: s->regs[ECC_DR] = val; 
trace_ecc_mem_writel_dr(val); break; case ECC_ECR0: s->regs[ECC_ECR0] = val; trace_ecc_mem_writel_ecr0(val); break; case ECC_ECR1: s->regs[ECC_ECR0] = val; trace_ecc_mem_writel_ecr1(val); break; } }"} {"target": 0, "idx": 8666, "func": "static void gen_op_mull_T0_T1(void) { TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64); TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64); tcg_gen_extu_i32_i64(tmp1, cpu_T[0]); tcg_gen_extu_i32_i64(tmp2, cpu_T[1]); tcg_gen_mul_i64(tmp1, tmp1, tmp2); tcg_gen_trunc_i64_i32(cpu_T[0], tmp1); tcg_gen_shri_i64(tmp1, tmp1, 32); tcg_gen_trunc_i64_i32(cpu_T[1], tmp1); }"} {"target": 1, "idx": 8694, "func": "static int get_stats(AVCodecContext *avctx, int eos) { #ifdef TH_ENCCTL_2PASS_OUT TheoraContext *h = avctx->priv_data; uint8_t *buf; int bytes; bytes = th_encode_ctl(h->t_state, TH_ENCCTL_2PASS_OUT, &buf, sizeof(buf)); if (bytes < 0) { av_log(avctx, AV_LOG_ERROR, \"Error getting first pass stats\\n\"); return AVERROR_EXTERNAL; } if (!eos) { void *tmp = av_fast_realloc(h->stats, &h->stats_size, h->stats_offset + bytes); if (!tmp) h->stats = tmp; memcpy(h->stats + h->stats_offset, buf, bytes); h->stats_offset += bytes; } else { int b64_size = AV_BASE64_SIZE(h->stats_offset); // libtheora generates a summary header at the end memcpy(h->stats, buf, bytes); avctx->stats_out = av_malloc(b64_size); av_base64_encode(avctx->stats_out, b64_size, h->stats, h->stats_offset); } return 0; #else av_log(avctx, AV_LOG_ERROR, \"libtheora too old to support 2pass\\n\"); return AVERROR(ENOSUP); #endif }"} {"target": 1, "idx": 8702, "func": "static int allocate_buffers(ALACContext *alac) { int ch; int buf_size = alac->max_samples_per_frame * sizeof(int32_t); for (ch = 0; ch < FFMIN(alac->channels, 2); ch++) { FF_ALLOC_OR_GOTO(alac->avctx, alac->predict_error_buffer[ch], buf_size, buf_alloc_fail); alac->direct_output = alac->sample_size > 16 && av_sample_fmt_is_planar(alac->avctx->sample_fmt); if (!alac->direct_output) { FF_ALLOC_OR_GOTO(alac->avctx, alac->output_samples_buffer[ch], buf_size, buf_alloc_fail); } FF_ALLOC_OR_GOTO(alac->avctx, alac->extra_bits_buffer[ch], buf_size, buf_alloc_fail); } return 0; buf_alloc_fail: alac_decode_close(alac->avctx); return AVERROR(ENOMEM); }"} {"target": 1, "idx": 8712, "func": "static av_cold int iss_read_header(AVFormatContext *s) { IssDemuxContext *iss = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; char token[MAX_TOKEN_SIZE]; int stereo, rate_divisor; get_token(pb, token, sizeof(token)); //\"IMA_ADPCM_Sound\" get_token(pb, token, sizeof(token)); //packet size sscanf(token, \"%d\", &iss->packet_size); get_token(pb, token, sizeof(token)); //File ID get_token(pb, token, sizeof(token)); //out size get_token(pb, token, sizeof(token)); //stereo sscanf(token, \"%d\", &stereo); get_token(pb, token, sizeof(token)); //Unknown1 get_token(pb, token, sizeof(token)); //RateDivisor sscanf(token, \"%d\", &rate_divisor); get_token(pb, token, sizeof(token)); //Unknown2 get_token(pb, token, sizeof(token)); //Version ID get_token(pb, token, sizeof(token)); //Size if (iss->packet_size <= 0) { av_log(s, AV_LOG_ERROR, \"packet_size %d is invalid\\n\", iss->packet_size); return AVERROR_INVALIDDATA; } iss->sample_start_pos = avio_tell(pb); st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_ISS; if (stereo) { st->codec->channels = 2; st->codec->channel_layout = AV_CH_LAYOUT_STEREO; } else { st->codec->channels = 1; st->codec->channel_layout = AV_CH_LAYOUT_MONO; } 
st->codec->sample_rate = 44100; if(rate_divisor > 0) st->codec->sample_rate /= rate_divisor; st->codec->bits_per_coded_sample = 4; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample; st->codec->block_align = iss->packet_size; avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate); return 0; }"} {"target": 1, "idx": 8719, "func": "void do_subfco (void) { T2 = T0; T0 = T1 - T0; if (likely(T0 > T1)) { xer_ca = 0; } else { xer_ca = 1; } if (likely(!(((~T2) ^ T1 ^ (-1)) & ((~T2) ^ T0) & (1 << 31)))) { xer_ov = 0; } else { xer_so = 1; xer_ov = 1; } }"} {"target": 1, "idx": 8724, "func": "static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd) { NvmeDeleteQ *c = (NvmeDeleteQ *)cmd; NvmeRequest *req, *next; NvmeSQueue *sq; NvmeCQueue *cq; uint16_t qid = le16_to_cpu(c->qid); if (!qid || nvme_check_sqid(n, qid)) { return NVME_INVALID_QID | NVME_DNR; } sq = n->sq[qid]; while (!QTAILQ_EMPTY(&sq->out_req_list)) { req = QTAILQ_FIRST(&sq->out_req_list); assert(req->aiocb); blk_aio_cancel(req->aiocb); } if (!nvme_check_cqid(n, sq->cqid)) { cq = n->cq[sq->cqid]; QTAILQ_REMOVE(&cq->sq_list, sq, entry); nvme_post_cqes(cq); QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) { if (req->sq == sq) { QTAILQ_REMOVE(&cq->req_list, req, entry); QTAILQ_INSERT_TAIL(&sq->req_list, req, entry); } } } nvme_free_sq(sq, n); return NVME_SUCCESS; }"} {"target": 1, "idx": 8726, "func": "static void virtio_scsi_locate_device(VDev *vdev) { const uint16_t channel = 0; /* again, it's what QEMU does */ uint16_t target; static uint8_t data[16 + 8 * 63]; ScsiLunReport *r = (void *) data; ScsiDevice *sdev = vdev->scsi_device; int i, luns; /* QEMU has hardcoded channel #0 in many places. * If this hardcoded value is ever changed, we'll need to add code for * vdev->config.scsi.max_channel != 0 here. */ debug_print_int(\"config.scsi.max_channel\", vdev->config.scsi.max_channel); debug_print_int(\"config.scsi.max_target \", vdev->config.scsi.max_target); debug_print_int(\"config.scsi.max_lun \", vdev->config.scsi.max_lun); debug_print_int(\"config.scsi.max_sectors\", vdev->config.scsi.max_sectors); if (vdev->scsi_device_selected) { sdev->channel = vdev->selected_scsi_device.channel; sdev->target = vdev->selected_scsi_device.target; sdev->lun = vdev->selected_scsi_device.lun; IPL_check(sdev->channel == 0, \"non-zero channel requested\"); IPL_check(sdev->target <= vdev->config.scsi.max_target, \"target# high\"); IPL_check(sdev->lun <= vdev->config.scsi.max_lun, \"LUN# high\"); return; } for (target = 0; target <= vdev->config.scsi.max_target; target++) { sdev->channel = channel; sdev->target = target; /* sdev->lun will be 0 here */ if (!scsi_report_luns(vdev, data, sizeof(data))) { if (resp.response == VIRTIO_SCSI_S_BAD_TARGET) { continue; } print_int(\"target\", target); virtio_scsi_verify_response(&resp, \"SCSI cannot report LUNs\"); } if (r->lun_list_len == 0) { print_int(\"no LUNs for target\", target); continue; } luns = r->lun_list_len / 8; debug_print_int(\"LUNs reported\", luns); if (luns == 1) { /* There is no \",lun=#\" arg for -device or \",lun=0\" given. * Hence, the only LUN reported. * Usually, it's 0. 
*/ sdev->lun = r->lun[0].v16[0]; /* it's returned this way */ debug_print_int(\"Have to use LUN\", sdev->lun); return; /* we have to use this device */ } for (i = 0; i < luns; i++) { if (r->lun[i].v64) { /* Look for non-zero LUN - we have where to choose from */ sdev->lun = r->lun[i].v16[0]; debug_print_int(\"Will use LUN\", sdev->lun); return; /* we have found a device */ } } } panic(\"\\n! Cannot locate virtio-scsi device !\\n\"); }"} {"target": 1, "idx": 8727, "func": "static void qcow2_aio_read_cb(void *opaque, int ret) { QCowAIOCB *acb = opaque; BlockDriverState *bs = acb->common.bs; BDRVQcowState *s = bs->opaque; int index_in_cluster, n1; acb->hd_aiocb = NULL; if (ret < 0) goto done; /* post process the read buffer */ if (!acb->cluster_offset) { /* nothing to do */ } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) { /* nothing to do */ } else { if (s->crypt_method) { qcow2_encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->cluster_data, acb->cur_nr_sectors, 0, &s->aes_decrypt_key); qemu_iovec_reset(&acb->hd_qiov); qemu_iovec_copy(&acb->hd_qiov, acb->qiov, acb->bytes_done, acb->cur_nr_sectors * 512); qemu_iovec_from_buffer(&acb->hd_qiov, acb->cluster_data, 512 * acb->cur_nr_sectors); } } acb->remaining_sectors -= acb->cur_nr_sectors; acb->sector_num += acb->cur_nr_sectors; acb->bytes_done += acb->cur_nr_sectors * 512; if (acb->remaining_sectors == 0) { /* request completed */ ret = 0; goto done; } /* prepare next AIO request */ acb->cur_nr_sectors = acb->remaining_sectors; if (s->crypt_method) { acb->cur_nr_sectors = MIN(acb->cur_nr_sectors, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors); } ret = qcow2_get_cluster_offset(bs, acb->sector_num << 9, &acb->cur_nr_sectors, &acb->cluster_offset); if (ret < 0) { goto done; } index_in_cluster = acb->sector_num & (s->cluster_sectors - 1); qemu_iovec_reset(&acb->hd_qiov); qemu_iovec_copy(&acb->hd_qiov, acb->qiov, acb->bytes_done, acb->cur_nr_sectors * 512); if (!acb->cluster_offset) { if (bs->backing_hd) { /* read from the base image */ n1 = qcow2_backing_read1(bs->backing_hd, &acb->hd_qiov, acb->sector_num, acb->cur_nr_sectors); if (n1 > 0) { BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num, &acb->hd_qiov, acb->cur_nr_sectors, qcow2_aio_read_cb, acb); if (acb->hd_aiocb == NULL) goto done; } else { ret = qcow2_schedule_bh(qcow2_aio_read_bh, acb); if (ret < 0) goto done; } } else { /* Note: in this case, no need to wait */ qemu_iovec_memset(&acb->hd_qiov, 0, 512 * acb->cur_nr_sectors); ret = qcow2_schedule_bh(qcow2_aio_read_bh, acb); if (ret < 0) goto done; } } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) { /* add AIO support for compressed blocks ? */ if (qcow2_decompress_cluster(bs, acb->cluster_offset) < 0) goto done; qemu_iovec_from_buffer(&acb->hd_qiov, s->cluster_cache + index_in_cluster * 512, 512 * acb->cur_nr_sectors); ret = qcow2_schedule_bh(qcow2_aio_read_bh, acb); if (ret < 0) goto done; } else { if ((acb->cluster_offset & 511) != 0) { ret = -EIO; goto done; } if (s->crypt_method) { /* * For encrypted images, read everything into a temporary * contiguous buffer on which the AES functions can work. 
*/ if (!acb->cluster_data) { acb->cluster_data = qemu_mallocz(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); } assert(acb->cur_nr_sectors <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors); qemu_iovec_reset(&acb->hd_qiov); qemu_iovec_add(&acb->hd_qiov, acb->cluster_data, 512 * acb->cur_nr_sectors); } BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); acb->hd_aiocb = bdrv_aio_readv(bs->file, (acb->cluster_offset >> 9) + index_in_cluster, &acb->hd_qiov, acb->cur_nr_sectors, qcow2_aio_read_cb, acb); if (acb->hd_aiocb == NULL) { ret = -EIO; goto done; } } return; done: acb->common.cb(acb->common.opaque, ret); qemu_iovec_destroy(&acb->hd_qiov); qemu_aio_release(acb); }"} {"target": 1, "idx": 8735, "func": "static int mmsh_open(URLContext *h, const char *uri, int flags) { int i, port, err; char httpname[256], path[256], host[128], location[1024]; char *stream_selection; char headers[1024]; MMSHContext *mmsh; MMSContext *mms; mmsh = h->priv_data = av_mallocz(sizeof(MMSHContext)); if (!h->priv_data) return AVERROR(ENOMEM); mmsh->request_seq = h->is_streamed = 1; mms = &mmsh->mms; av_strlcpy(location, uri, sizeof(location)); av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port, path, sizeof(path), location); if (port<0) port = 80; // default mmsh protocol port ff_url_join(httpname, sizeof(httpname), \"http\", NULL, host, port, path); if (url_alloc(&mms->mms_hd, httpname, URL_RDONLY) < 0) { return AVERROR(EIO); } snprintf(headers, sizeof(headers), \"Accept: */*\\r\\n\" USERAGENT \"Host: %s:%d\\r\\n\" \"Pragma: no-cache,rate=1.000000,stream-time=0,\" \"stream-offset=0:0,request-context=%u,max-duration=0\\r\\n\" CLIENTGUID \"Connection: Close\\r\\n\\r\\n\", host, port, mmsh->request_seq++); ff_http_set_headers(mms->mms_hd, headers); err = url_connect(mms->mms_hd); if (err) { goto fail; } err = get_http_header_data(mmsh); if (err) { av_log(NULL, AV_LOG_ERROR, \"Get http header data failed!\\n\"); goto fail; } // close the socket and then reopen it for sending the second play request. 
url_close(mms->mms_hd); memset(headers, 0, sizeof(headers)); if (url_alloc(&mms->mms_hd, httpname, URL_RDONLY) < 0) { return AVERROR(EIO); } stream_selection = av_mallocz(mms->stream_num * 19 + 1); if (!stream_selection) return AVERROR(ENOMEM); for (i = 0; i < mms->stream_num; i++) { char tmp[20]; err = snprintf(tmp, sizeof(tmp), \"ffff:%d:0 \", mms->streams[i].id); if (err < 0) goto fail; av_strlcat(stream_selection, tmp, mms->stream_num * 19 + 1); } // send play request err = snprintf(headers, sizeof(headers), \"Accept: */*\\r\\n\" USERAGENT \"Host: %s:%d\\r\\n\" \"Pragma: no-cache,rate=1.000000,request-context=%u\\r\\n\" \"Pragma: xPlayStrm=1\\r\\n\" CLIENTGUID \"Pragma: stream-switch-count=%d\\r\\n\" \"Pragma: stream-switch-entry=%s\\r\\n\" \"Connection: Close\\r\\n\\r\\n\", host, port, mmsh->request_seq++, mms->stream_num, stream_selection); av_freep(&stream_selection); if (err < 0) { av_log(NULL, AV_LOG_ERROR, \"Build play request failed!\\n\"); goto fail; } dprintf(NULL, \"out_buffer is %s\", headers); ff_http_set_headers(mms->mms_hd, headers); err = url_connect(mms->mms_hd); if (err) { goto fail; } err = get_http_header_data(mmsh); if (err) { av_log(NULL, AV_LOG_ERROR, \"Get http header data failed!\\n\"); goto fail; } dprintf(NULL, \"Connection successfully open\\n\"); return 0; fail: av_freep(&stream_selection); mmsh_close(h); dprintf(NULL, \"Connection failed with error %d\\n\", err); return err; }"} {"target": 0, "idx": 8750, "func": "static void t_gen_btst(TCGv d, TCGv a, TCGv b) { TCGv sbit; TCGv bset; TCGv t0; int l1; /* des ref: The N flag is set according to the selected bit in the dest reg. The Z flag is set if the selected bit and all bits to the right are zero. The X flag is cleared. Other flags are left untouched. The destination reg is not affected. unsigned int fz, sbit, bset, mask, masked_t0; sbit = T1 & 31; bset = !!(T0 & (1 << sbit)); mask = sbit == 31 ? -1 : (1 << (sbit + 1)) - 1; masked_t0 = T0 & mask; fz = !(masked_t0 | bset); // Clear the X, N and Z flags. T0 = env->pregs[PR_CCS] & ~(X_FLAG | N_FLAG | Z_FLAG); // Set the N and Z flags accordingly. T0 |= (bset << 3) | (fz << 2); */ l1 = gen_new_label(); sbit = tcg_temp_new(TCG_TYPE_TL); bset = tcg_temp_new(TCG_TYPE_TL); t0 = tcg_temp_new(TCG_TYPE_TL); /* Compute bset and sbit. */ tcg_gen_andi_tl(sbit, b, 31); tcg_gen_shl_tl(t0, tcg_const_tl(1), sbit); tcg_gen_and_tl(bset, a, t0); tcg_gen_shr_tl(bset, bset, sbit); /* Displace to N_FLAG. */ tcg_gen_shli_tl(bset, bset, 3); tcg_gen_shl_tl(sbit, tcg_const_tl(2), sbit); tcg_gen_subi_tl(sbit, sbit, 1); tcg_gen_and_tl(sbit, a, sbit); tcg_gen_andi_tl(d, cpu_PR[PR_CCS], ~(X_FLAG | N_FLAG | Z_FLAG)); /* or in the N_FLAG. */ tcg_gen_or_tl(d, d, bset); tcg_gen_brcondi_tl(TCG_COND_NE, sbit, 0, l1); /* or in the Z_FLAG. */ tcg_gen_ori_tl(d, d, Z_FLAG); gen_set_label(l1); tcg_temp_free(sbit); tcg_temp_free(bset); }"} {"target": 0, "idx": 8758, "func": "read_f(int argc, char **argv) { struct timeval t1, t2; int Cflag = 0, pflag = 0, qflag = 0, vflag = 0; int Pflag = 0, sflag = 0, lflag = 0, bflag = 0; int c, cnt; char *buf; int64_t offset; int count; /* Some compilers get confused and warn if this is not initialized. 
*/ int total = 0; int pattern = 0, pattern_offset = 0, pattern_count = 0; while ((c = getopt(argc, argv, \"bCl:pP:qs:v\")) != EOF) { switch (c) { case 'b': bflag = 1; break; case 'C': Cflag = 1; break; case 'l': lflag = 1; pattern_count = cvtnum(optarg); if (pattern_count < 0) { printf(\"non-numeric length argument -- %s\\n\", optarg); return 0; } break; case 'p': pflag = 1; break; case 'P': Pflag = 1; pattern = atoi(optarg); break; case 'q': qflag = 1; break; case 's': sflag = 1; pattern_offset = cvtnum(optarg); if (pattern_offset < 0) { printf(\"non-numeric length argument -- %s\\n\", optarg); return 0; } break; case 'v': vflag = 1; break; default: return command_usage(&read_cmd); } } if (optind != argc - 2) return command_usage(&read_cmd); if (bflag && pflag) { printf(\"-b and -p cannot be specified at the same time\\n\"); return 0; } offset = cvtnum(argv[optind]); if (offset < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[optind]); return 0; } optind++; count = cvtnum(argv[optind]); if (count < 0) { printf(\"non-numeric length argument -- %s\\n\", argv[optind]); return 0; } if (!Pflag && (lflag || sflag)) { return command_usage(&read_cmd); } if (!lflag) { pattern_count = count - pattern_offset; } if ((pattern_count < 0) || (pattern_count + pattern_offset > count)) { printf(\"pattern verfication range exceeds end of read data\\n\"); return 0; } if (!pflag) if (offset & 0x1ff) { printf(\"offset %lld is not sector aligned\\n\", (long long)offset); return 0; if (count & 0x1ff) { printf(\"count %d is not sector aligned\\n\", count); return 0; } } buf = qemu_io_alloc(count, 0xab); gettimeofday(&t1, NULL); if (pflag) cnt = do_pread(buf, offset, count, &total); else if (bflag) cnt = do_load_vmstate(buf, offset, count, &total); else cnt = do_read(buf, offset, count, &total); gettimeofday(&t2, NULL); if (cnt < 0) { printf(\"read failed: %s\\n\", strerror(-cnt)); goto out; } if (Pflag) { void* cmp_buf = malloc(pattern_count); memset(cmp_buf, pattern, pattern_count); if (memcmp(buf + pattern_offset, cmp_buf, pattern_count)) { printf(\"Pattern verification failed at offset %lld, \" \"%d bytes\\n\", (long long) offset + pattern_offset, pattern_count); } free(cmp_buf); } if (qflag) goto out; if (vflag) dump_buffer(buf, offset, count); /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, t1); print_report(\"read\", &t2, offset, count, total, cnt, Cflag); out: qemu_io_free(buf); return 0; }"} {"target": 0, "idx": 8801, "func": "int ff_network_init(void) { #if HAVE_WINSOCK2_H WSADATA wsaData; #endif if (!ff_network_inited_globally) av_log(NULL, AV_LOG_WARNING, \"Using network protocols without global \" \"network initialization. 
Please use \" \"avformat_network_init(), this will \" \"become mandatory later.\\n\"); #if HAVE_WINSOCK2_H if (WSAStartup(MAKEWORD(1,1), &wsaData)) return 0; #endif return 1; }"} {"target": 1, "idx": 8802, "func": "static int handle_connect_error(URLContext *s, const char *desc) { RTMPContext *rt = s->priv_data; char buf[300], *ptr, authmod[15]; int i = 0, ret = 0; const char *user = \"\", *salt = \"\", *opaque = NULL, *challenge = NULL, *cptr = NULL, *nonce = NULL; if (!(cptr = strstr(desc, \"authmod=adobe\")) && !(cptr = strstr(desc, \"authmod=llnw\"))) { av_log(s, AV_LOG_ERROR, \"Unknown connect error (unsupported authentication method?)\\n\"); return AVERROR_UNKNOWN; } cptr += strlen(\"authmod=\"); while (*cptr && *cptr != ' ' && i < sizeof(authmod) - 1) authmod[i++] = *cptr++; authmod[i] = '\\0'; if (!rt->username[0] || !rt->password[0]) { av_log(s, AV_LOG_ERROR, \"No credentials set\\n\"); return AVERROR_UNKNOWN; } if (strstr(desc, \"?reason=authfailed\")) { av_log(s, AV_LOG_ERROR, \"Incorrect username/password\\n\"); return AVERROR_UNKNOWN; } else if (strstr(desc, \"?reason=nosuchuser\")) { av_log(s, AV_LOG_ERROR, \"Incorrect username\\n\"); return AVERROR_UNKNOWN; } if (rt->auth_tried) { av_log(s, AV_LOG_ERROR, \"Authentication failed\\n\"); return AVERROR_UNKNOWN; } rt->auth_params[0] = '\\0'; if (strstr(desc, \"code=403 need auth\")) { snprintf(rt->auth_params, sizeof(rt->auth_params), \"?authmod=%s&user=%s\", authmod, rt->username); return 0; } if (!(cptr = strstr(desc, \"?reason=needauth\"))) { av_log(s, AV_LOG_ERROR, \"No auth parameters found\\n\"); return AVERROR_UNKNOWN; } av_strlcpy(buf, cptr + 1, sizeof(buf)); ptr = buf; while (ptr) { char *next = strchr(ptr, '&'); char *value = strchr(ptr, '='); if (next) *next++ = '\\0'; if (value) *value++ = '\\0'; if (!strcmp(ptr, \"user\")) { user = value; } else if (!strcmp(ptr, \"salt\")) { salt = value; } else if (!strcmp(ptr, \"opaque\")) { opaque = value; } else if (!strcmp(ptr, \"challenge\")) { challenge = value; } else if (!strcmp(ptr, \"nonce\")) { nonce = value; } ptr = next; } if (!strcmp(authmod, \"adobe\")) { if ((ret = do_adobe_auth(rt, user, salt, opaque, challenge)) < 0) return ret; } else { if ((ret = do_llnw_auth(rt, user, nonce)) < 0) return ret; } rt->auth_tried = 1; return 0; }"} {"target": 1, "idx": 8803, "func": "static int jacosub_probe(AVProbeData *p) { const char *ptr = p->buf; const char *ptr_end = p->buf + p->buf_size; if (AV_RB24(ptr) == 0xEFBBBF) ptr += 3; /* skip UTF-8 BOM */ while (ptr < ptr_end) { while (jss_whitespace(*ptr)) ptr++; if (*ptr != '#' && *ptr != '\\n') { if (timed_line(ptr)) return AVPROBE_SCORE_EXTENSION + 1; return 0; } ptr += strcspn(ptr, \"\\n\") + 1; } return 0; }"} {"target": 1, "idx": 8809, "func": "static void vp6_parse_coeff(VP56Context *s) { VP56RangeCoder *c = s->ccp; VP56Model *model = s->modelp; uint8_t *permute = s->scantable.permutated; uint8_t *model1, *model2, *model3; int coeff, sign, coeff_idx; int b, i, cg, idx, ctx; int pt = 0; /* plane type (0 for Y, 1 for U or V) */ for (b=0; b<6; b++) { int ct = 1; /* code type */ int run = 1; if (b > 3) pt = 1; ctx = s->left_block[vp56_b6to4[b]].not_null_dc + s->above_blocks[s->above_block_idx[b]].not_null_dc; model1 = model->coeff_dccv[pt]; model2 = model->coeff_dcct[pt][ctx]; for (coeff_idx=0; coeff_idx<64; ) { if ((coeff_idx>1 && ct==0) || vp56_rac_get_prob(c, model2[0])) { /* parse a coeff */ if (vp56_rac_get_prob(c, model2[2])) { if (vp56_rac_get_prob(c, model2[3])) { idx = vp56_rac_get_tree(c, vp56_pc_tree, model1); coeff = 
vp56_coeff_bias[idx+5]; for (i=vp56_coeff_bit_length[idx]; i>=0; i--) coeff += vp56_rac_get_prob(c, vp56_coeff_parse_table[idx][i]) << i; } else { if (vp56_rac_get_prob(c, model2[4])) coeff = 3 + vp56_rac_get_prob(c, model1[5]); else coeff = 2; } ct = 2; } else { ct = 1; coeff = 1; } sign = vp56_rac_get(c); coeff = (coeff ^ -sign) + sign; if (coeff_idx) coeff *= s->dequant_ac; idx = model->coeff_index_to_pos[coeff_idx]; s->block_coeff[b][permute[idx]] = coeff; run = 1; } else { /* parse a run */ ct = 0; if (coeff_idx > 0) { if (!vp56_rac_get_prob(c, model2[1])) break; model3 = model->coeff_runv[coeff_idx >= 6]; run = vp56_rac_get_tree(c, vp6_pcr_tree, model3); if (!run) for (run=9, i=0; i<6; i++) run += vp56_rac_get_prob(c, model3[i+8]) << i; } } cg = vp6_coeff_groups[coeff_idx+=run]; model1 = model2 = model->coeff_ract[pt][ct][cg]; } s->left_block[vp56_b6to4[b]].not_null_dc = s->above_blocks[s->above_block_idx[b]].not_null_dc = !!s->block_coeff[b][0]; } }"} {"target": 1, "idx": 8827, "func": "static void test_parse_path(void) { g_test_trap_subprocess (\"/logging/parse_path/subprocess\", 0, 0); g_test_trap_assert_passed(); g_test_trap_assert_stdout(\"\"); g_test_trap_assert_stderr(\"\"); }"} {"target": 0, "idx": 8837, "func": "int floatx80_unordered(floatx80 a, floatx80 b, float_status *status) { if ( ( ( extractFloatx80Exp( a ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( a )<<1 ) ) || ( ( extractFloatx80Exp( b ) == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( b )<<1 ) ) ) { float_raise(float_flag_invalid, status); return 1; } return 0; }"} {"target": 0, "idx": 8838, "func": "static void usbredir_do_attach(void *opaque) { USBRedirDevice *dev = opaque; /* In order to work properly with XHCI controllers we need these caps */ if ((dev->dev.port->speedmask & USB_SPEED_MASK_SUPER) && !( usbredirparser_peer_has_cap(dev->parser, usb_redir_cap_ep_info_max_packet_size) && usbredirparser_peer_has_cap(dev->parser, usb_redir_cap_32bits_bulk_length) && usbredirparser_peer_has_cap(dev->parser, usb_redir_cap_64bits_ids))) { ERROR(\"usb-redir-host lacks capabilities needed for use with XHCI\\n\"); usbredir_reject_device(dev); return; } if (usb_device_attach(&dev->dev) != 0) { WARNING(\"rejecting device due to speed mismatch\\n\"); usbredir_reject_device(dev); } }"} {"target": 0, "idx": 8853, "func": "uint64_t helper_fctidz (uint64_t arg) { CPU_DoubleU farg; farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN conversion */ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI); } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) { /* qNan / infinity conversion */ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI); } else { farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status); } return farg.ll; }"} {"target": 0, "idx": 8862, "func": "static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu, IOMMUNotifierFlag old, IOMMUNotifierFlag new) { VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); if (new & IOMMU_NOTIFIER_MAP) { error_report(\"Device at bus %s addr %02x.%d requires iommu \" \"notifier which is currently not supported by \" \"intel-iommu emulation\", vtd_as->bus->qbus.name, PCI_SLOT(vtd_as->devfn), PCI_FUNC(vtd_as->devfn)); exit(1); } }"} {"target": 0, "idx": 8864, "func": "static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, uint32_t *val, uint32_t dev_value, uint32_t valid_mask) { XenPTRegInfo *reg = cfg_entry->reg; uint32_t writable_mask = 0; uint32_t 
throughable_mask = get_throughable_mask(s, reg, valid_mask); /* modify emulate register */ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); /* create value for writing to I/O device register */ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); return 0; }"} {"target": 1, "idx": 8873, "func": "coroutine_fn iscsi_co_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) { IscsiLun *iscsilun = bs->opaque; struct IscsiTask iTask; uint64_t lba; uint32_t nb_blocks; if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) { return -EINVAL; } if (!iscsilun->lbp.lbpws) { /* WRITE SAME is not supported by the target */ return -ENOTSUP; } lba = sector_qemu2lun(sector_num, iscsilun); nb_blocks = sector_qemu2lun(nb_sectors, iscsilun); if (iscsilun->zeroblock == NULL) { iscsilun->zeroblock = g_malloc0(iscsilun->block_size); } iscsi_co_init_iscsitask(iscsilun, &iTask); retry: if (iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba, iscsilun->zeroblock, iscsilun->block_size, nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP), 0, 0, iscsi_co_generic_cb, &iTask) == NULL) { return -EIO; } while (!iTask.complete) { iscsi_set_events(iscsilun); qemu_coroutine_yield(); } if (iTask.task != NULL) { scsi_free_scsi_task(iTask.task); iTask.task = NULL; } if (iTask.do_retry) { goto retry; } if (iTask.status != SCSI_STATUS_GOOD) { return -EIO; } return 0; }"} {"target": 1, "idx": 8874, "func": "static BlkverifyAIOCB *blkverify_aio_get(BlockDriverState *bs, bool is_write, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque) { BlkverifyAIOCB *acb = qemu_aio_get(&blkverify_aiocb_info, bs, cb, opaque); acb->is_write = is_write; acb->sector_num = sector_num; acb->nb_sectors = nb_sectors; acb->ret = -EINPROGRESS; acb->done = 0; acb->qiov = qiov; acb->buf = NULL; acb->verify = NULL; return acb; }"} {"target": 0, "idx": 8881, "func": "int av_write_header(AVFormatContext *s) { int ret, i; AVStream *st; // some sanity checks if (s->nb_streams == 0) { av_log(s, AV_LOG_ERROR, \"no streams\\n\"); return -1; } for(i=0;inb_streams;i++) { st = s->streams[i]; switch (st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if(st->codec->sample_rate<=0){ av_log(s, AV_LOG_ERROR, \"sample rate not set\\n\"); return -1; } if(!st->codec->block_align) st->codec->block_align = st->codec->channels * av_get_bits_per_sample(st->codec->codec_id) >> 3; break; case AVMEDIA_TYPE_VIDEO: if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too? av_log(s, AV_LOG_ERROR, \"time base not set\\n\"); return -1; } if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){ av_log(s, AV_LOG_ERROR, \"dimensions not set\\n\"); return -1; } if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){ av_log(s, AV_LOG_ERROR, \"Aspect ratio mismatch between encoder and muxer layer\\n\"); return -1; } break; } if(s->oformat->codec_tag){ if(st->codec->codec_tag){ //FIXME //check that tag + id is in the table //if neither is in the table -> OK //if tag is in the table with another id -> FAIL //if id is in the table with another tag -> FAIL unless strict < ? 
}else st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id); } if(s->oformat->flags & AVFMT_GLOBALHEADER && !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER)) av_log(s, AV_LOG_WARNING, \"Codec for stream %d does not use global headers but container format requires global headers\\n\", i); } if (!s->priv_data && s->oformat->priv_data_size > 0) { s->priv_data = av_mallocz(s->oformat->priv_data_size); if (!s->priv_data) return AVERROR(ENOMEM); } #if LIBAVFORMAT_VERSION_MAJOR < 53 ff_metadata_mux_compat(s); #endif /* set muxer identification string */ if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) { AVMetadata *m; AVMetadataTag *t; if (!(m = av_mallocz(sizeof(AVMetadata)))) return AVERROR(ENOMEM); av_metadata_set2(&m, \"encoder\", LIBAVFORMAT_IDENT, 0); metadata_conv(&m, s->oformat->metadata_conv, NULL); if ((t = av_metadata_get(m, \"\", NULL, AV_METADATA_IGNORE_SUFFIX))) av_metadata_set2(&s->metadata, t->key, t->value, 0); av_metadata_free(&m); } if(s->oformat->write_header){ ret = s->oformat->write_header(s); if (ret < 0) return ret; } /* init PTS generation */ for(i=0;inb_streams;i++) { int64_t den = AV_NOPTS_VALUE; st = s->streams[i]; switch (st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: den = (int64_t)st->time_base.num * st->codec->sample_rate; break; case AVMEDIA_TYPE_VIDEO: den = (int64_t)st->time_base.num * st->codec->time_base.den; break; default: break; } if (den != AV_NOPTS_VALUE) { if (den <= 0) return AVERROR_INVALIDDATA; av_frac_init(&st->pts, 0, 0, den); } } return 0; }"} {"target": 1, "idx": 8906, "func": "static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts) { AVFrame *decoded_frame, *filtered_frame = NULL; void *buffer_to_free = NULL; int i, ret = 0; float quality; #if CONFIG_AVFILTER int frame_available = 1; #endif if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame())) return AVERROR(ENOMEM); else avcodec_get_frame_defaults(ist->decoded_frame); decoded_frame = ist->decoded_frame; pkt->pts = *pkt_pts; pkt->dts = ist->last_dts; *pkt_pts = AV_NOPTS_VALUE; ret = avcodec_decode_video2(ist->st->codec, decoded_frame, got_output, pkt); if (ret < 0) return ret; quality = same_quant ? 
decoded_frame->quality : 0; if (!*got_output) { /* no picture yet */ return ret; } decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts, decoded_frame->pkt_dts); pkt->size = 0; pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free); rate_emu_sleep(ist); for (i = 0; i < nb_output_streams; i++) { OutputStream *ost = &output_streams[i]; int frame_size, resample_changed; if (!check_output_constraints(ist, ost) || !ost->encoding_needed) continue; #if CONFIG_AVFILTER resample_changed = ost->resample_width != decoded_frame->width || ost->resample_height != decoded_frame->height || ost->resample_pix_fmt != decoded_frame->format; if (resample_changed) { av_log(NULL, AV_LOG_INFO, \"Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\\n\", ist->file_index, ist->st->index, ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt), decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format)); avfilter_graph_free(&ost->graph); if (configure_video_filters(ist, ost)) { av_log(NULL, AV_LOG_FATAL, \"Error reinitializing filters!\\n\"); exit_program(1); } ost->resample_width = decoded_frame->width; ost->resample_height = decoded_frame->height; ost->resample_pix_fmt = decoded_frame->format; } if (ist->st->sample_aspect_ratio.num) decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio; if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) { FrameBuffer *buf = decoded_frame->opaque; AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays( decoded_frame->data, decoded_frame->linesize, AV_PERM_READ | AV_PERM_PRESERVE, ist->st->codec->width, ist->st->codec->height, ist->st->codec->pix_fmt); avfilter_copy_frame_props(fb, decoded_frame); fb->buf->priv = buf; fb->buf->free = filter_release_buffer; buf->refcount++; av_buffersrc_buffer(ost->input_video_filter, fb); } else av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, decoded_frame->pts, decoded_frame->sample_aspect_ratio); if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) { av_free(buffer_to_free); return AVERROR(ENOMEM); } else avcodec_get_frame_defaults(ist->filtered_frame); filtered_frame = ist->filtered_frame; frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]); while (frame_available) { AVRational ist_pts_tb; get_filtered_video_frame(ost->output_video_filter, filtered_frame, &ost->picref, &ist_pts_tb); if (ost->picref) filtered_frame->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q); if (ost->picref->video && !ost->frame_aspect_ratio) ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect; #else filtered_frame = decoded_frame; #endif do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size, same_quant ? 
quality : ost->st->codec->global_quality); if (vstats_filename && frame_size) do_video_stats(output_files[ost->file_index].ctx, ost, frame_size); #if CONFIG_AVFILTER frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]); if (ost->picref) avfilter_unref_buffer(ost->picref); } #endif } av_free(buffer_to_free); return ret; }"} {"target": 0, "idx": 8915, "func": "static void test_validate_list(TestInputVisitorData *data, const void *unused) { UserDefOneList *head = NULL; Visitor *v; v = validate_test_init(data, \"[ { 'string': 'string0', 'integer': 42 }, { 'string': 'string1', 'integer': 43 }, { 'string': 'string2', 'integer': 44 } ]\"); visit_type_UserDefOneList(v, NULL, &head, &error_abort); qapi_free_UserDefOneList(head); }"} {"target": 0, "idx": 8930, "func": "static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int predictor, int point_transform){ int i, mb_x, mb_y; uint16_t (*buffer)[4]; int left[3], top[3], topleft[3]; const int linesize= s->linesize[0]; const int mask= (1<bits)-1; av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, (unsigned)s->mb_width * 4 * sizeof(s->ljpeg_buffer[0][0])); buffer= s->ljpeg_buffer; for(i=0; i<3; i++){ buffer[0][i]= 1 << (s->bits + point_transform - 1); } for(mb_y = 0; mb_y < s->mb_height; mb_y++) { const int modified_predictor= mb_y ? predictor : 1; uint8_t *ptr = s->picture.data[0] + (linesize * mb_y); if (s->interlaced && s->bottom_field) ptr += linesize >> 1; for(i=0; i<3; i++){ top[i]= left[i]= topleft[i]= buffer[0][i]; } for(mb_x = 0; mb_x < s->mb_width; mb_x++) { if (s->restart_interval && !s->restart_count) s->restart_count = s->restart_interval; for(i=0;i<3;i++) { int pred; topleft[i]= top[i]; top[i]= buffer[mb_x][i]; PREDICT(pred, topleft[i], top[i], left[i], modified_predictor); left[i]= buffer[mb_x][i]= mask & (pred + (mjpeg_decode_dc(s, s->dc_index[i]) << point_transform)); } if (s->restart_interval && !--s->restart_count) { align_get_bits(&s->gb); skip_bits(&s->gb, 16); /* skip RSTn */ } } if(s->rct){ for(mb_x = 0; mb_x < s->mb_width; mb_x++) { ptr[3*mb_x+1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200)>>2); ptr[3*mb_x+0] = buffer[mb_x][1] + ptr[3*mb_x+1]; ptr[3*mb_x+2] = buffer[mb_x][2] + ptr[3*mb_x+1]; } }else if(s->pegasus_rct){ for(mb_x = 0; mb_x < s->mb_width; mb_x++) { ptr[3*mb_x+1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2])>>2); ptr[3*mb_x+0] = buffer[mb_x][1] + ptr[3*mb_x+1]; ptr[3*mb_x+2] = buffer[mb_x][2] + ptr[3*mb_x+1]; } }else{ for(mb_x = 0; mb_x < s->mb_width; mb_x++) { ptr[3*mb_x+0] = buffer[mb_x][2]; ptr[3*mb_x+1] = buffer[mb_x][1]; ptr[3*mb_x+2] = buffer[mb_x][0]; } } } return 0; }"} {"target": 0, "idx": 8971, "func": "void vga_common_init(VGAState *s, DisplayState *ds, uint8_t *vga_ram_base, unsigned long vga_ram_offset, int vga_ram_size) { int i, j, v, b; for(i = 0;i < 256; i++) { v = 0; for(j = 0; j < 8; j++) { v |= ((i >> j) & 1) << (j * 4); } expand4[i] = v; v = 0; for(j = 0; j < 4; j++) { v |= ((i >> (2 * j)) & 3) << (j * 4); } expand2[i] = v; } for(i = 0; i < 16; i++) { v = 0; for(j = 0; j < 4; j++) { b = ((i >> j) & 1); v |= b << (2 * j); v |= b << (2 * j + 1); } expand4to8[i] = v; } vga_reset(s); s->vram_ptr = vga_ram_base; s->vram_offset = vga_ram_offset; s->vram_size = vga_ram_size; s->ds = ds; s->get_bpp = vga_get_bpp; s->get_offsets = vga_get_offsets; s->get_resolution = vga_get_resolution; s->update = vga_update_display; s->invalidate = vga_invalidate_display; s->screen_dump = vga_screen_dump; s->text_update = 
vga_update_text; switch (vga_retrace_method) { case VGA_RETRACE_DUMB: s->retrace = vga_dumb_retrace; s->update_retrace_info = vga_dumb_update_retrace_info; break; case VGA_RETRACE_PRECISE: s->retrace = vga_precise_retrace; s->update_retrace_info = vga_precise_update_retrace_info; memset(&s->retrace_info, 0, sizeof (s->retrace_info)); break; } }"} {"target": 0, "idx": 8978, "func": "void monitor_init(CharDriverState *chr, int show_banner) { int i; if (is_first_init) { key_timer = qemu_new_timer(vm_clock, release_keys, NULL); if (!key_timer) return; for (i = 0; i < MAX_MON; i++) { monitor_hd[i] = NULL; } is_first_init = 0; } for (i = 0; i < MAX_MON; i++) { if (monitor_hd[i] == NULL) { monitor_hd[i] = chr; break; } } hide_banner = !show_banner; qemu_chr_add_handlers(chr, term_can_read, term_read, term_event, cur_mon); readline_start(\"\", 0, monitor_command_cb, NULL); }"} {"target": 0, "idx": 8980, "func": "static int twinvq_read_bitstream(AVCodecContext *avctx, TwinVQContext *tctx, const uint8_t *buf, int buf_size) { TwinVQFrameData *bits = &tctx->bits; const TwinVQModeTab *mtab = tctx->mtab; int channels = tctx->avctx->channels; int sub; GetBitContext gb; int i, j, k; if (buf_size * 8 < avctx->bit_rate * mtab->size / avctx->sample_rate + 8) { av_log(avctx, AV_LOG_ERROR, \"Frame too small (%d bytes). Truncated file?\\n\", buf_size); return AVERROR(EINVAL); } init_get_bits(&gb, buf, buf_size * 8); skip_bits(&gb, get_bits(&gb, 8)); bits->window_type = get_bits(&gb, TWINVQ_WINDOW_TYPE_BITS); if (bits->window_type > 8) { av_log(avctx, AV_LOG_ERROR, \"Invalid window type, broken sample?\\n\"); return AVERROR_INVALIDDATA; } bits->ftype = ff_twinvq_wtype_to_ftype_table[tctx->bits.window_type]; sub = mtab->fmode[bits->ftype].sub; read_cb_data(tctx, &gb, bits->main_coeffs, bits->ftype); for (i = 0; i < channels; i++) for (j = 0; j < sub; j++) for (k = 0; k < mtab->fmode[bits->ftype].bark_n_coef; k++) bits->bark1[i][j][k] = get_bits(&gb, mtab->fmode[bits->ftype].bark_n_bit); for (i = 0; i < channels; i++) for (j = 0; j < sub; j++) bits->bark_use_hist[i][j] = get_bits1(&gb); if (bits->ftype == TWINVQ_FT_LONG) { for (i = 0; i < channels; i++) bits->gain_bits[i] = get_bits(&gb, TWINVQ_GAIN_BITS); } else { for (i = 0; i < channels; i++) { bits->gain_bits[i] = get_bits(&gb, TWINVQ_GAIN_BITS); for (j = 0; j < sub; j++) bits->sub_gain_bits[i * sub + j] = get_bits(&gb, TWINVQ_SUB_GAIN_BITS); } } for (i = 0; i < channels; i++) { bits->lpc_hist_idx[i] = get_bits(&gb, mtab->lsp_bit0); bits->lpc_idx1[i] = get_bits(&gb, mtab->lsp_bit1); for (j = 0; j < mtab->lsp_split; j++) bits->lpc_idx2[i][j] = get_bits(&gb, mtab->lsp_bit2); } if (bits->ftype == TWINVQ_FT_LONG) { read_cb_data(tctx, &gb, bits->ppc_coeffs, 3); for (i = 0; i < channels; i++) { bits->p_coef[i] = get_bits(&gb, mtab->ppc_period_bit); bits->g_coef[i] = get_bits(&gb, mtab->pgain_bit); } } return 0; }"} {"target": 0, "idx": 9001, "func": "int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr, void *buf, dma_addr_t len, DMADirection dir) { target_phys_addr_t paddr, plen; int err; #ifdef DEBUG_IOMMU fprintf(stderr, \"dma_memory_rw context=%p addr=0x\" DMA_ADDR_FMT \" len=0x\" DMA_ADDR_FMT \" dir=%d\\n\", dma, addr, len, dir); #endif while (len) { err = dma->translate(dma, addr, &paddr, &plen, dir); if (err) { /* * In case of failure on reads from the guest, we clean the * destination buffer so that a device that doesn't test * for errors will not expose qemu internal memory. 
*/ memset(buf, 0, len); return -1; } /* The translation might be valid for larger regions. */ if (plen > len) { plen = len; } address_space_rw(dma->as, paddr, buf, plen, dir == DMA_DIRECTION_FROM_DEVICE); len -= plen; addr += plen; buf += plen; } return 0; }"} {"target": 1, "idx": 9024, "func": "static void blend_frame(AVFilterContext *ctx, AVFrame *top_buf, AVFrame *bottom_buf, AVFrame *dst_buf) { BlendContext *b = ctx->priv; AVFilterLink *inlink = ctx->inputs[0]; FilterParams *param; int plane; for (plane = 0; dst_buf->data[plane]; plane++) { int hsub = plane == 1 || plane == 2 ? b->hsub : 0; int vsub = plane == 1 || plane == 2 ? b->vsub : 0; int outw = dst_buf->width >> hsub; int outh = dst_buf->height >> vsub; uint8_t *dst = dst_buf->data[plane]; uint8_t *top = top_buf->data[plane]; uint8_t *bottom = bottom_buf->data[plane]; param = &b->params[plane]; param->values[VAR_N] = inlink->frame_count; param->values[VAR_T] = dst_buf->pts == AV_NOPTS_VALUE ? NAN : dst_buf->pts * av_q2d(inlink->time_base); param->values[VAR_W] = outw; param->values[VAR_H] = outh; param->values[VAR_SW] = outw / dst_buf->width; param->values[VAR_SH] = outh / dst_buf->height; param->blend(top, top_buf->linesize[plane], bottom, bottom_buf->linesize[plane], dst, dst_buf->linesize[plane], outw, outh, param); } }"} {"target": 0, "idx": 9032, "func": "START_TEST(simple_list) { int i; struct { const char *encoded; LiteralQObject decoded; } test_cases[] = { { .encoded = \"[43,42]\", .decoded = QLIT_QLIST(((LiteralQObject[]){ QLIT_QINT(43), QLIT_QINT(42), { } })), }, { .encoded = \"[43]\", .decoded = QLIT_QLIST(((LiteralQObject[]){ QLIT_QINT(43), { } })), }, { .encoded = \"[]\", .decoded = QLIT_QLIST(((LiteralQObject[]){ { } })), }, { .encoded = \"[{}]\", .decoded = QLIT_QLIST(((LiteralQObject[]){ QLIT_QDICT(((LiteralQDictEntry[]){ {}, })), {}, })), }, { } }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QString *str; obj = qobject_from_json(test_cases[i].encoded); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QLIST); fail_unless(compare_litqobj_to_qobj(&test_cases[i].decoded, obj) == 1); str = qobject_to_json(obj); qobject_decref(obj); obj = qobject_from_json(qstring_get_str(str)); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QLIST); fail_unless(compare_litqobj_to_qobj(&test_cases[i].decoded, obj) == 1); qobject_decref(obj); QDECREF(str); } }"} {"target": 0, "idx": 9051, "func": "static void readline_update(ReadLineState *rs) { int i, delta, len; if (rs->cmd_buf_size != rs->last_cmd_buf_size || memcmp(rs->cmd_buf, rs->last_cmd_buf, rs->cmd_buf_size) != 0) { for(i = 0; i < rs->last_cmd_buf_index; i++) { monitor_printf(rs->mon, \"\\033[D\"); } rs->cmd_buf[rs->cmd_buf_size] = '\\0'; if (rs->read_password) { len = strlen(rs->cmd_buf); for(i = 0; i < len; i++) monitor_printf(rs->mon, \"*\"); } else { monitor_printf(rs->mon, \"%s\", rs->cmd_buf); } monitor_printf(rs->mon, \"\\033[K\"); memcpy(rs->last_cmd_buf, rs->cmd_buf, rs->cmd_buf_size); rs->last_cmd_buf_size = rs->cmd_buf_size; rs->last_cmd_buf_index = rs->cmd_buf_size; } if (rs->cmd_buf_index != rs->last_cmd_buf_index) { delta = rs->cmd_buf_index - rs->last_cmd_buf_index; if (delta > 0) { for(i = 0;i < delta; i++) { monitor_printf(rs->mon, \"\\033[C\"); } } else { delta = -delta; for(i = 0;i < delta; i++) { monitor_printf(rs->mon, \"\\033[D\"); } } rs->last_cmd_buf_index = rs->cmd_buf_index; } monitor_flush(rs->mon); }"} {"target": 1, "idx": 9079, "func": "void virtio_queue_update_rings(VirtIODevice *vdev, int n) { VRing 
*vring = &vdev->vq[n].vring; if (!vring->desc) { /* not yet setup -> nothing to do */ return; } vring->avail = vring->desc + vring->num * sizeof(VRingDesc); vring->used = vring_align(vring->avail + offsetof(VRingAvail, ring[vring->num]), vring->align); virtio_init_region_cache(vdev, n); }"} {"target": 1, "idx": 9082, "func": "static int qcow2_write_snapshots(BlockDriverState *bs) { BDRVQcowState *s = bs->opaque; QCowSnapshot *sn; QCowSnapshotHeader h; QCowSnapshotExtraData extra; int i, name_size, id_str_size, snapshots_size; struct { uint32_t nb_snapshots; uint64_t snapshots_offset; } QEMU_PACKED header_data; int64_t offset, snapshots_offset; int ret; /* compute the size of the snapshots */ offset = 0; for(i = 0; i < s->nb_snapshots; i++) { sn = s->snapshots + i; offset = align_offset(offset, 8); offset += sizeof(h); offset += sizeof(extra); offset += strlen(sn->id_str); offset += strlen(sn->name); } snapshots_size = offset; /* Allocate space for the new snapshot list */ snapshots_offset = qcow2_alloc_clusters(bs, snapshots_size); bdrv_flush(bs->file); offset = snapshots_offset; if (offset < 0) { return offset; } /* Write all snapshots to the new list */ for(i = 0; i < s->nb_snapshots; i++) { sn = s->snapshots + i; memset(&h, 0, sizeof(h)); h.l1_table_offset = cpu_to_be64(sn->l1_table_offset); h.l1_size = cpu_to_be32(sn->l1_size); /* If it doesn't fit in 32 bit, older implementations should treat it * as a disk-only snapshot rather than truncate the VM state */ if (sn->vm_state_size <= 0xffffffff) { h.vm_state_size = cpu_to_be32(sn->vm_state_size); } h.date_sec = cpu_to_be32(sn->date_sec); h.date_nsec = cpu_to_be32(sn->date_nsec); h.vm_clock_nsec = cpu_to_be64(sn->vm_clock_nsec); h.extra_data_size = cpu_to_be32(sizeof(extra)); memset(&extra, 0, sizeof(extra)); extra.vm_state_size_large = cpu_to_be64(sn->vm_state_size); extra.disk_size = cpu_to_be64(sn->disk_size); id_str_size = strlen(sn->id_str); name_size = strlen(sn->name); h.id_str_size = cpu_to_be16(id_str_size); h.name_size = cpu_to_be16(name_size); offset = align_offset(offset, 8); ret = bdrv_pwrite(bs->file, offset, &h, sizeof(h)); if (ret < 0) { goto fail; } offset += sizeof(h); ret = bdrv_pwrite(bs->file, offset, &extra, sizeof(extra)); if (ret < 0) { goto fail; } offset += sizeof(extra); ret = bdrv_pwrite(bs->file, offset, sn->id_str, id_str_size); if (ret < 0) { goto fail; } offset += id_str_size; ret = bdrv_pwrite(bs->file, offset, sn->name, name_size); if (ret < 0) { goto fail; } offset += name_size; } /* * Update the header to point to the new snapshot table. This requires the * new table and its refcounts to be stable on disk. 
*/ ret = bdrv_flush(bs); if (ret < 0) { goto fail; } QEMU_BUILD_BUG_ON(offsetof(QCowHeader, snapshots_offset) != offsetof(QCowHeader, nb_snapshots) + sizeof(header_data.nb_snapshots)); header_data.nb_snapshots = cpu_to_be32(s->nb_snapshots); header_data.snapshots_offset = cpu_to_be64(snapshots_offset); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, nb_snapshots), &header_data, sizeof(header_data)); if (ret < 0) { goto fail; } /* free the old snapshot table */ qcow2_free_clusters(bs, s->snapshots_offset, s->snapshots_size); s->snapshots_offset = snapshots_offset; s->snapshots_size = snapshots_size; return 0; fail: return ret; }"} {"target": 0, "idx": 9083, "func": "void ff_put_h264_qpel4_mc21_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_midv_qrt_4w_msa(src - (2 * stride) - 2, stride, dst, stride, 4, 0); }"} {"target": 1, "idx": 9089, "func": "static void mirror_start_job(const char *job_id, BlockDriverState *bs, BlockDriverState *target, const char *replaces, int64_t speed, uint32_t granularity, int64_t buf_size, BlockMirrorBackingMode backing_mode, BlockdevOnError on_source_error, BlockdevOnError on_target_error, bool unmap, BlockCompletionFunc *cb, void *opaque, Error **errp, const BlockJobDriver *driver, bool is_none_mode, BlockDriverState *base) { MirrorBlockJob *s; if (granularity == 0) { granularity = bdrv_get_default_bitmap_granularity(target); } assert ((granularity & (granularity - 1)) == 0); if (buf_size < 0) { error_setg(errp, \"Invalid parameter 'buf-size'\"); return; } if (buf_size == 0) { buf_size = DEFAULT_MIRROR_BUF_SIZE; } s = block_job_create(job_id, driver, bs, speed, cb, opaque, errp); if (!s) { return; } s->target = blk_new(); blk_insert_bs(s->target, target); s->replaces = g_strdup(replaces); s->on_source_error = on_source_error; s->on_target_error = on_target_error; s->is_none_mode = is_none_mode; s->backing_mode = backing_mode; s->base = base; s->granularity = granularity; s->buf_size = ROUND_UP(buf_size, granularity); s->unmap = unmap; s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); if (!s->dirty_bitmap) { g_free(s->replaces); blk_unref(s->target); block_job_unref(&s->common); return; } bdrv_op_block_all(target, s->common.blocker); s->common.co = qemu_coroutine_create(mirror_run); trace_mirror_start(bs, s, s->common.co, opaque); qemu_coroutine_enter(s->common.co, s); }"} {"target": 1, "idx": 9110, "func": "static void vmgenid_realize(DeviceState *dev, Error **errp) { VmGenIdState *vms = VMGENID(dev); if (!vms->write_pointer_available) { error_setg(errp, \"%s requires DMA write support in fw_cfg, \" \"which this machine type does not provide\", VMGENID_DEVICE); return; } /* Given that this function is executing, there is at least one VMGENID * device. Check if there are several. 
*/ if (!find_vmgenid_dev()) { error_setg(errp, \"at most one %s device is permitted\", VMGENID_DEVICE); return; } qemu_register_reset(vmgenid_handle_reset, vms); }"} {"target": 0, "idx": 9130, "func": "static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, uint8_t *data_start, unsigned int data_size) { WebPContext *s = avctx->priv_data; AVPacket pkt; int ret; if (!s->initialized) { ff_vp8_decode_init(avctx); s->initialized = 1; avctx->get_format = webp_get_format; } s->lossless = 0; if (data_size > INT_MAX) { av_log(avctx, AV_LOG_ERROR, \"unsupported chunk size\\n\"); return AVERROR_PATCHWELCOME; } av_init_packet(&pkt); pkt.data = data_start; pkt.size = data_size; ret = ff_vp8_decode_frame(avctx, p, got_frame, &pkt); if (s->has_alpha) { ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data, s->alpha_data_size); if (ret < 0) return ret; } return ret; }"} {"target": 1, "idx": 9142, "func": "static void gen_window_check1(DisasContext *dc, unsigned r1) { if (dc->tb->flags & XTENSA_TBFLAG_EXCM) { return; } if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) && r1 / 4 > dc->used_window) { int label = gen_new_label(); TCGv_i32 ws = tcg_temp_new_i32(); dc->used_window = r1 / 4; tcg_gen_deposit_i32(ws, cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], dc->config->nareg / 4, dc->config->nareg / 4); tcg_gen_shr_i32(ws, ws, cpu_SR[WINDOW_BASE]); tcg_gen_andi_i32(ws, ws, (2 << (r1 / 4)) - 2); tcg_gen_brcondi_i32(TCG_COND_EQ, ws, 0, label); { TCGv_i32 pc = tcg_const_i32(dc->pc); TCGv_i32 w = tcg_const_i32(r1 / 4); gen_advance_ccount_cond(dc); gen_helper_window_check(cpu_env, pc, w); tcg_temp_free(w); tcg_temp_free(pc); } gen_set_label(label); tcg_temp_free(ws); } }"} {"target": 1, "idx": 9146, "func": "static void set_watchdog_timer(IPMIBmcSim *ibs, uint8_t *cmd, unsigned int cmd_len, uint8_t *rsp, unsigned int *rsp_len, unsigned int max_rsp_len) { IPMIInterface *s = ibs->parent.intf; IPMIInterfaceClass *k = IPMI_INTERFACE_GET_CLASS(s); unsigned int val; IPMI_CHECK_CMD_LEN(8); val = cmd[2] & 0x7; /* Validate use */ if (val == 0 || val > 5) { rsp[2] = IPMI_CC_INVALID_DATA_FIELD; return; } val = cmd[3] & 0x7; /* Validate action */ switch (val) { case IPMI_BMC_WATCHDOG_ACTION_NONE: break; case IPMI_BMC_WATCHDOG_ACTION_RESET: rsp[2] = k->do_hw_op(s, IPMI_RESET_CHASSIS, 1); break; case IPMI_BMC_WATCHDOG_ACTION_POWER_DOWN: rsp[2] = k->do_hw_op(s, IPMI_POWEROFF_CHASSIS, 1); break; case IPMI_BMC_WATCHDOG_ACTION_POWER_CYCLE: rsp[2] = k->do_hw_op(s, IPMI_POWERCYCLE_CHASSIS, 1); break; default: rsp[2] = IPMI_CC_INVALID_DATA_FIELD; } if (rsp[2]) { rsp[2] = IPMI_CC_INVALID_DATA_FIELD; return; } val = (cmd[3] >> 4) & 0x7; /* Validate preaction */ switch (val) { case IPMI_BMC_WATCHDOG_PRE_MSG_INT: case IPMI_BMC_WATCHDOG_PRE_NONE: break; case IPMI_BMC_WATCHDOG_PRE_NMI: if (!k->do_hw_op(s, IPMI_SEND_NMI, 1)) { /* NMI not supported. 
*/ rsp[2] = IPMI_CC_INVALID_DATA_FIELD; return; } break; default: /* We don't support PRE_SMI */ rsp[2] = IPMI_CC_INVALID_DATA_FIELD; return; } ibs->watchdog_initialized = 1; ibs->watchdog_use = cmd[2] & IPMI_BMC_WATCHDOG_USE_MASK; ibs->watchdog_action = cmd[3] & IPMI_BMC_WATCHDOG_ACTION_MASK; ibs->watchdog_pretimeout = cmd[4]; ibs->watchdog_expired &= ~cmd[5]; ibs->watchdog_timeout = cmd[6] | (((uint16_t) cmd[7]) << 8); if (ibs->watchdog_running & IPMI_BMC_WATCHDOG_GET_DONT_STOP(ibs)) { do_watchdog_reset(ibs); } else { ibs->watchdog_running = 0; } }"} {"target": 1, "idx": 9192, "func": "static void sd_1d97_float(float *p, int i0, int i1) { int i; if (i1 <= i0 + 1) { if (i0 == 1) p[1] *= F_LFTG_X; else p[0] *= F_LFTG_K; return; } extend97_float(p, i0, i1); i0++; i1++; for (i = i0/2 - 2; i < i1/2 + 1; i++) p[2*i+1] -= 1.586134 * (p[2*i] + p[2*i+2]); for (i = i0/2 - 1; i < i1/2 + 1; i++) p[2*i] -= 0.052980 * (p[2*i-1] + p[2*i+1]); for (i = i0/2 - 1; i < i1/2; i++) p[2*i+1] += 0.882911 * (p[2*i] + p[2*i+2]); for (i = i0/2; i < i1/2; i++) p[2*i] += 0.443506 * (p[2*i-1] + p[2*i+1]); }"} {"target": 1, "idx": 9198, "func": "int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx) { int i, ret; Picture *pic; s->mb_skipped = 0; if (!ff_thread_can_start_frame(avctx)) { av_log(avctx, AV_LOG_ERROR, \"Attempt to start a frame outside SETUP state\\n\"); return -1; } /* mark & release old frames */ if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f->buf[0]) { ff_mpeg_unref_picture(s, s->last_picture_ptr); } /* release forgotten pictures */ /* if (mpeg124/h263) */ for (i = 0; i < MAX_PICTURE_COUNT; i++) { if (&s->picture[i] != s->last_picture_ptr && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference && !s->picture[i].needs_realloc) { if (!(avctx->active_thread_type & FF_THREAD_FRAME)) av_log(avctx, AV_LOG_ERROR, \"releasing zombie picture\\n\"); ff_mpeg_unref_picture(s, &s->picture[i]); } } ff_mpeg_unref_picture(s, &s->current_picture); release_unused_pictures(s); if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) { // we already have a unused image // (maybe it was set before reading the header) pic = s->current_picture_ptr; } else { i = ff_find_unused_picture(s, 0); if (i < 0) { av_log(s->avctx, AV_LOG_ERROR, \"no frame buffer available\\n\"); return i; } pic = &s->picture[i]; } pic->reference = 0; if (!s->droppable) { if (s->pict_type != AV_PICTURE_TYPE_B) pic->reference = 3; } pic->f->coded_picture_number = s->coded_picture_number++; if (ff_alloc_picture(s, pic, 0) < 0) return -1; s->current_picture_ptr = pic; // FIXME use only the vars from current_pic s->current_picture_ptr->f->top_field_first = s->top_field_first; if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { if (s->picture_structure != PICT_FRAME) s->current_picture_ptr->f->top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field; } s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame && !s->progressive_sequence; s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME; s->current_picture_ptr->f->pict_type = s->pict_type; // if (s->flags && CODEC_FLAG_QSCALE) // s->current_picture_ptr->quality = s->new_picture_ptr->quality; s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; if ((ret = ff_mpeg_ref_picture(s, &s->current_picture, s->current_picture_ptr)) < 0) return ret; if (s->pict_type != AV_PICTURE_TYPE_B) { 
s->last_picture_ptr = s->next_picture_ptr; if (!s->droppable) s->next_picture_ptr = s->current_picture_ptr; } ff_dlog(s->avctx, \"L%p N%p C%p L%p N%p C%p type:%d drop:%d\\n\", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr, s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL, s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL, s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL, s->pict_type, s->droppable); if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) && (s->pict_type != AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)) { int h_chroma_shift, v_chroma_shift; av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0]) av_log(avctx, AV_LOG_DEBUG, \"allocating dummy last picture for B frame\\n\"); else if (s->pict_type != AV_PICTURE_TYPE_I) av_log(avctx, AV_LOG_ERROR, \"warning: first frame is no keyframe\\n\"); else if (s->picture_structure != PICT_FRAME) av_log(avctx, AV_LOG_DEBUG, \"allocate dummy last picture for field based first keyframe\\n\"); /* Allocate a dummy frame */ i = ff_find_unused_picture(s, 0); if (i < 0) { av_log(s->avctx, AV_LOG_ERROR, \"no frame buffer available\\n\"); return i; } s->last_picture_ptr = &s->picture[i]; s->last_picture_ptr->reference = 3; s->last_picture_ptr->f->key_frame = 0; s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P; if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) { s->last_picture_ptr = NULL; return -1; } if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) { for(i=0; i<avctx->height; i++) memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 0x80, avctx->width); for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) { memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i, 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift)); memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i, 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift)); } if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){ for(i=0; i<avctx->height; i++) memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width); } } ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1); } if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) && s->pict_type == AV_PICTURE_TYPE_B) { /* Allocate a dummy frame */ i = ff_find_unused_picture(s, 0); if (i < 0) { av_log(s->avctx, AV_LOG_ERROR, \"no frame buffer available\\n\"); return i; } s->next_picture_ptr = &s->picture[i]; s->next_picture_ptr->reference = 3; s->next_picture_ptr->f->key_frame = 0; s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P; if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) { s->next_picture_ptr = NULL; return -1; } ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1); } #if 0 // BUFREF-FIXME memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data)); memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data)); #endif if (s->last_picture_ptr) { ff_mpeg_unref_picture(s, &s->last_picture); if (s->last_picture_ptr->f->buf[0] && (ret = ff_mpeg_ref_picture(s, &s->last_picture, s->last_picture_ptr)) < 0) return ret; } if (s->next_picture_ptr) { ff_mpeg_unref_picture(s, &s->next_picture); if (s->next_picture_ptr->f->buf[0] && (ret =
ff_mpeg_ref_picture(s, &s->next_picture, s->next_picture_ptr)) < 0) return ret; } av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f->buf[0])); if (s->picture_structure!= PICT_FRAME) { int i; for (i = 0; i < 4; i++) { if (s->picture_structure == PICT_BOTTOM_FIELD) { s->current_picture.f->data[i] += s->current_picture.f->linesize[i]; } s->current_picture.f->linesize[i] *= 2; s->last_picture.f->linesize[i] *= 2; s->next_picture.f->linesize[i] *= 2; } } s->err_recognition = avctx->err_recognition; /* set dequantizer, we can't do it during init as * it might change for mpeg4 and we can't do it in the header * decode as init is not called for mpeg4 there yet */ if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra; s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter; } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) { s->dct_unquantize_intra = s->dct_unquantize_h263_intra; s->dct_unquantize_inter = s->dct_unquantize_h263_inter; } else { s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra; s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter; } if (s->avctx->debug & FF_DEBUG_NOMC) { gray_frame(s->current_picture_ptr->f); } return 0; }"} {"target": 0, "idx": 9207, "func": "int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) { NBDClientSession *client = nbd_get_client_session(bs); NBDRequest request = { .type = NBD_CMD_TRIM, .from = offset, .len = bytes, }; assert(!(client->info.flags & NBD_FLAG_READ_ONLY)); if (!(client->info.flags & NBD_FLAG_SEND_TRIM)) { return 0; } return nbd_co_request(bs, &request, NULL); }"} {"target": 0, "idx": 9211, "func": "static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor, uint32_t group_id) { QEMUCursor *c; size_t size; c = cursor_alloc(cursor->header.width, cursor->header.height); c->hot_x = cursor->header.hot_spot_x; c->hot_y = cursor->header.hot_spot_y; switch (cursor->header.type) { case SPICE_CURSOR_TYPE_ALPHA: size = sizeof(uint32_t) * cursor->header.width * cursor->header.height; qxl_unpack_chunks(c->data, size, qxl, &cursor->chunk, group_id); if (qxl->debug > 2) { cursor_print_ascii_art(c, \"qxl/alpha\"); } break; default: fprintf(stderr, \"%s: not implemented: type %d\\n\", __FUNCTION__, cursor->header.type); goto fail; } return c; fail: cursor_put(c); return NULL; }"} {"target": 0, "idx": 9218, "func": "QFloat *qfloat_from_double(double value) { QFloat *qf; qf = g_malloc(sizeof(*qf)); qf->value = value; QOBJECT_INIT(qf, &qfloat_type); return qf; }"} {"target": 0, "idx": 9220, "func": "void *qemu_blockalign(BlockDriverState *bs, size_t size) { return qemu_memalign(bdrv_opt_mem_align(bs), size); }"} {"target": 0, "idx": 9226, "func": "static void spapr_drc_release(sPAPRDRConnector *drc) { sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); drck->release(drc->dev); drc->awaiting_release = false; g_free(drc->fdt); drc->fdt = NULL; drc->fdt_start_offset = 0; object_property_del(OBJECT(drc), \"device\", &error_abort); drc->dev = NULL; }"} {"target": 1, "idx": 9227, "func": "static void jpeg_put_comments(MpegEncContext *s) { PutBitContext *p = &s->pb; int size; uint8_t *ptr; if (s->aspect_ratio_info /* && !lossless */) { /* JFIF header */ put_marker(p, APP0); put_bits(p, 16, 16); put_string(p, \"JFIF\"); /* this puts the trailing zero-byte too */ put_bits(p, 16, 0x0201); /* v 1.02 */ put_bits(p, 8, 0); /* units type: 0 - aspect ratio */ put_bits(p, 16, 
s->avctx->sample_aspect_ratio.num); put_bits(p, 16, s->avctx->sample_aspect_ratio.den); put_bits(p, 8, 0); /* thumbnail width */ put_bits(p, 8, 0); /* thumbnail height */ } /* comment */ if(!(s->flags & CODEC_FLAG_BITEXACT)){ put_marker(p, COM); flush_put_bits(p); ptr = pbBufPtr(p); put_bits(p, 16, 0); /* patched later */ put_string(p, LIBAVCODEC_IDENT); size = strlen(LIBAVCODEC_IDENT)+3; ptr[0] = size >> 8; ptr[1] = size; } }"} {"target": 1, "idx": 9229, "func": "static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc, vorbis_residue *vr, unsigned ch, uint8_t *do_not_decode, float *vec, unsigned vlen, unsigned ch_left, int vr_type) { GetBitContext *gb = &vc->gb; unsigned c_p_c = vc->codebooks[vr->classbook].dimensions; uint8_t *classifs = vr->classifs; unsigned pass, ch_used, i, j, k, l; unsigned max_output = (ch - 1) * vlen; int ptns_to_read = vr->ptns_to_read; int libvorbis_bug = 0; if (vr_type == 2) { for (j = 1; j < ch; ++j) do_not_decode[0] &= do_not_decode[j]; // FIXME - clobbering input if (do_not_decode[0]) return 0; ch_used = 1; max_output += vr->end / ch; } else { ch_used = ch; max_output += vr->end; } if (max_output > ch_left * vlen) { if (max_output <= ch_left * vlen + vr->partition_size*ch_used/ch) { ptns_to_read--; libvorbis_bug = 1; } else { av_log(vc->avctx, AV_LOG_ERROR, \"Insufficient output buffer\\n\"); return AVERROR_INVALIDDATA; } } av_dlog(NULL, \" residue type 0/1/2 decode begin, ch: %d cpc %d \\n\", ch, c_p_c); for (pass = 0; pass <= vr->maxpass; ++pass) { // FIXME OPTIMIZE? int voffset, partition_count, j_times_ptns_to_read; voffset = vr->begin; for (partition_count = 0; partition_count < ptns_to_read;) { // SPEC error if (!pass) { int ret; if ((ret = setup_classifs(vc, vr, do_not_decode, ch_used, partition_count)) < 0) return ret; } for (i = 0; (i < c_p_c) && (partition_count < ptns_to_read); ++i) { for (j_times_ptns_to_read = 0, j = 0; j < ch_used; ++j) { unsigned voffs; if (!do_not_decode[j]) { unsigned vqclass = classifs[j_times_ptns_to_read + partition_count]; int vqbook = vr->books[vqclass][pass]; if (vqbook >= 0 && vc->codebooks[vqbook].codevectors) { unsigned coffs; unsigned dim = vc->codebooks[vqbook].dimensions; unsigned step = FASTDIV(vr->partition_size << 1, dim << 1); vorbis_codebook codebook = vc->codebooks[vqbook]; if (vr_type == 0) { voffs = voffset+j*vlen; for (k = 0; k < step; ++k) { coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim; for (l = 0; l < dim; ++l) vec[voffs + k + l * step] += codebook.codevectors[coffs + l]; } } else if (vr_type == 1) { voffs = voffset + j * vlen; for (k = 0; k < step; ++k) { coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim; for (l = 0; l < dim; ++l, ++voffs) { vec[voffs]+=codebook.codevectors[coffs+l]; av_dlog(NULL, \" pass %d offs: %d curr: %f change: %f cv offs.: %d \\n\", pass, voffs, vec[voffs], codebook.codevectors[coffs+l], coffs); } } } else if (vr_type == 2 && ch == 2 && (voffset & 1) == 0 && (dim & 1) == 0) { // most frequent case optimized voffs = voffset >> 1; if (dim == 2) { for (k = 0; k < step; ++k) { coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * 2; vec[voffs + k ] += codebook.codevectors[coffs ]; vec[voffs + k + vlen] += codebook.codevectors[coffs + 1]; } } else if (dim == 4) { for (k = 0; k < step; ++k, voffs += 2) { coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * 4; vec[voffs ] += codebook.codevectors[coffs ]; vec[voffs + 1 ] += codebook.codevectors[coffs + 2]; vec[voffs + vlen ] += 
codebook.codevectors[coffs + 1]; vec[voffs + vlen + 1] += codebook.codevectors[coffs + 3]; } } else for (k = 0; k < step; ++k) { coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim; for (l = 0; l < dim; l += 2, voffs++) { vec[voffs ] += codebook.codevectors[coffs + l ]; vec[voffs + vlen] += codebook.codevectors[coffs + l + 1]; av_dlog(NULL, \" pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \\n\", pass, voffset / ch + (voffs % ch) * vlen, vec[voffset / ch + (voffs % ch) * vlen], codebook.codevectors[coffs + l], coffs, l); } } } else if (vr_type == 2) { unsigned voffs_div = FASTDIV(voffset << 1, ch <<1); unsigned voffs_mod = voffset - voffs_div * ch; for (k = 0; k < step; ++k) { coffs = get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim; for (l = 0; l < dim; ++l) { vec[voffs_div + voffs_mod * vlen] += codebook.codevectors[coffs + l]; av_dlog(NULL, \" pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \\n\", pass, voffs_div + voffs_mod * vlen, vec[voffs_div + voffs_mod * vlen], codebook.codevectors[coffs + l], coffs, l); if (++voffs_mod == ch) { voffs_div++; voffs_mod = 0; } } } } } } j_times_ptns_to_read += ptns_to_read; } ++partition_count; voffset += vr->partition_size; } } if (libvorbis_bug && !pass) { for (j = 0; j < ch_used; ++j) { if (!do_not_decode[j]) { get_vlc2(&vc->gb, vc->codebooks[vr->classbook].vlc.table, vc->codebooks[vr->classbook].nb_bits, 3); } } } } return 0; }"} {"target": 0, "idx": 9241, "func": "static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap) { ASFContext *asf = s->priv_data; GUID g; ByteIOContext *pb = &s->pb; AVStream *st; ASFStream *asf_st; int size, i, bps; INT64 gsize; get_guid(pb, &g); if (memcmp(&g, &asf_header, sizeof(GUID))) goto fail; get_le64(pb); get_le32(pb); get_byte(pb); get_byte(pb); memset(&asf->asfid2avid, -1, sizeof(asf->asfid2avid)); for(;;) { get_guid(pb, &g); gsize = get_le64(pb); #ifdef DEBUG printf(\"%08Lx: \", url_ftell(pb) - 24); print_guid(&g); printf(\" size=0x%Lx\\n\", gsize); #endif if (gsize < 24) goto fail; if (!memcmp(&g, &file_header, sizeof(GUID))) { get_guid(pb, &asf->hdr.guid); asf->hdr.file_size = get_le64(pb); asf->hdr.create_time = get_le64(pb); asf->hdr.packets_count = get_le64(pb); asf->hdr.play_time = get_le64(pb); asf->hdr.send_time = get_le64(pb); asf->hdr.preroll = get_le32(pb); asf->hdr.ignore = get_le32(pb); asf->hdr.flags = get_le32(pb); asf->hdr.min_pktsize = get_le32(pb); asf->hdr.max_pktsize = get_le32(pb); asf->hdr.max_bitrate = get_le32(pb); asf->packet_size = asf->hdr.max_pktsize; asf->nb_packets = asf->hdr.packets_count; } else if (!memcmp(&g, &stream_header, sizeof(GUID))) { int type, id, total_size; unsigned int tag1; INT64 pos1, pos2; pos1 = url_ftell(pb); st = av_mallocz(sizeof(AVStream)); if (!st) goto fail; s->streams[s->nb_streams] = st; asf_st = av_mallocz(sizeof(ASFStream)); if (!asf_st) goto fail; st->priv_data = asf_st; st->time_length = (asf->hdr.send_time - asf->hdr.preroll) / 10000; get_guid(pb, &g); if (!memcmp(&g, &audio_stream, sizeof(GUID))) { type = CODEC_TYPE_AUDIO; } else if (!memcmp(&g, &video_stream, sizeof(GUID))) { type = CODEC_TYPE_VIDEO; } else { goto fail; } get_guid(pb, &g); total_size = get_le64(pb); get_le32(pb); get_le32(pb); st->id = get_le16(pb) & 0x7f; /* stream id */ // mapping of asf ID to AV stream ID; asf->asfid2avid[st->id] = s->nb_streams++; get_le32(pb); st->codec.codec_type = type; st->codec.frame_rate = 1000; // in packet ticks if (type == CODEC_TYPE_AUDIO) { id = get_le16(pb); st->codec.codec_tag = id; 
st->codec.channels = get_le16(pb); st->codec.sample_rate = get_le32(pb); st->codec.bit_rate = get_le32(pb) * 8; st->codec.block_align = get_le16(pb); /* block align */ bps = get_le16(pb); /* bits per sample */ st->codec.codec_id = wav_codec_get_id(id, bps); size = get_le16(pb); if (size > 0) { st->extra_data = av_mallocz(size); get_buffer(pb, st->extra_data, size); st->extra_data_size = size; } /* We have to init the frame size at some point .... */ pos2 = url_ftell(pb); if (gsize > (pos2 + 8 - pos1 + 24)) { asf_st->ds_span = get_byte(pb); asf_st->ds_packet_size = get_le16(pb); asf_st->ds_chunk_size = get_le16(pb); asf_st->ds_data_size = get_le16(pb); asf_st->ds_silence_data = get_byte(pb); } //printf(\"Descrambling: ps:%d cs:%d ds:%d s:%d sd:%d\\n\", // asf_st->ds_packet_size, asf_st->ds_chunk_size, // asf_st->ds_data_size, asf_st->ds_span, asf_st->ds_silence_data); if (asf_st->ds_span > 1) { if (!asf_st->ds_chunk_size || (asf_st->ds_packet_size/asf_st->ds_chunk_size <= 1)) asf_st->ds_span = 0; // disable descrambling } switch (st->codec.codec_id) { case CODEC_ID_MP3LAME: st->codec.frame_size = MPA_FRAME_SIZE; break; case CODEC_ID_PCM_S16LE: case CODEC_ID_PCM_S16BE: case CODEC_ID_PCM_U16LE: case CODEC_ID_PCM_U16BE: case CODEC_ID_PCM_S8: case CODEC_ID_PCM_U8: case CODEC_ID_PCM_ALAW: case CODEC_ID_PCM_MULAW: st->codec.frame_size = 1; break; default: /* This is probably wrong, but it prevents a crash later */ st->codec.frame_size = 1; break; } } else { get_le32(pb); get_le32(pb); get_byte(pb); size = get_le16(pb); /* size */ get_le32(pb); /* size */ st->codec.width = get_le32(pb); st->codec.height = get_le32(pb); /* not available for asf */ get_le16(pb); /* panes */ get_le16(pb); /* depth */ tag1 = get_le32(pb); url_fskip(pb, 20); if (size > 40) { st->extra_data_size = size - 40; st->extra_data = av_mallocz(st->extra_data_size); get_buffer(pb, st->extra_data, st->extra_data_size); } st->codec.codec_tag = tag1; st->codec.codec_id = codec_get_id(codec_asf_bmp_tags, tag1); } pos2 = url_ftell(pb); url_fskip(pb, gsize - (pos2 - pos1 + 24)); } else if (!memcmp(&g, &data_header, sizeof(GUID))) { break; } else if (!memcmp(&g, &comment_header, sizeof(GUID))) { int len1, len2, len3, len4, len5; len1 = get_le16(pb); len2 = get_le16(pb); len3 = get_le16(pb); len4 = get_le16(pb); len5 = get_le16(pb); get_str16_nolen(pb, len1, s->title, sizeof(s->title)); get_str16_nolen(pb, len2, s->author, sizeof(s->author)); get_str16_nolen(pb, len3, s->copyright, sizeof(s->copyright)); get_str16_nolen(pb, len4, s->comment, sizeof(s->comment)); url_fskip(pb, len5); #if 0 } else if (!memcmp(&g, &head1_guid, sizeof(GUID))) { int v1, v2; get_guid(pb, &g); v1 = get_le32(pb); v2 = get_le16(pb); } else if (!memcmp(&g, &codec_comment_header, sizeof(GUID))) { int len, v1, n, num; char str[256], *q; char tag[16]; get_guid(pb, &g); print_guid(&g); n = get_le32(pb); for(i=0;i 0) { v1 = get_byte(pb); if ((q - tag) < sizeof(tag) - 1) *q++ = v1; len--; } *q = '\\0'; } #endif } else if (url_feof(pb)) { goto fail; } else { url_fseek(pb, gsize - 24, SEEK_CUR); } } get_guid(pb, &g); get_le64(pb); get_byte(pb); get_byte(pb); if (url_feof(pb)) goto fail; asf->data_offset = url_ftell(pb); asf->packet_size_left = 0; return 0; fail: for(i=0;i<s->nb_streams;i++) { AVStream *st = s->streams[i]; if (st) { av_free(st->priv_data); av_free(st->extra_data); } av_free(st); } //av_free(asf); return -1; }"} {"target": 0, "idx": 9248, "func": "static void guess_mv(ERContext *s) { uint8_t *fixed = s->er_temp_buffer; #define MV_FROZEN 3 #define MV_CHANGED 2
#define MV_UNCHANGED 1 const int mb_stride = s->mb_stride; const int mb_width = s->mb_width; int mb_height = s->mb_height; int i, depth, num_avail; int mb_x, mb_y, mot_step, mot_stride; if (s->last_pic.f && s->last_pic.f->data[0]) mb_height = FFMIN(mb_height, (s->last_pic.f->height+15)>>4); if (s->next_pic.f && s->next_pic.f->data[0]) mb_height = FFMIN(mb_height, (s->next_pic.f->height+15)>>4); set_mv_strides(s, &mot_step, &mot_stride); num_avail = 0; if (s->last_pic.motion_val[0]) ff_thread_await_progress(s->last_pic.tf, mb_height-1, 0); for (i = 0; i < mb_width * mb_height; i++) { const int mb_xy = s->mb_index2xy[i]; int f = 0; int error = s->error_status_table[mb_xy]; if (IS_INTRA(s->cur_pic.mb_type[mb_xy])) f = MV_FROZEN; // intra // FIXME check if (!(error & ER_MV_ERROR)) f = MV_FROZEN; // inter with undamaged MV fixed[mb_xy] = f; if (f == MV_FROZEN) num_avail++; else if(s->last_pic.f->data[0] && s->last_pic.motion_val[0]){ const int mb_y= mb_xy / s->mb_stride; const int mb_x= mb_xy % s->mb_stride; const int mot_index= (mb_x + mb_y*mot_stride) * mot_step; s->cur_pic.motion_val[0][mot_index][0]= s->last_pic.motion_val[0][mot_index][0]; s->cur_pic.motion_val[0][mot_index][1]= s->last_pic.motion_val[0][mot_index][1]; s->cur_pic.ref_index[0][4*mb_xy] = s->last_pic.ref_index[0][4*mb_xy]; } } if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width / 2) { for (mb_y = 0; mb_y < mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { const int mb_xy = mb_x + mb_y * s->mb_stride; int mv_dir = (s->last_pic.f && s->last_pic.f->data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD; if (IS_INTRA(s->cur_pic.mb_type[mb_xy])) continue; if (!(s->error_status_table[mb_xy] & ER_MV_ERROR)) continue; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); } } return; } for (depth = 0; ; depth++) { int changed, pass, none_left; none_left = 1; changed = 1; for (pass = 0; (changed || pass < 2) && pass < 10; pass++) { int mb_x, mb_y; int score_sum = 0; changed = 0; for (mb_y = 0; mb_y < mb_height; mb_y++) { for (mb_x = (mb_y ^ pass) & 1; mb_x < s->mb_width; mb_x+=2) { const int mb_xy = mb_x + mb_y * s->mb_stride; int mv_predictor[8][2] = { { 0 } }; int ref[8] = { 0 }; int pred_count = 0; int j; int best_score = 256 * 256 * 256 * 64; int best_pred = 0; const int mot_index = (mb_x + mb_y * mot_stride) * mot_step; int prev_x = 0, prev_y = 0, prev_ref = 0; if (fixed[mb_xy] == MV_FROZEN) continue; av_assert1(!IS_INTRA(s->cur_pic.mb_type[mb_xy])); av_assert1(s->last_pic.f && s->last_pic.f->data[0]); j = 0; if (mb_x > 0 && fixed[mb_xy - 1] == MV_FROZEN) j = 1; if (mb_x + 1 < mb_width && fixed[mb_xy + 1] == MV_FROZEN) j = 1; if (mb_y > 0 && fixed[mb_xy - mb_stride] == MV_FROZEN) j = 1; if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_FROZEN) j = 1; if (j == 0) continue; j = 0; if (mb_x > 0 && fixed[mb_xy - 1 ] == MV_CHANGED) j = 1; if (mb_x + 1 < mb_width && fixed[mb_xy + 1 ] == MV_CHANGED) j = 1; if (mb_y > 0 && fixed[mb_xy - mb_stride] == MV_CHANGED) j = 1; if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_CHANGED) j = 1; if (j == 0 && pass > 1) continue; none_left = 0; if (mb_x > 0 && fixed[mb_xy - 1]) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index - mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index - mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy - 1)]; pred_count++; } if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) { mv_predictor[pred_count][0] = 
s->cur_pic.motion_val[0][mot_index + mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index + mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy + 1)]; pred_count++; } if (mb_y > 0 && fixed[mb_xy - mb_stride]) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy - s->mb_stride)]; pred_count++; } if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy + s->mb_stride)]; pred_count++; } if (pred_count == 0) continue; if (pred_count > 1) { int sum_x = 0, sum_y = 0, sum_r = 0; int max_x, max_y, min_x, min_y, max_r, min_r; for (j = 0; j < pred_count; j++) { sum_x += mv_predictor[j][0]; sum_y += mv_predictor[j][1]; sum_r += ref[j]; if (j && ref[j] != ref[j - 1]) goto skip_mean_and_median; } /* mean */ mv_predictor[pred_count][0] = sum_x / j; mv_predictor[pred_count][1] = sum_y / j; ref[pred_count] = sum_r / j; /* median */ if (pred_count >= 3) { min_y = min_x = min_r = 99999; max_y = max_x = max_r = -99999; } else { min_x = min_y = max_x = max_y = min_r = max_r = 0; } for (j = 0; j < pred_count; j++) { max_x = FFMAX(max_x, mv_predictor[j][0]); max_y = FFMAX(max_y, mv_predictor[j][1]); max_r = FFMAX(max_r, ref[j]); min_x = FFMIN(min_x, mv_predictor[j][0]); min_y = FFMIN(min_y, mv_predictor[j][1]); min_r = FFMIN(min_r, ref[j]); } mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x; mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y; ref[pred_count + 1] = sum_r - max_r - min_r; if (pred_count == 4) { mv_predictor[pred_count + 1][0] /= 2; mv_predictor[pred_count + 1][1] /= 2; ref[pred_count + 1] /= 2; } pred_count += 2; } skip_mean_and_median: /* zero MV */ pred_count++; prev_x = s->cur_pic.motion_val[0][mot_index][0]; prev_y = s->cur_pic.motion_val[0][mot_index][1]; prev_ref = s->cur_pic.ref_index[0][4 * mb_xy]; /* last MV */ mv_predictor[pred_count][0] = prev_x; mv_predictor[pred_count][1] = prev_y; ref[pred_count] = prev_ref; pred_count++; for (j = 0; j < pred_count; j++) { int *linesize = s->cur_pic.f->linesize; int score = 0; uint8_t *src = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; s->cur_pic.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0]; s->cur_pic.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1]; // predictor intra or otherwise not available if (ref[j] < 0) continue; s->decode_mb(s->opaque, ref[j], MV_DIR_FORWARD, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); if (mb_x > 0 && fixed[mb_xy - 1]) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k * linesize[0] - 1] - src[k * linesize[0]]); } if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k * linesize[0] + 15] - src[k * linesize[0] + 16]); } if (mb_y > 0 && fixed[mb_xy - mb_stride]) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k - linesize[0]] - src[k]); } if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k + linesize[0] * 15] - src[k + linesize[0] * 16]); } if (score <= best_score) { // <= will favor the last MV best_score = score; best_pred = j; } } score_sum += best_score; s->mv[0][0][0] = mv_predictor[best_pred][0]; s->mv[0][0][1] = mv_predictor[best_pred][1]; for (i = 0; i <
mot_step; i++) for (j = 0; j < mot_step; j++) { s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0]; s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1]; } s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) { fixed[mb_xy] = MV_CHANGED; changed++; } else fixed[mb_xy] = MV_UNCHANGED; } } } if (none_left) return; for (i = 0; i < mb_width * mb_height; i++) { int mb_xy = s->mb_index2xy[i]; if (fixed[mb_xy]) fixed[mb_xy] = MV_FROZEN; } } }"} {"target": 0, "idx": 9249, "func": "static inline void gen_neon_narrow(int size, TCGv dest, TCGv src) { switch (size) { case 0: gen_helper_neon_narrow_u8(dest, src); break; case 1: gen_helper_neon_narrow_u16(dest, src); break; case 2: tcg_gen_trunc_i64_i32(dest, src); break; default: abort(); } }"} {"target": 0, "idx": 9250, "func": "int spapr_populate_pci_dt(sPAPRPHBState *phb, uint32_t xics_phandle, void *fdt) { int bus_off, i, j, ret; char nodename[FDT_NAME_MAX]; uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) }; const uint64_t mmiosize = memory_region_size(&phb->memwindow); const uint64_t w32max = (1ULL << 32) - SPAPR_PCI_MEM_WIN_BUS_OFFSET; const uint64_t w32size = MIN(w32max, mmiosize); const uint64_t w64size = (mmiosize > w32size) ? (mmiosize - w32size) : 0; struct { uint32_t hi; uint64_t child; uint64_t parent; uint64_t size; } QEMU_PACKED ranges[] = { { cpu_to_be32(b_ss(1)), cpu_to_be64(0), cpu_to_be64(phb->io_win_addr), cpu_to_be64(memory_region_size(&phb->iospace)), }, { cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET), cpu_to_be64(phb->mem_win_addr), cpu_to_be64(w32size), }, { cpu_to_be32(b_ss(3)), cpu_to_be64(1ULL << 32), cpu_to_be64(phb->mem_win_addr + w32size), cpu_to_be64(w64size) }, }; const unsigned sizeof_ranges = (w64size ? 
3 : 2) * sizeof(ranges[0]); uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 }; uint32_t interrupt_map_mask[] = { cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)}; uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7]; uint32_t ddw_applicable[] = { cpu_to_be32(RTAS_IBM_QUERY_PE_DMA_WINDOW), cpu_to_be32(RTAS_IBM_CREATE_PE_DMA_WINDOW), cpu_to_be32(RTAS_IBM_REMOVE_PE_DMA_WINDOW) }; uint32_t ddw_extensions[] = { cpu_to_be32(1), cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW) }; uint32_t associativity[] = {cpu_to_be32(0x4), cpu_to_be32(0x0), cpu_to_be32(0x0), cpu_to_be32(0x0), cpu_to_be32(phb->numa_node)}; sPAPRTCETable *tcet; PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus; sPAPRFDT s_fdt; /* Start populating the FDT */ snprintf(nodename, FDT_NAME_MAX, \"pci@%\" PRIx64, phb->buid); bus_off = fdt_add_subnode(fdt, 0, nodename); if (bus_off < 0) { return bus_off; } /* Write PHB properties */ _FDT(fdt_setprop_string(fdt, bus_off, \"device_type\", \"pci\")); _FDT(fdt_setprop_string(fdt, bus_off, \"compatible\", \"IBM,Logical_PHB\")); _FDT(fdt_setprop_cell(fdt, bus_off, \"#address-cells\", 0x3)); _FDT(fdt_setprop_cell(fdt, bus_off, \"#size-cells\", 0x2)); _FDT(fdt_setprop_cell(fdt, bus_off, \"#interrupt-cells\", 0x1)); _FDT(fdt_setprop(fdt, bus_off, \"used-by-rtas\", NULL, 0)); _FDT(fdt_setprop(fdt, bus_off, \"bus-range\", &bus_range, sizeof(bus_range))); _FDT(fdt_setprop(fdt, bus_off, \"ranges\", &ranges, sizeof_ranges)); _FDT(fdt_setprop(fdt, bus_off, \"reg\", &bus_reg, sizeof(bus_reg))); _FDT(fdt_setprop_cell(fdt, bus_off, \"ibm,pci-config-space-type\", 0x1)); _FDT(fdt_setprop_cell(fdt, bus_off, \"ibm,pe-total-#msi\", XICS_IRQS_SPAPR)); /* Dynamic DMA window */ if (phb->ddw_enabled) { _FDT(fdt_setprop(fdt, bus_off, \"ibm,ddw-applicable\", &ddw_applicable, sizeof(ddw_applicable))); _FDT(fdt_setprop(fdt, bus_off, \"ibm,ddw-extensions\", &ddw_extensions, sizeof(ddw_extensions))); } /* Advertise NUMA via ibm,associativity */ if (nb_numa_nodes > 1) { _FDT(fdt_setprop(fdt, bus_off, \"ibm,associativity\", associativity, sizeof(associativity))); } /* Build the interrupt-map, this must matches what is done * in pci_spapr_map_irq */ _FDT(fdt_setprop(fdt, bus_off, \"interrupt-map-mask\", &interrupt_map_mask, sizeof(interrupt_map_mask))); for (i = 0; i < PCI_SLOT_MAX; i++) { for (j = 0; j < PCI_NUM_PINS; j++) { uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j]; int lsi_num = pci_spapr_swizzle(i, j); irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0)); irqmap[1] = 0; irqmap[2] = 0; irqmap[3] = cpu_to_be32(j+1); irqmap[4] = cpu_to_be32(xics_phandle); irqmap[5] = cpu_to_be32(phb->lsi_table[lsi_num].irq); irqmap[6] = cpu_to_be32(0x8); } } /* Write interrupt map */ _FDT(fdt_setprop(fdt, bus_off, \"interrupt-map\", &interrupt_map, sizeof(interrupt_map))); tcet = spapr_tce_find_by_liobn(phb->dma_liobn[0]); if (!tcet) { return -1; } spapr_dma_dt(fdt, bus_off, \"ibm,dma-window\", tcet->liobn, tcet->bus_offset, tcet->nb_table << tcet->page_shift); /* Walk the bridges and program the bus numbers*/ spapr_phb_pci_enumerate(phb); _FDT(fdt_setprop_cell(fdt, bus_off, \"qemu,phb-enumerated\", 0x1)); /* Populate tree nodes with PCI devices attached */ s_fdt.fdt = fdt; s_fdt.node_off = bus_off; s_fdt.sphb = phb; pci_for_each_device(bus, pci_bus_num(bus), spapr_populate_pci_devices_dt, &s_fdt); ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb), SPAPR_DR_CONNECTOR_TYPE_PCI); if (ret) { return ret; } return 0; }"} {"target": 0, "idx": 9307, "func": "void spapr_dt_events(void *fdt, uint32_t check_exception_irq) { int 
event_sources, epow_events; uint32_t irq_ranges[] = {cpu_to_be32(check_exception_irq), cpu_to_be32(1)}; uint32_t interrupts[] = {cpu_to_be32(check_exception_irq), 0}; _FDT(event_sources = fdt_add_subnode(fdt, 0, \"event-sources\")); _FDT(fdt_setprop(fdt, event_sources, \"interrupt-controller\", NULL, 0)); _FDT(fdt_setprop_cell(fdt, event_sources, \"#interrupt-cells\", 2)); _FDT(fdt_setprop(fdt, event_sources, \"interrupt-ranges\", irq_ranges, sizeof(irq_ranges))); _FDT(epow_events = fdt_add_subnode(fdt, event_sources, \"epow-events\")); _FDT(fdt_setprop(fdt, epow_events, \"interrupts\", interrupts, sizeof(interrupts))); }"} {"target": 0, "idx": 9327, "func": "static void ff_dlog_link(void *ctx, AVFilterLink *link, int end) { if (link->type == AVMEDIA_TYPE_VIDEO) { av_dlog(ctx, \"link[%p s:%dx%d fmt:%-16s %-16s->%-16s]%s\", link, link->w, link->h, av_pix_fmt_descriptors[link->format].name, link->src ? link->src->filter->name : \"\", link->dst ? link->dst->filter->name : \"\", end ? \"\\n\" : \"\"); } else { char buf[128]; av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout); av_dlog(ctx, \"link[%p r:%\"PRId64\" cl:%s fmt:%-16s %-16s->%-16s]%s\", link, link->sample_rate, buf, av_get_sample_fmt_name(link->format), link->src ? link->src->filter->name : \"\", link->dst ? link->dst->filter->name : \"\", end ? \"\\n\" : \"\"); } }"} {"target": 1, "idx": 9333, "func": "static int vmdk_write(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors, bool zeroed, bool zero_dry_run) { BDRVVmdkState *s = bs->opaque; VmdkExtent *extent = NULL; int n, ret; int64_t index_in_cluster; uint64_t extent_begin_sector, extent_relative_sector_num; uint64_t cluster_offset; VmdkMetaData m_data; if (sector_num > bs->total_sectors) { error_report(\"Wrong offset: sector_num=0x%\" PRIx64 \" total_sectors=0x%\" PRIx64 \"\\n\", sector_num, bs->total_sectors); return -EIO; } while (nb_sectors > 0) { extent = find_extent(s, sector_num, extent); if (!extent) { return -EIO; } ret = get_cluster_offset( bs, extent, &m_data, sector_num << 9, !extent->compressed, &cluster_offset); if (extent->compressed) { if (ret == VMDK_OK) { /* Refuse write to allocated cluster for streamOptimized */ error_report(\"Could not write to allocated cluster\" \" for streamOptimized\"); return -EIO; } else { /* allocate */ ret = get_cluster_offset( bs, extent, &m_data, sector_num << 9, 1, &cluster_offset); } } if (ret == VMDK_ERROR) { return -EINVAL; } extent_begin_sector = extent->end_sector - extent->sectors; extent_relative_sector_num = sector_num - extent_begin_sector; index_in_cluster = extent_relative_sector_num % extent->cluster_sectors; n = extent->cluster_sectors - index_in_cluster; if (n > nb_sectors) { n = nb_sectors; } if (zeroed) { /* Do zeroed write, buf is ignored */ if (extent->has_zero_grain && index_in_cluster == 0 && n >= extent->cluster_sectors) { n = extent->cluster_sectors; if (!zero_dry_run) { m_data.offset = VMDK_GTE_ZEROED; /* update L2 tables */ if (vmdk_L2update(extent, &m_data) != VMDK_OK) { return -EIO; } } } else { return -ENOTSUP; } } else { ret = vmdk_write_extent(extent, cluster_offset, index_in_cluster * 512, buf, n, sector_num); if (ret) { return ret; } if (m_data.valid) { /* update L2 tables */ if (vmdk_L2update(extent, &m_data) != VMDK_OK) { return -EIO; } } } nb_sectors -= n; sector_num += n; buf += n * 512; /* update CID on the first write every time the virtual disk is * opened */ if (!s->cid_updated) { ret = vmdk_write_cid(bs, time(NULL)); if (ret < 0) { return ret; } 
s->cid_updated = true; } } return 0; }"} {"target": 1, "idx": 9334, "func": "void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, Error **errp) { MigrationState *s = migrate_get_current(); MigrationCapabilityStatusList *cap; if (s->state == MIG_STATE_ACTIVE || s->state == MIG_STATE_SETUP) { error_set(errp, QERR_MIGRATION_ACTIVE); return; } for (cap = params; cap; cap = cap->next) { s->enabled_capabilities[cap->value->capability] = cap->value->state; } }"} {"target": 1, "idx": 9346, "func": "void qmp_qmp_capabilities(Error **errp) { cur_mon->qmp.in_command_mode = true; }"} {"target": 0, "idx": 9356, "func": "static int http_connect(URLContext *h, const char *path, const char *hoststr, const char *auth, int *new_location) { HTTPContext *s = h->priv_data; int post, err; char line[1024]; char headers[1024] = \"\"; char *authstr = NULL; int64_t off = s->off; int len = 0; /* send http header */ post = h->flags & URL_WRONLY; authstr = ff_http_auth_create_response(&s->auth_state, auth, path, post ? \"POST\" : \"GET\"); /* set default headers if needed */ if (!has_header(s->headers, \"\\r\\nUser-Agent: \")) len += av_strlcatf(headers + len, sizeof(headers) - len, \"User-Agent: %s\\r\\n\", LIBAVFORMAT_IDENT); if (!has_header(s->headers, \"\\r\\nAccept: \")) len += av_strlcpy(headers + len, \"Accept: */*\\r\\n\", sizeof(headers) - len); if (!has_header(s->headers, \"\\r\\nRange: \")) len += av_strlcatf(headers + len, sizeof(headers) - len, \"Range: bytes=%\"PRId64\"-\\r\\n\", s->off); if (!has_header(s->headers, \"\\r\\nConnection: \")) len += av_strlcpy(headers + len, \"Connection: close\\r\\n\", sizeof(headers)-len); if (!has_header(s->headers, \"\\r\\nHost: \")) len += av_strlcatf(headers + len, sizeof(headers) - len, \"Host: %s\\r\\n\", hoststr); /* now add in custom headers */ av_strlcpy(headers+len, s->headers, sizeof(headers)-len); snprintf(s->buffer, sizeof(s->buffer), \"%s %s HTTP/1.1\\r\\n\" \"%s\" \"%s\" \"%s\" \"\\r\\n\", post ? \"POST\" : \"GET\", path, post && s->is_chunked ? \"Transfer-Encoding: chunked\\r\\n\" : \"\", headers, authstr ? authstr : \"\"); av_freep(&authstr); if (http_write(h, s->buffer, strlen(s->buffer)) < 0) return AVERROR(EIO); /* init input buffer */ s->buf_ptr = s->buffer; s->buf_end = s->buffer; s->line_count = 0; s->off = 0; s->filesize = -1; if (post) { /* always use chunked encoding for upload data */ s->chunksize = 0; /* Pretend that it did work. We didn't read any header yet, since * we've still to send the POST data, but the code calling this * function will check http_code after we return. */ s->http_code = 200; return 0; } /* wait for header */ for(;;) { if (http_get_line(s, line, sizeof(line)) < 0) return AVERROR(EIO); dprintf(NULL, \"header='%s'\\n\", line); err = process_line(h, line, s->line_count, new_location); if (err < 0) return err; if (err == 0) break; s->line_count++; } return (off == s->off) ? 
0 : -1; }"} {"target": 0, "idx": 9358, "func": "static int dxtory_decode_v2_rgb(AVCodecContext *avctx, AVFrame *pic, const uint8_t *src, int src_size) { GetByteContext gb; GetBitContext gb2; int nslices, slice, slice_height; uint32_t off, slice_size; uint8_t *dst; int ret; bytestream2_init(&gb, src, src_size); nslices = bytestream2_get_le16(&gb); off = FFALIGN(nslices * 4 + 2, 16); if (src_size < off) { av_log(avctx, AV_LOG_ERROR, \"no slice data\\n\"); return AVERROR_INVALIDDATA; } if (!nslices || avctx->height % nslices) { avpriv_request_sample(avctx, \"%d slices for %dx%d\", nslices, avctx->width, avctx->height); return AVERROR_PATCHWELCOME; } slice_height = avctx->height / nslices; avctx->pix_fmt = AV_PIX_FMT_BGR24; if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) return ret; dst = pic->data[0]; for (slice = 0; slice < nslices; slice++) { slice_size = bytestream2_get_le32(&gb); if (slice_size > src_size - off) { av_log(avctx, AV_LOG_ERROR, \"invalid slice size %\"PRIu32\" (only %\"PRIu32\" bytes left)\\n\", slice_size, src_size - off); return AVERROR_INVALIDDATA; } if (slice_size <= 16) { av_log(avctx, AV_LOG_ERROR, \"invalid slice size %\"PRIu32\"\\n\", slice_size); return AVERROR_INVALIDDATA; } if (AV_RL32(src + off) != slice_size - 16) { av_log(avctx, AV_LOG_ERROR, \"Slice sizes mismatch: got %\"PRIu32\" instead of %\"PRIu32\"\\n\", AV_RL32(src + off), slice_size - 16); } init_get_bits(&gb2, src + off + 16, (slice_size - 16) * 8); dx2_decode_slice_rgb(&gb2, avctx->width, slice_height, dst, pic->linesize[0]); dst += pic->linesize[0] * slice_height; off += slice_size; } return 0; }"} {"target": 0, "idx": 9359, "func": "void dsputil_init_mlib(void) { put_pixels_tab[0][0] = put_pixels16_mlib; put_pixels_tab[0][1] = put_pixels16_x2_mlib; put_pixels_tab[0][2] = put_pixels16_y2_mlib; put_pixels_tab[0][3] = put_pixels16_xy2_mlib; put_pixels_tab[1][0] = put_pixels8_mlib; put_pixels_tab[1][1] = put_pixels8_x2_mlib; put_pixels_tab[1][2] = put_pixels8_y2_mlib; put_pixels_tab[1][3] = put_pixels8_xy2_mlib; avg_pixels_tab[0][0] = avg_pixels16_mlib; avg_pixels_tab[0][1] = avg_pixels16_x2_mlib; avg_pixels_tab[0][2] = avg_pixels16_y2_mlib; avg_pixels_tab[0][3] = avg_pixels16_xy2_mlib; avg_pixels_tab[1][0] = avg_pixels8_mlib; avg_pixels_tab[1][1] = avg_pixels8_x2_mlib; avg_pixels_tab[1][2] = avg_pixels8_y2_mlib; avg_pixels_tab[1][3] = avg_pixels8_xy2_mlib; put_no_rnd_pixels_tab[0][0] = put_pixels16_mlib; put_no_rnd_pixels_tab[1][0] = put_pixels8_mlib; add_pixels_clamped = add_pixels_clamped_mlib; }"} {"target": 0, "idx": 9375, "func": "static inline int handle_cpu_signal(uintptr_t pc, unsigned long address, int is_write, sigset_t *old_set) { CPUState *cpu = current_cpu; CPUClass *cc; int ret; /* We must handle PC addresses from two different sources: * a call return address and a signal frame address. * * Within cpu_restore_state_from_tb we assume the former and adjust * the address by -GETPC_ADJ so that the address is within the call * insn so that addr does not accidentally match the beginning of the * next guest insn. * * However, when the PC comes from the signal frame, it points to * the actual faulting host insn and not a call insn. Subtracting * GETPC_ADJ in that case may accidentally match the previous guest insn. * * So for the later case, adjust forward to compensate for what * will be done later by cpu_restore_state_from_tb. 
*/ if (helper_retaddr) { pc = helper_retaddr; } else { pc += GETPC_ADJ; } /* For synchronous signals we expect to be coming from the vCPU * thread (so current_cpu should be valid) and either from running * code or during translation which can fault as we cross pages. * * If neither is true then something has gone wrong and we should * abort rather than try and restart the vCPU execution. */ if (!cpu || !cpu->running) { printf(\"qemu:%s received signal outside vCPU context @ pc=0x%\" PRIxPTR \"\\n\", __func__, pc); abort(); } #if defined(DEBUG_SIGNAL) printf(\"qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\\n\", pc, address, is_write, *(unsigned long *)old_set); #endif /* XXX: locking issue */ if (is_write && h2g_valid(address)) { switch (page_unprotect(h2g(address), pc)) { case 0: /* Fault not caused by a page marked unwritable to protect * cached translations, must be the guest binary's problem. */ break; case 1: /* Fault caused by protection of cached translation; TBs * invalidated, so resume execution. Retain helper_retaddr * for a possible second fault. */ return 1; case 2: /* Fault caused by protection of cached translation, and the * currently executing TB was modified and must be exited * immediately. Clear helper_retaddr for next execution. */ helper_retaddr = 0; cpu_exit_tb_from_sighandler(cpu, old_set); /* NORETURN */ default: g_assert_not_reached(); } } /* Convert forcefully to guest address space, invalid addresses are still valid segv ones */ address = h2g_nocheck(address); cc = CPU_GET_CLASS(cpu); /* see if it is an MMU fault */ g_assert(cc->handle_mmu_fault); ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX); if (ret == 0) { /* The MMU fault was handled without causing real CPU fault. * Retain helper_retaddr for a possible second fault. */ return 1; } /* All other paths lead to cpu_exit; clear helper_retaddr * for next execution. */ helper_retaddr = 0; if (ret < 0) { return 0; /* not an MMU fault */ } /* Now we have a real cpu fault. 
*/ cpu_restore_state(cpu, pc); sigprocmask(SIG_SETMASK, old_set, NULL); cpu_loop_exit(cpu); /* never comes here */ return 1; }"} {"target": 0, "idx": 9390, "func": "static inline bool media_is_cd(SCSIDiskState *s) { uint64_t nb_sectors; if (s->qdev.type != TYPE_ROM) { return false; } if (!bdrv_is_inserted(s->qdev.conf.bs)) { return false; } bdrv_get_geometry(s->qdev.conf.bs, &nb_sectors); return nb_sectors <= CD_MAX_SECTORS; }"} {"target": 1, "idx": 9400, "func": "static void lm32_uclinux_init(QEMUMachineInitArgs *args) { const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; const char *kernel_cmdline = args->kernel_cmdline; const char *initrd_filename = args->initrd_filename; LM32CPU *cpu; CPULM32State *env; DriveInfo *dinfo; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *phys_ram = g_new(MemoryRegion, 1); qemu_irq *cpu_irq, irq[32]; HWSetup *hw; ResetInfo *reset_info; int i; /* memory map */ hwaddr flash_base = 0x04000000; size_t flash_sector_size = 256 * 1024; size_t flash_size = 32 * 1024 * 1024; hwaddr ram_base = 0x08000000; size_t ram_size = 64 * 1024 * 1024; hwaddr uart0_base = 0x80000000; hwaddr timer0_base = 0x80002000; hwaddr timer1_base = 0x80010000; hwaddr timer2_base = 0x80012000; int uart0_irq = 0; int timer0_irq = 1; int timer1_irq = 20; int timer2_irq = 21; hwaddr hwsetup_base = 0x0bffe000; hwaddr cmdline_base = 0x0bfff000; hwaddr initrd_base = 0x08400000; size_t initrd_max = 0x01000000; reset_info = g_malloc0(sizeof(ResetInfo)); if (cpu_model == NULL) { cpu_model = \"lm32-full\"; cpu = cpu_lm32_init(cpu_model); env = &cpu->env; reset_info->cpu = cpu; reset_info->flash_base = flash_base; memory_region_init_ram(phys_ram, NULL, \"lm32_uclinux.sdram\", ram_size); vmstate_register_ram_global(phys_ram); memory_region_add_subregion(address_space_mem, ram_base, phys_ram); dinfo = drive_get(IF_PFLASH, 0, 0); /* Spansion S29NS128P */ pflash_cfi02_register(flash_base, NULL, \"lm32_uclinux.flash\", flash_size, dinfo ? 
dinfo->bdrv : NULL, flash_sector_size, flash_size / flash_sector_size, 1, 2, 0x01, 0x7e, 0x43, 0x00, 0x555, 0x2aa, 1); /* create irq lines */ cpu_irq = qemu_allocate_irqs(cpu_irq_handler, env, 1); env->pic_state = lm32_pic_init(*cpu_irq); for (i = 0; i < 32; i++) { irq[i] = qdev_get_gpio_in(env->pic_state, i); sysbus_create_simple(\"lm32-uart\", uart0_base, irq[uart0_irq]); sysbus_create_simple(\"lm32-timer\", timer0_base, irq[timer0_irq]); sysbus_create_simple(\"lm32-timer\", timer1_base, irq[timer1_irq]); sysbus_create_simple(\"lm32-timer\", timer2_base, irq[timer2_irq]); /* make sure juart isn't the first chardev */ env->juart_state = lm32_juart_init(); reset_info->bootstrap_pc = flash_base; if (kernel_filename) { uint64_t entry; int kernel_size; kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, NULL, NULL, 1, ELF_MACHINE, 0); reset_info->bootstrap_pc = entry; if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, ram_base, ram_size); reset_info->bootstrap_pc = ram_base; if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", kernel_filename); /* generate a rom with the hardware description */ hw = hwsetup_init(); hwsetup_add_cpu(hw, \"LM32\", 75000000); hwsetup_add_flash(hw, \"flash\", flash_base, flash_size); hwsetup_add_ddr_sdram(hw, \"ddr_sdram\", ram_base, ram_size); hwsetup_add_timer(hw, \"timer0\", timer0_base, timer0_irq); hwsetup_add_timer(hw, \"timer1_dev_only\", timer1_base, timer1_irq); hwsetup_add_timer(hw, \"timer2_dev_only\", timer2_base, timer2_irq); hwsetup_add_uart(hw, \"uart\", uart0_base, uart0_irq); hwsetup_add_trailer(hw); hwsetup_create_rom(hw, hwsetup_base); hwsetup_free(hw); reset_info->hwsetup_base = hwsetup_base; if (kernel_cmdline && strlen(kernel_cmdline)) { pstrcpy_targphys(\"cmdline\", cmdline_base, TARGET_PAGE_SIZE, kernel_cmdline); reset_info->cmdline_base = cmdline_base; if (initrd_filename) { size_t initrd_size; initrd_size = load_image_targphys(initrd_filename, initrd_base, initrd_max); reset_info->initrd_base = initrd_base; reset_info->initrd_size = initrd_size; qemu_register_reset(main_cpu_reset, reset_info);"} {"target": 1, "idx": 9408, "func": "static av_cold int m101_decode_init(AVCodecContext *avctx) { if (avctx->extradata_size < 6*4) return AVERROR_INVALIDDATA; if (avctx->extradata[2*4] == 10) avctx->pix_fmt = AV_PIX_FMT_YUV422P10; else avctx->pix_fmt = AV_PIX_FMT_YUYV422; return 0; }"} {"target": 1, "idx": 9427, "func": "static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64]) { Mpeg4DecContext *ctx = (Mpeg4DecContext *)s; int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant; int16_t *mot_val; static const int8_t quant_tab[4] = { -1, -2, 1, 2 }; const int xy = s->mb_x + s->mb_y * s->mb_stride; av_assert2(s->h263_pred); if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_S) { do { if (get_bits1(&s->gb)) { /* skip mb */ s->mb_intra = 0; for (i = 0; i < 6; i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE) { s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; s->mcsel = 1; s->mv[0][0][0] = get_amv(ctx, 0); s->mv[0][0][1] = get_amv(ctx, 1); s->mb_skipped = 0; } else { s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->mcsel = 0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mb_skipped = 1; goto end; cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); if 
(cbpc < 0) { \"mcbpc damaged at %d %d\\n\", s->mb_x, s->mb_y); return -1; } while (cbpc == 20); s->bdsp.clear_blocks(s->block[0]); dquant = cbpc & 8; s->mb_intra = ((cbpc & 4) != 0); if (s->mb_intra) goto intra; if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE && (cbpc & 16) == 0) s->mcsel = get_bits1(&s->gb); else s->mcsel = 0; cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F; cbp = (cbpc & 3) | (cbpy << 2); if (dquant) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); if ((!s->progressive_sequence) && (cbp || (s->workaround_bugs & FF_BUG_XVID_ILACE))) s->interlaced_dct = get_bits1(&s->gb); s->mv_dir = MV_DIR_FORWARD; if ((cbpc & 16) == 0) { if (s->mcsel) { s->current_picture.mb_type[xy] = MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; /* 16x16 global motion prediction */ s->mv_type = MV_TYPE_16X16; mx = get_amv(ctx, 0); my = get_amv(ctx, 1); s->mv[0][0][0] = mx; s->mv[0][0][1] = my; } else if ((!s->progressive_sequence) && get_bits1(&s->gb)) { s->current_picture.mb_type[xy] = MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED; /* 16x8 field motion prediction */ s->mv_type = MV_TYPE_FIELD; s->field_select[0][0] = get_bits1(&s->gb); s->field_select[0][1] = get_bits1(&s->gb); ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); for (i = 0; i < 2; i++) { mx = ff_h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return -1; my = ff_h263_decode_motion(s, pred_y / 2, s->f_code); if (my >= 0xffff) return -1; s->mv[0][i][0] = mx; s->mv[0][i][1] = my; } else { s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; /* 16x16 motion prediction */ s->mv_type = MV_TYPE_16X16; ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); mx = ff_h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return -1; my = ff_h263_decode_motion(s, pred_y, s->f_code); if (my >= 0xffff) return -1; s->mv[0][0][0] = mx; s->mv[0][0][1] = my; } else { s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; s->mv_type = MV_TYPE_8X8; for (i = 0; i < 4; i++) { mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y); mx = ff_h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return -1; my = ff_h263_decode_motion(s, pred_y, s->f_code); if (my >= 0xffff) return -1; s->mv[0][i][0] = mx; s->mv[0][i][1] = my; mot_val[0] = mx; mot_val[1] = my; } else if (s->pict_type == AV_PICTURE_TYPE_B) { int modb1; // first bit of modb int modb2; // second bit of modb int mb_type; s->mb_intra = 0; // B-frames never contain intra blocks s->mcsel = 0; // ... 
true gmc blocks if (s->mb_x == 0) { for (i = 0; i < 2; i++) { s->last_mv[i][0][0] = s->last_mv[i][0][1] = s->last_mv[i][1][0] = s->last_mv[i][1][1] = 0; ff_thread_await_progress(&s->next_picture_ptr->tf, s->mb_y, 0); /* if we skipped it in the future P Frame than skip it now too */ s->mb_skipped = s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC if (s->mb_skipped) { /* skip mb */ for (i = 0; i < 6; i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = s->mv[0][0][1] = s->mv[1][0][0] = s->mv[1][0][1] = 0; s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; goto end; modb1 = get_bits1(&s->gb); if (modb1) { // like MB_TYPE_B_DIRECT but no vectors coded mb_type = MB_TYPE_DIRECT2 | MB_TYPE_SKIP | MB_TYPE_L0L1; cbp = 0; } else { modb2 = get_bits1(&s->gb); mb_type = get_vlc2(&s->gb, mb_type_b_vlc.table, MB_TYPE_B_VLC_BITS, 1); if (mb_type < 0) { av_log(s->avctx, AV_LOG_ERROR, \"illegal MB_type\\n\"); return -1; mb_type = mb_type_b_map[mb_type]; if (modb2) { cbp = 0; } else { s->bdsp.clear_blocks(s->block[0]); cbp = get_bits(&s->gb, 6); if ((!IS_DIRECT(mb_type)) && cbp) { if (get_bits1(&s->gb)) ff_set_qscale(s, s->qscale + get_bits1(&s->gb) * 4 - 2); if (!s->progressive_sequence) { if (cbp) s->interlaced_dct = get_bits1(&s->gb); if (!IS_DIRECT(mb_type) && get_bits1(&s->gb)) { mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED; mb_type &= ~MB_TYPE_16x16; if (USES_LIST(mb_type, 0)) { s->field_select[0][0] = get_bits1(&s->gb); s->field_select[0][1] = get_bits1(&s->gb); if (USES_LIST(mb_type, 1)) { s->field_select[1][0] = get_bits1(&s->gb); s->field_select[1][1] = get_bits1(&s->gb); s->mv_dir = 0; if ((mb_type & (MB_TYPE_DIRECT2 | MB_TYPE_INTERLACED)) == 0) { s->mv_type = MV_TYPE_16X16; if (USES_LIST(mb_type, 0)) { s->mv_dir = MV_DIR_FORWARD; mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code); my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code); s->last_mv[0][1][0] = s->last_mv[0][0][0] = s->mv[0][0][0] = mx; s->last_mv[0][1][1] = s->last_mv[0][0][1] = s->mv[0][0][1] = my; if (USES_LIST(mb_type, 1)) { s->mv_dir |= MV_DIR_BACKWARD; mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code); my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code); s->last_mv[1][1][0] = s->last_mv[1][0][0] = s->mv[1][0][0] = mx; s->last_mv[1][1][1] = s->last_mv[1][0][1] = s->mv[1][0][1] = my; } else if (!IS_DIRECT(mb_type)) { s->mv_type = MV_TYPE_FIELD; if (USES_LIST(mb_type, 0)) { s->mv_dir = MV_DIR_FORWARD; for (i = 0; i < 2; i++) { mx = ff_h263_decode_motion(s, s->last_mv[0][i][0], s->f_code); my = ff_h263_decode_motion(s, s->last_mv[0][i][1] / 2, s->f_code); s->last_mv[0][i][0] = s->mv[0][i][0] = mx; s->last_mv[0][i][1] = (s->mv[0][i][1] = my) * 2; if (USES_LIST(mb_type, 1)) { s->mv_dir |= MV_DIR_BACKWARD; for (i = 0; i < 2; i++) { mx = ff_h263_decode_motion(s, s->last_mv[1][i][0], s->b_code); my = ff_h263_decode_motion(s, s->last_mv[1][i][1] / 2, s->b_code); s->last_mv[1][i][0] = s->mv[1][i][0] = mx; s->last_mv[1][i][1] = (s->mv[1][i][1] = my) * 2; if (IS_DIRECT(mb_type)) { if (IS_SKIP(mb_type)) { mx = my = 0; } else { mx = ff_h263_decode_motion(s, 0, 1); my = ff_h263_decode_motion(s, 0, 1); s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; mb_type |= ff_mpeg4_set_direct_mv(s, mx, my); s->current_picture.mb_type[xy] = mb_type; } else { /* I-Frame */ do { cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); if (cbpc < 0) { \"I cbpc 
damaged at %d %d\\n\", s->mb_x, s->mb_y); return -1; } while (cbpc == 8); dquant = cbpc & 4; s->mb_intra = 1; intra: s->ac_pred = get_bits1(&s->gb); if (s->ac_pred) s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED; else s->current_picture.mb_type[xy] = MB_TYPE_INTRA; cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); \"I cbpy damaged at %d %d\\n\", s->mb_x, s->mb_y); return -1; cbp = (cbpc & 3) | (cbpy << 2); ctx->use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold; if (dquant) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); if (!s->progressive_sequence) s->interlaced_dct = get_bits1(&s->gb); s->bdsp.clear_blocks(s->block[0]); /* decode each block */ for (i = 0; i < 6; i++) { if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 1, 0) < 0) return -1; cbp += cbp; goto end; /* decode each block */ for (i = 0; i < 6; i++) { if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 0, 0) < 0) return -1; cbp += cbp; end: /* per-MB end of slice check */ if (s->codec_id == AV_CODEC_ID_MPEG4) { int next = mpeg4_is_resync(ctx); if (next) { if (s->mb_x + s->mb_y*s->mb_width + 1 > next && (s->avctx->err_recognition & AV_EF_AGGRESSIVE)) { return -1; } else if (s->mb_x + s->mb_y*s->mb_width + 1 >= next) return SLICE_END; if (s->pict_type == AV_PICTURE_TYPE_B) { const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1; ff_thread_await_progress(&s->next_picture_ptr->tf, (s->mb_x + delta >= s->mb_width) ? FFMIN(s->mb_y + 1, s->mb_height - 1) : s->mb_y, 0); if (s->next_picture.mbskip_table[xy + delta]) return SLICE_OK; return SLICE_END; return SLICE_OK;"} {"target": 1, "idx": 9439, "func": "static void arm_cpu_realizefn(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); ARMCPU *cpu = ARM_CPU(dev); ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev); CPUARMState *env = &cpu->env; int pagebits; Error *local_err = NULL; cpu_exec_realizefn(cs, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); return; } /* Some features automatically imply others: */ if (arm_feature(env, ARM_FEATURE_V8)) { set_feature(env, ARM_FEATURE_V7); set_feature(env, ARM_FEATURE_ARM_DIV); set_feature(env, ARM_FEATURE_LPAE); } if (arm_feature(env, ARM_FEATURE_V7)) { set_feature(env, ARM_FEATURE_VAPA); set_feature(env, ARM_FEATURE_THUMB2); set_feature(env, ARM_FEATURE_MPIDR); if (!arm_feature(env, ARM_FEATURE_M)) { set_feature(env, ARM_FEATURE_V6K); } else { set_feature(env, ARM_FEATURE_V6); } /* Always define VBAR for V7 CPUs even if it doesn't exist in * non-EL3 configs. This is needed by some legacy boards. 
*/ set_feature(env, ARM_FEATURE_VBAR); } if (arm_feature(env, ARM_FEATURE_V6K)) { set_feature(env, ARM_FEATURE_V6); set_feature(env, ARM_FEATURE_MVFR); } if (arm_feature(env, ARM_FEATURE_V6)) { set_feature(env, ARM_FEATURE_V5); if (!arm_feature(env, ARM_FEATURE_M)) { set_feature(env, ARM_FEATURE_AUXCR); } } if (arm_feature(env, ARM_FEATURE_V5)) { set_feature(env, ARM_FEATURE_V4T); } if (arm_feature(env, ARM_FEATURE_M)) { set_feature(env, ARM_FEATURE_THUMB_DIV); } if (arm_feature(env, ARM_FEATURE_ARM_DIV)) { set_feature(env, ARM_FEATURE_THUMB_DIV); } if (arm_feature(env, ARM_FEATURE_VFP4)) { set_feature(env, ARM_FEATURE_VFP3); set_feature(env, ARM_FEATURE_VFP_FP16); } if (arm_feature(env, ARM_FEATURE_VFP3)) { set_feature(env, ARM_FEATURE_VFP); } if (arm_feature(env, ARM_FEATURE_LPAE)) { set_feature(env, ARM_FEATURE_V7MP); set_feature(env, ARM_FEATURE_PXN); } if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { set_feature(env, ARM_FEATURE_CBAR); } if (arm_feature(env, ARM_FEATURE_THUMB2) && !arm_feature(env, ARM_FEATURE_M)) { set_feature(env, ARM_FEATURE_THUMB_DSP); } if (arm_feature(env, ARM_FEATURE_V7) && !arm_feature(env, ARM_FEATURE_M) && !arm_feature(env, ARM_FEATURE_PMSA)) { /* v7VMSA drops support for the old ARMv5 tiny pages, so we * can use 4K pages. */ pagebits = 12; } else { /* For CPUs which might have tiny 1K pages, or which have an * MPU and might have small region sizes, stick with 1K pages. */ pagebits = 10; } if (!set_preferred_target_page_bits(pagebits)) { /* This can only ever happen for hotplugging a CPU, or if * the board code incorrectly creates a CPU which it has * promised via minimum_page_size that it will not. */ error_setg(errp, \"This CPU requires a smaller page size than the \" \"system is using\"); return; } /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it. * We don't support setting cluster ID ([16..23]) (known as Aff2 * in later ARM ARM versions), or any of the higher affinity level fields, * so these bits always RAZ. */ if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) { cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index, ARM_DEFAULT_CPUS_PER_CLUSTER); } if (cpu->reset_hivecs) { cpu->reset_sctlr |= (1 << 13); } if (cpu->cfgend) { if (arm_feature(&cpu->env, ARM_FEATURE_V7)) { cpu->reset_sctlr |= SCTLR_EE; } else { cpu->reset_sctlr |= SCTLR_B; } } if (!cpu->has_el3) { /* If the has_el3 CPU property is disabled then we need to disable the * feature. */ unset_feature(env, ARM_FEATURE_EL3); /* Disable the security extension feature bits in the processor feature * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12]. */ cpu->id_pfr1 &= ~0xf0; cpu->id_aa64pfr0 &= ~0xf000; } if (!cpu->has_el2) { unset_feature(env, ARM_FEATURE_EL2); } if (!cpu->has_pmu) { unset_feature(env, ARM_FEATURE_PMU); cpu->id_aa64dfr0 &= ~0xf00; } if (!arm_feature(env, ARM_FEATURE_EL2)) { /* Disable the hypervisor feature bits in the processor feature * registers if we don't have EL2. These are id_pfr1[15:12] and * id_aa64pfr0_el1[11:8]. */ cpu->id_aa64pfr0 &= ~0xf00; cpu->id_pfr1 &= ~0xf000; } /* MPU can be configured out of a PMSA CPU either by setting has-mpu * to false or by setting pmsav7-dregion to 0. 
*/ if (!cpu->has_mpu) { cpu->pmsav7_dregion = 0; } if (cpu->pmsav7_dregion == 0) { cpu->has_mpu = false; } if (arm_feature(env, ARM_FEATURE_PMSA) && arm_feature(env, ARM_FEATURE_V7)) { uint32_t nr = cpu->pmsav7_dregion; if (nr > 0xff) { error_setg(errp, \"PMSAv7 MPU #regions invalid %\" PRIu32, nr); return; } if (nr) { if (arm_feature(env, ARM_FEATURE_V8)) { /* PMSAv8 */ env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr); env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr); if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr); env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr); } } else { env->pmsav7.drbar = g_new0(uint32_t, nr); env->pmsav7.drsr = g_new0(uint32_t, nr); env->pmsav7.dracr = g_new0(uint32_t, nr); } } } if (arm_feature(env, ARM_FEATURE_EL3)) { set_feature(env, ARM_FEATURE_VBAR); } register_cp_regs_for_features(cpu); arm_cpu_register_gdb_regs_for_features(cpu); init_cpreg_list(cpu); #ifndef CONFIG_USER_ONLY if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY)) { AddressSpace *as; cs->num_ases = 2; if (!cpu->secure_memory) { cpu->secure_memory = cs->memory; } as = address_space_init_shareable(cpu->secure_memory, \"cpu-secure-memory\"); cpu_address_space_init(cs, as, ARMASIdx_S); } else { cs->num_ases = 1; } cpu_address_space_init(cs, address_space_init_shareable(cs->memory, \"cpu-memory\"), ARMASIdx_NS); #endif qemu_init_vcpu(cs); cpu_reset(cs); acc->parent_realize(dev, errp); }"} {"target": 1, "idx": 9441, "func": "static void wait_for_serial(const char *side) { char *serialpath = g_strdup_printf(\"%s/%s\", tmpfs, side); FILE *serialfile = fopen(serialpath, \"r\"); const char *arch = qtest_get_arch(); int started = (strcmp(side, \"src_serial\") == 0 && strcmp(arch, \"ppc64\") == 0) ? 0 : 1; do { int readvalue = fgetc(serialfile); if (!started) { /* SLOF prints its banner before starting test, * to ignore it, mark the start of the test with '_', * ignore all characters until this marker */ switch (readvalue) { case '_': started = 1; break; case EOF: fseek(serialfile, 0, SEEK_SET); usleep(1000); break; } continue; } switch (readvalue) { case 'A': /* Fine */ break; case 'B': /* It's alive! */ fclose(serialfile); g_free(serialpath); return; case EOF: started = (strcmp(side, \"src_serial\") == 0 && strcmp(arch, \"ppc64\") == 0) ? 0 : 1; fseek(serialfile, 0, SEEK_SET); usleep(1000); break; default: fprintf(stderr, \"Unexpected %d on %s serial\\n\", readvalue, side); g_assert_not_reached(); } } while (true); }"} {"target": 1, "idx": 9458, "func": "static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { uint32_t val, insn, op, rm, rn, rd, shift, cond; int32_t offset; int i; TCGv_i32 tmp; TCGv_i32 tmp2; TCGv_i32 addr; if (s->condexec_mask) { cond = s->condexec_cond; if (cond != 0x0e) { /* Skip conditional when condition is AL. 
*/ s->condlabel = gen_new_label(); arm_gen_test_cc(cond ^ 1, s->condlabel); s->condjmp = 1; } } insn = arm_lduw_code(env, s->pc, s->sctlr_b); s->pc += 2; switch (insn >> 12) { case 0: case 1: rd = insn & 7; op = (insn >> 11) & 3; if (op == 3) { /* add/subtract */ rn = (insn >> 3) & 7; tmp = load_reg(s, rn); if (insn & (1 << 10)) { /* immediate */ tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, (insn >> 6) & 7); } else { /* reg */ rm = (insn >> 6) & 7; tmp2 = load_reg(s, rm); } if (insn & (1 << 9)) { if (s->condexec_mask) tcg_gen_sub_i32(tmp, tmp, tmp2); else gen_sub_CC(tmp, tmp, tmp2); } else { if (s->condexec_mask) tcg_gen_add_i32(tmp, tmp, tmp2); else gen_add_CC(tmp, tmp, tmp2); } tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); } else { /* shift immediate */ rm = (insn >> 3) & 7; shift = (insn >> 6) & 0x1f; tmp = load_reg(s, rm); gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0); if (!s->condexec_mask) gen_logic_CC(tmp); store_reg(s, rd, tmp); } break; case 2: case 3: /* arithmetic large immediate */ op = (insn >> 11) & 3; rd = (insn >> 8) & 0x7; if (op == 0) { /* mov */ tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, insn & 0xff); if (!s->condexec_mask) gen_logic_CC(tmp); store_reg(s, rd, tmp); } else { tmp = load_reg(s, rd); tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, insn & 0xff); switch (op) { case 1: /* cmp */ gen_sub_CC(tmp, tmp, tmp2); tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp2); break; case 2: /* add */ if (s->condexec_mask) tcg_gen_add_i32(tmp, tmp, tmp2); else gen_add_CC(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; case 3: /* sub */ if (s->condexec_mask) tcg_gen_sub_i32(tmp, tmp, tmp2); else gen_sub_CC(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; } } break; case 4: if (insn & (1 << 11)) { rd = (insn >> 8) & 7; /* load pc-relative. Bit 1 of PC is ignored. 
*/ val = s->pc + 2 + ((insn & 0xff) * 4); val &= ~(uint32_t)2; addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, val); tmp = tcg_temp_new_i32(); gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); tcg_temp_free_i32(addr); store_reg(s, rd, tmp); break; } if (insn & (1 << 10)) { /* data processing extended or blx */ rd = (insn & 7) | ((insn >> 4) & 8); rm = (insn >> 3) & 0xf; op = (insn >> 8) & 3; switch (op) { case 0: /* add */ tmp = load_reg(s, rd); tmp2 = load_reg(s, rm); tcg_gen_add_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; case 1: /* cmp */ tmp = load_reg(s, rd); tmp2 = load_reg(s, rm); gen_sub_CC(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp); break; case 2: /* mov/cpy */ tmp = load_reg(s, rm); store_reg(s, rd, tmp); break; case 3:/* branch [and link] exchange thumb register */ tmp = load_reg(s, rm); if (insn & (1 << 7)) { ARCH(5); val = (uint32_t)s->pc | 1; tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, val); store_reg(s, 14, tmp2); gen_bx(s, tmp); } else { /* Only BX works as exception-return, not BLX */ gen_bx_excret(s, tmp); } break; } break; } /* data processing register */ rd = insn & 7; rm = (insn >> 3) & 7; op = (insn >> 6) & 0xf; if (op == 2 || op == 3 || op == 4 || op == 7) { /* the shift/rotate ops want the operands backwards */ val = rm; rm = rd; rd = val; val = 1; } else { val = 0; } if (op == 9) { /* neg */ tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } else if (op != 0xf) { /* mvn doesn't read its first operand */ tmp = load_reg(s, rd); } else { TCGV_UNUSED_I32(tmp); } tmp2 = load_reg(s, rm); switch (op) { case 0x0: /* and */ tcg_gen_and_i32(tmp, tmp, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp); break; case 0x1: /* eor */ tcg_gen_xor_i32(tmp, tmp, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp); break; case 0x2: /* lsl */ if (s->condexec_mask) { gen_shl(tmp2, tmp2, tmp); } else { gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; case 0x3: /* lsr */ if (s->condexec_mask) { gen_shr(tmp2, tmp2, tmp); } else { gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; case 0x4: /* asr */ if (s->condexec_mask) { gen_sar(tmp2, tmp2, tmp); } else { gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; case 0x5: /* adc */ if (s->condexec_mask) { gen_adc(tmp, tmp2); } else { gen_adc_CC(tmp, tmp, tmp2); } break; case 0x6: /* sbc */ if (s->condexec_mask) { gen_sub_carry(tmp, tmp, tmp2); } else { gen_sbc_CC(tmp, tmp, tmp2); } break; case 0x7: /* ror */ if (s->condexec_mask) { tcg_gen_andi_i32(tmp, tmp, 0x1f); tcg_gen_rotr_i32(tmp2, tmp2, tmp); } else { gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; case 0x8: /* tst */ tcg_gen_and_i32(tmp, tmp, tmp2); gen_logic_CC(tmp); rd = 16; break; case 0x9: /* neg */ if (s->condexec_mask) tcg_gen_neg_i32(tmp, tmp2); else gen_sub_CC(tmp, tmp, tmp2); break; case 0xa: /* cmp */ gen_sub_CC(tmp, tmp, tmp2); rd = 16; break; case 0xb: /* cmn */ gen_add_CC(tmp, tmp, tmp2); rd = 16; break; case 0xc: /* orr */ tcg_gen_or_i32(tmp, tmp, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp); break; case 0xd: /* mul */ tcg_gen_mul_i32(tmp, tmp, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp); break; case 0xe: /* bic */ tcg_gen_andc_i32(tmp, tmp, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp); break; case 0xf: /* mvn */ tcg_gen_not_i32(tmp2, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp2); val = 1; rm = rd; break; } if (rd != 16) { if (val) { store_reg(s, rm, tmp2); if (op != 0xf) 
tcg_temp_free_i32(tmp); } else { store_reg(s, rd, tmp); tcg_temp_free_i32(tmp2); } } else { tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp2); } break; case 5: /* load/store register offset. */ rd = insn & 7; rn = (insn >> 3) & 7; rm = (insn >> 6) & 7; op = (insn >> 9) & 7; addr = load_reg(s, rn); tmp = load_reg(s, rm); tcg_gen_add_i32(addr, addr, tmp); tcg_temp_free_i32(tmp); if (op < 3) { /* store */ tmp = load_reg(s, rd); } else { tmp = tcg_temp_new_i32(); } switch (op) { case 0: /* str */ gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 1: /* strh */ gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 2: /* strb */ gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 3: /* ldrsb */ gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 4: /* ldr */ gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 5: /* ldrh */ gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 6: /* ldrb */ gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; case 7: /* ldrsh */ gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); break; } if (op >= 3) { /* load */ store_reg(s, rd, tmp); } else { tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); break; case 6: /* load/store word immediate offset */ rd = insn & 7; rn = (insn >> 3) & 7; addr = load_reg(s, rn); val = (insn >> 4) & 0x7c; tcg_gen_addi_i32(addr, addr, val); if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); store_reg(s, rd, tmp); } else { /* store */ tmp = load_reg(s, rd); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); break; case 7: /* load/store byte immediate offset */ rd = insn & 7; rn = (insn >> 3) & 7; addr = load_reg(s, rn); val = (insn >> 6) & 0x1f; tcg_gen_addi_i32(addr, addr, val); if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); store_reg(s, rd, tmp); } else { /* store */ tmp = load_reg(s, rd); gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); break; case 8: /* load/store halfword immediate offset */ rd = insn & 7; rn = (insn >> 3) & 7; addr = load_reg(s, rn); val = (insn >> 5) & 0x3e; tcg_gen_addi_i32(addr, addr, val); if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); store_reg(s, rd, tmp); } else { /* store */ tmp = load_reg(s, rd); gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); break; case 9: /* load/store from stack */ rd = (insn >> 8) & 7; addr = load_reg(s, 13); val = (insn & 0xff) * 4; tcg_gen_addi_i32(addr, addr, val); if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); store_reg(s, rd, tmp); } else { /* store */ tmp = load_reg(s, rd); gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit); tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); break; case 10: /* add to high reg */ rd = (insn >> 8) & 7; if (insn & (1 << 11)) { /* SP */ tmp = load_reg(s, 13); } else { /* PC. bit 1 is ignored. 
*/ tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2); } val = (insn & 0xff) * 4; tcg_gen_addi_i32(tmp, tmp, val); store_reg(s, rd, tmp); break; case 11: /* misc */ op = (insn >> 8) & 0xf; switch (op) { case 0: /* adjust stack pointer */ tmp = load_reg(s, 13); val = (insn & 0x7f) * 4; if (insn & (1 << 7)) val = -(int32_t)val; tcg_gen_addi_i32(tmp, tmp, val); store_reg(s, 13, tmp); break; case 2: /* sign/zero extend. */ ARCH(6); rd = insn & 7; rm = (insn >> 3) & 7; tmp = load_reg(s, rm); switch ((insn >> 6) & 3) { case 0: gen_sxth(tmp); break; case 1: gen_sxtb(tmp); break; case 2: gen_uxth(tmp); break; case 3: gen_uxtb(tmp); break; } store_reg(s, rd, tmp); break; case 4: case 5: case 0xc: case 0xd: /* push/pop */ addr = load_reg(s, 13); if (insn & (1 << 8)) offset = 4; else offset = 0; for (i = 0; i < 8; i++) { if (insn & (1 << i)) offset += 4; } if ((insn & (1 << 11)) == 0) { tcg_gen_addi_i32(addr, addr, -offset); } for (i = 0; i < 8; i++) { if (insn & (1 << i)) { if (insn & (1 << 11)) { /* pop */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); store_reg(s, i, tmp); } else { /* push */ tmp = load_reg(s, i); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); } /* advance to the next address. */ tcg_gen_addi_i32(addr, addr, 4); } } TCGV_UNUSED_I32(tmp); if (insn & (1 << 8)) { if (insn & (1 << 11)) { /* pop pc */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); /* don't set the pc until the rest of the instruction has completed */ } else { /* push lr */ tmp = load_reg(s, 14); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); } tcg_gen_addi_i32(addr, addr, 4); } if ((insn & (1 << 11)) == 0) { tcg_gen_addi_i32(addr, addr, -offset); } /* write back the new stack pointer */ store_reg(s, 13, addr); /* set the new PC value */ if ((insn & 0x0900) == 0x0900) { store_reg_from_load(s, 15, tmp); } break; case 1: case 3: case 9: case 11: /* czb */ rm = insn & 7; tmp = load_reg(s, rm); s->condlabel = gen_new_label(); s->condjmp = 1; if (insn & (1 << 11)) tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel); else tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel); tcg_temp_free_i32(tmp); offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3; val = (uint32_t)s->pc + 2; val += offset; gen_jmp(s, val); break; case 15: /* IT, nop-hint. */ if ((insn & 0xf) == 0) { gen_nop_hint(s, (insn >> 4) & 0xf); break; } /* If Then. */ s->condexec_cond = (insn >> 4) & 0xe; s->condexec_mask = insn & 0x1f; /* No actual code generated for this insn, just setup state. 
*/ break; case 0xe: /* bkpt */ { int imm8 = extract32(insn, 0, 8); ARCH(5); gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true), default_exception_el(s)); break; } case 0xa: /* rev, and hlt */ { int op1 = extract32(insn, 6, 2); if (op1 == 2) { /* HLT */ int imm6 = extract32(insn, 0, 6); gen_hlt(s, imm6); break; } /* Otherwise this is rev */ ARCH(6); rn = (insn >> 3) & 0x7; rd = insn & 0x7; tmp = load_reg(s, rn); switch (op1) { case 0: tcg_gen_bswap32_i32(tmp, tmp); break; case 1: gen_rev16(tmp); break; case 3: gen_revsh(tmp); break; default: g_assert_not_reached(); } store_reg(s, rd, tmp); break; } case 6: switch ((insn >> 5) & 7) { case 2: /* setend */ ARCH(6); if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) { gen_helper_setend(cpu_env); s->is_jmp = DISAS_UPDATE; } break; case 3: /* cps */ ARCH(6); if (IS_USER(s)) { break; } if (arm_dc_feature(s, ARM_FEATURE_M)) { tmp = tcg_const_i32((insn & (1 << 4)) != 0); /* FAULTMASK */ if (insn & 1) { addr = tcg_const_i32(19); gen_helper_v7m_msr(cpu_env, addr, tmp); tcg_temp_free_i32(addr); } /* PRIMASK */ if (insn & 2) { addr = tcg_const_i32(16); gen_helper_v7m_msr(cpu_env, addr, tmp); tcg_temp_free_i32(addr); } tcg_temp_free_i32(tmp); gen_lookup_tb(s); } else { if (insn & (1 << 4)) { shift = CPSR_A | CPSR_I | CPSR_F; } else { shift = 0; } gen_set_psr_im(s, ((insn & 7) << 6), 0, shift); } break; default: goto undef; } break; default: goto undef; } break; case 12: { /* load/store multiple */ TCGv_i32 loaded_var; TCGV_UNUSED_I32(loaded_var); rn = (insn >> 8) & 0x7; addr = load_reg(s, rn); for (i = 0; i < 8; i++) { if (insn & (1 << i)) { if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); if (i == rn) { loaded_var = tmp; } else { store_reg(s, i, tmp); } } else { /* store */ tmp = load_reg(s, i); gen_aa32_st32(s, tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); } /* advance to the next address */ tcg_gen_addi_i32(addr, addr, 4); } } if ((insn & (1 << rn)) == 0) { /* base reg not in list: base register writeback */ store_reg(s, rn, addr); } else { /* base reg in list: if load, complete it now */ if (insn & (1 << 11)) { store_reg(s, rn, loaded_var); } tcg_temp_free_i32(addr); } break; } case 13: /* conditional branch or swi */ cond = (insn >> 8) & 0xf; if (cond == 0xe) goto undef; if (cond == 0xf) { /* swi */ gen_set_pc_im(s, s->pc); s->svc_imm = extract32(insn, 0, 8); s->is_jmp = DISAS_SWI; break; } /* generate a conditional jump to next instruction */ s->condlabel = gen_new_label(); arm_gen_test_cc(cond ^ 1, s->condlabel); s->condjmp = 1; /* jump to the offset */ val = (uint32_t)s->pc + 2; offset = ((int32_t)insn << 24) >> 24; val += offset << 1; gen_jmp(s, val); break; case 14: if (insn & (1 << 11)) { if (disas_thumb2_insn(env, s, insn)) goto undef32; break; } /* unconditional branch */ val = (uint32_t)s->pc; offset = ((int32_t)insn << 21) >> 21; val += (offset << 1) + 2; gen_jmp(s, val); break; case 15: if (disas_thumb2_insn(env, s, insn)) goto undef32; break; } return; undef32: gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), default_exception_el(s)); return; illegal_op: undef: gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(), default_exception_el(s)); }"} {"target": 1, "idx": 9460, "func": "static int iscsi_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { IscsiLun *iscsilun = bs->opaque; struct iscsi_context *iscsi = NULL; struct iscsi_url *iscsi_url = NULL; struct scsi_task *task = NULL; struct scsi_inquiry_standard *inq = NULL; struct 
scsi_inquiry_supported_pages *inq_vpd; char *initiator_name = NULL; QemuOpts *opts; Error *local_err = NULL; const char *filename; int i, ret; if ((BDRV_SECTOR_SIZE % 512) != 0) { error_setg(errp, \"iSCSI: Invalid BDRV_SECTOR_SIZE. \" \"BDRV_SECTOR_SIZE(%lld) is not a multiple \" \"of 512\", BDRV_SECTOR_SIZE); return -EINVAL; opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; filename = qemu_opt_get(opts, \"filename\"); iscsi_url = iscsi_parse_full_url(iscsi, filename); if (iscsi_url == NULL) { error_setg(errp, \"Failed to parse URL : %s\", filename); ret = -EINVAL; memset(iscsilun, 0, sizeof(IscsiLun)); initiator_name = parse_initiator_name(iscsi_url->target); iscsi = iscsi_create_context(initiator_name); if (iscsi == NULL) { error_setg(errp, \"iSCSI: Failed to create iSCSI context.\"); ret = -ENOMEM; if (iscsi_set_targetname(iscsi, iscsi_url->target)) { error_setg(errp, \"iSCSI: Failed to set target name.\"); ret = -EINVAL; if (iscsi_url->user != NULL) { ret = iscsi_set_initiator_username_pwd(iscsi, iscsi_url->user, iscsi_url->passwd); if (ret != 0) { error_setg(errp, \"Failed to set initiator username and password\"); ret = -EINVAL; /* check if we got CHAP username/password via the options */ parse_chap(iscsi, iscsi_url->target, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); ret = -EINVAL; if (iscsi_set_session_type(iscsi, ISCSI_SESSION_NORMAL) != 0) { error_setg(errp, \"iSCSI: Failed to set session type to normal.\"); ret = -EINVAL; iscsi_set_header_digest(iscsi, ISCSI_HEADER_DIGEST_NONE_CRC32C); /* check if we got HEADER_DIGEST via the options */ parse_header_digest(iscsi, iscsi_url->target, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); ret = -EINVAL; if (iscsi_full_connect_sync(iscsi, iscsi_url->portal, iscsi_url->lun) != 0) { error_setg(errp, \"iSCSI: Failed to connect to LUN : %s\", iscsi_get_error(iscsi)); ret = -EINVAL; iscsilun->iscsi = iscsi; iscsilun->aio_context = bdrv_get_aio_context(bs); iscsilun->lun = iscsi_url->lun; iscsilun->has_write_same = true; task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 0, 0, (void **) &inq, errp); if (task == NULL) { ret = -EINVAL; iscsilun->type = inq->periperal_device_type; scsi_free_scsi_task(task); task = NULL; iscsi_readcapacity_sync(iscsilun, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); ret = -EINVAL; bs->total_sectors = sector_lun2qemu(iscsilun->num_blocks, iscsilun); bs->request_alignment = iscsilun->block_size; /* We don't have any emulation for devices other than disks and CD-ROMs, so * this must be sg ioctl compatible. We force it to be sg, otherwise qemu * will try to read from the device to guess the image format. 
*/ if (iscsilun->type != TYPE_DISK && iscsilun->type != TYPE_ROM) { bs->sg = 1; task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1, SCSI_INQUIRY_PAGECODE_SUPPORTED_VPD_PAGES, (void **) &inq_vpd, errp); if (task == NULL) { ret = -EINVAL; for (i = 0; i < inq_vpd->num_pages; i++) { struct scsi_task *inq_task; struct scsi_inquiry_logical_block_provisioning *inq_lbp; struct scsi_inquiry_block_limits *inq_bl; switch (inq_vpd->pages[i]) { case SCSI_INQUIRY_PAGECODE_LOGICAL_BLOCK_PROVISIONING: inq_task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1, SCSI_INQUIRY_PAGECODE_LOGICAL_BLOCK_PROVISIONING, (void **) &inq_lbp, errp); if (inq_task == NULL) { ret = -EINVAL; memcpy(&iscsilun->lbp, inq_lbp, sizeof(struct scsi_inquiry_logical_block_provisioning)); scsi_free_scsi_task(inq_task); break; case SCSI_INQUIRY_PAGECODE_BLOCK_LIMITS: inq_task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1, SCSI_INQUIRY_PAGECODE_BLOCK_LIMITS, (void **) &inq_bl, errp); if (inq_task == NULL) { ret = -EINVAL; memcpy(&iscsilun->bl, inq_bl, sizeof(struct scsi_inquiry_block_limits)); scsi_free_scsi_task(inq_task); break; default: break; scsi_free_scsi_task(task); task = NULL; iscsi_attach_aio_context(bs, iscsilun->aio_context); /* Guess the internal cluster (page) size of the iscsi target by the means * of opt_unmap_gran. Transfer the unmap granularity only if it has a * reasonable size */ if (iscsilun->bl.opt_unmap_gran * iscsilun->block_size >= 4 * 1024 && iscsilun->bl.opt_unmap_gran * iscsilun->block_size <= 16 * 1024 * 1024) { iscsilun->cluster_sectors = (iscsilun->bl.opt_unmap_gran * iscsilun->block_size) >> BDRV_SECTOR_BITS; if (iscsilun->lbprz && !(bs->open_flags & BDRV_O_NOCACHE)) { iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun); if (iscsilun->allocationmap == NULL) { ret = -ENOMEM; out: qemu_opts_del(opts); g_free(initiator_name); if (iscsi_url != NULL) { iscsi_destroy_url(iscsi_url); if (task != NULL) { scsi_free_scsi_task(task); if (ret) { if (iscsi != NULL) { iscsi_destroy_context(iscsi); memset(iscsilun, 0, sizeof(IscsiLun)); return ret;"} {"target": 1, "idx": 9464, "func": "static inline void FUNC(idctSparseCol_extrashift)(int16_t *col) #else static inline void FUNC(idctSparseColPut)(pixel *dest, int line_size, int16_t *col) { int a0, a1, a2, a3, b0, b1, b2, b3; IDCT_COLS; dest[0] = av_clip_pixel((a0 + b0) >> COL_SHIFT); dest += line_size; dest[0] = av_clip_pixel((a1 + b1) >> COL_SHIFT); dest += line_size; dest[0] = av_clip_pixel((a2 + b2) >> COL_SHIFT); dest += line_size; dest[0] = av_clip_pixel((a3 + b3) >> COL_SHIFT); dest += line_size; dest[0] = av_clip_pixel((a3 - b3) >> COL_SHIFT); dest += line_size; dest[0] = av_clip_pixel((a2 - b2) >> COL_SHIFT); dest += line_size; dest[0] = av_clip_pixel((a1 - b1) >> COL_SHIFT); dest += line_size; dest[0] = av_clip_pixel((a0 - b0) >> COL_SHIFT); } static inline void FUNC(idctSparseColAdd)(pixel *dest, int line_size, int16_t *col) { int a0, a1, a2, a3, b0, b1, b2, b3; IDCT_COLS; dest[0] = av_clip_pixel(dest[0] + ((a0 + b0) >> COL_SHIFT)); dest += line_size; dest[0] = av_clip_pixel(dest[0] + ((a1 + b1) >> COL_SHIFT)); dest += line_size; dest[0] = av_clip_pixel(dest[0] + ((a2 + b2) >> COL_SHIFT)); dest += line_size; dest[0] = av_clip_pixel(dest[0] + ((a3 + b3) >> COL_SHIFT)); dest += line_size; dest[0] = av_clip_pixel(dest[0] + ((a3 - b3) >> COL_SHIFT)); dest += line_size; dest[0] = av_clip_pixel(dest[0] + ((a2 - b2) >> COL_SHIFT)); dest += line_size; dest[0] = av_clip_pixel(dest[0] + ((a1 - b1) >> COL_SHIFT)); dest += line_size; dest[0] = 
av_clip_pixel(dest[0] + ((a0 - b0) >> COL_SHIFT)); } static inline void FUNC(idctSparseCol)(int16_t *col) #endif { int a0, a1, a2, a3, b0, b1, b2, b3; IDCT_COLS; col[0 ] = ((a0 + b0) >> COL_SHIFT); col[8 ] = ((a1 + b1) >> COL_SHIFT); col[16] = ((a2 + b2) >> COL_SHIFT); col[24] = ((a3 + b3) >> COL_SHIFT); col[32] = ((a3 - b3) >> COL_SHIFT); col[40] = ((a2 - b2) >> COL_SHIFT); col[48] = ((a1 - b1) >> COL_SHIFT); col[56] = ((a0 - b0) >> COL_SHIFT); }"} {"target": 1, "idx": 9466, "func": "static void restore_median(uint8_t *src, int step, int stride, int width, int height, int slices, int rmode) { int i, j, slice; int A, B, C; uint8_t *bsrc; int slice_start, slice_height; const int cmask = ~rmode; for (slice = 0; slice < slices; slice++) { slice_start = ((slice * height) / slices) & cmask; slice_height = ((((slice + 1) * height) / slices) & cmask) - slice_start; bsrc = src + slice_start * stride; // first line - left neighbour prediction bsrc[0] += 0x80; A = bsrc[0]; for (i = step; i < width * step; i += step) { bsrc[i] += A; A = bsrc[i]; } bsrc += stride; if (slice_height == 1) // second line - first element has top prediction, the rest uses median C = bsrc[-stride]; bsrc[0] += C; A = bsrc[0]; for (i = step; i < width * step; i += step) { B = bsrc[i - stride]; bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); C = B; A = bsrc[i]; } bsrc += stride; // the rest of lines use continuous median prediction for (j = 2; j < slice_height; j++) { for (i = 0; i < width * step; i += step) { B = bsrc[i - stride]; bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); C = B; A = bsrc[i]; } bsrc += stride; } } }"} {"target": 0, "idx": 9481, "func": "static int decode_seq_header(AVSContext *h) { MpegEncContext *s = &h->s; int frame_rate_code; h->profile = get_bits(&s->gb,8); h->level = get_bits(&s->gb,8); skip_bits1(&s->gb); //progressive sequence s->width = get_bits(&s->gb,14); s->height = get_bits(&s->gb,14); skip_bits(&s->gb,2); //chroma format skip_bits(&s->gb,3); //sample_precision h->aspect_ratio = get_bits(&s->gb,4); frame_rate_code = get_bits(&s->gb,4); skip_bits(&s->gb,18);//bit_rate_lower skip_bits1(&s->gb); //marker_bit skip_bits(&s->gb,12);//bit_rate_upper s->low_delay = get_bits1(&s->gb); h->mb_width = (s->width + 15) >> 4; h->mb_height = (s->height + 15) >> 4; h->s.avctx->time_base.den = avpriv_frame_rate_tab[frame_rate_code].num; h->s.avctx->time_base.num = avpriv_frame_rate_tab[frame_rate_code].den; h->s.avctx->width = s->width; h->s.avctx->height = s->height; if(!h->top_qp) ff_cavs_init_top_lines(h); return 0; }"} {"target": 1, "idx": 9483, "func": "int ff_mpeg_er_init(MpegEncContext *s) { ERContext *er = &s->er; int mb_array_size = s->mb_height * s->mb_stride; int i; er->avctx = s->avctx; er->mb_index2xy = s->mb_index2xy; er->mb_num = s->mb_num; er->mb_width = s->mb_width; er->mb_height = s->mb_height; er->mb_stride = s->mb_stride; er->b8_stride = s->b8_stride; er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride); er->error_status_table = av_mallocz(mb_array_size); if (!er->er_temp_buffer || !er->error_status_table) goto fail; er->mbskip_table = s->mbskip_table; er->mbintra_table = s->mbintra_table; for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++) er->dc_val[i] = s->dc_val[i]; er->decode_mb = mpeg_er_decode_mb; er->opaque = s; return 0; fail: av_freep(&er->er_temp_buffer); av_freep(&er->error_status_table); return AVERROR(ENOMEM); }"} {"target": 1, "idx": 9495, "func": "static target_ulong remove_hpte(CPUPPCState *env, target_ulong ptex, target_ulong avpn, target_ulong flags, target_ulong 
*vp, target_ulong *rp) { uint8_t *hpte; target_ulong v, r, rb; if ((ptex * HASH_PTE_SIZE_64) & ~env->htab_mask) { return REMOVE_PARM; } hpte = env->external_htab + (ptex * HASH_PTE_SIZE_64); while (!lock_hpte(hpte, HPTE_V_HVLOCK)) { /* We have no real concurrency in qemu soft-emulation, so we * will never actually have a contested lock */ assert(0); } v = ldq_p(hpte); r = ldq_p(hpte + (HASH_PTE_SIZE_64/2)); if ((v & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) || ((flags & H_ANDCOND) && (v & avpn) != 0)) { stq_p(hpte, v & ~HPTE_V_HVLOCK); assert(!(ldq_p(hpte) & HPTE_V_HVLOCK)); return REMOVE_NOT_FOUND; } *vp = v & ~HPTE_V_HVLOCK; *rp = r; stq_p(hpte, 0); rb = compute_tlbie_rb(v, r, ptex); ppc_tlb_invalidate_one(env, rb); assert(!(ldq_p(hpte) & HPTE_V_HVLOCK)); return REMOVE_SUCCESS; }"} {"target": 1, "idx": 9512, "func": "SerialState *serial_init(int base, qemu_irq irq, int baudbase, CharDriverState *chr) { SerialState *s; s = qemu_mallocz(sizeof(SerialState)); if (!s) return NULL; s->irq = irq; s->baudbase = baudbase; s->tx_timer = qemu_new_timer(vm_clock, serial_tx_done, s); if (!s->tx_timer) return NULL; qemu_register_reset(serial_reset, s); serial_reset(s); register_savevm(\"serial\", base, 2, serial_save, serial_load, s); register_ioport_write(base, 8, 1, serial_ioport_write, s); register_ioport_read(base, 8, 1, serial_ioport_read, s); s->chr = chr; qemu_chr_add_handlers(chr, serial_can_receive1, serial_receive1, serial_event, s); return s; }"} {"target": 1, "idx": 9514, "func": "static void kvmclock_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = kvmclock_realize; dc->vmsd = &kvmclock_vmsd; dc->props = kvmclock_properties; }"} {"target": 1, "idx": 9515, "func": "int vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb) { int i; av_log(avctx, AV_LOG_DEBUG, \"Entry point: %08X\\n\", show_bits_long(gb, 32)); v->broken_link = get_bits1(gb); v->closed_entry = get_bits1(gb); v->panscanflag = get_bits1(gb); v->refdist_flag = get_bits1(gb); v->s.loop_filter = get_bits1(gb); v->fastuvmc = get_bits1(gb); v->extended_mv = get_bits1(gb); v->dquant = get_bits(gb, 2); v->vstransform = get_bits1(gb); v->overlap = get_bits1(gb); v->quantizer_mode = get_bits(gb, 2); if(v->hrd_param_flag){ for(i = 0; i < v->hrd_num_leaky_buckets; i++) { skip_bits(gb, 8); //hrd_full[n] } } if(get_bits1(gb)){ avctx->coded_width = (get_bits(gb, 12)+1)<<1; avctx->coded_height = (get_bits(gb, 12)+1)<<1; } if(v->extended_mv) v->extended_dmv = get_bits1(gb); if((v->range_mapy_flag = get_bits1(gb))) { av_log(avctx, AV_LOG_ERROR, \"Luma scaling is not supported, expect wrong picture\\n\"); v->range_mapy = get_bits(gb, 3); } if((v->range_mapuv_flag = get_bits1(gb))) { av_log(avctx, AV_LOG_ERROR, \"Chroma scaling is not supported, expect wrong picture\\n\"); v->range_mapuv = get_bits(gb, 3); } av_log(avctx, AV_LOG_DEBUG, \"Entry point info:\\n\" \"BrokenLink=%i, ClosedEntry=%i, PanscanFlag=%i\\n\" \"RefDist=%i, Postproc=%i, FastUVMC=%i, ExtMV=%i\\n\" \"DQuant=%i, VSTransform=%i, Overlap=%i, Qmode=%i\\n\", v->broken_link, v->closed_entry, v->panscanflag, v->refdist_flag, v->s.loop_filter, v->fastuvmc, v->extended_mv, v->dquant, v->vstransform, v->overlap, v->quantizer_mode); return 0; }"} {"target": 1, "idx": 9522, "func": "static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, int width, uint32_t *unused) { int i; for (i=0; i>(RGB2YUV_SHIFT+1); dstV[i]= (RV*r + GV*g + BV*b + 
(257<>(RGB2YUV_SHIFT+1); } assert(src1 == src2); }"} {"target": 1, "idx": 9523, "func": "static int64_t guest_file_handle_add(FILE *fh, Error **errp) { GuestFileHandle *gfh; int64_t handle; handle = ga_get_fd_handle(ga_state, errp); if (handle < 0) { return -1; } gfh = g_malloc0(sizeof(GuestFileHandle)); gfh->id = handle; gfh->fh = fh; QTAILQ_INSERT_TAIL(&guest_file_state.filehandles, gfh, next); return handle; }"} {"target": 0, "idx": 9538, "func": "static inline void cpu_handle_interrupt(CPUState *cpu, TranslationBlock **last_tb) { CPUClass *cc = CPU_GET_CLASS(cpu); int interrupt_request = cpu->interrupt_request; if (unlikely(interrupt_request)) { if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { /* Mask out external interrupts for this step. */ interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; } if (interrupt_request & CPU_INTERRUPT_DEBUG) { cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; cpu->exception_index = EXCP_DEBUG; cpu_loop_exit(cpu); } if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) { /* Do nothing */ } else if (interrupt_request & CPU_INTERRUPT_HALT) { replay_interrupt(); cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; cpu->halted = 1; cpu->exception_index = EXCP_HLT; cpu_loop_exit(cpu); } #if defined(TARGET_I386) else if (interrupt_request & CPU_INTERRUPT_INIT) { X86CPU *x86_cpu = X86_CPU(cpu); CPUArchState *env = &x86_cpu->env; replay_interrupt(); cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0); do_cpu_init(x86_cpu); cpu->exception_index = EXCP_HALTED; cpu_loop_exit(cpu); } #else else if (interrupt_request & CPU_INTERRUPT_RESET) { replay_interrupt(); cpu_reset(cpu); cpu_loop_exit(cpu); } #endif /* The target hook has 3 exit conditions: False when the interrupt isn't processed, True when it is, and we should restart on a new TB, and via longjmp via cpu_loop_exit. 
*/ else { replay_interrupt(); if (cc->cpu_exec_interrupt(cpu, interrupt_request)) { *last_tb = NULL; } /* The target hook may have updated the 'cpu->interrupt_request'; * reload the 'interrupt_request' value */ interrupt_request = cpu->interrupt_request; } if (interrupt_request & CPU_INTERRUPT_EXITTB) { cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; /* ensure that no TB jump will be modified as the program flow was changed */ *last_tb = NULL; } } if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) { atomic_set(&cpu->exit_request, 0); cpu->exception_index = EXCP_INTERRUPT; cpu_loop_exit(cpu); } }"} {"target": 0, "idx": 9540, "func": "static void test_redirector_tx(void) { int backend_sock[2], recv_sock; char *cmdline; uint32_t ret = 0, len = 0; char send_buf[] = \"Hello!!\"; char sock_path0[] = \"filter-redirector0.XXXXXX\"; char sock_path1[] = \"filter-redirector1.XXXXXX\"; char *recv_buf; uint32_t size = sizeof(send_buf); size = htonl(size); ret = socketpair(PF_UNIX, SOCK_STREAM, 0, backend_sock); g_assert_cmpint(ret, !=, -1); ret = mkstemp(sock_path0); g_assert_cmpint(ret, !=, -1); ret = mkstemp(sock_path1); g_assert_cmpint(ret, !=, -1); cmdline = g_strdup_printf(\"-netdev socket,id=qtest-bn0,fd=%d \" \"-device rtl8139,netdev=qtest-bn0,id=qtest-e0 \" \"-chardev socket,id=redirector0,path=%s,server,nowait \" \"-chardev socket,id=redirector1,path=%s,server,nowait \" \"-chardev socket,id=redirector2,path=%s,nowait \" \"-object filter-redirector,id=qtest-f0,netdev=qtest-bn0,\" \"queue=tx,outdev=redirector0 \" \"-object filter-redirector,id=qtest-f1,netdev=qtest-bn0,\" \"queue=tx,indev=redirector2 \" \"-object filter-redirector,id=qtest-f2,netdev=qtest-bn0,\" \"queue=tx,outdev=redirector1 \" , backend_sock[1], sock_path0, sock_path1, sock_path0); qtest_start(cmdline); g_free(cmdline); recv_sock = unix_connect(sock_path1, NULL); g_assert_cmpint(recv_sock, !=, -1); /* send a qmp command to guarantee that 'connected' is setting to true. 
*/ qmp_discard_response(\"{ 'execute' : 'query-status'}\"); struct iovec iov[] = { { .iov_base = &size, .iov_len = sizeof(size), }, { .iov_base = send_buf, .iov_len = sizeof(send_buf), }, }; ret = iov_send(backend_sock[0], iov, 2, 0, sizeof(size) + sizeof(send_buf)); g_assert_cmpint(ret, ==, sizeof(send_buf) + sizeof(size)); close(backend_sock[0]); ret = qemu_recv(recv_sock, &len, sizeof(len), 0); g_assert_cmpint(ret, ==, sizeof(len)); len = ntohl(len); g_assert_cmpint(len, ==, sizeof(send_buf)); recv_buf = g_malloc(len); ret = qemu_recv(recv_sock, recv_buf, len, 0); g_assert_cmpstr(recv_buf, ==, send_buf); g_free(recv_buf); close(recv_sock); unlink(sock_path0); unlink(sock_path1); qtest_end(); }"} {"target": 0, "idx": 9577, "func": "int acpi_table_add(const char *t) { static const char *dfl_id = \"QEMUQEMU\"; char buf[1024], *p, *f; struct acpi_table_header acpi_hdr; unsigned long val; uint32_t length; struct acpi_table_header *acpi_hdr_p; size_t off; memset(&acpi_hdr, 0, sizeof(acpi_hdr)); if (get_param_value(buf, sizeof(buf), \"sig\", t)) { strncpy(acpi_hdr.signature, buf, 4); } else { strncpy(acpi_hdr.signature, dfl_id, 4); } if (get_param_value(buf, sizeof(buf), \"rev\", t)) { val = strtoul(buf, &p, 10); if (val > 255 || *p != '\\0') goto out; } else { val = 1; } acpi_hdr.revision = (int8_t)val; if (get_param_value(buf, sizeof(buf), \"oem_id\", t)) { strncpy(acpi_hdr.oem_id, buf, 6); } else { strncpy(acpi_hdr.oem_id, dfl_id, 6); } if (get_param_value(buf, sizeof(buf), \"oem_table_id\", t)) { strncpy(acpi_hdr.oem_table_id, buf, 8); } else { strncpy(acpi_hdr.oem_table_id, dfl_id, 8); } if (get_param_value(buf, sizeof(buf), \"oem_rev\", t)) { val = strtol(buf, &p, 10); if(*p != '\\0') goto out; } else { val = 1; } acpi_hdr.oem_revision = cpu_to_le32(val); if (get_param_value(buf, sizeof(buf), \"asl_compiler_id\", t)) { strncpy(acpi_hdr.asl_compiler_id, buf, 4); } else { strncpy(acpi_hdr.asl_compiler_id, dfl_id, 4); } if (get_param_value(buf, sizeof(buf), \"asl_compiler_rev\", t)) { val = strtol(buf, &p, 10); if(*p != '\\0') goto out; } else { val = 1; } acpi_hdr.asl_compiler_revision = cpu_to_le32(val); if (!get_param_value(buf, sizeof(buf), \"data\", t)) { buf[0] = '\\0'; } length = sizeof(acpi_hdr); f = buf; while (buf[0]) { struct stat s; char *n = strchr(f, ':'); if (n) *n = '\\0'; if(stat(f, &s) < 0) { fprintf(stderr, \"Can't stat file '%s': %s\\n\", f, strerror(errno)); goto out; } length += s.st_size; if (!n) break; *n = ':'; f = n + 1; } if (!acpi_tables) { acpi_tables_len = sizeof(uint16_t); acpi_tables = qemu_mallocz(acpi_tables_len); } acpi_tables = qemu_realloc(acpi_tables, acpi_tables_len + sizeof(uint16_t) + length); p = acpi_tables + acpi_tables_len; acpi_tables_len += sizeof(uint16_t) + length; *(uint16_t*)p = cpu_to_le32(length); p += sizeof(uint16_t); memcpy(p, &acpi_hdr, sizeof(acpi_hdr)); off = sizeof(acpi_hdr); f = buf; while (buf[0]) { struct stat s; int fd; char *n = strchr(f, ':'); if (n) *n = '\\0'; fd = open(f, O_RDONLY); if(fd < 0) goto out; if(fstat(fd, &s) < 0) { close(fd); goto out; } /* off < length is necessary because file size can be changed under our foot */ while(s.st_size && off < length) { int r; r = read(fd, p + off, s.st_size); if (r > 0) { off += r; s.st_size -= r; } else if ((r < 0 && errno != EINTR) || r == 0) { close(fd); goto out; } } close(fd); if (!n) break; f = n + 1; } if (off < length) { /* don't pass random value in process to guest */ memset(p + off, 0, length - off); } acpi_hdr_p = (struct acpi_table_header*)p; acpi_hdr_p->length = 
cpu_to_le32(length); acpi_hdr_p->checksum = acpi_checksum((uint8_t*)p, length); /* increase number of tables */ (*(uint16_t*)acpi_tables) = cpu_to_le32(le32_to_cpu(*(uint16_t*)acpi_tables) + 1); return 0; out: if (acpi_tables) { qemu_free(acpi_tables); acpi_tables = NULL; } return -1; }"} {"target": 0, "idx": 9579, "func": "static void armv7m_nvic_class_init(ObjectClass *klass, void *data) { NVICClass *nc = NVIC_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass); nc->parent_reset = dc->reset; nc->parent_init = sdc->init; sdc->init = armv7m_nvic_init; dc->vmsd = &vmstate_nvic; dc->reset = armv7m_nvic_reset; dc->props = armv7m_nvic_properties; }"} {"target": 0, "idx": 9583, "func": "static int ehci_qh_do_overlay(EHCIQueue *q) { int i; int dtoggle; int ping; int eps; int reload; // remember values in fields to preserve in qh after overlay dtoggle = q->qh.token & QTD_TOKEN_DTOGGLE; ping = q->qh.token & QTD_TOKEN_PING; q->qh.current_qtd = q->qtdaddr; q->qh.next_qtd = q->qtd.next; q->qh.altnext_qtd = q->qtd.altnext; q->qh.token = q->qtd.token; eps = get_field(q->qh.epchar, QH_EPCHAR_EPS); if (eps == EHCI_QH_EPS_HIGH) { q->qh.token &= ~QTD_TOKEN_PING; q->qh.token |= ping; } reload = get_field(q->qh.epchar, QH_EPCHAR_RL); set_field(&q->qh.altnext_qtd, reload, QH_ALTNEXT_NAKCNT); for (i = 0; i < 5; i++) { q->qh.bufptr[i] = q->qtd.bufptr[i]; } if (!(q->qh.epchar & QH_EPCHAR_DTC)) { // preserve QH DT bit q->qh.token &= ~QTD_TOKEN_DTOGGLE; q->qh.token |= dtoggle; } q->qh.bufptr[1] &= ~BUFPTR_CPROGMASK_MASK; q->qh.bufptr[2] &= ~BUFPTR_FRAMETAG_MASK; put_dwords(NLPTR_GET(q->qhaddr), (uint32_t *) &q->qh, sizeof(EHCIqh) >> 2); return 0; }"} {"target": 0, "idx": 9585, "func": "static uint32_t intel_hda_mmio_readw(void *opaque, target_phys_addr_t addr) { IntelHDAState *d = opaque; const IntelHDAReg *reg = intel_hda_reg_find(d, addr); return intel_hda_reg_read(d, reg, 0xffff); }"} {"target": 0, "idx": 9586, "func": "static void rtas_ibm_read_pci_config(sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { uint64_t buid; uint32_t size, addr; if ((nargs != 4) || (nret != 2)) { rtas_st(rets, 0, -1); return; } buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2); size = rtas_ld(args, 3); addr = rtas_ld(args, 0); finish_read_pci_config(spapr, buid, addr, size, rets); }"} {"target": 1, "idx": 9611, "func": "static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg) { Object *pci_host; QObject *o; pci_host = acpi_get_i386_pci_host(); g_assert(pci_host); o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_BASE, NULL); if (!o) { return false; } mcfg->mcfg_base = qnum_get_int(qobject_to_qnum(o)); qobject_decref(o); o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_SIZE, NULL); assert(o); mcfg->mcfg_size = qnum_get_int(qobject_to_qnum(o)); qobject_decref(o); return true; }"} {"target": 1, "idx": 9613, "func": "static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { #ifdef DEBUG_UNASSIGNED printf(\"Unassigned mem write \" TARGET_FMT_plx \" = 0x%x\\n\", addr, val); #endif #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 1, 0, 0, 4); #endif }"} {"target": 0, "idx": 9627, "func": "void sparc64_get_context(CPUSPARCState *env) { abi_ulong ucp_addr; struct target_ucontext *ucp; target_mc_gregset_t *grp; target_mcontext_t *mcp; abi_ulong fp, i7, w_addr; int err; unsigned int i; target_sigset_t target_set; sigset_t set; 
ucp_addr = env->regwptr[UREG_I0]; if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) goto do_sigsegv; mcp = &ucp->tuc_mcontext; grp = &mcp->mc_gregs; /* Skip over the trap instruction, first. */ env->pc = env->npc; env->npc += 4; err = 0; sigprocmask(0, NULL, &set); host_to_target_sigset_internal(&target_set, &set); if (TARGET_NSIG_WORDS == 1) { err |= __put_user(target_set.sig[0], (abi_ulong *)&ucp->tuc_sigmask); } else { abi_ulong *src, *dst; src = target_set.sig; dst = ucp->tuc_sigmask.sig; for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { err |= __put_user(*src, dst); } if (err) goto do_sigsegv; } /* XXX: tstate must be saved properly */ // err |= __put_user(env->tstate, &((*grp)[MC_TSTATE])); err |= __put_user(env->pc, &((*grp)[MC_PC])); err |= __put_user(env->npc, &((*grp)[MC_NPC])); err |= __put_user(env->y, &((*grp)[MC_Y])); err |= __put_user(env->gregs[1], &((*grp)[MC_G1])); err |= __put_user(env->gregs[2], &((*grp)[MC_G2])); err |= __put_user(env->gregs[3], &((*grp)[MC_G3])); err |= __put_user(env->gregs[4], &((*grp)[MC_G4])); err |= __put_user(env->gregs[5], &((*grp)[MC_G5])); err |= __put_user(env->gregs[6], &((*grp)[MC_G6])); err |= __put_user(env->gregs[7], &((*grp)[MC_G7])); err |= __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0])); err |= __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1])); err |= __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2])); err |= __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3])); err |= __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4])); err |= __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5])); err |= __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6])); err |= __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7])); w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; fp = i7 = 0; if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), abi_ulong) != 0) goto do_sigsegv; if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), abi_ulong) != 0) goto do_sigsegv; err |= __put_user(fp, &(mcp->mc_fp)); err |= __put_user(i7, &(mcp->mc_i7)); { uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; for (i = 0; i < 64; i++, dst++) { if (i & 1) { err |= __put_user(env->fpr[i/2].l.lower, dst); } else { err |= __put_user(env->fpr[i/2].l.upper, dst); } } } err |= __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr)); err |= __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr)); err |= __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs)); if (err) goto do_sigsegv; unlock_user_struct(ucp, ucp_addr, 1); return; do_sigsegv: unlock_user_struct(ucp, ucp_addr, 1); force_sig(TARGET_SIGSEGV); }"} {"target": 0, "idx": 9642, "func": "static int proxy_utimensat(FsContext *s, V9fsPath *fs_path, const struct timespec *buf) { int retval; retval = v9fs_request(s->private, T_UTIME, NULL, \"sqqqq\", fs_path, buf[0].tv_sec, buf[0].tv_nsec, buf[1].tv_sec, buf[1].tv_nsec); if (retval < 0) { errno = -retval; } return retval; }"} {"target": 1, "idx": 9649, "func": "static int subviewer_decode_frame(AVCodecContext *avctx, void *data, int *got_sub_ptr, AVPacket *avpkt) { char c; AVSubtitle *sub = data; const char *ptr = avpkt->data; AVBPrint buf; /* To be removed later */ if (sscanf(ptr, \"%*u:%*u:%*u.%*u,%*u:%*u:%*u.%*u%c\", &c) == 1) { av_log(avctx, AV_LOG_ERROR, \"AVPacket is not clean (contains timing \" \"information). 
You need to upgrade your libavformat or \" \"sanitize your packet.\\n\"); return AVERROR_INVALIDDATA; } av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED); // note: no need to rescale pts & duration since they are in the same // timebase as ASS (1/100) if (ptr && avpkt->size > 0 && !subviewer_event_to_ass(&buf, ptr)) ff_ass_add_rect(sub, buf.str, avpkt->pts, avpkt->duration, 0); *got_sub_ptr = sub->num_rects > 0; av_bprint_finalize(&buf, NULL); return avpkt->size; }"} {"target": 1, "idx": 9659, "func": "void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n, int dir) { int i; int16_t *ac_val, *ac_val1; int8_t *const qscale_table = s->current_picture.qscale_table; /* find prediction */ ac_val = s->ac_val[0][0] + s->block_index[n] * 16; ac_val1 = ac_val; if (s->ac_pred) { if (dir == 0) { const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride; /* left prediction */ ac_val -= 16; if (s->mb_x == 0 || s->qscale == qscale_table[xy] || n == 1 || n == 3) { /* same qscale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i << 3]] += ac_val[i]; } else { /* different qscale, we must rescale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i << 3]] += ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale); } } else { const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride; /* top prediction */ ac_val -= 16 * s->block_wrap[n]; if (s->mb_y == 0 || s->qscale == qscale_table[xy] || n == 2 || n == 3) { /* same qscale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i]] += ac_val[i + 8]; } else { /* different qscale, we must rescale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->qscale); } } } /* left copy */ for (i = 1; i < 8; i++) ac_val1[i] = block[s->idsp.idct_permutation[i << 3]]; /* top copy */ for (i = 1; i < 8; i++) ac_val1[8 + i] = block[s->idsp.idct_permutation[i]]; }"} {"target": 1, "idx": 9660, "func": "static int csrhci_write(struct CharDriverState *chr, const uint8_t *buf, int len) { struct csrhci_s *s = (struct csrhci_s *) chr->opaque; int plen = s->in_len; if (!s->enable) return 0; s->in_len += len; memcpy(s->inpkt + plen, buf, len); while (1) { if (s->in_len >= 2 && plen < 2) s->in_hdr = csrhci_header_len(s->inpkt) + 1; if (s->in_len >= s->in_hdr && plen < s->in_hdr) s->in_data = csrhci_data_len(s->inpkt) + s->in_hdr; if (s->in_len >= s->in_data) { csrhci_in_packet(s, s->inpkt); memmove(s->inpkt, s->inpkt + s->in_len, s->in_len - s->in_data); s->in_len -= s->in_data; s->in_hdr = INT_MAX; s->in_data = INT_MAX; plen = 0; } else break; } return len; }"} {"target": 0, "idx": 9661, "func": "static int standard_decode_picture_header(VC9Context *v) { int status = 0; if (v->finterpflag) v->interpfrm = get_bits(&v->gb, 1); skip_bits(&v->gb, 2); //framecnt unused if (v->rangered) v->rangeredfrm = get_bits(&v->gb, 1); v->pict_type = get_bits(&v->gb, 1); if (v->avctx->max_b_frames && !v->pict_type) { if (get_bits(&v->gb, 1)) v->pict_type = I_TYPE; else v->pict_type = P_TYPE; } else v->pict_type++; //P_TYPE switch (v->pict_type) { case I_TYPE: status = decode_i_picture_header(v); break; case BI_TYPE: status = decode_b_picture_header(v); break; case P_TYPE: status = decode_p_picture_header(v); break; case B_TYPE: status = decode_b_picture_header(v); break; } if (status == FRAME_SKIPED) { av_log(v, AV_LOG_INFO, \"Skipping frame...\\n\"); return status; } /* AC/DC Syntax */ v->transacfrm = get_bits(&v->gb, 1); if (v->transacfrm) v->transacfrm += get_bits(&v->gb, 1); if (v->pict_type == I_TYPE || 
v->pict_type == BI_TYPE) { v->transacfrm2 = get_bits(&v->gb, 1); if (v->transacfrm2) v->transacfrm2 += get_bits(&v->gb, 1); } v->transacdctab = get_bits(&v->gb, 1); return 0; }"} {"target": 0, "idx": 9666, "func": "static void do_program_interrupt(CPUS390XState *env) { uint64_t mask, addr; LowCore *lowcore; hwaddr len = TARGET_PAGE_SIZE; int ilen = env->int_pgm_ilen; switch (ilen) { case ILEN_LATER: ilen = get_ilen(cpu_ldub_code(env, env->psw.addr)); break; case ILEN_LATER_INC: ilen = get_ilen(cpu_ldub_code(env, env->psw.addr)); env->psw.addr += ilen; break; default: assert(ilen == 2 || ilen == 4 || ilen == 6); } qemu_log_mask(CPU_LOG_INT, \"%s: code=0x%x ilen=%d\\n\", __func__, env->int_pgm_code, ilen); lowcore = cpu_physical_memory_map(env->psa, &len, 1); lowcore->pgm_ilen = cpu_to_be16(ilen); lowcore->pgm_code = cpu_to_be16(env->int_pgm_code); lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env)); lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr); mask = be64_to_cpu(lowcore->program_new_psw.mask); addr = be64_to_cpu(lowcore->program_new_psw.addr); cpu_physical_memory_unmap(lowcore, len, 1, len); DPRINTF(\"%s: %x %x %\" PRIx64 \" %\" PRIx64 \"\\n\", __func__, env->int_pgm_code, ilen, env->psw.mask, env->psw.addr); load_psw(env, mask, addr); }"} {"target": 0, "idx": 9669, "func": "int get_physical_address(CPUState * env, target_ulong * physical, int *prot, target_ulong address, int rw, int access_type) { /* P1, P2 and P4 areas do not use translation */ if ((address >= 0x80000000 && address < 0xc0000000) || address >= 0xe0000000) { if (!(env->sr & SR_MD) && (address < 0xe0000000 || address > 0xe4000000)) { /* Unauthorized access in user mode (only store queues are available) */ fprintf(stderr, \"Unauthorized access\\n\"); return (rw & PAGE_WRITE) ? MMU_DTLB_MISS_WRITE : MMU_DTLB_MISS_READ; } /* Mask upper 3 bits */ *physical = address & 0x1FFFFFFF; *prot = PAGE_READ | PAGE_WRITE; return MMU_OK; } /* If MMU is disabled, return the corresponding physical page */ if (!env->mmucr & MMUCR_AT) { *physical = address & 0x1FFFFFFF; *prot = PAGE_READ | PAGE_WRITE; return MMU_OK; } /* We need to resort to the MMU */ return get_mmu_address(env, physical, prot, address, rw, access_type); }"} {"target": 0, "idx": 9678, "func": "static int multiwrite_f(BlockBackend *blk, int argc, char **argv) { struct timeval t1, t2; int Cflag = 0, qflag = 0; int c, cnt; char **buf; int64_t offset, first_offset = 0; /* Some compilers get confused and warn if this is not initialized. 
*/ int total = 0; int nr_iov; int nr_reqs; int pattern = 0xcd; QEMUIOVector *qiovs; int i; BlockRequest *reqs; while ((c = getopt(argc, argv, \"CqP:\")) != EOF) { switch (c) { case 'C': Cflag = 1; break; case 'q': qflag = 1; break; case 'P': pattern = parse_pattern(optarg); if (pattern < 0) { return 0; } break; default: return qemuio_command_usage(&writev_cmd); } } if (optind > argc - 2) { return qemuio_command_usage(&writev_cmd); } nr_reqs = 1; for (i = optind; i < argc; i++) { if (!strcmp(argv[i], \";\")) { nr_reqs++; } } reqs = g_new0(BlockRequest, nr_reqs); buf = g_new0(char *, nr_reqs); qiovs = g_new(QEMUIOVector, nr_reqs); for (i = 0; i < nr_reqs && optind < argc; i++) { int j; /* Read the offset of the request */ offset = cvtnum(argv[optind]); if (offset < 0) { printf(\"non-numeric offset argument -- %s\\n\", argv[optind]); goto out; } optind++; if (offset & 0x1ff) { printf(\"offset %lld is not sector aligned\\n\", (long long)offset); goto out; } if (i == 0) { first_offset = offset; } /* Read lengths for qiov entries */ for (j = optind; j < argc; j++) { if (!strcmp(argv[j], \";\")) { break; } } nr_iov = j - optind; /* Build request */ buf[i] = create_iovec(blk, &qiovs[i], &argv[optind], nr_iov, pattern); if (buf[i] == NULL) { goto out; } reqs[i].qiov = &qiovs[i]; reqs[i].sector = offset >> 9; reqs[i].nb_sectors = reqs[i].qiov->size >> 9; optind = j + 1; pattern++; } /* If there were empty requests at the end, ignore them */ nr_reqs = i; gettimeofday(&t1, NULL); cnt = do_aio_multiwrite(blk, reqs, nr_reqs, &total); gettimeofday(&t2, NULL); if (cnt < 0) { printf(\"aio_multiwrite failed: %s\\n\", strerror(-cnt)); goto out; } if (qflag) { goto out; } /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, t1); print_report(\"wrote\", &t2, first_offset, total, total, cnt, Cflag); out: for (i = 0; i < nr_reqs; i++) { qemu_io_free(buf[i]); if (reqs[i].qiov != NULL) { qemu_iovec_destroy(&qiovs[i]); } } g_free(buf); g_free(reqs); g_free(qiovs); return 0; }"} {"target": 0, "idx": 9688, "func": "static void rtsp_cmd_setup(HTTPContext *c, const char *url, RTSPHeader *h) { FFStream *stream; int stream_index, port; char buf[1024]; char path1[1024]; const char *path; HTTPContext *rtp_c; RTSPTransportField *th; struct sockaddr_in dest_addr; RTSPActionServerSetup setup; /* find which url is asked */ url_split(NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url); path = path1; if (*path == '/') path++; /* now check each stream */ for(stream = first_stream; stream != NULL; stream = stream->next) { if (!stream->is_feed && stream->fmt == &rtp_mux) { /* accept aggregate filenames only if single stream */ if (!strcmp(path, stream->filename)) { if (stream->nb_streams != 1) { rtsp_reply_error(c, RTSP_STATUS_AGGREGATE); return; } stream_index = 0; goto found; } for(stream_index = 0; stream_index < stream->nb_streams; stream_index++) { snprintf(buf, sizeof(buf), \"%s/streamid=%d\", stream->filename, stream_index); if (!strcmp(path, buf)) goto found; } } } /* no stream found */ rtsp_reply_error(c, RTSP_STATUS_SERVICE); /* XXX: right error ? 
*/ return; found: /* generate session id if needed */ if (h->session_id[0] == '\\0') { snprintf(h->session_id, sizeof(h->session_id), \"%08x%08x\", (int)random(), (int)random()); } /* find rtp session, and create it if none found */ rtp_c = find_rtp_session(h->session_id); if (!rtp_c) { /* always prefer UDP */ th = find_transport(h, RTSP_PROTOCOL_RTP_UDP); if (!th) { th = find_transport(h, RTSP_PROTOCOL_RTP_TCP); if (!th) { rtsp_reply_error(c, RTSP_STATUS_TRANSPORT); return; } } rtp_c = rtp_new_connection(&c->from_addr, stream, h->session_id, th->protocol); if (!rtp_c) { rtsp_reply_error(c, RTSP_STATUS_BANDWIDTH); return; } /* open input stream */ if (open_input_stream(rtp_c, \"\") < 0) { rtsp_reply_error(c, RTSP_STATUS_INTERNAL); return; } } /* test if stream is OK (test needed because several SETUP needs to be done for a given file) */ if (rtp_c->stream != stream) { rtsp_reply_error(c, RTSP_STATUS_SERVICE); return; } /* test if stream is already set up */ if (rtp_c->rtp_ctx[stream_index]) { rtsp_reply_error(c, RTSP_STATUS_STATE); return; } /* check transport */ th = find_transport(h, rtp_c->rtp_protocol); if (!th || (th->protocol == RTSP_PROTOCOL_RTP_UDP && th->client_port_min <= 0)) { rtsp_reply_error(c, RTSP_STATUS_TRANSPORT); return; } /* setup default options */ setup.transport_option[0] = '\\0'; dest_addr = rtp_c->from_addr; dest_addr.sin_port = htons(th->client_port_min); /* add transport option if needed */ if (ff_rtsp_callback) { setup.ipaddr = ntohl(dest_addr.sin_addr.s_addr); if (ff_rtsp_callback(RTSP_ACTION_SERVER_SETUP, rtp_c->session_id, (char *)&setup, sizeof(setup), stream->rtsp_option) < 0) { rtsp_reply_error(c, RTSP_STATUS_TRANSPORT); return; } dest_addr.sin_addr.s_addr = htonl(setup.ipaddr); } /* setup stream */ if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, c) < 0) { rtsp_reply_error(c, RTSP_STATUS_TRANSPORT); return; } /* now everything is OK, so we can send the connection parameters */ rtsp_reply_header(c, RTSP_STATUS_OK); /* session ID */ url_fprintf(c->pb, \"Session: %s\\r\\n\", rtp_c->session_id); switch(rtp_c->rtp_protocol) { case RTSP_PROTOCOL_RTP_UDP: port = rtp_get_local_port(rtp_c->rtp_handles[stream_index]); url_fprintf(c->pb, \"Transport: RTP/AVP/UDP;unicast;\" \"client_port=%d-%d;server_port=%d-%d\", th->client_port_min, th->client_port_min + 1, port, port + 1); break; case RTSP_PROTOCOL_RTP_TCP: url_fprintf(c->pb, \"Transport: RTP/AVP/TCP;interleaved=%d-%d\", stream_index * 2, stream_index * 2 + 1); break; default: break; } if (setup.transport_option[0] != '\\0') { url_fprintf(c->pb, \";%s\", setup.transport_option); } url_fprintf(c->pb, \"\\r\\n\"); url_fprintf(c->pb, \"\\r\\n\"); }"} {"target": 0, "idx": 9689, "func": "static int show_format(WriterContext *w, AVFormatContext *fmt_ctx) { char val_str[128]; int64_t size = fmt_ctx->pb ? 
avio_size(fmt_ctx->pb) : -1; int ret = 0; writer_print_section_header(w, SECTION_ID_FORMAT); print_str(\"filename\", fmt_ctx->filename); print_int(\"nb_streams\", fmt_ctx->nb_streams); print_int(\"nb_programs\", fmt_ctx->nb_programs); print_str(\"format_name\", fmt_ctx->iformat->name); if (!do_bitexact) { if (fmt_ctx->iformat->long_name) print_str (\"format_long_name\", fmt_ctx->iformat->long_name); else print_str_opt(\"format_long_name\", \"unknown\"); } print_time(\"start_time\", fmt_ctx->start_time, &AV_TIME_BASE_Q); print_time(\"duration\", fmt_ctx->duration, &AV_TIME_BASE_Q); if (size >= 0) print_val (\"size\", size, unit_byte_str); else print_str_opt(\"size\", \"N/A\"); if (fmt_ctx->bit_rate > 0) print_val (\"bit_rate\", fmt_ctx->bit_rate, unit_bit_per_second_str); else print_str_opt(\"bit_rate\", \"N/A\"); print_int(\"probe_score\", av_format_get_probe_score(fmt_ctx)); ret = show_tags(w, fmt_ctx->metadata, SECTION_ID_FORMAT_TAGS); writer_print_section_footer(w); fflush(stdout); return ret; }"} {"target": 0, "idx": 9697, "func": "static void n8x0_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, struct arm_boot_info *binfo, int model) { struct n800_s *s = (struct n800_s *) qemu_mallocz(sizeof(*s)); int sdram_size = binfo->ram_size; int onenandram_size = 0x00010000; DisplayState *ds; if (ram_size < sdram_size + onenandram_size + OMAP242X_SRAM_SIZE) { fprintf(stderr, \"This architecture uses %i bytes of memory\\n\", sdram_size + onenandram_size + OMAP242X_SRAM_SIZE); exit(1); } s->cpu = omap2420_mpu_init(sdram_size, cpu_model); /* Setup peripherals * * Believed external peripherals layout in the N810: * (spi bus 1) * tsc2005 * lcd_mipid * (spi bus 2) * Conexant cx3110x (WLAN) * optional: pc2400m (WiMAX) * (i2c bus 0) * TLV320AIC33 (audio codec) * TCM825x (camera by Toshiba) * lp5521 (clever LEDs) * tsl2563 (light sensor, hwmon, model 7, rev. 0) * lm8323 (keypad, manf 00, rev 04) * (i2c bus 1) * tmp105 (temperature sensor, hwmon) * menelaus (pm) * (somewhere on i2c - maybe N800-only) * tea5761 (FM tuner) * (serial 0) * GPS * (some serial port) * csr41814 (Bluetooth) */ n8x0_gpio_setup(s); n8x0_nand_setup(s); n8x0_i2c_setup(s); if (model == 800) n800_tsc_kbd_setup(s); else if (model == 810) { n810_tsc_setup(s); n810_kbd_setup(s); } n8x0_spi_setup(s); n8x0_dss_setup(s); n8x0_cbus_setup(s); n8x0_uart_setup(s); if (usb_enabled) n8x0_usb_setup(s); /* Setup initial (reset) machine state */ /* Start at the OneNAND bootloader. */ s->cpu->env->regs[15] = 0; if (kernel_filename) { /* Or at the linux loader. */ binfo->kernel_filename = kernel_filename; binfo->kernel_cmdline = kernel_cmdline; binfo->initrd_filename = initrd_filename; arm_load_kernel(s->cpu->env, binfo); qemu_register_reset(n8x0_boot_init, s); n8x0_boot_init(s); } if (option_rom[0] && (boot_device[0] == 'n' || !kernel_filename)) { int rom_size; uint8_t nolo_tags[0x10000]; /* No, wait, better start at the ROM. */ s->cpu->env->regs[15] = OMAP2_Q2_BASE + 0x400000; /* This is intended for loading the `secondary.bin' program from * Nokia images (the NOLO bootloader). The entry point seems * to be at OMAP2_Q2_BASE + 0x400000. * * The `2nd.bin' files contain some kind of earlier boot code and * for them the entry point needs to be set to OMAP2_SRAM_BASE. * * The code above is for loading the `zImage' file from Nokia * images. 
*/ rom_size = load_image_targphys(option_rom[0], OMAP2_Q2_BASE + 0x400000, sdram_size - 0x400000); printf(\"%i bytes of image loaded\\n\", rom_size); n800_setup_nolo_tags(nolo_tags); cpu_physical_memory_write(OMAP2_SRAM_BASE, nolo_tags, 0x10000); } /* FIXME: We shouldn't really be doing this here. The LCD controller will set the size once configured, so this just sets an initial size until the guest activates the display. */ ds = get_displaystate(); ds->surface = qemu_resize_displaysurface(ds, 800, 480); dpy_resize(ds); }"} {"target": 0, "idx": 9699, "func": "void replay_fetch_data_kind(void) { if (replay_file) { if (!replay_has_unread_data) { replay_data_kind = replay_get_byte(); if (replay_data_kind == EVENT_INSTRUCTION) { replay_state.instructions_count = replay_get_dword(); } replay_check_error(); replay_has_unread_data = 1; if (replay_data_kind >= EVENT_COUNT) { error_report(\"Replay: unknown event kind %d\", replay_data_kind); exit(1); } } } }"} {"target": 0, "idx": 9700, "func": "static void DMA_run (void) { struct dma_cont *d; int icont, ichan; int rearm = 0; static int running = 0; if (running) { rearm = 1; goto out; } else { running = 1; } d = dma_controllers; for (icont = 0; icont < 2; icont++, d++) { for (ichan = 0; ichan < 4; ichan++) { int mask; mask = 1 << ichan; if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) { channel_run (icont, ichan); rearm = 1; } } } running = 0; out: if (rearm) qemu_bh_schedule_idle(dma_bh); }"} {"target": 0, "idx": 9701, "func": "int bdrv_img_create(const char *filename, const char *fmt, const char *base_filename, const char *base_fmt, char *options, uint64_t img_size, int flags) { QEMUOptionParameter *param = NULL, *create_options = NULL; QEMUOptionParameter *backing_fmt; BlockDriverState *bs = NULL; BlockDriver *drv, *proto_drv; int ret = 0; /* Find driver and parse its options */ drv = bdrv_find_format(fmt); if (!drv) { error_report(\"Unknown file format '%s'\", fmt); ret = -1; goto out; } proto_drv = bdrv_find_protocol(filename); if (!proto_drv) { error_report(\"Unknown protocol '%s'\", filename); ret = -1; goto out; } create_options = append_option_parameters(create_options, drv->create_options); create_options = append_option_parameters(create_options, proto_drv->create_options); /* Create parameter list with default values */ param = parse_option_parameters(\"\", create_options, param); set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size); /* Parse -o options */ if (options) { param = parse_option_parameters(options, create_options, param); if (param == NULL) { error_report(\"Invalid options for file format '%s'.\", fmt); ret = -1; goto out; } } if (base_filename) { if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE, base_filename)) { error_report(\"Backing file not supported for file format '%s'\", fmt); ret = -1; goto out; } } if (base_fmt) { if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) { error_report(\"Backing file format not supported for file \" \"format '%s'\", fmt); ret = -1; goto out; } } backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT); if (backing_fmt && backing_fmt->value.s) { if (!bdrv_find_format(backing_fmt->value.s)) { error_report(\"Unknown backing file format '%s'\", backing_fmt->value.s); ret = -1; goto out; } } // The size for the image must always be specified, with one exception: // If we are using a backing file, we can obtain the size from there if (get_option_parameter(param, BLOCK_OPT_SIZE)->value.n == -1) { QEMUOptionParameter *backing_file = 
get_option_parameter(param, BLOCK_OPT_BACKING_FILE); if (backing_file && backing_file->value.s) { uint64_t size; const char *fmt = NULL; char buf[32]; if (backing_fmt && backing_fmt->value.s) { fmt = backing_fmt->value.s; } bs = bdrv_new(\"\"); ret = bdrv_open(bs, backing_file->value.s, flags, drv); if (ret < 0) { error_report(\"Could not open '%s'\", filename); ret = -1; goto out; } bdrv_get_geometry(bs, &size); size *= 512; snprintf(buf, sizeof(buf), \"%\" PRId64, size); set_option_parameter(param, BLOCK_OPT_SIZE, buf); } else { error_report(\"Image creation needs a size parameter\"); ret = -1; goto out; } } printf(\"Formatting '%s', fmt=%s \", filename, fmt); print_option_parameters(param); puts(\"\"); ret = bdrv_create(drv, filename, param); if (ret < 0) { if (ret == -ENOTSUP) { error_report(\"Formatting or formatting option not supported for \" \"file format '%s'\", fmt); } else if (ret == -EFBIG) { error_report(\"The image size is too large for file format '%s'\", fmt); } else { error_report(\"%s: error while creating %s: %s\", filename, fmt, strerror(-ret)); } } out: free_option_parameters(create_options); free_option_parameters(param); if (bs) { bdrv_delete(bs); } if (ret) { return 1; } return 0; }"} {"target": 0, "idx": 9712, "func": "static inline void reset_bit(uint32_t *field, int bit) { field[bit >> 5] &= ~(1 << (bit & 0x1F)); }"} {"target": 0, "idx": 9743, "func": "void bdrv_aio_cancel_async(BlockAIOCB *acb) { if (acb->aiocb_info->cancel_async) { acb->aiocb_info->cancel_async(acb); } }"} {"target": 0, "idx": 9745, "func": "Visitor *qapi_dealloc_get_visitor(QapiDeallocVisitor *v) { return &v->visitor; }"} {"target": 0, "idx": 9747, "func": "int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx, AVFilterBufferRef *samplesref, int av_unused flags) { BufferSourceContext *abuffer = ctx->priv; AVFilterLink *link; int ret, logged = 0; if (av_fifo_space(abuffer->fifo) < sizeof(samplesref)) { av_log(ctx, AV_LOG_ERROR, \"Buffering limit reached. 
Please consume some available frames \" \"before adding new ones.\\n\"); return AVERROR(EINVAL); } // Normalize input link = ctx->outputs[0]; if (samplesref->audio->sample_rate != link->sample_rate) { log_input_change(ctx, link, samplesref); logged = 1; abuffer->sample_rate = samplesref->audio->sample_rate; if (!abuffer->aresample) { ret = insert_filter(abuffer, link, &abuffer->aresample, \"aresample\"); if (ret < 0) return ret; } else { link = abuffer->aresample->outputs[0]; if (samplesref->audio->sample_rate == link->sample_rate) remove_filter(&abuffer->aresample); else if ((ret = reconfigure_filter(abuffer, abuffer->aresample)) < 0) return ret; } } link = ctx->outputs[0]; if (samplesref->format != link->format || samplesref->audio->channel_layout != link->channel_layout || samplesref->audio->planar != link->planar) { if (!logged) log_input_change(ctx, link, samplesref); abuffer->sample_format = samplesref->format; abuffer->channel_layout = samplesref->audio->channel_layout; abuffer->packing_format = samplesref->audio->planar; if (!abuffer->aconvert) { ret = insert_filter(abuffer, link, &abuffer->aconvert, \"aconvert\"); if (ret < 0) return ret; } else { link = abuffer->aconvert->outputs[0]; if (samplesref->format == link->format && samplesref->audio->channel_layout == link->channel_layout && samplesref->audio->planar == link->planar ) remove_filter(&abuffer->aconvert); else if ((ret = reconfigure_filter(abuffer, abuffer->aconvert)) < 0) return ret; } } if (sizeof(samplesref) != av_fifo_generic_write(abuffer->fifo, &samplesref, sizeof(samplesref), NULL)) { av_log(ctx, AV_LOG_ERROR, \"Error while writing to FIFO\\n\"); return AVERROR(EINVAL); } return 0; }"} {"target": 0, "idx": 9748, "func": "static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout) { struct v4l2_plane planes[VIDEO_MAX_PLANES]; struct v4l2_buffer buf = { 0 }; V4L2Buffer* avbuf = NULL; struct pollfd pfd = { .events = POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM, /* default blocking capture */ .fd = ctx_to_m2mctx(ctx)->fd, }; int ret; if (V4L2_TYPE_IS_OUTPUT(ctx->type)) pfd.events = POLLOUT | POLLWRNORM; for (;;) { ret = poll(&pfd, 1, timeout); if (ret > 0) break; if (errno == EINTR) continue; /* timeout is being used to indicate last valid bufer when draining */ if (ctx_to_m2mctx(ctx)->draining) ctx->done = 1; return NULL; } /* 0. handle errors */ if (pfd.revents & POLLERR) { av_log(logger(ctx), AV_LOG_WARNING, \"%s POLLERR\\n\", ctx->name); return NULL; } /* 1. handle resolution changes */ if (pfd.revents & POLLPRI) { ret = v4l2_handle_event(ctx); if (ret < 0) { /* if re-init failed, abort */ ctx->done = EINVAL; return NULL; } if (ret) { /* if re-init was successful drop the buffer (if there was one) * since we had to reconfigure capture (unmap all buffers) */ return NULL; } } /* 2. 
dequeue the buffer */ if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) { if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) { /* there is a capture buffer ready */ if (pfd.revents & (POLLIN | POLLRDNORM)) goto dequeue; /* the driver is ready to accept more input; instead of waiting for the capture * buffer to complete we return NULL so input can proceed (we are single threaded) */ if (pfd.revents & (POLLOUT | POLLWRNORM)) return NULL; } dequeue: memset(&buf, 0, sizeof(buf)); buf.memory = V4L2_MEMORY_MMAP; buf.type = ctx->type; if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) { memset(planes, 0, sizeof(planes)); buf.length = VIDEO_MAX_PLANES; buf.m.planes = planes; } ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf); if (ret) { if (errno != EAGAIN) { ctx->done = errno; if (errno != EPIPE) av_log(logger(ctx), AV_LOG_DEBUG, \"%s VIDIOC_DQBUF, errno (%s)\\n\", ctx->name, av_err2str(AVERROR(errno))); } } else { avbuf = &ctx->buffers[buf.index]; avbuf->status = V4L2BUF_AVAILABLE; avbuf->buf = buf; if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) { memcpy(avbuf->planes, planes, sizeof(planes)); avbuf->buf.m.planes = avbuf->planes; } } } return avbuf; }"} {"target": 0, "idx": 9749, "func": "static void _decode_opc(DisasContext * ctx) { #if 0 fprintf(stderr, \"Translating opcode 0x%04x\\n\", ctx->opcode); #endif switch (ctx->opcode) { case 0x0019: /* div0u */ tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T)); return; case 0x000b: /* rts */ CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0028: /* clrmac */ tcg_gen_movi_i32(cpu_mach, 0); tcg_gen_movi_i32(cpu_macl, 0); return; case 0x0048: /* clrs */ tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S); return; case 0x0008: /* clrt */ gen_clr_t(); return; case 0x0038: /* ldtlb */ CHECK_PRIVILEGED gen_helper_ldtlb(); return; case 0x002b: /* rte */ CHECK_PRIVILEGED CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_sr, cpu_ssr); tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0058: /* sets */ tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S); return; case 0x0018: /* sett */ gen_set_t(); return; case 0xfbfd: /* frchg */ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR); ctx->bstate = BS_STOP; return; case 0xf3fd: /* fschg */ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ); ctx->bstate = BS_STOP; return; case 0x0009: /* nop */ return; case 0x001b: /* sleep */ CHECK_PRIVILEGED gen_helper_sleep(tcg_const_i32(ctx->pc + 2)); return; } switch (ctx->opcode & 0xf000) { case 0x1000: /* mov.l Rm,@(disp,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4); tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x5000: /* mov.l @(disp,Rm),Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4); tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xe000: /* mov #imm,Rn */ tcg_gen_movi_i32(REG(B11_8), B7_0s); return; case 0x9000: /* mov.w @(disp,PC),Rn */ { TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2); tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xd000: /* mov.l @(disp,PC),Rn */ { TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3); tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x7000: /* add #imm,Rn */ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s); return; case 0xa000: /* bra disp */ CHECK_NOT_DELAY_SLOT ctx->delayed_pc = ctx->pc + 4 + 
B11_0s * 2; tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc); ctx->flags |= DELAY_SLOT; return; case 0xb000: /* bsr disp */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2; tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc); ctx->flags |= DELAY_SLOT; return; } switch (ctx->opcode & 0xf00f) { case 0x6003: /* mov Rm,Rn */ tcg_gen_mov_i32(REG(B11_8), REG(B7_4)); return; case 0x2000: /* mov.b Rm,@Rn */ tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx); return; case 0x2001: /* mov.w Rm,@Rn */ tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx); return; case 0x2002: /* mov.l Rm,@Rn */ tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx); return; case 0x6000: /* mov.b @Rm,Rn */ tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx); return; case 0x6001: /* mov.w @Rm,Rn */ tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx); return; case 0x6002: /* mov.l @Rm,Rn */ tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx); return; case 0x2004: /* mov.b Rm,@-Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 1); tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */ tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1); /* modify register status */ tcg_temp_free(addr); } return; case 0x2005: /* mov.w Rm,@-Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 2); tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 2); tcg_temp_free(addr); } return; case 0x2006: /* mov.l Rm,@-Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); } return; case 0x6004: /* mov.b @Rm+,Rn */ tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx); if ( B11_8 != B7_4 ) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1); return; case 0x6005: /* mov.w @Rm+,Rn */ tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx); if ( B11_8 != B7_4 ) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2); return; case 0x6006: /* mov.l @Rm+,Rn */ tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx); if ( B11_8 != B7_4 ) tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); return; case 0x0004: /* mov.b Rm,@(R0,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x0005: /* mov.w Rm,@(R0,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x0006: /* mov.l Rm,@(R0,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x000c: /* mov.b @(R0,Rm),Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x000d: /* mov.w @(R0,Rm),Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x000e: /* mov.l @(R0,Rm),Rn */ { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x6008: /* swap.b Rm,Rn */ { TCGv highw, high, low; highw = tcg_temp_new(); tcg_gen_andi_i32(highw, REG(B7_4), 0xffff0000); high = tcg_temp_new(); tcg_gen_ext8u_i32(high, REG(B7_4)); tcg_gen_shli_i32(high, high, 
8); low = tcg_temp_new(); tcg_gen_shri_i32(low, REG(B7_4), 8); tcg_gen_ext8u_i32(low, low); tcg_gen_or_i32(REG(B11_8), high, low); tcg_gen_or_i32(REG(B11_8), REG(B11_8), highw); tcg_temp_free(low); tcg_temp_free(high); } return; case 0x6009: /* swap.w Rm,Rn */ { TCGv high, low; high = tcg_temp_new(); tcg_gen_ext16u_i32(high, REG(B7_4)); tcg_gen_shli_i32(high, high, 16); low = tcg_temp_new(); tcg_gen_shri_i32(low, REG(B7_4), 16); tcg_gen_ext16u_i32(low, low); tcg_gen_or_i32(REG(B11_8), high, low); tcg_temp_free(low); tcg_temp_free(high); } return; case 0x200d: /* xtrct Rm,Rn */ { TCGv high, low; high = tcg_temp_new(); tcg_gen_ext16u_i32(high, REG(B7_4)); tcg_gen_shli_i32(high, high, 16); low = tcg_temp_new(); tcg_gen_shri_i32(low, REG(B11_8), 16); tcg_gen_ext16u_i32(low, low); tcg_gen_or_i32(REG(B11_8), high, low); tcg_temp_free(low); tcg_temp_free(high); } return; case 0x300c: /* add Rm,Rn */ tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x300e: /* addc Rm,Rn */ gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x300f: /* addv Rm,Rn */ gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x2009: /* and Rm,Rn */ tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x3000: /* cmp/eq Rm,Rn */ gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8)); return; case 0x3003: /* cmp/ge Rm,Rn */ gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8)); return; case 0x3007: /* cmp/gt Rm,Rn */ gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8)); return; case 0x3006: /* cmp/hi Rm,Rn */ gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8)); return; case 0x3002: /* cmp/hs Rm,Rn */ gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8)); return; case 0x200c: /* cmp/str Rm,Rn */ { int label1 = gen_new_label(); int label2 = gen_new_label(); TCGv cmp1 = tcg_temp_local_new(); TCGv cmp2 = tcg_temp_local_new(); tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8)); tcg_gen_andi_i32(cmp2, cmp1, 0xff000000); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff); tcg_gen_brcondi_i32(TCG_COND_EQ, cmp2, 0, label1); tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T); tcg_gen_br(label2); gen_set_label(label1); tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T); gen_set_label(label2); tcg_temp_free(cmp2); tcg_temp_free(cmp1); } return; case 0x2007: /* div0s Rm,Rn */ { gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */ gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */ TCGv val = tcg_temp_new(); tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8)); gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */ tcg_temp_free(val); } return; case 0x3004: /* div1 Rm,Rn */ gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x300d: /* dmuls.l Rm,Rn */ { TCGv_i64 tmp1 = tcg_temp_new_i64(); TCGv_i64 tmp2 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(tmp1, REG(B7_4)); tcg_gen_ext_i32_i64(tmp2, REG(B11_8)); tcg_gen_mul_i64(tmp1, tmp1, tmp2); tcg_gen_trunc_i64_i32(cpu_macl, tmp1); tcg_gen_shri_i64(tmp1, tmp1, 32); tcg_gen_trunc_i64_i32(cpu_mach, tmp1); tcg_temp_free_i64(tmp2); tcg_temp_free_i64(tmp1); } return; case 0x3005: /* dmulu.l Rm,Rn */ { TCGv_i64 tmp1 = tcg_temp_new_i64(); TCGv_i64 tmp2 = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(tmp1, REG(B7_4)); tcg_gen_extu_i32_i64(tmp2, REG(B11_8)); tcg_gen_mul_i64(tmp1, tmp1, tmp2); tcg_gen_trunc_i64_i32(cpu_macl, tmp1); tcg_gen_shri_i64(tmp1, tmp1, 32); tcg_gen_trunc_i64_i32(cpu_mach, tmp1); 
tcg_temp_free_i64(tmp2); tcg_temp_free_i64(tmp1); } return; case 0x600e: /* exts.b Rm,Rn */ tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4)); return; case 0x600f: /* exts.w Rm,Rn */ tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4)); return; case 0x600c: /* extu.b Rm,Rn */ tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4)); return; case 0x600d: /* extu.w Rm,Rn */ tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4)); return; case 0x000f: /* mac.l @Rm+,@Rn+ */ { TCGv arg0, arg1; arg0 = tcg_temp_new(); tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx); arg1 = tcg_temp_new(); tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx); gen_helper_macl(arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); } return; case 0x400f: /* mac.w @Rm+,@Rn+ */ { TCGv arg0, arg1; arg0 = tcg_temp_new(); tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx); arg1 = tcg_temp_new(); tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx); gen_helper_macw(arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2); } return; case 0x0007: /* mul.l Rm,Rn */ tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8)); return; case 0x200f: /* muls.w Rm,Rn */ { TCGv arg0, arg1; arg0 = tcg_temp_new(); tcg_gen_ext16s_i32(arg0, REG(B7_4)); arg1 = tcg_temp_new(); tcg_gen_ext16s_i32(arg1, REG(B11_8)); tcg_gen_mul_i32(cpu_macl, arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); } return; case 0x200e: /* mulu.w Rm,Rn */ { TCGv arg0, arg1; arg0 = tcg_temp_new(); tcg_gen_ext16u_i32(arg0, REG(B7_4)); arg1 = tcg_temp_new(); tcg_gen_ext16u_i32(arg1, REG(B11_8)); tcg_gen_mul_i32(cpu_macl, arg0, arg1); tcg_temp_free(arg1); tcg_temp_free(arg0); } return; case 0x600b: /* neg Rm,Rn */ tcg_gen_neg_i32(REG(B11_8), REG(B7_4)); return; case 0x600a: /* negc Rm,Rn */ gen_helper_negc(REG(B11_8), REG(B7_4)); return; case 0x6007: /* not Rm,Rn */ tcg_gen_not_i32(REG(B11_8), REG(B7_4)); return; case 0x200b: /* or Rm,Rn */ tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x400c: /* shad Rm,Rn */ { int label1 = gen_new_label(); int label2 = gen_new_label(); int label3 = gen_new_label(); int label4 = gen_new_label(); TCGv shift = tcg_temp_local_new(); tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1); /* Rm positive, shift to the left */ tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift); tcg_gen_br(label4); /* Rm negative, shift to the right */ gen_set_label(label1); tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2); tcg_gen_not_i32(shift, REG(B7_4)); tcg_gen_andi_i32(shift, shift, 0x1f); tcg_gen_addi_i32(shift, shift, 1); tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift); tcg_gen_br(label4); /* Rm = -32 */ gen_set_label(label2); tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3); tcg_gen_movi_i32(REG(B11_8), 0); tcg_gen_br(label4); gen_set_label(label3); tcg_gen_movi_i32(REG(B11_8), 0xffffffff); gen_set_label(label4); tcg_temp_free(shift); } return; case 0x400d: /* shld Rm,Rn */ { int label1 = gen_new_label(); int label2 = gen_new_label(); int label3 = gen_new_label(); TCGv shift = tcg_temp_local_new(); tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1); /* Rm positive, shift to the left */ tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift); tcg_gen_br(label3); /* Rm negative, shift to the right */ gen_set_label(label1); tcg_gen_andi_i32(shift, REG(B7_4), 0x1f); tcg_gen_brcondi_i32(TCG_COND_EQ, 
shift, 0, label2); tcg_gen_not_i32(shift, REG(B7_4)); tcg_gen_andi_i32(shift, shift, 0x1f); tcg_gen_addi_i32(shift, shift, 1); tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift); tcg_gen_br(label3); /* Rm = -32 */ gen_set_label(label2); tcg_gen_movi_i32(REG(B11_8), 0); gen_set_label(label3); tcg_temp_free(shift); } return; case 0x3008: /* sub Rm,Rn */ tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0x300a: /* subc Rm,Rn */ gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x300b: /* subv Rm,Rn */ gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8)); return; case 0x2008: /* tst Rm,Rn */ { TCGv val = tcg_temp_new(); tcg_gen_and_i32(val, REG(B7_4), REG(B11_8)); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_temp_free(val); } return; case 0x200a: /* xor Rm,Rn */ tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4)); return; case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_SZ) { TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(fp, XREG(B7_4)); gen_store_fpr64(fp, XREG(B11_8)); tcg_temp_free_i64(fp); } else { tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); } return; case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_SZ) { TCGv addr_hi = tcg_temp_new(); int fr = XREG(B7_4); tcg_gen_addi_i32(addr_hi, REG(B11_8), 4); tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx); tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx); tcg_temp_free(addr_hi); } else { tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx); } return; case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_SZ) { TCGv addr_hi = tcg_temp_new(); int fr = XREG(B11_8); tcg_gen_addi_i32(addr_hi, REG(B7_4), 4); tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx); tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx); tcg_temp_free(addr_hi); } else { tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx); } return; case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_SZ) { TCGv addr_hi = tcg_temp_new(); int fr = XREG(B11_8); tcg_gen_addi_i32(addr_hi, REG(B7_4), 4); tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx); tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8); tcg_temp_free(addr_hi); } else { tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx); tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4); } return; case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_SZ) { TCGv addr = tcg_temp_new_i32(); int fr = XREG(B7_4); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx); tcg_gen_subi_i32(addr, REG(B11_8), 8); tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx); tcg_gen_mov_i32(REG(B11_8), addr); tcg_temp_free(addr); } else { TCGv addr; addr = tcg_temp_new_i32(); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx); tcg_temp_free(addr); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); } return; case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */ CHECK_FPU_ENABLED { TCGv addr = tcg_temp_new_i32(); tcg_gen_add_i32(addr, REG(B7_4), REG(0)); if (ctx->fpscr & FPSCR_SZ) { int fr = XREG(B11_8); tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx); tcg_gen_addi_i32(addr, addr, 4); tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx); } else { tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, 
ctx->memidx); } tcg_temp_free(addr); } return; case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */ CHECK_FPU_ENABLED { TCGv addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(B11_8), REG(0)); if (ctx->fpscr & FPSCR_SZ) { int fr = XREG(B7_4); tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx); tcg_gen_addi_i32(addr, addr, 4); tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx); } else { tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx); } tcg_temp_free(addr); } return; case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */ case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ { CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { TCGv_i64 fp0, fp1; if (ctx->opcode & 0x0110) break; /* illegal instruction */ fp0 = tcg_temp_new_i64(); fp1 = tcg_temp_new_i64(); gen_load_fpr64(fp0, DREG(B11_8)); gen_load_fpr64(fp1, DREG(B7_4)); switch (ctx->opcode & 0xf00f) { case 0xf000: /* fadd Rm,Rn */ gen_helper_fadd_DT(fp0, fp0, fp1); break; case 0xf001: /* fsub Rm,Rn */ gen_helper_fsub_DT(fp0, fp0, fp1); break; case 0xf002: /* fmul Rm,Rn */ gen_helper_fmul_DT(fp0, fp0, fp1); break; case 0xf003: /* fdiv Rm,Rn */ gen_helper_fdiv_DT(fp0, fp0, fp1); break; case 0xf004: /* fcmp/eq Rm,Rn */ gen_helper_fcmp_eq_DT(fp0, fp1); return; case 0xf005: /* fcmp/gt Rm,Rn */ gen_helper_fcmp_gt_DT(fp0, fp1); return; } gen_store_fpr64(fp0, DREG(B11_8)); tcg_temp_free_i64(fp0); tcg_temp_free_i64(fp1); } else { switch (ctx->opcode & 0xf00f) { case 0xf000: /* fadd Rm,Rn */ gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); break; case 0xf001: /* fsub Rm,Rn */ gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); break; case 0xf002: /* fmul Rm,Rn */ gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); break; case 0xf003: /* fdiv Rm,Rn */ gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); break; case 0xf004: /* fcmp/eq Rm,Rn */ gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); return; case 0xf005: /* fcmp/gt Rm,Rn */ gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]); return; } } } return; case 0xf00e: /* fmac FR0,RM,Rn */ { CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { break; /* illegal instruction */ } else { gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]); return; } } } switch (ctx->opcode & 0xff00) { case 0xc900: /* and #imm,R0 */ tcg_gen_andi_i32(REG(0), REG(0), B7_0); return; case 0xcd00: /* and.b #imm,@(R0,GBR) */ { TCGv addr, val; addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(0), cpu_gbr); val = tcg_temp_new(); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); tcg_gen_andi_i32(val, val, B7_0); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; case 0x8b00: /* bf label */ CHECK_NOT_DELAY_SLOT gen_conditional_jump(ctx, ctx->pc + 2, ctx->pc + 4 + B7_0s * 2); ctx->bstate = BS_BRANCH; return; case 0x8f00: /* bf/s label */ CHECK_NOT_DELAY_SLOT gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0); ctx->flags |= DELAY_SLOT_CONDITIONAL; return; case 0x8900: /* bt label */ CHECK_NOT_DELAY_SLOT 
gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, ctx->pc + 2); ctx->bstate = BS_BRANCH; return; case 0x8d00: /* bt/s label */ CHECK_NOT_DELAY_SLOT gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1); ctx->flags |= DELAY_SLOT_CONDITIONAL; return; case 0x8800: /* cmp/eq #imm,R0 */ gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s); return; case 0xc400: /* mov.b @(disp,GBR),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0); tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc500: /* mov.w @(disp,GBR),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2); tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc600: /* mov.l @(disp,GBR),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4); tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc000: /* mov.b R0,@(disp,GBR) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0); tcg_gen_qemu_st8(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc100: /* mov.w R0,@(disp,GBR) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2); tcg_gen_qemu_st16(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc200: /* mov.l R0,@(disp,GBR) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4); tcg_gen_qemu_st32(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8000: /* mov.b R0,@(disp,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0); tcg_gen_qemu_st8(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8100: /* mov.w R0,@(disp,Rn) */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); tcg_gen_qemu_st16(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8400: /* mov.b @(disp,Rn),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0); tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0x8500: /* mov.w @(disp,Rn),R0 */ { TCGv addr = tcg_temp_new(); tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2); tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx); tcg_temp_free(addr); } return; case 0xc700: /* mova @(disp,PC),R0 */ tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3); return; case 0xcb00: /* or #imm,R0 */ tcg_gen_ori_i32(REG(0), REG(0), B7_0); return; case 0xcf00: /* or.b #imm,@(R0,GBR) */ { TCGv addr, val; addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(0), cpu_gbr); val = tcg_temp_new(); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); tcg_gen_ori_i32(val, val, B7_0); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; case 0xc300: /* trapa #imm */ { TCGv imm; CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pc, ctx->pc); imm = tcg_const_i32(B7_0); gen_helper_trapa(imm); tcg_temp_free(imm); ctx->bstate = BS_BRANCH; } return; case 0xc800: /* tst #imm,R0 */ { TCGv val = tcg_temp_new(); tcg_gen_andi_i32(val, REG(0), B7_0); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_temp_free(val); } return; case 0xcc00: /* tst.b #imm,@(R0,GBR) */ { TCGv val = tcg_temp_new(); tcg_gen_add_i32(val, REG(0), cpu_gbr); tcg_gen_qemu_ld8u(val, val, ctx->memidx); tcg_gen_andi_i32(val, val, B7_0); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_temp_free(val); } return; case 0xca00: /* xor #imm,R0 */ tcg_gen_xori_i32(REG(0), REG(0), B7_0); return; case 0xce00: /* xor.b #imm,@(R0,GBR) */ { TCGv addr, val; addr = tcg_temp_new(); tcg_gen_add_i32(addr, REG(0), cpu_gbr); val = 
tcg_temp_new(); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); tcg_gen_xori_i32(val, val, B7_0); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; } switch (ctx->opcode & 0xf08f) { case 0x408e: /* ldc Rm,Rn_BANK */ CHECK_PRIVILEGED tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8)); return; case 0x4087: /* ldc.l @Rm+,Rn_BANK */ CHECK_PRIVILEGED tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); return; case 0x0082: /* stc Rm_BANK,Rn */ CHECK_PRIVILEGED tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4)); return; case 0x4083: /* stc.l Rm_BANK,@-Rn */ CHECK_PRIVILEGED { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx); tcg_temp_free(addr); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); } return; } switch (ctx->opcode & 0xf0ff) { case 0x0023: /* braf Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x0003: /* bsrf Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x4015: /* cmp/pl Rn */ gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0); return; case 0x4011: /* cmp/pz Rn */ gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0); return; case 0x4010: /* dt Rn */ tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1); gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0); return; case 0x402b: /* jmp @Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x400b: /* jsr @Rn */ CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pr, ctx->pc + 4); tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8)); ctx->flags |= DELAY_SLOT; ctx->delayed_pc = (uint32_t) - 1; return; case 0x400e: /* ldc Rm,SR */ CHECK_PRIVILEGED tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3); ctx->bstate = BS_STOP; return; case 0x4007: /* ldc.l @Rm+,SR */ CHECK_PRIVILEGED { TCGv val = tcg_temp_new(); tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx); tcg_gen_andi_i32(cpu_sr, val, 0x700083f3); tcg_temp_free(val); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); ctx->bstate = BS_STOP; } return; case 0x0002: /* stc SR,Rn */ CHECK_PRIVILEGED tcg_gen_mov_i32(REG(B11_8), cpu_sr); return; case 0x4003: /* stc SR,@-Rn */ CHECK_PRIVILEGED { TCGv addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx); tcg_temp_free(addr); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); } return; #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \\ case ldnum: \\ prechk \\ tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \\ return; \\ case ldpnum: \\ prechk \\ tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \\ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \\ return; \\ case stnum: \\ prechk \\ tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \\ return; \\ case stpnum: \\ prechk \\ { \\ TCGv addr = tcg_temp_new(); \\ tcg_gen_subi_i32(addr, REG(B11_8), 4); \\ tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \\ tcg_temp_free(addr); \\ tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); \\ } \\ return; LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {}) LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED) LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED) LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED) LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED) LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, 
{}) LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {}) LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {}) LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED}) case 0x406a: /* lds Rm,FPSCR */ CHECK_FPU_ENABLED gen_helper_ld_fpscr(REG(B11_8)); ctx->bstate = BS_STOP; return; case 0x4066: /* lds.l @Rm+,FPSCR */ CHECK_FPU_ENABLED { TCGv addr = tcg_temp_new(); tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); gen_helper_ld_fpscr(addr); tcg_temp_free(addr); ctx->bstate = BS_STOP; } return; case 0x006a: /* sts FPSCR,Rn */ CHECK_FPU_ENABLED tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff); return; case 0x4062: /* sts FPSCR,@-Rn */ CHECK_FPU_ENABLED { TCGv addr, val; val = tcg_temp_new(); tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff); addr = tcg_temp_new(); tcg_gen_subi_i32(addr, REG(B11_8), 4); tcg_gen_qemu_st32(val, addr, ctx->memidx); tcg_temp_free(addr); tcg_temp_free(val); tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 4); } return; case 0x00c3: /* movca.l R0,@Rm */ tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx); return; case 0x40a9: /* MOVUA.L @Rm,R0 (Rm) -> R0 Load non-boundary-aligned data */ tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx); return; case 0x40e9: /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm Load non-boundary-aligned data */ tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx); tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); return; case 0x0029: /* movt Rn */ tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T); return; case 0x0073: /* MOVCO.L LDST -> T If (T == 1) R0 -> (Rn) 0 -> LDST */ if (ctx->features & SH_FEATURE_SH4A) { int label = gen_new_label(); gen_clr_t(); tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst); tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label); tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx); gen_set_label(label); tcg_gen_movi_i32(cpu_ldst, 0); return; } else break; case 0x0063: /* MOVLI.L @Rm,R0 1 -> LDST (Rm) -> R0 When interrupt/exception occurred 0 -> LDST */ if (ctx->features & SH_FEATURE_SH4A) { tcg_gen_movi_i32(cpu_ldst, 0); tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx); tcg_gen_movi_i32(cpu_ldst, 1); return; } else break; case 0x0093: /* ocbi @Rn */ { TCGv dummy = tcg_temp_new(); tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx); tcg_temp_free(dummy); } return; case 0x00a3: /* ocbp @Rn */ { TCGv dummy = tcg_temp_new(); tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx); tcg_temp_free(dummy); } return; case 0x00b3: /* ocbwb @Rn */ { TCGv dummy = tcg_temp_new(); tcg_gen_qemu_ld32s(dummy, REG(B11_8), ctx->memidx); tcg_temp_free(dummy); } return; case 0x0083: /* pref @Rn */ return; case 0x00d3: /* prefi @Rn */ if (ctx->features & SH_FEATURE_SH4A) return; else break; case 0x00e3: /* icbi @Rn */ if (ctx->features & SH_FEATURE_SH4A) return; else break; case 0x00ab: /* synco */ if (ctx->features & SH_FEATURE_SH4A) return; else break; case 0x4024: /* rotcl Rn */ { TCGv tmp = tcg_temp_new(); tcg_gen_mov_i32(tmp, cpu_sr); gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31); tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 0, tmp, 0); tcg_temp_free(tmp); } return; case 0x4025: /* rotcr Rn */ { TCGv tmp = tcg_temp_new(); tcg_gen_mov_i32(tmp, cpu_sr); gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 31, tmp, 0); tcg_temp_free(tmp); } return; case 0x4004: /* rotl Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31); tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 0, cpu_sr, 0); return; case 0x4005: /* rotr Rn */ 
gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1); gen_copy_bit_i32(REG(B11_8), 31, cpu_sr, 0); return; case 0x4000: /* shll Rn */ case 0x4020: /* shal Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31); tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1); return; case 0x4021: /* shar Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1); return; case 0x4001: /* shlr Rn */ gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0); tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1); return; case 0x4008: /* shll2 Rn */ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2); return; case 0x4018: /* shll8 Rn */ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8); return; case 0x4028: /* shll16 Rn */ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16); return; case 0x4009: /* shlr2 Rn */ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2); return; case 0x4019: /* shlr8 Rn */ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8); return; case 0x4029: /* shlr16 Rn */ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16); return; case 0x401b: /* tas.b @Rn */ { TCGv addr, val; addr = tcg_temp_local_new(); tcg_gen_mov_i32(addr, REG(B11_8)); val = tcg_temp_local_new(); tcg_gen_qemu_ld8u(val, addr, ctx->memidx); gen_cmp_imm(TCG_COND_EQ, val, 0); tcg_gen_ori_i32(val, val, 0x80); tcg_gen_qemu_st8(val, addr, ctx->memidx); tcg_temp_free(val); tcg_temp_free(addr); } return; case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */ CHECK_FPU_ENABLED tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul); return; case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */ CHECK_FPU_ENABLED tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]); return; case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { TCGv_i64 fp; if (ctx->opcode & 0x0100) break; /* illegal instruction */ fp = tcg_temp_new_i64(); gen_helper_float_DT(fp, cpu_fpul); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free_i64(fp); } else { gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul); } return; case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { TCGv_i64 fp; if (ctx->opcode & 0x0100) break; /* illegal instruction */ fp = tcg_temp_new_i64(); gen_load_fpr64(fp, DREG(B11_8)); gen_helper_ftrc_DT(cpu_fpul, fp); tcg_temp_free_i64(fp); } else { gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]); } return; case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */ CHECK_FPU_ENABLED { gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]); } return; case 0xf05d: /* fabs FRn/DRn */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { if (ctx->opcode & 0x0100) break; /* illegal instruction */ TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(fp, DREG(B11_8)); gen_helper_fabs_DT(fp, fp); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free_i64(fp); } else { gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]); } return; case 0xf06d: /* fsqrt FRn */ CHECK_FPU_ENABLED if (ctx->fpscr & FPSCR_PR) { if (ctx->opcode & 0x0100) break; /* illegal instruction */ TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(fp, DREG(B11_8)); gen_helper_fsqrt_DT(fp, fp); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free_i64(fp); } else { gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]); } return; case 0xf07d: /* fsrra FRn */ CHECK_FPU_ENABLED break; case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */ CHECK_FPU_ENABLED if (!(ctx->fpscr & FPSCR_PR)) { tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0); } return; case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */ 
CHECK_FPU_ENABLED if (!(ctx->fpscr & FPSCR_PR)) { tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000); } return; case 0xf0ad: /* fcnvsd FPUL,DRn */ CHECK_FPU_ENABLED { TCGv_i64 fp = tcg_temp_new_i64(); gen_helper_fcnvsd_FT_DT(fp, cpu_fpul); gen_store_fpr64(fp, DREG(B11_8)); tcg_temp_free_i64(fp); } return; case 0xf0bd: /* fcnvds DRn,FPUL */ CHECK_FPU_ENABLED { TCGv_i64 fp = tcg_temp_new_i64(); gen_load_fpr64(fp, DREG(B11_8)); gen_helper_fcnvds_DT_FT(cpu_fpul, fp); tcg_temp_free_i64(fp); } return; } #if 0 fprintf(stderr, \"unknown instruction 0x%04x at pc 0x%08x\\n\", ctx->opcode, ctx->pc); fflush(stderr); #endif gen_helper_raise_illegal_instruction(); ctx->bstate = BS_EXCP; }"} {"target": 0, "idx": 9756, "func": "static void *spapr_create_fdt_skel(const char *cpu_model, target_phys_addr_t initrd_base, target_phys_addr_t initrd_size, const char *boot_device, const char *kernel_cmdline, long hash_shift) { void *fdt; CPUState *env; uint64_t mem_reg_property[] = { 0, cpu_to_be64(ram_size) }; uint32_t start_prop = cpu_to_be32(initrd_base); uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size); uint32_t pft_size_prop[] = {0, cpu_to_be32(hash_shift)}; char hypertas_prop[] = \"hcall-pft\\0hcall-term\\0hcall-dabr\\0hcall-interrupt\" \"\\0hcall-tce\\0hcall-vio\\0hcall-splpar\"; uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(smp_cpus)}; int i; char *modelname; #define _FDT(exp) \\ do { \\ int ret = (exp); \\ if (ret < 0) { \\ fprintf(stderr, \"qemu: error creating device tree: %s: %s\\n\", \\ #exp, fdt_strerror(ret)); \\ exit(1); \\ } \\ } while (0) fdt = g_malloc0(FDT_MAX_SIZE); _FDT((fdt_create(fdt, FDT_MAX_SIZE))); _FDT((fdt_finish_reservemap(fdt))); /* Root node */ _FDT((fdt_begin_node(fdt, \"\"))); _FDT((fdt_property_string(fdt, \"device_type\", \"chrp\"))); _FDT((fdt_property_string(fdt, \"model\", \"IBM pSeries (emulated by qemu)\"))); _FDT((fdt_property_cell(fdt, \"#address-cells\", 0x2))); _FDT((fdt_property_cell(fdt, \"#size-cells\", 0x2))); /* /chosen */ _FDT((fdt_begin_node(fdt, \"chosen\"))); _FDT((fdt_property_string(fdt, \"bootargs\", kernel_cmdline))); _FDT((fdt_property(fdt, \"linux,initrd-start\", &start_prop, sizeof(start_prop)))); _FDT((fdt_property(fdt, \"linux,initrd-end\", &end_prop, sizeof(end_prop)))); _FDT((fdt_property_string(fdt, \"qemu,boot-device\", boot_device))); _FDT((fdt_end_node(fdt))); /* memory node */ _FDT((fdt_begin_node(fdt, \"memory@0\"))); _FDT((fdt_property_string(fdt, \"device_type\", \"memory\"))); _FDT((fdt_property(fdt, \"reg\", mem_reg_property, sizeof(mem_reg_property)))); _FDT((fdt_end_node(fdt))); /* cpus */ _FDT((fdt_begin_node(fdt, \"cpus\"))); _FDT((fdt_property_cell(fdt, \"#address-cells\", 0x1))); _FDT((fdt_property_cell(fdt, \"#size-cells\", 0x0))); modelname = g_strdup(cpu_model); for (i = 0; i < strlen(modelname); i++) { modelname[i] = toupper(modelname[i]); } for (env = first_cpu; env != NULL; env = env->next_cpu) { int index = env->cpu_index; uint32_t gserver_prop[] = {cpu_to_be32(index), 0}; /* HACK! */ char *nodename; uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 0xffffffff, 0xffffffff}; uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() : TIMEBASE_FREQ; uint32_t cpufreq = kvm_enabled() ? 
kvmppc_get_clockfreq() : 1000000000; if (asprintf(&nodename, \"%s@%x\", modelname, index) < 0) { fprintf(stderr, \"Allocation failure\\n\"); exit(1); } _FDT((fdt_begin_node(fdt, nodename))); free(nodename); _FDT((fdt_property_cell(fdt, \"reg\", index))); _FDT((fdt_property_string(fdt, \"device_type\", \"cpu\"))); _FDT((fdt_property_cell(fdt, \"cpu-version\", env->spr[SPR_PVR]))); _FDT((fdt_property_cell(fdt, \"dcache-block-size\", env->dcache_line_size))); _FDT((fdt_property_cell(fdt, \"icache-block-size\", env->icache_line_size))); _FDT((fdt_property_cell(fdt, \"timebase-frequency\", tbfreq))); _FDT((fdt_property_cell(fdt, \"clock-frequency\", cpufreq))); _FDT((fdt_property_cell(fdt, \"ibm,slb-size\", env->slb_nr))); _FDT((fdt_property(fdt, \"ibm,pft-size\", pft_size_prop, sizeof(pft_size_prop)))); _FDT((fdt_property_string(fdt, \"status\", \"okay\"))); _FDT((fdt_property(fdt, \"64-bit\", NULL, 0))); _FDT((fdt_property_cell(fdt, \"ibm,ppc-interrupt-server#s\", index))); _FDT((fdt_property(fdt, \"ibm,ppc-interrupt-gserver#s\", gserver_prop, sizeof(gserver_prop)))); if (env->mmu_model & POWERPC_MMU_1TSEG) { _FDT((fdt_property(fdt, \"ibm,processor-segment-sizes\", segs, sizeof(segs)))); } _FDT((fdt_end_node(fdt))); } g_free(modelname); _FDT((fdt_end_node(fdt))); /* RTAS */ _FDT((fdt_begin_node(fdt, \"rtas\"))); _FDT((fdt_property(fdt, \"ibm,hypertas-functions\", hypertas_prop, sizeof(hypertas_prop)))); _FDT((fdt_end_node(fdt))); /* interrupt controller */ _FDT((fdt_begin_node(fdt, \"interrupt-controller@0\"))); _FDT((fdt_property_string(fdt, \"device_type\", \"PowerPC-External-Interrupt-Presentation\"))); _FDT((fdt_property_string(fdt, \"compatible\", \"IBM,ppc-xicp\"))); _FDT((fdt_property_cell(fdt, \"reg\", 0))); _FDT((fdt_property(fdt, \"interrupt-controller\", NULL, 0))); _FDT((fdt_property(fdt, \"ibm,interrupt-server-ranges\", interrupt_server_ranges_prop, sizeof(interrupt_server_ranges_prop)))); _FDT((fdt_property_cell(fdt, \"#interrupt-cells\", 2))); _FDT((fdt_property_cell(fdt, \"linux,phandle\", PHANDLE_XICP))); _FDT((fdt_property_cell(fdt, \"phandle\", PHANDLE_XICP))); _FDT((fdt_end_node(fdt))); /* vdevice */ _FDT((fdt_begin_node(fdt, \"vdevice\"))); _FDT((fdt_property_string(fdt, \"device_type\", \"vdevice\"))); _FDT((fdt_property_string(fdt, \"compatible\", \"IBM,vdevice\"))); _FDT((fdt_property_cell(fdt, \"#address-cells\", 0x1))); _FDT((fdt_property_cell(fdt, \"#size-cells\", 0x0))); _FDT((fdt_property_cell(fdt, \"#interrupt-cells\", 0x2))); _FDT((fdt_property(fdt, \"interrupt-controller\", NULL, 0))); _FDT((fdt_end_node(fdt))); _FDT((fdt_end_node(fdt))); /* close root node */ _FDT((fdt_finish(fdt))); return fdt; }"} {"target": 0, "idx": 9759, "func": "static int decode_extradata_ps(const uint8_t *data, int size, H264ParamSets *ps, int is_avc, void *logctx) { H2645Packet pkt = { 0 }; int i, ret = 0; ret = ff_h2645_packet_split(&pkt, data, size, logctx, is_avc, 2, AV_CODEC_ID_H264); if (ret < 0) { ret = 0; goto fail; } for (i = 0; i < pkt.nb_nals; i++) { H2645NAL *nal = &pkt.nals[i]; switch (nal->type) { case H264_NAL_SPS: ret = ff_h264_decode_seq_parameter_set(&nal->gb, logctx, ps, 0); if (ret < 0) goto fail; break; case H264_NAL_PPS: ret = ff_h264_decode_picture_parameter_set(&nal->gb, logctx, ps, nal->size_bits); if (ret < 0) goto fail; break; default: av_log(logctx, AV_LOG_VERBOSE, \"Ignoring NAL type %d in extradata\\n\", nal->type); break; } } fail: ff_h2645_packet_uninit(&pkt); return ret; }"} {"target": 1, "idx": 9765, "func": "int 
qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset, unsigned int *bytes, uint64_t *cluster_offset) { BDRVQcow2State *s = bs->opaque; unsigned int l2_index; uint64_t l1_index, l2_offset, *l2_table; int l1_bits, c; unsigned int offset_in_cluster, nb_clusters; uint64_t bytes_available, bytes_needed; int ret; offset_in_cluster = offset_into_cluster(s, offset); bytes_needed = (uint64_t) *bytes + offset_in_cluster; l1_bits = s->l2_bits + s->cluster_bits; /* compute how many bytes there are between the start of the cluster * containing offset and the end of the l1 entry */ bytes_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1)) + offset_in_cluster; if (bytes_needed > bytes_available) { bytes_needed = bytes_available; } assert(bytes_needed <= INT_MAX); *cluster_offset = 0; /* seek to the l2 offset in the l1 table */ l1_index = offset >> l1_bits; if (l1_index >= s->l1_size) { ret = QCOW2_CLUSTER_UNALLOCATED; goto out; } l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK; if (!l2_offset) { ret = QCOW2_CLUSTER_UNALLOCATED; goto out; } if (offset_into_cluster(s, l2_offset)) { qcow2_signal_corruption(bs, true, -1, -1, \"L2 table offset %#\" PRIx64 \" unaligned (L1 index: %#\" PRIx64 \")\", l2_offset, l1_index); return -EIO; } /* load the l2 table in memory */ ret = l2_load(bs, l2_offset, &l2_table); if (ret < 0) { return ret; } /* find the cluster offset for the given disk offset */ l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1); *cluster_offset = be64_to_cpu(l2_table[l2_index]); /* nb_needed <= INT_MAX, thus nb_clusters <= INT_MAX, too */ nb_clusters = size_to_clusters(s, bytes_needed); ret = qcow2_get_cluster_type(*cluster_offset); switch (ret) { case QCOW2_CLUSTER_COMPRESSED: /* Compressed clusters can only be processed one by one */ c = 1; *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK; break; case QCOW2_CLUSTER_ZERO: if (s->qcow_version < 3) { qcow2_signal_corruption(bs, true, -1, -1, \"Zero cluster entry found\" \" in pre-v3 image (L2 offset: %#\" PRIx64 \", L2 index: %#x)\", l2_offset, l2_index); ret = -EIO; goto fail; } c = count_contiguous_clusters_by_type(nb_clusters, &l2_table[l2_index], QCOW2_CLUSTER_ZERO); *cluster_offset = 0; break; case QCOW2_CLUSTER_UNALLOCATED: /* how many empty clusters ? */ c = count_contiguous_clusters_by_type(nb_clusters, &l2_table[l2_index], QCOW2_CLUSTER_UNALLOCATED); *cluster_offset = 0; break; case QCOW2_CLUSTER_NORMAL: /* how many allocated clusters ? 
*/ c = count_contiguous_clusters(nb_clusters, s->cluster_size, &l2_table[l2_index], QCOW_OFLAG_ZERO); *cluster_offset &= L2E_OFFSET_MASK; if (offset_into_cluster(s, *cluster_offset)) { qcow2_signal_corruption(bs, true, -1, -1, \"Data cluster offset %#\" PRIx64 \" unaligned (L2 offset: %#\" PRIx64 \", L2 index: %#x)\", *cluster_offset, l2_offset, l2_index); ret = -EIO; goto fail; } break; default: abort(); } qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); bytes_available = (c * s->cluster_size); out: if (bytes_available > bytes_needed) { bytes_available = bytes_needed; } *bytes = bytes_available - offset_in_cluster; return ret; fail: qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table); return ret; }"} {"target": 1, "idx": 9774, "func": "int ff_qsv_decode_init(AVCodecContext *avctx, QSVContext *q, mfxSession session) { mfxVideoParam param = { { 0 } }; int ret; q->async_fifo = av_fifo_alloc((1 + q->async_depth) * (sizeof(mfxSyncPoint) + sizeof(QSVFrame*))); if (!q->async_fifo) return AVERROR(ENOMEM); ret = qsv_init_session(avctx, q, session); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Error initializing an MFX session\\n\"); return ret; } ret = ff_qsv_codec_id_to_mfx(avctx->codec_id); if (ret < 0) return ret; param.mfx.CodecId = ret; param.mfx.CodecProfile = avctx->profile; param.mfx.CodecLevel = avctx->level; param.mfx.FrameInfo.BitDepthLuma = 8; param.mfx.FrameInfo.BitDepthChroma = 8; param.mfx.FrameInfo.Shift = 0; param.mfx.FrameInfo.FourCC = MFX_FOURCC_NV12; param.mfx.FrameInfo.Width = avctx->coded_width; param.mfx.FrameInfo.Height = avctx->coded_height; param.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420; param.IOPattern = q->iopattern; param.AsyncDepth = q->async_depth; param.ExtParam = q->ext_buffers; param.NumExtParam = q->nb_ext_buffers; ret = MFXVideoDECODE_Init(q->session, &param); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Error initializing the MFX video decoder\\n\"); return ff_qsv_error(ret); } return 0; }"} {"target": 1, "idx": 9775, "func": "static int kvm_init(MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); static const char upgrade_note[] = \"Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\\n\" \"(see http://sourceforge.net/projects/kvm).\\n\"; struct { const char *name; int num; } num_cpus[] = { { \"SMP\", smp_cpus }, { \"hotpluggable\", max_cpus }, { NULL, } }, *nc = num_cpus; int soft_vcpus_limit, hard_vcpus_limit; KVMState *s; const KVMCapabilityInfo *missing_cap; int ret; int i, type = 0; const char *kvm_type; s = KVM_STATE(ms->accelerator); /* * On systems where the kernel can support different base page * sizes, host page size may be different from TARGET_PAGE_SIZE, * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum * page size for the system though. 
*/ assert(TARGET_PAGE_SIZE <= getpagesize()); page_size_init(); s->sigmask_len = 8; #ifdef KVM_CAP_SET_GUEST_DEBUG QTAILQ_INIT(&s->kvm_sw_breakpoints); #endif s->vmfd = -1; s->fd = qemu_open(\"/dev/kvm\", O_RDWR); if (s->fd == -1) { fprintf(stderr, \"Could not access KVM kernel module: %m\\n\"); ret = -errno; goto err; } ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); if (ret < KVM_API_VERSION) { if (ret >= 0) { ret = -EINVAL; } fprintf(stderr, \"kvm version too old\\n\"); goto err; } if (ret > KVM_API_VERSION) { ret = -EINVAL; fprintf(stderr, \"kvm version not supported\\n\"); goto err; } s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); /* If unspecified, use the default value */ if (!s->nr_slots) { s->nr_slots = 32; } s->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot)); for (i = 0; i < s->nr_slots; i++) { s->slots[i].slot = i; } /* check the vcpu limits */ soft_vcpus_limit = kvm_recommended_vcpus(s); hard_vcpus_limit = kvm_max_vcpus(s); while (nc->name) { if (nc->num > soft_vcpus_limit) { fprintf(stderr, \"Warning: Number of %s cpus requested (%d) exceeds \" \"the recommended cpus supported by KVM (%d)\\n\", nc->name, nc->num, soft_vcpus_limit); if (nc->num > hard_vcpus_limit) { fprintf(stderr, \"Number of %s cpus requested (%d) exceeds \" \"the maximum cpus supported by KVM (%d)\\n\", nc->name, nc->num, hard_vcpus_limit); exit(1); } } nc++; } kvm_type = qemu_opt_get(qemu_get_machine_opts(), \"kvm-type\"); if (mc->kvm_type) { type = mc->kvm_type(kvm_type); } else if (kvm_type) { ret = -EINVAL; fprintf(stderr, \"Invalid argument kvm-type=%s\\n\", kvm_type); goto err; } do { ret = kvm_ioctl(s, KVM_CREATE_VM, type); } while (ret == -EINTR); if (ret < 0) { fprintf(stderr, \"ioctl(KVM_CREATE_VM) failed: %d %s\\n\", -ret, strerror(-ret)); #ifdef TARGET_S390X fprintf(stderr, \"Please add the 'switch_amode' kernel parameter to \" \"your host kernel command line\\n\"); #endif goto err; } s->vmfd = ret; missing_cap = kvm_check_extension_list(s, kvm_required_capabilites); if (!missing_cap) { missing_cap = kvm_check_extension_list(s, kvm_arch_required_capabilities); } if (missing_cap) { ret = -EINVAL; fprintf(stderr, \"kvm does not support %s\\n%s\", missing_cap->name, upgrade_note); goto err; } s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); s->broken_set_mem_region = 1; ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS); if (ret > 0) { s->broken_set_mem_region = 0; } #ifdef KVM_CAP_VCPU_EVENTS s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); #endif s->robust_singlestep = kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP); #ifdef KVM_CAP_DEBUGREGS s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); #endif #ifdef KVM_CAP_XSAVE s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE); #endif #ifdef KVM_CAP_XCRS s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS); #endif #ifdef KVM_CAP_PIT_STATE2 s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2); #endif #ifdef KVM_CAP_IRQ_ROUTING s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0); #endif s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3); s->irq_set_ioctl = KVM_IRQ_LINE; if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; } #ifdef KVM_CAP_READONLY_MEM kvm_readonly_mem_allowed = (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); #endif kvm_eventfds_allowed = (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0); kvm_irqfds_allowed = (kvm_check_extension(s, KVM_CAP_IRQFD) > 0); kvm_resamplefds_allowed = (kvm_check_extension(s, 
KVM_CAP_IRQFD_RESAMPLE) > 0); ret = kvm_arch_init(s); if (ret < 0) { goto err; } ret = kvm_irqchip_create(s); if (ret < 0) { goto err; } kvm_state = s; memory_listener_register(&kvm_memory_listener, &address_space_memory); memory_listener_register(&kvm_io_listener, &address_space_io); s->many_ioeventfds = kvm_check_many_ioeventfds(); cpu_interrupt_handler = kvm_handle_interrupt; return 0; err: assert(ret < 0); if (s->vmfd >= 0) { close(s->vmfd); } if (s->fd != -1) { close(s->fd); } g_free(s->slots); return ret; }"} {"target": 1, "idx": 9779, "func": "int ff_msmpeg4_decode_init(MpegEncContext *s) { static int done = 0; int i; MVTable *mv; common_init(s); if (!done) { done = 1; for(i=0;i<NB_MV_TABLES;i++) { mv = &mv_tables[i]; init_vlc(&mv->vlc, MV_VLC_BITS, mv->n + 1, mv->table_mv_bits, 1, 1, mv->table_mv_code, 2, 2); } init_vlc(&dc_lum_vlc[0], DC_VLC_BITS, 120, &table0_dc_lum[0][1], 8, 4, &table0_dc_lum[0][0], 8, 4); init_vlc(&dc_chroma_vlc[0], DC_VLC_BITS, 120, &table0_dc_chroma[0][1], 8, 4, &table0_dc_chroma[0][0], 8, 4); init_vlc(&dc_lum_vlc[1], DC_VLC_BITS, 120, &table1_dc_lum[0][1], 8, 4, &table1_dc_lum[0][0], 8, 4); init_vlc(&dc_chroma_vlc[1], DC_VLC_BITS, 120, &table1_dc_chroma[0][1], 8, 4, &table1_dc_chroma[0][0], 8, 4); init_vlc(&v2_dc_lum_vlc, DC_VLC_BITS, 512, &v2_dc_lum_table[0][1], 8, 4, &v2_dc_lum_table[0][0], 8, 4); init_vlc(&v2_dc_chroma_vlc, DC_VLC_BITS, 512, &v2_dc_chroma_table[0][1], 8, 4, &v2_dc_chroma_table[0][0], 8, 4); init_vlc(&cbpy_vlc, CBPY_VLC_BITS, 16, &cbpy_tab[0][1], 2, 1, &cbpy_tab[0][0], 2, 1); init_vlc(&v2_intra_cbpc_vlc, V2_INTRA_CBPC_VLC_BITS, 4, &v2_intra_cbpc[0][1], 2, 1, &v2_intra_cbpc[0][0], 2, 1); init_vlc(&v2_mb_type_vlc, V2_MB_TYPE_VLC_BITS, 8, &v2_mb_type[0][1], 2, 1, &v2_mb_type[0][0], 2, 1); init_vlc(&v2_mv_vlc, V2_MV_VLC_BITS, 33, &mvtab[0][1], 2, 1, &mvtab[0][0], 2, 1); for(i=0; i<4; i++){ init_vlc(&mb_non_intra_vlc[i], MB_NON_INTRA_VLC_BITS, 128, &wmv2_inter_table[i][0][1], 8, 4, &wmv2_inter_table[i][0][0], 8, 4); //FIXME name? } init_vlc(&mb_intra_vlc, MB_INTRA_VLC_BITS, 64, &table_mb_intra[0][1], 4, 2, &table_mb_intra[0][0], 4, 2); init_vlc(&v1_intra_cbpc_vlc, V1_INTRA_CBPC_VLC_BITS, 8, intra_MCBPC_bits, 1, 1, intra_MCBPC_code, 1, 1); init_vlc(&v1_inter_cbpc_vlc, V1_INTER_CBPC_VLC_BITS, 25, inter_MCBPC_bits, 1, 1, inter_MCBPC_code, 1, 1); init_vlc(&inter_intra_vlc, INTER_INTRA_VLC_BITS, 4, &table_inter_intra[0][1], 2, 1, &table_inter_intra[0][0], 2, 1); } switch(s->msmpeg4_version){ case 1: case 2: s->decode_mb= msmpeg4v12_decode_mb; break; case 3: case 4: s->decode_mb= msmpeg4v34_decode_mb; break; case 5: s->decode_mb= wmv2_decode_mb; break; } s->slice_height= s->mb_height; //to avoid 1/0 if the first frame isnt a keyframe return 0; }"} {"target": 1, "idx": 9780, "func": "void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns) { assert(job->busy); /* Check cancellation *before* setting busy = false, too! 
*/ if (block_job_is_cancelled(job)) { return; } job->busy = false; if (!block_job_should_pause(job)) { co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns); } /* The job can be paused while sleeping, so check this again */ if (block_job_should_pause(job)) { qemu_coroutine_yield(); } job->busy = true; }"} {"target": 1, "idx": 9793, "func": "static void adb_mouse_reset(DeviceState *dev) { ADBDevice *d = ADB_DEVICE(dev); MouseState *s = ADB_MOUSE(dev); d->handler = 2; d->devaddr = ADB_DEVID_MOUSE; s->last_buttons_state = s->buttons_state = 0; s->dx = s->dy = s->dz = 0; }"} {"target": 1, "idx": 9795, "func": "static void raw_close(BlockDriverState *bs) { BDRVRawState *s = bs->opaque; if (s->fd >= 0) { close(s->fd); s->fd = -1; if (s->aligned_buf != NULL) qemu_free(s->aligned_buf); } }"} {"target": 1, "idx": 9835, "func": "static void start_frame_overlay(AVFilterLink *inlink, AVFilterBufferRef *inpicref) { AVFilterContext *ctx = inlink->dst; OverlayContext *over = ctx->priv; inpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[OVERLAY]->time_base, ctx->outputs[0]->time_base); if (!over->overpicref) over->overpicref = inpicref; else over->overpicref_next = inpicref; }"} {"target": 0, "idx": 9860, "func": "START_TEST(simple_dict) { int i; struct { const char *encoded; LiteralQObject decoded; } test_cases[] = { { .encoded = \"{\\\"foo\\\": 42, \\\"bar\\\": \\\"hello world\\\"}\", .decoded = QLIT_QDICT(((LiteralQDictEntry[]){ { \"foo\", QLIT_QINT(42) }, { \"bar\", QLIT_QSTR(\"hello world\") }, { } })), }, { .encoded = \"{}\", .decoded = QLIT_QDICT(((LiteralQDictEntry[]){ { } })), }, { .encoded = \"{\\\"foo\\\": 43}\", .decoded = QLIT_QDICT(((LiteralQDictEntry[]){ { \"foo\", QLIT_QINT(43) }, { } })), }, { } }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QString *str; obj = qobject_from_json(test_cases[i].encoded); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QDICT); fail_unless(compare_litqobj_to_qobj(&test_cases[i].decoded, obj) == 1); str = qobject_to_json(obj); qobject_decref(obj); obj = qobject_from_json(qstring_get_str(str)); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QDICT); fail_unless(compare_litqobj_to_qobj(&test_cases[i].decoded, obj) == 1); qobject_decref(obj); QDECREF(str); } }"} {"target": 0, "idx": 9861, "func": "static void i440fx_pcihost_get_pci_hole64_end(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { PCIHostState *h = PCI_HOST_BRIDGE(obj); Range w64; pci_bus_get_w64_range(h->bus, &w64); visit_type_uint64(v, name, &w64.end, errp); }"} {"target": 0, "idx": 9866, "func": "void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb) { int cssid, ssid, schid, m; SubchDev *sch; uint64_t addr; int cc; SCHIB schib; CPUS390XState *env = &cpu->env; uint8_t ar; addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { program_interrupt(env, PGM_SPECIFICATION, 2); return; } if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { /* * As operand exceptions have a lower priority than access exceptions, * we check whether the memory area is writeable (injecting the * access execption if it is not) first. 
*/ if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) { program_interrupt(env, PGM_OPERAND, 2); } return; } trace_ioinst_sch_id(\"stsch\", cssid, ssid, schid); sch = css_find_subch(m, cssid, ssid, schid); if (sch) { if (css_subch_visible(sch)) { css_do_stsch(sch, &schib); cc = 0; } else { /* Indicate no more subchannels in this css/ss */ cc = 3; } } else { if (css_schid_final(m, cssid, ssid, schid)) { cc = 3; /* No more subchannels in this css/ss */ } else { /* Store an empty schib. */ memset(&schib, 0, sizeof(schib)); cc = 0; } } if (cc != 3) { if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib, sizeof(schib)) != 0) { return; } } else { /* Access exceptions have a higher priority than cc3 */ if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) { return; } } setcc(cpu, cc); }"} {"target": 0, "idx": 9872, "func": "static void cow_close(BlockDriverState *bs) { }"} {"target": 1, "idx": 9880, "func": "static int cllc_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt) { CLLCContext *ctx = avctx->priv_data; AVFrame *pic = avctx->coded_frame; uint8_t *src = avpkt->data; uint8_t *swapped_buf_new; uint32_t info_tag, info_offset; GetBitContext gb; int coding_type, ret; if (pic->data[0]) avctx->release_buffer(avctx, pic); pic->reference = 0; /* Make sure our bswap16'd buffer is big enough */ swapped_buf_new = av_fast_realloc(ctx->swapped_buf, &ctx->swapped_buf_size, avpkt->size); if (!swapped_buf_new) { av_log(avctx, AV_LOG_ERROR, \"Could not realloc swapped buffer.\\n\"); return AVERROR(ENOMEM); } ctx->swapped_buf = swapped_buf_new; /* Skip the INFO header if present */ info_offset = 0; info_tag = AV_RL32(src); if (info_tag == MKTAG('I', 'N', 'F', 'O')) { info_offset = AV_RL32(src + 4); if (info_offset > UINT32_MAX - 8 || info_offset + 8 > avpkt->size) { av_log(avctx, AV_LOG_ERROR, \"Invalid INFO header offset: 0x%08X is too large.\\n\", info_offset); return AVERROR_INVALIDDATA; } info_offset += 8; src += info_offset; av_log(avctx, AV_LOG_DEBUG, \"Skipping INFO chunk.\\n\"); } /* bswap16 the buffer since CLLC's bitreader works in 16-bit words */ ctx->dsp.bswap16_buf((uint16_t *) ctx->swapped_buf, (uint16_t *) src, (avpkt->size - info_offset) / 2); init_get_bits(&gb, ctx->swapped_buf, (avpkt->size - info_offset) * 8); /* * Read in coding type. 
The types are as follows: * * 0 - YUY2 * 1 - BGR24 (Triples) * 2 - BGR24 (Quads) * 3 - BGRA */ coding_type = (AV_RL32(src) >> 8) & 0xFF; av_log(avctx, AV_LOG_DEBUG, \"Frame coding type: %d\\n\", coding_type); switch (coding_type) { case 1: case 2: avctx->pix_fmt = PIX_FMT_RGB24; avctx->bits_per_raw_sample = 8; ret = avctx->get_buffer(avctx, pic); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Could not allocate buffer.\\n\"); return ret; } ret = decode_rgb24_frame(ctx, &gb, pic); if (ret < 0) return ret; break; case 3: avctx->pix_fmt = PIX_FMT_ARGB; avctx->bits_per_raw_sample = 8; ret = avctx->get_buffer(avctx, pic); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Could not allocate buffer.\\n\"); return ret; } ret = decode_argb_frame(ctx, &gb, pic); if (ret < 0) return ret; break; default: av_log(avctx, AV_LOG_ERROR, \"Unknown coding type: %d.\\n\", coding_type); return AVERROR_INVALIDDATA; } pic->key_frame = 1; pic->pict_type = AV_PICTURE_TYPE_I; *got_picture_ptr = 1; *(AVFrame *)data = *pic; return avpkt->size; }"} {"target": 1, "idx": 9889, "func": "void mips_malta_init (ram_addr_t ram_size, int vga_ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { char buf[1024]; unsigned long bios_offset; target_long bios_size; int64_t kernel_entry; PCIBus *pci_bus; CPUState *env; RTCState *rtc_state; fdctrl_t *floppy_controller; MaltaFPGAState *malta_fpga; qemu_irq *i8259; int piix4_devfn; uint8_t *eeprom_buf; i2c_bus *smbus; int i; int index; BlockDriverState *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; BlockDriverState *fd[MAX_FD]; int fl_idx = 0; int fl_sectors = 0; /* init CPUs */ if (cpu_model == NULL) { #ifdef TARGET_MIPS64 cpu_model = \"20Kc\"; #else cpu_model = \"24Kf\"; #endif env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to find CPU definition\\n\"); qemu_register_reset(main_cpu_reset, env); /* allocate RAM */ cpu_register_physical_memory(0, ram_size, IO_MEM_RAM); /* Map the bios at two physical locations, as on the real board. */ bios_offset = ram_size + vga_ram_size; cpu_register_physical_memory(0x1e000000LL, BIOS_SIZE, bios_offset | IO_MEM_ROM); cpu_register_physical_memory(0x1fc00000LL, BIOS_SIZE, bios_offset | IO_MEM_ROM); /* FPGA */ malta_fpga = malta_fpga_init(0x1f000000LL, env->irq[2], serial_hds[2]); /* Load firmware in flash / BIOS unless we boot directly into a kernel. */ if (kernel_filename) { /* Write a small bootloader to the flash location. */ loaderparams.ram_size = ram_size; loaderparams.kernel_filename = kernel_filename; loaderparams.kernel_cmdline = kernel_cmdline; loaderparams.initrd_filename = initrd_filename; kernel_entry = load_kernel(env); env->CP0_Status &= ~((1 << CP0St_BEV) | (1 << CP0St_ERL)); write_bootloader(env, bios_offset, kernel_entry); } else { index = drive_get_index(IF_PFLASH, 0, fl_idx); if (index != -1) { /* Load firmware from flash. */ bios_size = 0x400000; fl_sectors = bios_size >> 16; #ifdef DEBUG_BOARD_INIT printf(\"Register parallel flash %d size \" TARGET_FMT_lx \" at \" \"offset %08lx addr %08llx '%s' %x\\n\", fl_idx, bios_size, bios_offset, 0x1e000000LL, bdrv_get_device_name(drives_table[index].bdrv), fl_sectors); #endif pflash_cfi01_register(0x1e000000LL, bios_offset, drives_table[index].bdrv, 65536, fl_sectors, 4, 0x0000, 0x0000, 0x0000, 0x0000); fl_idx++; } else { /* Load a BIOS image. 
*/ if (bios_name == NULL) bios_name = BIOS_FILENAME; snprintf(buf, sizeof(buf), \"%s/%s\", bios_dir, bios_name); bios_size = load_image(buf, phys_ram_base + bios_offset); if ((bios_size < 0 || bios_size > BIOS_SIZE) && !kernel_filename) { \"qemu: Could not load MIPS bios '%s', and no -kernel argument was specified\\n\", buf); /* In little endian mode the 32bit words in the bios are swapped, a neat trick which allows bi-endian firmware. */ #ifndef TARGET_WORDS_BIGENDIAN { uint32_t *addr; for (addr = (uint32_t *)(phys_ram_base + bios_offset); addr < (uint32_t *)(phys_ram_base + bios_offset + bios_size); addr++) { *addr = bswap32(*addr); #endif /* Board ID = 0x420 (Malta Board with CoreLV) XXX: theoretically 0x1e000010 should map to flash and 0x1fc00010 should map to the board ID. */ stl_raw(phys_ram_base + bios_offset + 0x10, 0x00000420); /* Init internal devices */ cpu_mips_irq_init_cpu(env); cpu_mips_clock_init(env); /* Interrupt controller */ /* The 8259 is attached to the MIPS CPU INT0 pin, ie interrupt 2 */ i8259 = i8259_init(env->irq[2]); /* Northbridge */ pci_bus = pci_gt64120_init(i8259); /* Southbridge */ if (drive_get_max_bus(IF_IDE) >= MAX_IDE_BUS) { fprintf(stderr, \"qemu: too many IDE bus\\n\"); for(i = 0; i < MAX_IDE_BUS * MAX_IDE_DEVS; i++) { index = drive_get_index(IF_IDE, i / MAX_IDE_DEVS, i % MAX_IDE_DEVS); if (index != -1) hd[i] = drives_table[index].bdrv; else hd[i] = NULL; piix4_devfn = piix4_init(pci_bus, 80); pci_piix4_ide_init(pci_bus, hd, piix4_devfn + 1, i8259); usb_uhci_piix4_init(pci_bus, piix4_devfn + 2); smbus = piix4_pm_init(pci_bus, piix4_devfn + 3, 0x1100, i8259[9]); eeprom_buf = qemu_mallocz(8 * 256); /* XXX: make this persistent */ for (i = 0; i < 8; i++) { /* TODO: Populate SPD eeprom data. */ smbus_eeprom_device_init(smbus, 0x50 + i, eeprom_buf + (i * 256)); pit = pit_init(0x40, i8259[0]); DMA_init(0); /* Super I/O */ i8042_init(i8259[1], i8259[12], 0x60); rtc_state = rtc_init(0x70, i8259[8]); serial_init(0x3f8, i8259[4], 115200, serial_hds[0]); serial_init(0x2f8, i8259[3], 115200, serial_hds[1]); if (parallel_hds[0]) parallel_init(0x378, i8259[7], parallel_hds[0]); for(i = 0; i < MAX_FD; i++) { index = drive_get_index(IF_FLOPPY, 0, i); if (index != -1) fd[i] = drives_table[index].bdrv; else fd[i] = NULL; floppy_controller = fdctrl_init(i8259[6], 2, 0, 0x3f0, fd); /* Sound card */ #ifdef HAS_AUDIO audio_init(pci_bus); #endif /* Network card */ network_init(pci_bus); /* Optional PCI video card */ pci_cirrus_vga_init(pci_bus, phys_ram_base + ram_size, ram_size, vga_ram_size);"} {"target": 1, "idx": 9890, "func": "static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) { int dir; size_t len = 0; #ifdef DEBUG_PACKET const char *str = NULL; #endif int pid; int ret; int i; USBDevice *dev; struct ohci_td td; uint32_t addr; int flag_r; int completion; addr = ed->head & OHCI_DPTR_MASK; /* See if this TD has already been submitted to the device. */ completion = (addr == ohci->async_td); if (completion && !ohci->async_complete) { #ifdef DEBUG_PACKET DPRINTF(\"Skipping async TD\\n\"); #endif return 1; } if (!ohci_read_td(ohci, addr, &td)) { fprintf(stderr, \"usb-ohci: TD read error at %x\\n\", addr); return 0; } dir = OHCI_BM(ed->flags, ED_D); switch (dir) { case OHCI_TD_DIR_OUT: case OHCI_TD_DIR_IN: /* Same value. 
*/ break; default: dir = OHCI_BM(td.flags, TD_DP); break; } switch (dir) { case OHCI_TD_DIR_IN: #ifdef DEBUG_PACKET str = \"in\"; #endif pid = USB_TOKEN_IN; break; case OHCI_TD_DIR_OUT: #ifdef DEBUG_PACKET str = \"out\"; #endif pid = USB_TOKEN_OUT; break; case OHCI_TD_DIR_SETUP: #ifdef DEBUG_PACKET str = \"setup\"; #endif pid = USB_TOKEN_SETUP; break; default: fprintf(stderr, \"usb-ohci: Bad direction\\n\"); return 1; } if (td.cbp && td.be) { if ((td.cbp & 0xfffff000) != (td.be & 0xfffff000)) { len = (td.be & 0xfff) + 0x1001 - (td.cbp & 0xfff); } else { len = (td.be - td.cbp) + 1; } if (len && dir != OHCI_TD_DIR_IN && !completion) { ohci_copy_td(ohci, &td, ohci->usb_buf, len, 0); } } flag_r = (td.flags & OHCI_TD_R) != 0; #ifdef DEBUG_PACKET DPRINTF(\" TD @ 0x%.8x %\" PRId64 \" bytes %s r=%d cbp=0x%.8x be=0x%.8x\\n\", addr, (int64_t)len, str, flag_r, td.cbp, td.be); if (len > 0 && dir != OHCI_TD_DIR_IN) { DPRINTF(\" data:\"); for (i = 0; i < len; i++) printf(\" %.2x\", ohci->usb_buf[i]); DPRINTF(\"\\n\"); } #endif if (completion) { ret = ohci->usb_packet.len; ohci->async_td = 0; ohci->async_complete = 0; } else { ret = USB_RET_NODEV; for (i = 0; i < ohci->num_ports; i++) { dev = ohci->rhport[i].port.dev; if ((ohci->rhport[i].ctrl & OHCI_PORT_PES) == 0) continue; if (ohci->async_td) { /* ??? The hardware should allow one active packet per endpoint. We only allow one active packet per controller. This should be sufficient as long as devices respond in a timely manner. */ #ifdef DEBUG_PACKET DPRINTF(\"Too many pending packets\\n\"); #endif return 1; } ohci->usb_packet.pid = pid; ohci->usb_packet.devaddr = OHCI_BM(ed->flags, ED_FA); ohci->usb_packet.devep = OHCI_BM(ed->flags, ED_EN); ohci->usb_packet.data = ohci->usb_buf; ohci->usb_packet.len = len; ret = usb_handle_packet(dev, &ohci->usb_packet); if (ret != USB_RET_NODEV) break; } #ifdef DEBUG_PACKET DPRINTF(\"ret=%d\\n\", ret); #endif if (ret == USB_RET_ASYNC) { ohci->async_td = addr; return 1; } } if (ret >= 0) { if (dir == OHCI_TD_DIR_IN) { ohci_copy_td(ohci, &td, ohci->usb_buf, ret, 1); #ifdef DEBUG_PACKET DPRINTF(\" data:\"); for (i = 0; i < ret; i++) printf(\" %.2x\", ohci->usb_buf[i]); DPRINTF(\"\\n\"); #endif } else { ret = len; } } /* Writeback */ if (ret == len || (dir == OHCI_TD_DIR_IN && ret >= 0 && flag_r)) { /* Transmission succeeded. 
*/ if (ret == len) { td.cbp = 0; } else { td.cbp += ret; if ((td.cbp & 0xfff) + ret > 0xfff) { td.cbp &= 0xfff; td.cbp |= td.be & ~0xfff; } } td.flags |= OHCI_TD_T1; td.flags ^= OHCI_TD_T0; OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_NOERROR); OHCI_SET_BM(td.flags, TD_EC, 0); ed->head &= ~OHCI_ED_C; if (td.flags & OHCI_TD_T0) ed->head |= OHCI_ED_C; } else { if (ret >= 0) { DPRINTF(\"usb-ohci: Underrun\\n\"); OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DATAUNDERRUN); } else { switch (ret) { case USB_RET_NODEV: OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DEVICENOTRESPONDING); case USB_RET_NAK: DPRINTF(\"usb-ohci: got NAK\\n\"); return 1; case USB_RET_STALL: DPRINTF(\"usb-ohci: got STALL\\n\"); OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_STALL); break; case USB_RET_BABBLE: DPRINTF(\"usb-ohci: got BABBLE\\n\"); OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DATAOVERRUN); break; default: fprintf(stderr, \"usb-ohci: Bad device response %d\\n\", ret); OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_UNDEXPETEDPID); OHCI_SET_BM(td.flags, TD_EC, 3); break; } } ed->head |= OHCI_ED_H; } /* Retire this TD */ ed->head &= ~OHCI_DPTR_MASK; ed->head |= td.next & OHCI_DPTR_MASK; td.next = ohci->done; ohci->done = addr; i = OHCI_BM(td.flags, TD_DI); if (i < ohci->done_count) ohci->done_count = i; ohci_put_td(ohci, addr, &td); return OHCI_BM(td.flags, TD_CC) != OHCI_CC_NOERROR; }"} {"target": 1, "idx": 9893, "func": "void qdev_prop_set_drive(DeviceState *dev, const char *name, BlockBackend *value, Error **errp) { object_property_set_str(OBJECT(dev), value ? blk_name(value) : \"\", name, errp); }"} {"target": 1, "idx": 9894, "func": "int net_slirp_redir(const char *redir_str) { struct slirp_config_str *config; if (QTAILQ_EMPTY(&slirp_stacks)) { config = g_malloc(sizeof(*config)); pstrcpy(config->str, sizeof(config->str), redir_str); config->flags = SLIRP_CFG_HOSTFWD | SLIRP_CFG_LEGACY; config->next = slirp_configs; slirp_configs = config; return 0; } return slirp_hostfwd(QTAILQ_FIRST(&slirp_stacks), redir_str, 1); }"} {"target": 1, "idx": 9900, "func": "static int decode_frame(AVCodecContext *avctx, const uint8_t *databuf, float **out_samples) { ATRAC3Context *q = avctx->priv_data; int ret, i, ch; uint8_t *ptr1; if (q->coding_mode == JOINT_STEREO) { /* channel coupling mode */ /* Decode sound unit pairs (channels are expected to be even). * Multichannel joint stereo interleaves pairs (6ch: 2ch + 2ch + 2ch) */ const uint8_t *js_databuf; int js_pair, js_block_align; js_block_align = (avctx->block_align / avctx->channels) * 2; /* block pair */ for (ch = 0; ch < avctx->channels; ch = ch + 2) { js_pair = ch/2; js_databuf = databuf + js_pair * js_block_align; /* align to current pair */ /* Set the bitstream reader at the start of first channel sound unit. */ init_get_bits(&q->gb, js_databuf, js_block_align * 8); /* decode Sound Unit 1 */ ret = decode_channel_sound_unit(q, &q->gb, &q->units[ch], out_samples[ch], ch, JOINT_STEREO); if (ret != 0) return ret; /* Framedata of the su2 in the joint-stereo mode is encoded in * reverse byte order so we need to swap it first. */ if (js_databuf == q->decoded_bytes_buffer) { uint8_t *ptr2 = q->decoded_bytes_buffer + js_block_align - 1; ptr1 = q->decoded_bytes_buffer; for (i = 0; i < js_block_align / 2; i++, ptr1++, ptr2--) FFSWAP(uint8_t, *ptr1, *ptr2); } else { const uint8_t *ptr2 = js_databuf + js_block_align - 1; for (i = 0; i < js_block_align; i++) q->decoded_bytes_buffer[i] = *ptr2--; } /* Skip the sync codes (0xF8). 
*/ ptr1 = q->decoded_bytes_buffer; for (i = 4; *ptr1 == 0xF8; i++, ptr1++) { if (i >= js_block_align) return AVERROR_INVALIDDATA; } /* set the bitstream reader at the start of the second Sound Unit */ init_get_bits8(&q->gb, ptr1, q->decoded_bytes_buffer + js_block_align - ptr1); /* Fill the Weighting coeffs delay buffer */ memmove(q->weighting_delay[js_pair], &q->weighting_delay[js_pair][2], 4 * sizeof(*q->weighting_delay[js_pair])); q->weighting_delay[js_pair][4] = get_bits1(&q->gb); q->weighting_delay[js_pair][5] = get_bits(&q->gb, 3); for (i = 0; i < 4; i++) { q->matrix_coeff_index_prev[js_pair][i] = q->matrix_coeff_index_now[js_pair][i]; q->matrix_coeff_index_now[js_pair][i] = q->matrix_coeff_index_next[js_pair][i]; q->matrix_coeff_index_next[js_pair][i] = get_bits(&q->gb, 2); } /* Decode Sound Unit 2. */ ret = decode_channel_sound_unit(q, &q->gb, &q->units[ch+1], out_samples[ch+1], ch+1, JOINT_STEREO); if (ret != 0) return ret; /* Reconstruct the channel coefficients. */ reverse_matrixing(out_samples[ch], out_samples[ch+1], q->matrix_coeff_index_prev[js_pair], q->matrix_coeff_index_now[js_pair]); channel_weighting(out_samples[ch], out_samples[ch+1], q->weighting_delay[js_pair]); } } else { /* single channels */ /* Decode the channel sound units. */ for (i = 0; i < avctx->channels; i++) { /* Set the bitstream reader at the start of a channel sound unit. */ init_get_bits(&q->gb, databuf + i * avctx->block_align / avctx->channels, avctx->block_align * 8 / avctx->channels); ret = decode_channel_sound_unit(q, &q->gb, &q->units[i], out_samples[i], i, q->coding_mode); if (ret != 0) return ret; } } /* Apply the iQMF synthesis filter. */ for (i = 0; i < avctx->channels; i++) { float *p1 = out_samples[i]; float *p2 = p1 + 256; float *p3 = p2 + 256; float *p4 = p3 + 256; ff_atrac_iqmf(p1, p2, 256, p1, q->units[i].delay_buf1, q->temp_buf); ff_atrac_iqmf(p4, p3, 256, p3, q->units[i].delay_buf2, q->temp_buf); ff_atrac_iqmf(p1, p3, 512, p1, q->units[i].delay_buf3, q->temp_buf); } return 0; }"} {"target": 0, "idx": 9901, "func": "static void decode_refpass(Jpeg2000T1Context *t1, int width, int height, int bpno, int vert_causal_ctx_csty_symbol) { int phalf, nhalf; int y0, x, y; phalf = 1 << (bpno - 1); nhalf = -phalf; for (y0 = 0; y0 < height; y0 += 4) for (x = 0; x < width; x++) for (y = y0; y < height && y < y0 + 4; y++) if ((t1->flags[y + 1][x + 1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS)) == JPEG2000_T1_SIG) { int flags_mask = (vert_causal_ctx_csty_symbol && y == y0 + 3) ? ~(JPEG2000_T1_SIG_S | JPEG2000_T1_SIG_SW | JPEG2000_T1_SIG_SE | JPEG2000_T1_SGN_S) : -1; int ctxno = ff_jpeg2000_getrefctxno(t1->flags[y + 1][x + 1] & flags_mask); int r = ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + ctxno) ? phalf : nhalf; t1->data[y][x] += t1->data[y][x] < 0 ? 
-r : r; t1->flags[y + 1][x + 1] |= JPEG2000_T1_REF; } }"} {"target": 1, "idx": 9904, "func": "static void kvm_invoke_set_guest_debug(void *data) { struct kvm_set_guest_debug_data *dbg_data = data; CPUState *env = dbg_data->env; if (env->kvm_vcpu_dirty) { kvm_arch_put_registers(env); env->kvm_vcpu_dirty = 0; } dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg); }"} {"target": 1, "idx": 9911, "func": "static uint32_t find_subframe_rice_params(FlacEncodeContext *s, FlacSubframe *sub, int pred_order) { int pmin = get_max_p_order(s->options.min_partition_order, s->frame.blocksize, pred_order); int pmax = get_max_p_order(s->options.max_partition_order, s->frame.blocksize, pred_order); uint32_t bits = 8 + pred_order * sub->obits + 2 + 4; if (sub->type == FLAC_SUBFRAME_LPC) bits += 4 + 5 + pred_order * s->options.lpc_coeff_precision; bits += calc_rice_params(&sub->rc, pmin, pmax, sub->residual, s->frame.blocksize, pred_order); return bits; }"} {"target": 0, "idx": 9919, "func": "static int mmap_start(AVFormatContext *ctx) { struct video_data *s = ctx->priv_data; enum v4l2_buf_type type; int i, res; for (i = 0; i < s->buffers; i++) { struct v4l2_buffer buf; memset(&buf, 0, sizeof(struct v4l2_buffer)); buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; buf.index = i; res = ioctl(s->fd, VIDIOC_QBUF, &buf); if (res < 0) { av_log(ctx, AV_LOG_ERROR, \"ioctl(VIDIOC_QBUF): %s\\n\", strerror(errno)); return AVERROR(errno); } } type = V4L2_BUF_TYPE_VIDEO_CAPTURE; res = ioctl(s->fd, VIDIOC_STREAMON, &type); if (res < 0) { av_log(ctx, AV_LOG_ERROR, \"ioctl(VIDIOC_STREAMON): %s\\n\", strerror(errno)); return AVERROR(errno); } return 0; }"} {"target": 0, "idx": 9964, "func": "static void bdrv_rw_em_cb(void *opaque, int ret) { *(int *)opaque = ret; }"} {"target": 0, "idx": 9969, "func": "void ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr, Error **errp) { int ret = 0; CPUPPCState *env = &cpu->env; PowerPCCPUClass *host_pcc; cpu->compat_pvr = compat_pvr; switch (compat_pvr) { case CPU_POWERPC_LOGICAL_2_05: env->spr[SPR_PCR] = PCR_TM_DIS | PCR_VSX_DIS | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; break; case CPU_POWERPC_LOGICAL_2_06: case CPU_POWERPC_LOGICAL_2_06_PLUS: env->spr[SPR_PCR] = PCR_TM_DIS | PCR_COMPAT_2_07 | PCR_COMPAT_2_06; break; case CPU_POWERPC_LOGICAL_2_07: env->spr[SPR_PCR] = PCR_COMPAT_2_07; break; default: env->spr[SPR_PCR] = 0; break; } host_pcc = kvm_ppc_get_host_cpu_class(); if (host_pcc) { env->spr[SPR_PCR] &= host_pcc->pcr_mask; } if (kvm_enabled()) { ret = kvmppc_set_compat(cpu, cpu->compat_pvr); if (ret < 0) { error_setg_errno(errp, -ret, \"Unable to set CPU compatibility mode in KVM\"); } } }"} {"target": 0, "idx": 10004, "func": "static coroutine_fn int sd_co_pdiscard(BlockDriverState *bs, int64_t offset, int count) { SheepdogAIOCB acb; BDRVSheepdogState *s = bs->opaque; QEMUIOVector discard_iov; struct iovec iov; uint32_t zero = 0; if (!s->discard_supported) { return 0; } memset(&discard_iov, 0, sizeof(discard_iov)); memset(&iov, 0, sizeof(iov)); iov.iov_base = &zero; iov.iov_len = sizeof(zero); discard_iov.iov = &iov; discard_iov.niov = 1; if (!QEMU_IS_ALIGNED(offset | count, BDRV_SECTOR_SIZE)) { return -ENOTSUP; } sd_aio_setup(&acb, s, &discard_iov, offset >> BDRV_SECTOR_BITS, count >> BDRV_SECTOR_BITS, AIOCB_DISCARD_OBJ); retry: if (check_overlapping_aiocb(s, &acb)) { qemu_co_queue_wait(&s->overlapping_queue); goto retry; } sd_co_rw_vector(&acb); QLIST_REMOVE(&acb, aiocb_siblings); 
qemu_co_queue_restart_all(&s->overlapping_queue); return acb.ret; }"} {"target": 0, "idx": 10013, "func": "static void gdb_vm_stopped(void *opaque, int reason) { GDBState *s = opaque; char buf[256]; const char *type; int ret; if (s->state == RS_SYSCALL) return; /* disable single step if it was enable */ cpu_single_step(s->env, 0); if (reason == EXCP_DEBUG) { if (s->env->watchpoint_hit) { switch (s->env->watchpoint_hit->flags & BP_MEM_ACCESS) { case BP_MEM_READ: type = \"r\"; break; case BP_MEM_ACCESS: type = \"a\"; break; default: type = \"\"; break; } snprintf(buf, sizeof(buf), \"T%02x%swatch:\" TARGET_FMT_lx \";\", SIGTRAP, type, s->env->watchpoint_hit->vaddr); put_packet(s, buf); s->env->watchpoint_hit = NULL; return; } tb_flush(s->env); ret = SIGTRAP; } else if (reason == EXCP_INTERRUPT) { ret = SIGINT; } else { ret = 0; } snprintf(buf, sizeof(buf), \"S%02x\", ret); put_packet(s, buf); }"} {"target": 0, "idx": 10015, "func": "void qemu_peer_set_offload(NetClientState *nc, int csum, int tso4, int tso6, int ecn, int ufo) { if (!nc->peer || !nc->peer->info->set_offload) { return; } nc->peer->info->set_offload(nc->peer, csum, tso4, tso6, ecn, ufo); }"} {"target": 0, "idx": 10018, "func": "uint32_t helper_bcdsetsgn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) { int i; int invalid = 0; int sgnb = bcd_get_sgn(b); *r = *b; bcd_put_digit(r, bcd_preferred_sgn(sgnb, ps), 0); for (i = 1; i < 32; i++) { bcd_get_digit(b, i, &invalid); if (unlikely(invalid)) { return CRF_SO; } } return bcd_cmp_zero(r); }"} {"target": 0, "idx": 10020, "func": "uint32_t msix_bar_size(PCIDevice *dev) { return (dev->cap_present & QEMU_PCI_CAP_MSIX) ? dev->msix_bar_size : 0; }"} {"target": 0, "idx": 10034, "func": "static int mxf_parse_mpeg2_frame(AVFormatContext *s, AVStream *st, AVPacket *pkt, int *flags) { MXFStreamContext *sc = st->priv_data; MXFContext *mxf = s->priv_data; uint32_t c = -1; int i; *flags = 0; for(i = 0; i < pkt->size - 4; i++) { c = (c<<8) + pkt->data[i]; if (c == 0x1B5) { if (i + 2 < pkt->size && (pkt->data[i+1] & 0xf0) == 0x10) { // seq ext st->codec->profile = pkt->data[i+1] & 0x07; st->codec->level = pkt->data[i+2] >> 4; } else if (i + 5 < pkt->size && (pkt->data[i+1] & 0xf0) == 0x80) { // pict coding ext sc->interlaced = !(pkt->data[i+5] & 0x80); // progressive frame break; } } else if (c == 0x1b8) { // gop if (i + 4 < pkt->size) { if (pkt->data[i+4]>>6 & 0x01) // closed *flags |= 0x80; // random access if (!mxf->header_written) { unsigned hours = (pkt->data[i+1]>>2) & 0x1f; unsigned minutes = ((pkt->data[i+1] & 0x03) << 4) | (pkt->data[i+2]>>4); unsigned seconds = ((pkt->data[i+2] & 0x07) << 3) | (pkt->data[i+3]>>5); unsigned frames = ((pkt->data[i+3] & 0x1f) << 1) | (pkt->data[i+4]>>7); mxf->timecode_drop_frame = !!(pkt->data[i+1] & 0x80); mxf->timecode_start = (hours*3600 + minutes*60 + seconds) * mxf->timecode_base + frames; if (mxf->timecode_drop_frame) { unsigned tminutes = 60 * hours + minutes; mxf->timecode_start -= 2 * (tminutes - tminutes / 10); } av_log(s, AV_LOG_DEBUG, \"frame %d %d:%d:%d%c%d\\n\", mxf->timecode_start, hours, minutes, seconds, mxf->timecode_drop_frame ? 
';':':', frames); } } } else if (c == 0x1b3) { // seq *flags |= 0x40; if (i + 4 < pkt->size) { switch ((pkt->data[i+4]>>4) & 0xf) { case 2: sc->aspect_ratio = (AVRational){ 4, 3}; break; case 3: sc->aspect_ratio = (AVRational){ 16, 9}; break; case 4: sc->aspect_ratio = (AVRational){221,100}; break; default: av_reduce(&sc->aspect_ratio.num, &sc->aspect_ratio.den, st->codec->width, st->codec->height, 1024*1024); } } } else if (c == 0x100) { // pic int pict_type = (pkt->data[i+2]>>3) & 0x07; if (pict_type == 2) { // P frame *flags |= 0x22; st->codec->gop_size = 1; } else if (pict_type == 3) { // B frame *flags |= 0x33; sc->temporal_reordering = -1; } else if (!pict_type) { av_log(s, AV_LOG_ERROR, \"error parsing mpeg2 frame\\n\"); return 0; } } } if (s->oformat != &mxf_d10_muxer) sc->codec_ul = mxf_get_mpeg2_codec_ul(st->codec); return !!sc->codec_ul; }"} {"target": 0, "idx": 10056, "func": "VirtIODevice *virtio_blk_init(DeviceState *dev, BlockConf *conf) { VirtIOBlock *s; int cylinders, heads, secs; static int virtio_blk_id; s = (VirtIOBlock *)virtio_common_init(\"virtio-blk\", VIRTIO_ID_BLOCK, sizeof(struct virtio_blk_config), sizeof(VirtIOBlock)); s->vdev.get_config = virtio_blk_update_config; s->vdev.get_features = virtio_blk_get_features; s->vdev.reset = virtio_blk_reset; s->bs = conf->dinfo->bdrv; s->conf = conf; s->rq = NULL; s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1; bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs); s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output); qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s); register_savevm(\"virtio-blk\", virtio_blk_id++, 2, virtio_blk_save, virtio_blk_load, s); return &s->vdev; }"} {"target": 0, "idx": 10060, "func": "int ffurl_register_protocol(URLProtocol *protocol) { URLProtocol **p; p = &first_protocol; while (*p != NULL) p = &(*p)->next; *p = protocol; protocol->next = NULL; return 0; }"} {"target": 0, "idx": 10075, "func": "static av_cold int common_init(AVCodecContext *avctx){ FFV1Context *s = avctx->priv_data; s->avctx= avctx; s->flags= avctx->flags; avcodec_get_frame_defaults(&s->picture); ff_dsputil_init(&s->dsp, avctx); s->width = avctx->width; s->height= avctx->height; assert(s->width && s->height); //defaults s->num_h_slices=1; s->num_v_slices=1; return 0; }"} {"target": 0, "idx": 10077, "func": "static bool e1000_has_rxbufs(E1000State *s, size_t total_size) { int bufs; /* Fast-path short packets */ if (total_size <= s->rxbuf_size) { return s->mac_reg[RDH] != s->mac_reg[RDT] || !s->check_rxov; } if (s->mac_reg[RDH] < s->mac_reg[RDT]) { bufs = s->mac_reg[RDT] - s->mac_reg[RDH]; } else if (s->mac_reg[RDH] > s->mac_reg[RDT] || !s->check_rxov) { bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) + s->mac_reg[RDT] - s->mac_reg[RDH]; } else { return false; } return total_size <= bufs * s->rxbuf_size; }"} {"target": 1, "idx": 10110, "func": "static int get_uint8_equal(QEMUFile *f, void *pv, size_t size, VMStateField *field) { uint8_t *v = pv; uint8_t v2; qemu_get_8s(f, &v2); if (*v == v2) { return 0; error_report(\"%x != %x\", *v, v2); return -EINVAL;"} {"target": 1, "idx": 10115, "func": "static int vhdx_create_bat(BlockDriverState *bs, BDRVVHDXState *s, uint64_t image_size, VHDXImageType type, bool use_zero_blocks, uint64_t file_offset, uint32_t length) { int ret = 0; uint64_t data_file_offset; uint64_t total_sectors = 0; uint64_t sector_num = 0; uint64_t unused; int block_state; VHDXSectorInfo sinfo; assert(s->bat == NULL); /* this gives a data start after BAT/bitmap 
entries, and well * past any metadata entries (with a 4 MB buffer for future * expansion */ data_file_offset = file_offset + length + 5 * MiB; total_sectors = image_size >> s->logical_sector_size_bits; if (type == VHDX_TYPE_DYNAMIC) { /* All zeroes, so we can just extend the file - the end of the BAT * is the furthest thing we have written yet */ ret = bdrv_truncate(bs, data_file_offset); if (ret < 0) { goto exit; } } else if (type == VHDX_TYPE_FIXED) { ret = bdrv_truncate(bs, data_file_offset + image_size); if (ret < 0) { goto exit; } } else { ret = -ENOTSUP; goto exit; } if (type == VHDX_TYPE_FIXED || use_zero_blocks || bdrv_has_zero_init(bs) == 0) { /* for a fixed file, the default BAT entry is not zero */ s->bat = g_malloc0(length); block_state = type == VHDX_TYPE_FIXED ? PAYLOAD_BLOCK_FULLY_PRESENT : PAYLOAD_BLOCK_NOT_PRESENT; block_state = use_zero_blocks ? PAYLOAD_BLOCK_ZERO : block_state; /* fill the BAT by emulating sector writes of sectors_per_block size */ while (sector_num < total_sectors) { vhdx_block_translate(s, sector_num, s->sectors_per_block, &sinfo); sinfo.file_offset = data_file_offset + (sector_num << s->logical_sector_size_bits); sinfo.file_offset = ROUND_UP(sinfo.file_offset, MiB); vhdx_update_bat_table_entry(bs, s, &sinfo, &unused, &unused, block_state); cpu_to_le64s(&s->bat[sinfo.bat_idx]); sector_num += s->sectors_per_block; } ret = bdrv_pwrite(bs, file_offset, s->bat, length); if (ret < 0) { goto exit; } } exit: g_free(s->bat); return ret; }"} {"target": 0, "idx": 10124, "func": "static void cpu_ioreq_move(ioreq_t *req) { int i; if (!req->data_is_ptr) { if (req->dir == IOREQ_READ) { for (i = 0; i < req->count; i++) { read_phys_req_item(req->addr, req, i, &req->data); } } else if (req->dir == IOREQ_WRITE) { for (i = 0; i < req->count; i++) { write_phys_req_item(req->addr, req, i, &req->data); } } } else { uint64_t tmp; if (req->dir == IOREQ_READ) { for (i = 0; i < req->count; i++) { read_phys_req_item(req->addr, req, i, &tmp); write_phys_req_item(req->data, req, i, &tmp); } } else if (req->dir == IOREQ_WRITE) { for (i = 0; i < req->count; i++) { read_phys_req_item(req->data, req, i, &tmp); write_phys_req_item(req->addr, req, i, &tmp); } } } }"} {"target": 0, "idx": 10133, "func": "bool qemu_savevm_state_blocked(Monitor *mon) { SaveStateEntry *se; QTAILQ_FOREACH(se, &savevm_handlers, entry) { if (se->no_migrate) { monitor_printf(mon, \"state blocked by non-migratable device '%s'\\n\", se->idstr); return true; } } return false; }"} {"target": 0, "idx": 10136, "func": "static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int count, BdrvRequestFlags flags) { int ret; BDRVQcow2State *s = bs->opaque; uint32_t head = offset % s->cluster_size; uint32_t tail = (offset + count) % s->cluster_size; trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, count); if (offset + count == bs->total_sectors * BDRV_SECTOR_SIZE) { tail = 0; } if (head || tail) { int64_t cl_start = (offset - head) >> BDRV_SECTOR_BITS; uint64_t off; unsigned int nr; assert(head + count <= s->cluster_size); /* check whether remainder of cluster already reads as zero */ if (!(is_zero_sectors(bs, cl_start, DIV_ROUND_UP(head, BDRV_SECTOR_SIZE)) && is_zero_sectors(bs, (offset + count) >> BDRV_SECTOR_BITS, DIV_ROUND_UP(-tail & (s->cluster_size - 1), BDRV_SECTOR_SIZE)))) { return -ENOTSUP; } qemu_co_mutex_lock(&s->lock); /* We can have new write after previous check */ offset = cl_start << BDRV_SECTOR_BITS; count = s->cluster_size; nr = s->cluster_size; ret = 
qcow2_get_cluster_offset(bs, offset, &nr, &off); if (ret != QCOW2_CLUSTER_UNALLOCATED && ret != QCOW2_CLUSTER_ZERO_PLAIN && ret != QCOW2_CLUSTER_ZERO_ALLOC) { qemu_co_mutex_unlock(&s->lock); return -ENOTSUP; } } else { qemu_co_mutex_lock(&s->lock); } trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, count); /* Whatever is left can use real zero clusters */ ret = qcow2_zero_clusters(bs, offset, count >> BDRV_SECTOR_BITS, flags); qemu_co_mutex_unlock(&s->lock); return ret; }"} {"target": 0, "idx": 10137, "func": "static void gen_abso(DisasContext *ctx) { int l1 = gen_new_label(); int l2 = gen_new_label(); int l3 = gen_new_label(); /* Start with XER OV disabled, the most likely case */ tcg_gen_movi_tl(cpu_ov, 0); tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rA(ctx->opcode)], 0, l2); tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[rA(ctx->opcode)], 0x80000000, l1); tcg_gen_movi_tl(cpu_ov, 1); tcg_gen_movi_tl(cpu_so, 1); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_br(l3); gen_set_label(l2); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); gen_set_label(l3); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); }"} {"target": 1, "idx": 10146, "func": "DeviceState *sysbus_create_varargs(const char *name, target_phys_addr_t addr, ...) { DeviceState *dev; SysBusDevice *s; va_list va; qemu_irq irq; int n; dev = qdev_create(NULL, name); s = sysbus_from_qdev(dev); qdev_init(dev); if (addr != (target_phys_addr_t)-1) { sysbus_mmio_map(s, 0, addr); } va_start(va, addr); n = 0; while (1) { irq = va_arg(va, qemu_irq); if (!irq) { break; } sysbus_connect_irq(s, n, irq); n++; } return dev; }"} {"target": 0, "idx": 10149, "func": "static int dca_decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; int lfe_samples; int num_core_channels = 0; int i; float *samples_flt = data; int16_t *samples_s16 = data; int out_size; DCAContext *s = avctx->priv_data; int channels; int core_ss_end; s->xch_present = 0; s->dca_buffer_size = dca_convert_bitstream(buf, buf_size, s->dca_buffer, DCA_MAX_FRAME_SIZE + DCA_MAX_EXSS_HEADER_SIZE); if (s->dca_buffer_size == AVERROR_INVALIDDATA) { av_log(avctx, AV_LOG_ERROR, \"Not a valid DCA frame\\n\"); return AVERROR_INVALIDDATA; } init_get_bits(&s->gb, s->dca_buffer, s->dca_buffer_size * 8); if (dca_parse_frame_header(s) < 0) { //seems like the frame is corrupt, try with the next one *data_size=0; return buf_size; } //set AVCodec values with parsed data avctx->sample_rate = s->sample_rate; avctx->bit_rate = s->bit_rate; avctx->frame_size = s->sample_blocks * 32; s->profile = FF_PROFILE_DTS; for (i = 0; i < (s->sample_blocks / 8); i++) { dca_decode_block(s, 0, i); } /* record number of core channels incase less than max channels are requested */ num_core_channels = s->prim_channels; if (s->ext_coding) s->core_ext_mask = dca_ext_audio_descr_mask[s->ext_descr]; else s->core_ext_mask = 0; core_ss_end = FFMIN(s->frame_size, s->dca_buffer_size) * 8; /* only scan for extensions if ext_descr was unknown or indicated a * supported XCh extension */ if (s->core_ext_mask < 0 || s->core_ext_mask & DCA_EXT_XCH) { /* if ext_descr was unknown, clear s->core_ext_mask so that the * extensions scan can fill it up */ s->core_ext_mask = FFMAX(s->core_ext_mask, 0); /* extensions start at 32-bit boundaries into bitstream */ skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31); while(core_ss_end - 
get_bits_count(&s->gb) >= 32) { uint32_t bits = get_bits_long(&s->gb, 32); switch(bits) { case 0x5a5a5a5a: { int ext_amode, xch_fsize; s->xch_base_channel = s->prim_channels; /* validate sync word using XCHFSIZE field */ xch_fsize = show_bits(&s->gb, 10); if((s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize) && (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize + 1)) continue; /* skip length-to-end-of-frame field for the moment */ skip_bits(&s->gb, 10); s->core_ext_mask |= DCA_EXT_XCH; /* extension amode should == 1, number of channels in extension */ /* AFAIK XCh is not used for more channels */ if ((ext_amode = get_bits(&s->gb, 4)) != 1) { av_log(avctx, AV_LOG_ERROR, \"XCh extension amode %d not\" \" supported!\\n\",ext_amode); continue; } /* much like core primary audio coding header */ dca_parse_audio_coding_header(s, s->xch_base_channel); for (i = 0; i < (s->sample_blocks / 8); i++) { dca_decode_block(s, s->xch_base_channel, i); } s->xch_present = 1; break; } case 0x47004a03: /* XXCh: extended channels */ /* usually found either in core or HD part in DTS-HD HRA streams, * but not in DTS-ES which contains XCh extensions instead */ s->core_ext_mask |= DCA_EXT_XXCH; break; case 0x1d95f262: { int fsize96 = show_bits(&s->gb, 12) + 1; if (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + fsize96) continue; av_log(avctx, AV_LOG_DEBUG, \"X96 extension found at %d bits\\n\", get_bits_count(&s->gb)); skip_bits(&s->gb, 12); av_log(avctx, AV_LOG_DEBUG, \"FSIZE96 = %d bytes\\n\", fsize96); av_log(avctx, AV_LOG_DEBUG, \"REVNO = %d\\n\", get_bits(&s->gb, 4)); s->core_ext_mask |= DCA_EXT_X96; break; } } skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31); } } else { /* no supported extensions, skip the rest of the core substream */ skip_bits_long(&s->gb, core_ss_end - get_bits_count(&s->gb)); } if (s->core_ext_mask & DCA_EXT_X96) s->profile = FF_PROFILE_DTS_96_24; else if (s->core_ext_mask & (DCA_EXT_XCH | DCA_EXT_XXCH)) s->profile = FF_PROFILE_DTS_ES; /* check for ExSS (HD part) */ if (s->dca_buffer_size - s->frame_size > 32 && get_bits_long(&s->gb, 32) == DCA_HD_MARKER) dca_exss_parse_header(s); avctx->profile = s->profile; channels = s->prim_channels + !!s->lfe; if (s->amode<16) { avctx->channel_layout = dca_core_channel_layout[s->amode]; if (s->xch_present && (!avctx->request_channels || avctx->request_channels > num_core_channels + !!s->lfe)) { avctx->channel_layout |= AV_CH_BACK_CENTER; if (s->lfe) { avctx->channel_layout |= AV_CH_LOW_FREQUENCY; s->channel_order_tab = dca_channel_reorder_lfe_xch[s->amode]; } else { s->channel_order_tab = dca_channel_reorder_nolfe_xch[s->amode]; } } else { channels = num_core_channels + !!s->lfe; s->xch_present = 0; /* disable further xch processing */ if (s->lfe) { avctx->channel_layout |= AV_CH_LOW_FREQUENCY; s->channel_order_tab = dca_channel_reorder_lfe[s->amode]; } else s->channel_order_tab = dca_channel_reorder_nolfe[s->amode]; } if (channels > !!s->lfe && s->channel_order_tab[channels - 1 - !!s->lfe] < 0) return AVERROR_INVALIDDATA; if (avctx->request_channels == 2 && s->prim_channels > 2) { channels = 2; s->output = DCA_STEREO; avctx->channel_layout = AV_CH_LAYOUT_STEREO; } } else { av_log(avctx, AV_LOG_ERROR, \"Non standard configuration %d !\\n\",s->amode); return AVERROR_INVALIDDATA; } /* There is nothing that prevents a dts frame to change channel configuration but Libav doesn't support that so only set the channels if it is previously unset. 
Ideally during the first probe for channels the crc should be checked and only set avctx->channels when the crc is ok. Right now the decoder could set the channels based on a broken first frame.*/ if (s->is_channels_set == 0) { s->is_channels_set = 1; avctx->channels = channels; } if (avctx->channels != channels) { av_log(avctx, AV_LOG_ERROR, \"DCA decoder does not support number of \" \"channels changing in stream. Skipping frame.\\n\"); return AVERROR_PATCHWELCOME; } out_size = 256 / 8 * s->sample_blocks * channels * av_get_bytes_per_sample(avctx->sample_fmt); if (*data_size < out_size) return AVERROR(EINVAL); *data_size = out_size; /* filter to get final output */ for (i = 0; i < (s->sample_blocks / 8); i++) { dca_filter_channels(s, i); /* If this was marked as a DTS-ES stream we need to subtract back- */ /* channel from SL & SR to remove matrixed back-channel signal */ if((s->source_pcm_res & 1) && s->xch_present) { float* back_chan = s->samples + s->channel_order_tab[s->xch_base_channel] * 256; float* lt_chan = s->samples + s->channel_order_tab[s->xch_base_channel - 2] * 256; float* rt_chan = s->samples + s->channel_order_tab[s->xch_base_channel - 1] * 256; s->dsp.vector_fmac_scalar(lt_chan, back_chan, -M_SQRT1_2, 256); s->dsp.vector_fmac_scalar(rt_chan, back_chan, -M_SQRT1_2, 256); } if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT) { s->fmt_conv.float_interleave(samples_flt, s->samples_chanptr, 256, channels); samples_flt += 256 * channels; } else { s->fmt_conv.float_to_int16_interleave(samples_s16, s->samples_chanptr, 256, channels); samples_s16 += 256 * channels; } } /* update lfe history */ lfe_samples = 2 * s->lfe * (s->sample_blocks / 8); for (i = 0; i < 2 * s->lfe * 4; i++) { s->lfe_data[i] = s->lfe_data[i + lfe_samples]; } return buf_size; }"} {"target": 1, "idx": 10159, "func": "int spapr_populate_pci_dt(sPAPRPHBState *phb, uint32_t xics_phandle, void *fdt) { int bus_off, i, j; char nodename[256]; uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) }; struct { uint32_t hi; uint64_t child; uint64_t parent; uint64_t size; } QEMU_PACKED ranges[] = { { cpu_to_be32(b_ss(1)), cpu_to_be64(0), cpu_to_be64(phb->io_win_addr), cpu_to_be64(memory_region_size(&phb->iospace)), }, { cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET), cpu_to_be64(phb->mem_win_addr), cpu_to_be64(memory_region_size(&phb->memwindow)), }, }; uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 }; uint32_t interrupt_map_mask[] = { cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)}; uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7]; /* Start populating the FDT */ sprintf(nodename, \"pci@%\" PRIx64, phb->buid); bus_off = fdt_add_subnode(fdt, 0, nodename); if (bus_off < 0) { return bus_off; } #define _FDT(exp) \\ do { \\ int ret = (exp); \\ if (ret < 0) { \\ return ret; \\ } \\ } while (0) /* Write PHB properties */ _FDT(fdt_setprop_string(fdt, bus_off, \"device_type\", \"pci\")); _FDT(fdt_setprop_string(fdt, bus_off, \"compatible\", \"IBM,Logical_PHB\")); _FDT(fdt_setprop_cell(fdt, bus_off, \"#address-cells\", 0x3)); _FDT(fdt_setprop_cell(fdt, bus_off, \"#size-cells\", 0x2)); _FDT(fdt_setprop_cell(fdt, bus_off, \"#interrupt-cells\", 0x1)); _FDT(fdt_setprop(fdt, bus_off, \"used-by-rtas\", NULL, 0)); _FDT(fdt_setprop(fdt, bus_off, \"bus-range\", &bus_range, sizeof(bus_range))); _FDT(fdt_setprop(fdt, bus_off, \"ranges\", &ranges, sizeof(ranges))); _FDT(fdt_setprop(fdt, bus_off, \"reg\", &bus_reg, sizeof(bus_reg))); _FDT(fdt_setprop_cell(fdt, bus_off, \"ibm,pci-config-space-type\", 0x1)); /* 
Build the interrupt-map, this must matches what is done * in pci_spapr_map_irq */ _FDT(fdt_setprop(fdt, bus_off, \"interrupt-map-mask\", &interrupt_map_mask, sizeof(interrupt_map_mask))); for (i = 0; i < PCI_SLOT_MAX; i++) { for (j = 0; j < PCI_NUM_PINS; j++) { uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j]; int lsi_num = pci_spapr_swizzle(i, j); irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0)); irqmap[1] = 0; irqmap[2] = 0; irqmap[3] = cpu_to_be32(j+1); irqmap[4] = cpu_to_be32(xics_phandle); irqmap[5] = cpu_to_be32(phb->lsi_table[lsi_num].irq); irqmap[6] = cpu_to_be32(0x8); } } /* Write interrupt map */ _FDT(fdt_setprop(fdt, bus_off, \"interrupt-map\", &interrupt_map, sizeof(interrupt_map))); object_child_foreach(OBJECT(phb), spapr_phb_children_dt, &((sPAPRTCEDT){ .fdt = fdt, .node_off = bus_off })); return 0; }"} {"target": 0, "idx": 10187, "func": "void bdrv_set_translation_hint(BlockDriverState *bs, int translation) { bs->translation = translation; }"} {"target": 0, "idx": 10193, "func": "static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) { BDRVRawState *s = bs->opaque; if (offset > UINT64_MAX - s->offset) { return -EINVAL; } offset += s->offset; BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags); }"} {"target": 0, "idx": 10212, "func": "static bool balloon_stats_supported(const VirtIOBalloon *s) { VirtIODevice *vdev = VIRTIO_DEVICE(s); return vdev->guest_features & (1 << VIRTIO_BALLOON_F_STATS_VQ); }"} {"target": 1, "idx": 10215, "func": "static void pm_update_sci(PIIX4PMState *s) { int sci_level, pmsts; int64_t expire_time; pmsts = get_pmsts(s); sci_level = (((pmsts & s->pmen) & (RTC_EN | PWRBTN_EN | GBL_EN | TMROF_EN)) != 0); qemu_set_irq(s->irq, sci_level); /* schedule a timer interruption if needed */ if ((s->pmen & TMROF_EN) && !(pmsts & TMROF_EN)) { expire_time = muldiv64(s->tmr_overflow_time, ticks_per_sec, PM_FREQ); qemu_mod_timer(s->tmr_timer, expire_time); s->tmr_overflow_time += 0x800000; } else { qemu_del_timer(s->tmr_timer); } }"} {"target": 0, "idx": 10221, "func": "static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic, int trellis_node, int x, int y, int mbs_per_slice) { ProresContext *ctx = avctx->priv_data; int i, q, pq, xp, yp; const uint16_t *src; int slice_width_factor = av_log2(mbs_per_slice); int num_cblocks[MAX_PLANES], pwidth; int plane_factor[MAX_PLANES], is_chroma[MAX_PLANES]; const int min_quant = ctx->profile_info->min_quant; const int max_quant = ctx->profile_info->max_quant; int error, bits, bits_limit; int mbs, prev, cur, new_score; int slice_bits[TRELLIS_WIDTH], slice_score[TRELLIS_WIDTH]; mbs = x + mbs_per_slice; for (i = 0; i < ctx->num_planes; i++) { is_chroma[i] = (i == 1 || i == 2); plane_factor[i] = slice_width_factor + 2; if (is_chroma[i]) plane_factor[i] += ctx->chroma_factor - 3; if (!is_chroma[i] || ctx->chroma_factor == CFACTOR_Y444) { xp = x << 4; yp = y << 4; num_cblocks[i] = 4; pwidth = avctx->width; } else { xp = x << 3; yp = y << 4; num_cblocks[i] = 2; pwidth = avctx->width >> 1; } src = (const uint16_t*)(pic->data[i] + yp * pic->linesize[i]) + xp; get_slice_data(ctx, src, pic->linesize[i], xp, yp, pwidth, avctx->height, ctx->blocks[i], mbs_per_slice, num_cblocks[i]); } for (q = min_quant; q <= max_quant; q++) { ctx->nodes[trellis_node + q].prev_node = -1; ctx->nodes[trellis_node + q].quant = q; } // todo: maybe perform coarser quantising to fit into frame size when needed for (q = min_quant; q <= 
max_quant; q++) { bits = 0; error = 0; for (i = 0; i < ctx->num_planes; i++) { bits += estimate_slice_plane(ctx, &error, i, src, pic->linesize[i], mbs_per_slice, num_cblocks[i], plane_factor[i], ctx->quants[q]); } if (bits > 65000 * 8) { error = SCORE_LIMIT; break; } slice_bits[q] = bits; slice_score[q] = error; } bits_limit = mbs * ctx->bits_per_mb; for (pq = min_quant; pq <= max_quant; pq++) { prev = trellis_node - TRELLIS_WIDTH + pq; for (q = min_quant; q <= max_quant; q++) { cur = trellis_node + q; bits = ctx->nodes[prev].bits + slice_bits[q]; error = slice_score[q]; if (bits > bits_limit) error = SCORE_LIMIT; if (ctx->nodes[prev].score < SCORE_LIMIT && error < SCORE_LIMIT) new_score = ctx->nodes[prev].score + error; else new_score = SCORE_LIMIT; if (ctx->nodes[cur].prev_node == -1 || ctx->nodes[cur].score >= new_score) { ctx->nodes[cur].bits = bits; ctx->nodes[cur].score = new_score; ctx->nodes[cur].prev_node = prev; } } } error = ctx->nodes[trellis_node + min_quant].score; pq = trellis_node + min_quant; for (q = min_quant + 1; q <= max_quant; q++) { if (ctx->nodes[trellis_node + q].score <= error) { error = ctx->nodes[trellis_node + q].score; pq = trellis_node + q; } } return pq; }"} {"target": 0, "idx": 10222, "func": "static int discard_single_l2(BlockDriverState *bs, uint64_t offset, uint64_t nb_clusters, enum qcow2_discard_type type, bool full_discard) { BDRVQcow2State *s = bs->opaque; uint64_t *l2_table; int l2_index; int ret; int i; ret = get_cluster_table(bs, offset, &l2_table, &l2_index); if (ret < 0) { return ret; } /* Limit nb_clusters to one L2 table */ nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); assert(nb_clusters <= INT_MAX); for (i = 0; i < nb_clusters; i++) { uint64_t old_l2_entry; old_l2_entry = be64_to_cpu(l2_table[l2_index + i]); /* * If full_discard is false, make sure that a discarded area reads back * as zeroes for v3 images (we cannot do it for v2 without actually * writing a zero-filled buffer). We can skip the operation if the * cluster is already marked as zero, or if it's unallocated and we * don't have a backing file. * * TODO We might want to use bdrv_get_block_status(bs) here, but we're * holding s->lock, so that doesn't work today. * * If full_discard is true, the sector should not read back as zeroes, * but rather fall through to the backing file. 
*/ switch (qcow2_get_cluster_type(old_l2_entry)) { case QCOW2_CLUSTER_UNALLOCATED: if (full_discard || !bs->backing) { continue; } break; case QCOW2_CLUSTER_ZERO_PLAIN: if (!full_discard) { continue; } break; case QCOW2_CLUSTER_ZERO_ALLOC: case QCOW2_CLUSTER_NORMAL: case QCOW2_CLUSTER_COMPRESSED: break; default: abort(); } /* First remove L2 entries */ qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table); if (!full_discard && s->qcow_version >= 3) { l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); } else { l2_table[l2_index + i] = cpu_to_be64(0); } /* Then decrease the refcount */ qcow2_free_any_clusters(bs, old_l2_entry, 1, type); } qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table); return nb_clusters; }"} {"target": 0, "idx": 10232, "func": "static int alac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { ALACContext *alac = avctx->priv_data; enum RawDataBlockType element; int channels; int ch, ret, got_end; init_get_bits(&alac->gb, avpkt->data, avpkt->size * 8); got_end = 0; alac->nb_samples = 0; ch = 0; while (get_bits_left(&alac->gb) >= 3) { element = get_bits(&alac->gb, 3); if (element == TYPE_END) { got_end = 1; break; } if (element > TYPE_CPE && element != TYPE_LFE) { av_log(avctx, AV_LOG_ERROR, \"syntax element unsupported: %d\\n\", element); return AVERROR_PATCHWELCOME; } channels = (element == TYPE_CPE) ? 2 : 1; if (ch + channels > alac->channels) { av_log(avctx, AV_LOG_ERROR, \"invalid element channel count\\n\"); return AVERROR_INVALIDDATA; } ret = decode_element(avctx, data, alac_channel_layout_offsets[alac->channels - 1][ch], channels); if (ret < 0 && get_bits_left(&alac->gb)) return ret; ch += channels; } if (!got_end) { av_log(avctx, AV_LOG_ERROR, \"no end tag found. incomplete packet.\\n\"); return AVERROR_INVALIDDATA; } if (avpkt->size * 8 - get_bits_count(&alac->gb) > 8) { av_log(avctx, AV_LOG_ERROR, \"Error : %d bits left\\n\", avpkt->size * 8 - get_bits_count(&alac->gb)); } *got_frame_ptr = 1; *(AVFrame *)data = alac->frame; return avpkt->size; }"} {"target": 0, "idx": 10239, "func": "static void g364fb_init(DeviceState *dev, G364State *s) { s->vram = g_malloc0(s->vram_size); s->con = graphic_console_init(g364fb_update_display, g364fb_invalidate_display, g364fb_screen_dump, NULL, s); memory_region_init_io(&s->mem_ctrl, &g364fb_ctrl_ops, s, \"ctrl\", 0x180000); memory_region_init_ram_ptr(&s->mem_vram, \"vram\", s->vram_size, s->vram); vmstate_register_ram(&s->mem_vram, dev); memory_region_set_coalescing(&s->mem_vram); }"} {"target": 0, "idx": 10240, "func": "static void ppc_prep_init (int ram_size, int vga_ram_size, const char *boot_device, DisplayState *ds, const char **fd_filename, int snapshot, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env, *envs[MAX_CPUS]; char buf[1024]; nvram_t nvram; m48t59_t *m48t59; int PPC_io_memory; int linux_boot, i, nb_nics1, bios_size; unsigned long bios_offset; uint32_t kernel_base, kernel_size, initrd_base, initrd_size; PCIBus *pci_bus; qemu_irq *i8259; int ppc_boot_device = boot_device[0]; sysctrl = qemu_mallocz(sizeof(sysctrl_t)); if (sysctrl == NULL) return; linux_boot = (kernel_filename != NULL); /* init CPUs */ if (cpu_model == NULL) cpu_model = \"default\"; for (i = 0; i < smp_cpus; i++) { env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to find PowerPC CPU definition\\n\"); exit(1); } /* Set time-base frequency to 100 Mhz */ cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL); 
qemu_register_reset(&cpu_ppc_reset, env); register_savevm(\"cpu\", 0, 3, cpu_save, cpu_load, env); envs[i] = env; } /* allocate RAM */ cpu_register_physical_memory(0, ram_size, IO_MEM_RAM); /* allocate and load BIOS */ bios_offset = ram_size + vga_ram_size; if (bios_name == NULL) bios_name = BIOS_FILENAME; snprintf(buf, sizeof(buf), \"%s/%s\", bios_dir, bios_name); bios_size = load_image(buf, phys_ram_base + bios_offset); if (bios_size < 0 || bios_size > BIOS_SIZE) { cpu_abort(env, \"qemu: could not load PPC PREP bios '%s'\\n\", buf); exit(1); } if (env->nip < 0xFFF80000 && bios_size < 0x00100000) { cpu_abort(env, \"PowerPC 601 / 620 / 970 need a 1MB BIOS\\n\"); } bios_size = (bios_size + 0xfff) & ~0xfff; cpu_register_physical_memory((uint32_t)(-bios_size), bios_size, bios_offset | IO_MEM_ROM); if (linux_boot) { kernel_base = KERNEL_LOAD_ADDR; /* now we can load the kernel */ kernel_size = load_image(kernel_filename, phys_ram_base + kernel_base); if (kernel_size < 0) { cpu_abort(env, \"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } /* load initrd */ if (initrd_filename) { initrd_base = INITRD_LOAD_ADDR; initrd_size = load_image(initrd_filename, phys_ram_base + initrd_base); if (initrd_size < 0) { cpu_abort(env, \"qemu: could not load initial ram disk '%s'\\n\", initrd_filename); exit(1); } } else { initrd_base = 0; initrd_size = 0; } ppc_boot_device = 'm'; } else { kernel_base = 0; kernel_size = 0; initrd_base = 0; initrd_size = 0; } isa_mem_base = 0xc0000000; if (PPC_INPUT(env) != PPC_FLAGS_INPUT_6xx) { cpu_abort(env, \"Only 6xx bus is supported on PREP machine\\n\"); exit(1); } i8259 = i8259_init(first_cpu->irq_inputs[PPC6xx_INPUT_INT]); pci_bus = pci_prep_init(i8259); // pci_bus = i440fx_init(); /* Register 8 MB of ISA IO space (needed for non-contiguous map) */ PPC_io_memory = cpu_register_io_memory(0, PPC_prep_io_read, PPC_prep_io_write, sysctrl); cpu_register_physical_memory(0x80000000, 0x00800000, PPC_io_memory); /* init basic PC hardware */ pci_vga_init(pci_bus, ds, phys_ram_base + ram_size, ram_size, vga_ram_size, 0, 0); // openpic = openpic_init(0x00000000, 0xF0000000, 1); // pit = pit_init(0x40, i8259[0]); rtc_init(0x70, i8259[8]); serial_init(0x3f8, i8259[4], serial_hds[0]); nb_nics1 = nb_nics; if (nb_nics1 > NE2000_NB_MAX) nb_nics1 = NE2000_NB_MAX; for(i = 0; i < nb_nics1; i++) { if (nd_table[i].model == NULL || strcmp(nd_table[i].model, \"ne2k_isa\") == 0) { isa_ne2000_init(ne2000_io[i], i8259[ne2000_irq[i]], &nd_table[i]); } else { pci_nic_init(pci_bus, &nd_table[i], -1); } } for(i = 0; i < 2; i++) { isa_ide_init(ide_iobase[i], ide_iobase2[i], i8259[ide_irq[i]], bs_table[2 * i], bs_table[2 * i + 1]); } i8042_init(i8259[1], i8259[12], 0x60); DMA_init(1); // AUD_init(); // SB16_init(); fdctrl_init(i8259[6], 2, 0, 0x3f0, fd_table); /* Register speaker port */ register_ioport_read(0x61, 1, 1, speaker_ioport_read, NULL); register_ioport_write(0x61, 1, 1, speaker_ioport_write, NULL); /* Register fake IO ports for PREP */ sysctrl->reset_irq = first_cpu->irq_inputs[PPC6xx_INPUT_HRESET]; register_ioport_read(0x398, 2, 1, &PREP_io_read, sysctrl); register_ioport_write(0x398, 2, 1, &PREP_io_write, sysctrl); /* System control ports */ register_ioport_read(0x0092, 0x01, 1, &PREP_io_800_readb, sysctrl); register_ioport_write(0x0092, 0x01, 1, &PREP_io_800_writeb, sysctrl); register_ioport_read(0x0800, 0x52, 1, &PREP_io_800_readb, sysctrl); register_ioport_write(0x0800, 0x52, 1, &PREP_io_800_writeb, sysctrl); /* PCI intack location */ PPC_io_memory = 
cpu_register_io_memory(0, PPC_intack_read, PPC_intack_write, NULL); cpu_register_physical_memory(0xBFFFFFF0, 0x4, PPC_io_memory); /* PowerPC control and status register group */ #if 0 PPC_io_memory = cpu_register_io_memory(0, PPC_XCSR_read, PPC_XCSR_write, NULL); cpu_register_physical_memory(0xFEFF0000, 0x1000, PPC_io_memory); #endif if (usb_enabled) { usb_ohci_init_pci(pci_bus, 3, -1); } m48t59 = m48t59_init(i8259[8], 0, 0x0074, NVRAM_SIZE, 59); if (m48t59 == NULL) return; sysctrl->nvram = m48t59; /* Initialise NVRAM */ nvram.opaque = m48t59; nvram.read_fn = &m48t59_read; nvram.write_fn = &m48t59_write; PPC_NVRAM_set_params(&nvram, NVRAM_SIZE, \"PREP\", ram_size, ppc_boot_device, kernel_base, kernel_size, kernel_cmdline, initrd_base, initrd_size, /* XXX: need an option to load a NVRAM image */ 0, graphic_width, graphic_height, graphic_depth); /* Special port to get debug messages from Open-Firmware */ register_ioport_write(0x0F00, 4, 1, &PPC_debug_write, NULL); }"} {"target": 0, "idx": 10264, "func": "void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode) { ppc_vsr_t xb; ppc_vsr_t xt; float_status tstat; getVSR(rB(opcode) + 32, &xb, env); memset(&xt, 0, sizeof(xt)); helper_reset_fpstatus(env); if (unlikely(Rc(opcode) != 0)) { /* TODO: Support xsadddpo after round-to-odd is implemented */ abort(); } tstat = env->fp_status; set_float_exception_flags(0, &tstat); xt.f128 = float128_sqrt(xb.f128, &tstat); env->fp_status.float_exception_flags |= tstat.float_exception_flags; if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { if (float128_is_signaling_nan(xb.f128, &tstat)) { float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); xt.f128 = float128_snan_to_qnan(xb.f128); } else if (float128_is_quiet_nan(xb.f128, &tstat)) { xt.f128 = xb.f128; } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) { float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1); set_snan_bit_is_one(0, &env->fp_status); xt.f128 = float128_default_nan(&env->fp_status); } } helper_compute_fprf_float128(env, xt.f128); putVSR(rD(opcode) + 32, &xt, env); float_check_status(env); }"} {"target": 0, "idx": 10274, "func": "static int is_dup_page(uint8_t *page, uint8_t ch) { uint32_t val = ch << 24 | ch << 16 | ch << 8 | ch; uint32_t *array = (uint32_t *)page; int i; for (i = 0; i < (TARGET_PAGE_SIZE / 4); i++) { if (array[i] != val) return 0; } return 1; }"} {"target": 0, "idx": 10283, "func": "void qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext) { bool compat = ext->flags & QXL_COMMAND_FLAG_COMPAT; void *data; if (!qxl->cmdlog) { return; } fprintf(stderr, \"%\" PRId64 \" qxl-%d/%s:\", qemu_get_clock_ns(vm_clock), qxl->id, ring); fprintf(stderr, \" cmd @ 0x%\" PRIx64 \" %s%s\", ext->cmd.data, qxl_name(qxl_type, ext->cmd.type), compat ? \"(compat)\" : \"\"); data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); switch (ext->cmd.type) { case QXL_CMD_DRAW: if (!compat) { qxl_log_cmd_draw(qxl, data, ext->group_id); } else { qxl_log_cmd_draw_compat(qxl, data, ext->group_id); } break; case QXL_CMD_SURFACE: qxl_log_cmd_surface(qxl, data); break; case QXL_CMD_CURSOR: qxl_log_cmd_cursor(qxl, data, ext->group_id); break; } fprintf(stderr, \"\\n\"); }"} {"target": 0, "idx": 10288, "func": "int vnc_display_disable_login(DisplayState *ds) { VncDisplay *vs = ds ? 
(VncDisplay *)ds->opaque : vnc_display; if (!vs) { return -1; } if (vs->password) { g_free(vs->password); } vs->password = NULL; vs->auth = VNC_AUTH_VNC; return 0; }"} {"target": 0, "idx": 10293, "func": "static void read_storage_element0_info(SCLPDevice *sclp, SCCB *sccb) { int i, assigned; int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID; ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb; sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev(); assert(mhd); if ((ram_size >> mhd->increment_size) >= 0x10000) { sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION); return; } /* Return information regarding core memory */ storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0); assigned = ram_size >> mhd->increment_size; storage_info->assigned = cpu_to_be16(assigned); for (i = 0; i < assigned; i++) { storage_info->entries[i] = cpu_to_be32(subincrement_id); subincrement_id += SCLP_INCREMENT_UNIT; } sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION); }"} {"target": 0, "idx": 10317, "func": "void net_rx_pkt_attach_data(struct NetRxPkt *pkt, const void *data, size_t len, bool strip_vlan) { uint16_t tci = 0; uint16_t ploff; assert(pkt); pkt->vlan_stripped = false; if (strip_vlan) { pkt->vlan_stripped = eth_strip_vlan(data, pkt->ehdr_buf, &ploff, &tci); } if (pkt->vlan_stripped) { pkt->vec[0].iov_base = pkt->ehdr_buf; pkt->vec[0].iov_len = ploff - sizeof(struct vlan_header); pkt->vec[1].iov_base = (uint8_t *) data + ploff; pkt->vec[1].iov_len = len - ploff; pkt->vec_len = 2; pkt->tot_len = len - ploff + sizeof(struct eth_header); } else { pkt->vec[0].iov_base = (void *)data; pkt->vec[0].iov_len = len; pkt->vec_len = 1; pkt->tot_len = len; } pkt->tci = tci; }"} {"target": 1, "idx": 10349, "func": "static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, ram_addr_t memory, ram_addr_t region_offset) { int idx, eidx; if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) return -1; idx = SUBPAGE_IDX(start); eidx = SUBPAGE_IDX(end); #if defined(DEBUG_SUBPAGE) printf(\"%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\\n\", __func__, mmio, start, end, idx, eidx, memory); #endif memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); for (; idx <= eidx; idx++) { mmio->sub_io_index[idx] = memory; mmio->region_offset[idx] = region_offset; } return 0; }"} {"target": 0, "idx": 10356, "func": "static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo) { void *fdt = NULL; int size, rc; uint32_t acells, scells; if (binfo->dtb_filename) { char *filename; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, binfo->dtb_filename); if (!filename) { fprintf(stderr, \"Couldn't open dtb file %s\\n\", binfo->dtb_filename); goto fail; } fdt = load_device_tree(filename, &size); if (!fdt) { fprintf(stderr, \"Couldn't open dtb file %s\\n\", filename); g_free(filename); goto fail; } g_free(filename); } else if (binfo->get_dtb) { fdt = binfo->get_dtb(binfo, &size); if (!fdt) { fprintf(stderr, \"Board was unable to create a dtb blob\\n\"); goto fail; } } acells = qemu_fdt_getprop_cell(fdt, \"/\", \"#address-cells\"); scells = qemu_fdt_getprop_cell(fdt, \"/\", \"#size-cells\"); if (acells == 0 || scells == 0) { fprintf(stderr, \"dtb file invalid (#address-cells or #size-cells 0)\\n\"); goto fail; } if (scells < 2 && binfo->ram_size >= (1ULL << 32)) { /* This is user error so deserves a friendlier error message * than the failure of setprop_sized_cells would provide */ fprintf(stderr, \"qemu: dtb file not compatible with \" 
\"RAM size > 4GB\\n\"); goto fail; } rc = qemu_fdt_setprop_sized_cells(fdt, \"/memory\", \"reg\", acells, binfo->loader_start, scells, binfo->ram_size); if (rc < 0) { fprintf(stderr, \"couldn't set /memory/reg\\n\"); goto fail; } if (binfo->kernel_cmdline && *binfo->kernel_cmdline) { rc = qemu_fdt_setprop_string(fdt, \"/chosen\", \"bootargs\", binfo->kernel_cmdline); if (rc < 0) { fprintf(stderr, \"couldn't set /chosen/bootargs\\n\"); goto fail; } } if (binfo->initrd_size) { rc = qemu_fdt_setprop_cell(fdt, \"/chosen\", \"linux,initrd-start\", binfo->initrd_start); if (rc < 0) { fprintf(stderr, \"couldn't set /chosen/linux,initrd-start\\n\"); goto fail; } rc = qemu_fdt_setprop_cell(fdt, \"/chosen\", \"linux,initrd-end\", binfo->initrd_start + binfo->initrd_size); if (rc < 0) { fprintf(stderr, \"couldn't set /chosen/linux,initrd-end\\n\"); goto fail; } } if (binfo->modify_dtb) { binfo->modify_dtb(binfo, fdt); } qemu_fdt_dumpdtb(fdt, size); /* Put the DTB into the memory map as a ROM image: this will ensure * the DTB is copied again upon reset, even if addr points into RAM. */ rom_add_blob_fixed(\"dtb\", fdt, size, addr); g_free(fdt); return 0; fail: g_free(fdt); return -1; }"} {"target": 0, "idx": 10369, "func": "static void onenand_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { OneNANDState *s = (OneNANDState *) opaque; int offset = addr >> s->shift; int sec; switch (offset) { case 0x0000 ... 0x01ff: case 0x8000 ... 0x800f: if (s->cycle) { s->cycle = 0; if (value == 0x0000) { SETADDR(ONEN_BUF_BLOCK, ONEN_BUF_PAGE) onenand_load_main(s, sec, 1 << (PAGE_SHIFT - 9), s->data[0][0]); s->addr[ONEN_BUF_PAGE] += 4; s->addr[ONEN_BUF_PAGE] &= 0xff; } break; } switch (value) { case 0x00f0: /* Reset OneNAND */ onenand_reset(s, 0); break; case 0x00e0: /* Load Data into Buffer */ s->cycle = 1; break; case 0x0090: /* Read Identification Data */ memset(s->boot[0], 0, 3 << s->shift); s->boot[0][0 << s->shift] = s->id.man & 0xff; s->boot[0][1 << s->shift] = s->id.dev & 0xff; s->boot[0][2 << s->shift] = s->wpstatus & 0xff; break; default: fprintf(stderr, \"%s: unknown OneNAND boot command %\"PRIx64\"\\n\", __FUNCTION__, value); } break; case 0xf100 ... 0xf107: /* Start addresses */ s->addr[offset - 0xf100] = value; break; case 0xf200: /* Start buffer */ s->bufaddr = (value >> 8) & 0xf; if (PAGE_SHIFT == 11) s->count = (value & 3) ?: 4; else if (PAGE_SHIFT == 10) s->count = (value & 1) ?: 2; break; case 0xf220: /* Command */ if (s->intstatus & (1 << 15)) break; s->command = value; onenand_command(s); break; case 0xf221: /* System Configuration 1 */ s->config[0] = value; onenand_intr_update(s); qemu_set_irq(s->rdy, (s->config[0] >> 7) & 1); break; case 0xf222: /* System Configuration 2 */ s->config[1] = value; break; case 0xf241: /* Interrupt */ s->intstatus &= value; if ((1 << 15) & ~s->intstatus) s->status &= ~(ONEN_ERR_CMD | ONEN_ERR_ERASE | ONEN_ERR_PROG | ONEN_ERR_LOAD); onenand_intr_update(s); break; case 0xf24c: /* Unlock Start Block Address */ s->unladdr[0] = value & (s->blocks - 1); /* For some reason we have to set the end address to by default * be same as start because the software forgets to write anything * in there. 
*/ s->unladdr[1] = value & (s->blocks - 1); break; case 0xf24d: /* Unlock End Block Address */ s->unladdr[1] = value & (s->blocks - 1); break; default: fprintf(stderr, \"%s: unknown OneNAND register %x\\n\", __FUNCTION__, offset); } }"} {"target": 0, "idx": 10376, "func": "static void qmp_output_type_null(Visitor *v, const char *name, Error **errp) { QmpOutputVisitor *qov = to_qov(v); qmp_output_add_obj(qov, name, qnull()); }"} {"target": 0, "idx": 10383, "func": "static void xhci_child_detach(USBPort *uport, USBDevice *child) { USBBus *bus = usb_bus_from_device(child); XHCIState *xhci = container_of(bus, XHCIState, bus); xhci_detach_slot(xhci, uport); }"} {"target": 1, "idx": 10389, "func": "PPC_OP(addme) { T1 = T0; T0 += xer_ca + (-1); if (T1 != 0) xer_ca = 1; RETURN(); }"} {"target": 1, "idx": 10391, "func": "static bool liveness_pass_2(TCGContext *s) { int nb_globals = s->nb_globals; int nb_temps, i, oi, oi_next; bool changes = false; /* Create a temporary for each indirect global. */ for (i = 0; i < nb_globals; ++i) { TCGTemp *its = &s->temps[i]; if (its->indirect_reg) { TCGTemp *dts = tcg_temp_alloc(s); dts->type = its->type; dts->base_type = its->base_type; its->state_ptr = dts; } else { its->state_ptr = NULL; } /* All globals begin dead. */ its->state = TS_DEAD; } for (nb_temps = s->nb_temps; i < nb_temps; ++i) { TCGTemp *its = &s->temps[i]; its->state_ptr = NULL; its->state = TS_DEAD; } for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) { TCGOp *op = &s->gen_op_buf[oi]; TCGOpcode opc = op->opc; const TCGOpDef *def = &tcg_op_defs[opc]; TCGLifeData arg_life = op->life; int nb_iargs, nb_oargs, call_flags; TCGTemp *arg_ts, *dir_ts; oi_next = op->next; if (opc == INDEX_op_call) { nb_oargs = op->callo; nb_iargs = op->calli; call_flags = op->args[nb_oargs + nb_iargs + 1]; } else { nb_iargs = def->nb_iargs; nb_oargs = def->nb_oargs; /* Set flags similar to how calls require. */ if (def->flags & TCG_OPF_BB_END) { /* Like writing globals: save_globals */ call_flags = 0; } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { /* Like reading globals: sync_globals */ call_flags = TCG_CALL_NO_WRITE_GLOBALS; } else { /* No effect on globals. */ call_flags = (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS); } } /* Make sure that input arguments are available. */ for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { arg_ts = arg_temp(op->args[i]); if (arg_ts) { dir_ts = arg_ts->state_ptr; if (dir_ts && arg_ts->state == TS_DEAD) { TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32 ? INDEX_op_ld_i32 : INDEX_op_ld_i64); TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3); lop->args[0] = temp_arg(dir_ts); lop->args[1] = temp_arg(arg_ts->mem_base); lop->args[2] = arg_ts->mem_offset; /* Loaded, but synced with memory. */ arg_ts->state = TS_MEM; } } } /* Perform input replacement, and mark inputs that became dead. No action is required except keeping temp_state up to date so that we reload when needed. */ for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { arg_ts = arg_temp(op->args[i]); if (arg_ts) { dir_ts = arg_ts->state_ptr; if (dir_ts) { op->args[i] = temp_arg(dir_ts); changes = true; if (IS_DEAD_ARG(i)) { arg_ts->state = TS_DEAD; } } } } /* Liveness analysis should ensure that the following are all correct, for call sites and basic block end points. */ if (call_flags & TCG_CALL_NO_READ_GLOBALS) { /* Nothing to do */ } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) { for (i = 0; i < nb_globals; ++i) { /* Liveness should see that globals are synced back, that is, either TS_DEAD or TS_MEM. 
*/ arg_ts = &s->temps[i]; tcg_debug_assert(arg_ts->state_ptr == 0 || arg_ts->state != 0); } } else { for (i = 0; i < nb_globals; ++i) { /* Liveness should see that globals are saved back, that is, TS_DEAD, waiting to be reloaded. */ arg_ts = &s->temps[i]; tcg_debug_assert(arg_ts->state_ptr == 0 || arg_ts->state == TS_DEAD); } } /* Outputs become available. */ for (i = 0; i < nb_oargs; i++) { arg_ts = arg_temp(op->args[i]); dir_ts = arg_ts->state_ptr; if (!dir_ts) { continue; } op->args[i] = temp_arg(dir_ts); changes = true; /* The output is now live and modified. */ arg_ts->state = 0; /* Sync outputs upon their last write. */ if (NEED_SYNC_ARG(i)) { TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32 ? INDEX_op_st_i32 : INDEX_op_st_i64); TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3); sop->args[0] = temp_arg(dir_ts); sop->args[1] = temp_arg(arg_ts->mem_base); sop->args[2] = arg_ts->mem_offset; arg_ts->state = TS_MEM; } /* Drop outputs that are dead. */ if (IS_DEAD_ARG(i)) { arg_ts->state = TS_DEAD; } } } return changes; }"} {"target": 0, "idx": 10399, "func": "static void update_stream_timings(AVFormatContext *ic) { int64_t start_time, start_time1, start_time_text, end_time, end_time1; int64_t duration, duration1, filesize; int i; AVStream *st; AVProgram *p; start_time = INT64_MAX; start_time_text = INT64_MAX; end_time = INT64_MIN; duration = INT64_MIN; for (i = 0; i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) { start_time1 = av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q); if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) { if (start_time1 < start_time_text) start_time_text = start_time1; } else start_time = FFMIN(start_time, start_time1); end_time1 = AV_NOPTS_VALUE; if (st->duration != AV_NOPTS_VALUE) { end_time1 = start_time1 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q); end_time = FFMAX(end_time, end_time1); } for (p = NULL; (p = av_find_program_from_stream(ic, p, i)); ) { if (p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1) p->start_time = start_time1; if (p->end_time < end_time1) p->end_time = end_time1; } } if (st->duration != AV_NOPTS_VALUE) { duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q); duration = FFMAX(duration, duration1); } } if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE)) start_time = start_time_text; else if (start_time > start_time_text) av_log(ic, AV_LOG_VERBOSE, \"Ignoring outlier non primary stream starttime %f\\n\", start_time_text / (float)AV_TIME_BASE); if (start_time != INT64_MAX) { ic->start_time = start_time; if (end_time != INT64_MIN) { if (ic->nb_programs) { for (i = 0; i < ic->nb_programs; i++) { p = ic->programs[i]; if (p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time) duration = FFMAX(duration, p->end_time - p->start_time); } } else duration = FFMAX(duration, end_time - start_time); } } if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) { ic->duration = duration; } if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) { /* compute the bitrate */ double bitrate = (double) filesize * 8.0 * AV_TIME_BASE / (double) ic->duration; if (bitrate >= 0 && (!AV_HAVE_INCOMPATIBLE_LIBAV_ABI || bitrate <= INT_MAX)) ic->bit_rate = bitrate; } }"} {"target": 1, "idx": 10409, "func": "GuestMemoryBlockInfo *qmp_guest_get_memory_block_info(Error **errp) { Error *local_err = NULL; 
char *dirpath; int dirfd; char *buf; GuestMemoryBlockInfo *info; dirpath = g_strdup_printf(\"/sys/devices/system/memory/\"); dirfd = open(dirpath, O_RDONLY | O_DIRECTORY); if (dirfd == -1) { error_setg_errno(errp, errno, \"open(\\\"%s\\\")\", dirpath); g_free(dirpath); return NULL; } g_free(dirpath); buf = g_malloc0(20); ga_read_sysfs_file(dirfd, \"block_size_bytes\", buf, 20, &local_err); if (local_err) { g_free(buf); error_propagate(errp, local_err); return NULL; } info = g_new0(GuestMemoryBlockInfo, 1); info->size = strtol(buf, NULL, 16); /* the unit is bytes */ g_free(buf); return info; }"} {"target": 0, "idx": 10412, "func": "AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret) { AVProbeData lpd = *pd; AVInputFormat *fmt1 = NULL, *fmt; int score, nodat = 0, score_max = 0; const static uint8_t zerobuffer[AVPROBE_PADDING_SIZE]; if (!lpd.buf) lpd.buf = zerobuffer; if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) { int id3len = ff_id3v2_tag_len(lpd.buf); if (lpd.buf_size > id3len + 16) { lpd.buf += id3len; lpd.buf_size -= id3len; } else nodat = 1; } fmt = NULL; while ((fmt1 = av_iformat_next(fmt1))) { if (!is_opened == !(fmt1->flags & AVFMT_NOFILE)) continue; score = 0; if (fmt1->read_probe) { score = fmt1->read_probe(&lpd); if (fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions)) score = FFMAX(score, nodat ? AVPROBE_SCORE_EXTENSION / 2 - 1 : 1); } else if (fmt1->extensions) { if (av_match_ext(lpd.filename, fmt1->extensions)) score = AVPROBE_SCORE_EXTENSION; } if (score > score_max) { score_max = score; fmt = fmt1; } else if (score == score_max) fmt = NULL; } if (nodat) score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max); *score_ret = score_max; return fmt; }"} {"target": 1, "idx": 10422, "func": "static int virtio_balloon_init_pci(PCIDevice *pci_dev) { VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev); VirtIODevice *vdev; vdev = virtio_balloon_init(&pci_dev->qdev); virtio_init_pci(proxy, vdev); return 0;"} {"target": 1, "idx": 10439, "func": "static target_ulong h_client_architecture_support(PowerPCCPU *cpu_, sPAPRMachineState *spapr, target_ulong opcode, target_ulong *args) { target_ulong list = ppc64_phys_to_real(args[0]); target_ulong ov_table; PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu_); CPUState *cs; bool cpu_match = false, cpu_update = true; unsigned old_cpu_version = cpu_->cpu_version; unsigned compat_lvl = 0, cpu_version = 0; unsigned max_lvl = get_compat_level(cpu_->max_compat); int counter; sPAPROptionVector *ov5_guest; /* Parse PVR list */ for (counter = 0; counter < 512; ++counter) { uint32_t pvr, pvr_mask; pvr_mask = ldl_be_phys(&address_space_memory, list); list += 4; pvr = ldl_be_phys(&address_space_memory, list); list += 4; trace_spapr_cas_pvr_try(pvr); if (!max_lvl && ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) { cpu_match = true; cpu_version = 0; } else if (pvr == cpu_->cpu_version) { cpu_match = true; cpu_version = cpu_->cpu_version; } else if (!cpu_match) { cas_handle_compat_cpu(pcc, pvr, max_lvl, &compat_lvl, &cpu_version); } /* Terminator record */ if (~pvr_mask & pvr) { break; } } /* Parsing finished */ trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match, cpu_version, pcc->pcr_mask); /* Update CPUs */ if (old_cpu_version != cpu_version) { CPU_FOREACH(cs) { SetCompatState s = { .cpu_version = cpu_version, .err = NULL, }; run_on_cpu(cs, do_set_compat, &s); if (s.err) { error_report_err(s.err); return H_HARDWARE; } } } if (!cpu_version) { cpu_update = 
false; } /* For the future use: here @ov_table points to the first option vector */ ov_table = list; ov5_guest = spapr_ovec_parse_vector(ov_table, 5); /* NOTE: there are actually a number of ov5 bits where input from the * guest is always zero, and the platform/QEMU enables them independently * of guest input. To model these properly we'd want some sort of mask, * but since they only currently apply to memory migration as defined * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need * to worry about this. */ spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest); spapr_ovec_cleanup(ov5_guest); if (spapr_h_cas_compose_response(spapr, args[1], args[2], cpu_update)) { qemu_system_reset_request(); } return H_SUCCESS; }"} {"target": 1, "idx": 10462, "func": "int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) { NBDClientSession *client = nbd_get_client_session(bs); NBDRequest request = { .type = NBD_CMD_TRIM, .from = offset, .len = bytes, }; if (!(client->info.flags & NBD_FLAG_SEND_TRIM)) { return 0; } return nbd_co_request(bs, &request, NULL); }"} {"target": 0, "idx": 10470, "func": "static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types) { MpegEncContext *s = &r->s; GetBitContext *gb = &s->gb; int cbp, cbp2; int i, blknum, blkoff; DCTELEM block16[64]; int luma_dc_quant; int dist; int mb_pos = s->mb_x + s->mb_y * s->mb_stride; // Calculate which neighbours are available. Maybe it's worth optimizing too. memset(r->avail_cache, 0, sizeof(r->avail_cache)); fill_rectangle(r->avail_cache + 5, 2, 2, 4, 1, 4); dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width; if(s->mb_x && dist) r->avail_cache[4] = r->avail_cache[8] = s->current_picture_ptr->mb_type[mb_pos - 1]; if(dist >= s->mb_width) r->avail_cache[1] = r->avail_cache[2] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride]; if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1) r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1]; if(s->mb_x && dist > s->mb_width) r->avail_cache[0] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1]; s->qscale = r->si.quant; cbp = cbp2 = rv34_decode_mb_header(r, intra_types); r->cbp_luma [s->mb_x + s->mb_y * s->mb_stride] = cbp; r->cbp_chroma[s->mb_x + s->mb_y * s->mb_stride] = cbp >> 16; if(s->pict_type == FF_I_TYPE) r->deblock_coefs[mb_pos] = 0; else r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r); s->current_picture_ptr->qscale_table[s->mb_x + s->mb_y * s->mb_stride] = s->qscale; if(cbp == -1) return -1; luma_dc_quant = r->block_type == RV34_MB_P_MIX16x16 ? 
r->luma_dc_quant_p[s->qscale] : r->luma_dc_quant_i[s->qscale]; if(r->is16){ memset(block16, 0, sizeof(block16)); rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0); rv34_dequant4x4_16x16(block16, rv34_qscale_tab[luma_dc_quant],rv34_qscale_tab[s->qscale]); rv34_inv_transform_noround(block16); } for(i = 0; i < 16; i++, cbp >>= 1){ if(!r->is16 && !(cbp & 1)) continue; blknum = ((i & 2) >> 1) + ((i & 8) >> 2); blkoff = ((i & 1) << 2) + ((i & 4) << 3); if(cbp & 1) rv34_decode_block(s->block[blknum] + blkoff, gb, r->cur_vlcs, r->luma_vlc, 0); rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[s->qscale],rv34_qscale_tab[s->qscale]); if(r->is16) //FIXME: optimize s->block[blknum][blkoff] = block16[(i & 3) | ((i & 0xC) << 1)]; rv34_inv_transform(s->block[blknum] + blkoff); } if(r->block_type == RV34_MB_P_MIX16x16) r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1); for(; i < 24; i++, cbp >>= 1){ if(!(cbp & 1)) continue; blknum = ((i & 4) >> 2) + 4; blkoff = ((i & 1) << 2) + ((i & 2) << 4); rv34_decode_block(s->block[blknum] + blkoff, gb, r->cur_vlcs, r->chroma_vlc, 1); rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]],rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]]); rv34_inv_transform(s->block[blknum] + blkoff); } if(IS_INTRA(s->current_picture_ptr->mb_type[s->mb_x + s->mb_y*s->mb_stride])) rv34_output_macroblock(r, intra_types, cbp2, r->is16); else rv34_apply_differences(r, cbp2); return 0; }"} {"target": 0, "idx": 10471, "func": "static int index_search_timestamp(AVIndexEntry *entries, int nb_entries, int wanted_timestamp) { int a, b, m; int64_t timestamp; if (nb_entries <= 0) return -1; a = 0; b = nb_entries - 1; while (a <= b) { m = (a + b) >> 1; timestamp = entries[m].timestamp; if (timestamp == wanted_timestamp) goto found; else if (timestamp > wanted_timestamp) { b = m - 1; } else { a = m + 1; } } m = a; if (m > 0) m--; found: return m; }"} {"target": 0, "idx": 10494, "func": "static void vnc_disconnect_finish(VncState *vs) { if (vs->input.buffer) qemu_free(vs->input.buffer); if (vs->output.buffer) qemu_free(vs->output.buffer); #ifdef CONFIG_VNC_TLS vnc_tls_client_cleanup(vs); #endif /* CONFIG_VNC_TLS */ #ifdef CONFIG_VNC_SASL vnc_sasl_client_cleanup(vs); #endif /* CONFIG_VNC_SASL */ audio_del(vs); VncState *p, *parent = NULL; for (p = vs->vd->clients; p != NULL; p = p->next) { if (p == vs) { if (parent) parent->next = p->next; else vs->vd->clients = p->next; break; } parent = p; } if (!vs->vd->clients) dcl->idle = 1; vnc_remove_timer(vs->vd); qemu_free(vs); }"} {"target": 1, "idx": 10501, "func": "static void set_pixel_format(VncState *vs, int bits_per_pixel, int depth, int big_endian_flag, int true_color_flag, int red_max, int green_max, int blue_max, int red_shift, int green_shift, int blue_shift) { if (!true_color_flag) { vs->client_pf.rmax = red_max; vs->client_pf.rbits = hweight_long(red_max); vs->client_pf.rshift = red_shift; vs->client_pf.rmask = red_max << red_shift; vs->client_pf.gmax = green_max; vs->client_pf.gbits = hweight_long(green_max); vs->client_pf.gshift = green_shift; vs->client_pf.gmask = green_max << green_shift; vs->client_pf.bmax = blue_max; vs->client_pf.bbits = hweight_long(blue_max); vs->client_pf.bshift = blue_shift; vs->client_pf.bmask = blue_max << blue_shift; vs->client_pf.bits_per_pixel = bits_per_pixel; vs->client_pf.bytes_per_pixel = bits_per_pixel / 8; vs->client_pf.depth = bits_per_pixel == 32 ? 
24 : bits_per_pixel; vs->client_be = big_endian_flag; set_pixel_conversion(vs); graphic_hw_invalidate(NULL); graphic_hw_update(NULL);"} {"target": 1, "idx": 10522, "func": "static uint8_t qpci_pc_config_readb(QPCIBus *bus, int devfn, uint8_t offset) { outl(0xcf8, (1 << 31) | (devfn << 8) | offset); return inb(0xcfc); }"} {"target": 0, "idx": 10575, "func": "int kvm_has_xcrs(void) { return kvm_state->xcrs; }"} {"target": 0, "idx": 10579, "func": "static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src) { gen_mov_reg_V(cpu_tmp0, src); gen_mov_reg_N(dst, src); tcg_gen_xor_tl(dst, dst, cpu_tmp0); }"} {"target": 1, "idx": 10590, "func": "static void tilegx_cpu_initfn(Object *obj) { CPUState *cs = CPU(obj); TileGXCPU *cpu = TILEGX_CPU(obj); CPUTLGState *env = &cpu->env; static bool tcg_initialized; cs->env_ptr = env; cpu_exec_init(cs, &error_abort); if (tcg_enabled() && !tcg_initialized) { tcg_initialized = true; tilegx_tcg_init(); } }"} {"target": 1, "idx": 10592, "func": "getouraddr(void) { char buff[256]; struct hostent *he = NULL; if (gethostname(buff,256) == 0) he = gethostbyname(buff); if (he) our_addr = *(struct in_addr *)he->h_addr; if (our_addr.s_addr == 0) our_addr.s_addr = loopback_addr.s_addr; }"} {"target": 0, "idx": 10596, "func": "void av_aes_crypt(AVAES *a, uint8_t *dst_, const uint8_t *src_, int count, uint8_t *iv_, int decrypt) { av_aes_block *dst = (av_aes_block *) dst_; const av_aes_block *src = (const av_aes_block *) src_; av_aes_block *iv = (av_aes_block *) iv_; while (count--) { addkey(&a->state[1], src, &a->round_key[a->rounds]); if (decrypt) { crypt(a, 0, inv_sbox, dec_multbl); if (iv) { addkey(&a->state[0], &a->state[0], iv); memcpy(iv, src, 16); } addkey(dst, &a->state[0], &a->round_key[0]); } else { if (iv) addkey(&a->state[1], &a->state[1], iv); crypt(a, 2, sbox, enc_multbl); addkey(dst, &a->state[0], &a->round_key[0]); if (iv) memcpy(iv, dst, 16); } src++; dst++; } }"} {"target": 1, "idx": 10599, "func": "static void spapr_cpu_core_realize_child(Object *child, Error **errp) { Error *local_err = NULL; sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); CPUState *cs = CPU(child); PowerPCCPU *cpu = POWERPC_CPU(cs); Object *obj; obj = object_new(spapr->icp_type); object_property_add_child(OBJECT(cpu), \"icp\", obj, NULL); object_property_add_const_link(obj, \"xics\", OBJECT(spapr), &error_abort); object_property_set_bool(obj, true, \"realized\", &local_err); if (local_err) { goto error; } object_property_set_bool(child, true, \"realized\", &local_err); if (local_err) { goto error; } spapr_cpu_init(spapr, cpu, &local_err); if (local_err) { goto error; } xics_cpu_setup(XICS_FABRIC(spapr), cpu, ICP(obj)); return; error: object_unparent(obj); error_propagate(errp, local_err); }"} {"target": 1, "idx": 10606, "func": "static int read_diff_float_data(ALSDecContext *ctx, unsigned int ra_frame) { AVCodecContext *avctx = ctx->avctx; GetBitContext *gb = &ctx->gb; SoftFloat_IEEE754 *acf = ctx->acf; int *shift_value = ctx->shift_value; int *last_shift_value = ctx->last_shift_value; int *last_acf_mantissa = ctx->last_acf_mantissa; int **raw_mantissa = ctx->raw_mantissa; int *nbits = ctx->nbits; unsigned char *larray = ctx->larray; int frame_length = ctx->cur_frame_length; SoftFloat_IEEE754 scale = av_int2sf_ieee754(0x1u, 23); unsigned int partA_flag; unsigned int highest_byte; unsigned int shift_amp; uint32_t tmp_32; int use_acf; int nchars; int i; int c; long k; long nbits_aligned; unsigned long acc; unsigned long j; uint32_t sign; uint32_t e; uint32_t mantissa; 
skip_bits_long(gb, 32); //num_bytes_diff_float use_acf = get_bits1(gb); if (ra_frame) { memset(last_acf_mantissa, 0, avctx->channels * sizeof(*last_acf_mantissa)); memset(last_shift_value, 0, avctx->channels * sizeof(*last_shift_value) ); ff_mlz_flush_dict(ctx->mlz); } for (c = 0; c < avctx->channels; ++c) { if (use_acf) { //acf_flag if (get_bits1(gb)) { tmp_32 = get_bits(gb, 23); last_acf_mantissa[c] = tmp_32; } else { tmp_32 = last_acf_mantissa[c]; } acf[c] = av_bits2sf_ieee754(tmp_32); } else { acf[c] = FLOAT_1; } highest_byte = get_bits(gb, 2); partA_flag = get_bits1(gb); shift_amp = get_bits1(gb); if (shift_amp) { shift_value[c] = get_bits(gb, 8); last_shift_value[c] = shift_value[c]; } else { shift_value[c] = last_shift_value[c]; } if (partA_flag) { if (!get_bits1(gb)) { //uncompressed for (i = 0; i < frame_length; ++i) { if (ctx->raw_samples[c][i] == 0) { ctx->raw_mantissa[c][i] = get_bits_long(gb, 32); } } } else { //compressed nchars = 0; for (i = 0; i < frame_length; ++i) { if (ctx->raw_samples[c][i] == 0) { nchars += 4; } } tmp_32 = ff_mlz_decompression(ctx->mlz, gb, nchars, larray); if(tmp_32 != nchars) { av_log(ctx->avctx, AV_LOG_ERROR, \"Error in MLZ decompression (%d, %d).\\n\", tmp_32, nchars); return AVERROR_INVALIDDATA; } for (i = 0; i < frame_length; ++i) { ctx->raw_mantissa[c][i] = AV_RB32(larray); } } } //decode part B if (highest_byte) { for (i = 0; i < frame_length; ++i) { if (ctx->raw_samples[c][i] != 0) { //The following logic is taken from Tabel 14.45 and 14.46 from the ISO spec if (av_cmp_sf_ieee754(acf[c], FLOAT_1)) { nbits[i] = 23 - av_log2(abs(ctx->raw_samples[c][i])); } else { nbits[i] = 23; } nbits[i] = FFMIN(nbits[i], highest_byte*8); } } if (!get_bits1(gb)) { //uncompressed for (i = 0; i < frame_length; ++i) { if (ctx->raw_samples[c][i] != 0) { raw_mantissa[c][i] = get_bits(gb, nbits[i]); } } } else { //compressed nchars = 0; for (i = 0; i < frame_length; ++i) { if (ctx->raw_samples[c][i]) { nchars += (int) nbits[i] / 8; if (nbits[i] & 7) { ++nchars; } } } tmp_32 = ff_mlz_decompression(ctx->mlz, gb, nchars, larray); if(tmp_32 != nchars) { av_log(ctx->avctx, AV_LOG_ERROR, \"Error in MLZ decompression (%d, %d).\\n\", tmp_32, nchars); return AVERROR_INVALIDDATA; } j = 0; for (i = 0; i < frame_length; ++i) { if (ctx->raw_samples[c][i]) { if (nbits[i] & 7) { nbits_aligned = 8 * ((unsigned int)(nbits[i] / 8) + 1); } else { nbits_aligned = nbits[i]; } acc = 0; for (k = 0; k < nbits_aligned/8; ++k) { acc = (acc << 8) + larray[j++]; } acc >>= (nbits_aligned - nbits[i]); raw_mantissa[c][i] = acc; } } } } for (i = 0; i < frame_length; ++i) { SoftFloat_IEEE754 pcm_sf = av_int2sf_ieee754(ctx->raw_samples[c][i], 0); pcm_sf = av_div_sf_ieee754(pcm_sf, scale); if (ctx->raw_samples[c][i] != 0) { if (!av_cmp_sf_ieee754(acf[c], FLOAT_1)) { pcm_sf = multiply(acf[c], pcm_sf); } sign = pcm_sf.sign; e = pcm_sf.exp; mantissa = (pcm_sf.mant | 0x800000) + raw_mantissa[c][i]; while(mantissa >= 0x1000000) { e++; mantissa >>= 1; } if (mantissa) e += (shift_value[c] - 127); mantissa &= 0x007fffffUL; tmp_32 = (sign << 31) | ((e + EXP_BIAS) << 23) | (mantissa); ctx->raw_samples[c][i] = tmp_32; } else { ctx->raw_samples[c][i] = raw_mantissa[c][i] & 0x007fffffUL; } } align_get_bits(gb); } return 0; }"} {"target": 1, "idx": 10608, "func": "static int X264_frame(AVCodecContext *ctx, uint8_t *buf, int bufsize, void *data) { X264Context *x4 = ctx->priv_data; AVFrame *frame = data; x264_nal_t *nal; int nnal, i; x264_picture_t pic_out; x264_picture_init( &x4->pic ); x4->pic.img.i_csp = 
X264_CSP_I420; x4->pic.img.i_plane = 3; if (frame) { for (i = 0; i < 3; i++) { x4->pic.img.plane[i] = frame->data[i]; x4->pic.img.i_stride[i] = frame->linesize[i]; } x4->pic.i_pts = frame->pts; x4->pic.i_type = frame->pict_type == AV_PICTURE_TYPE_I ? X264_TYPE_KEYFRAME : frame->pict_type == AV_PICTURE_TYPE_P ? X264_TYPE_P : frame->pict_type == AV_PICTURE_TYPE_B ? X264_TYPE_B : X264_TYPE_AUTO; if (x4->params.b_tff != frame->top_field_first) { x4->params.b_tff = frame->top_field_first; x264_encoder_reconfig(x4->enc, &x4->params); } } do { if (x264_encoder_encode(x4->enc, &nal, &nnal, frame? &x4->pic: NULL, &pic_out) < 0) return -1; bufsize = encode_nals(ctx, buf, bufsize, nal, nnal, 0); if (bufsize < 0) return -1; } while (!bufsize && !frame && x264_encoder_delayed_frames(x4->enc)); /* FIXME: libx264 now provides DTS, but AVFrame doesn't have a field for it. */ x4->out_pic.pts = pic_out.i_pts; switch (pic_out.i_type) { case X264_TYPE_IDR: case X264_TYPE_I: x4->out_pic.pict_type = AV_PICTURE_TYPE_I; break; case X264_TYPE_P: x4->out_pic.pict_type = AV_PICTURE_TYPE_P; break; case X264_TYPE_B: case X264_TYPE_BREF: x4->out_pic.pict_type = AV_PICTURE_TYPE_B; break; } x4->out_pic.key_frame = pic_out.b_keyframe; x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA; return bufsize; }"} {"target": 1, "idx": 10611, "func": "static int get_real_id(const char *devpath, const char *idname, uint16_t *val) { FILE *f; char name[128]; long id; snprintf(name, sizeof(name), \"%s%s\", devpath, idname); f = fopen(name, \"r\"); if (f == NULL) { error_report(\"%s: %s: %m\", __func__, name); return -1; } if (fscanf(f, \"%li\\n\", &id) == 1) { *val = id; } else { return -1; } return 0; }"} {"target": 0, "idx": 10622, "func": "static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc, AVStream *vstream, int64_t max_pos) { unsigned int timeslen = 0, fileposlen = 0, i; char str_val[256]; int64_t *times = NULL; int64_t *filepositions = NULL; int ret = AVERROR(ENOSYS); int64_t initial_pos = avio_tell(ioc); AVDictionaryEntry *creator = av_dict_get(s->metadata, \"metadatacreator\", NULL, 0); if (creator && !strcmp(creator->value, \"MEGA\")) { /* Files with this metadatacreator tag seem to have filepositions * pointing at the 4 trailer bytes of the previous packet, * which isn't the norm (nor what we expect here, nor what * jwplayer + lighttpd expect, nor what flvtool2 produces). * Just ignore the index in this case, instead of risking trying * to adjust it to something that might or might not work. 
*/ return 0; } if(vstream->nb_index_entries>0){ av_log(s, AV_LOG_WARNING, \"Skiping duplicate index\\n\"); return 0; } while (avio_tell(ioc) < max_pos - 2 && amf_get_string(ioc, str_val, sizeof(str_val)) > 0) { int64_t** current_array; unsigned int arraylen; // Expect array object in context if (avio_r8(ioc) != AMF_DATA_TYPE_ARRAY) break; arraylen = avio_rb32(ioc); if(arraylen>>28) break; if (!strcmp(KEYFRAMES_TIMESTAMP_TAG , str_val) && !times){ current_array= × timeslen= arraylen; }else if (!strcmp(KEYFRAMES_BYTEOFFSET_TAG, str_val) && !filepositions){ current_array= &filepositions; fileposlen= arraylen; }else // unexpected metatag inside keyframes, will not use such metadata for indexing break; if (!(*current_array = av_mallocz(sizeof(**current_array) * arraylen))) { ret = AVERROR(ENOMEM); goto finish; } for (i = 0; i < arraylen && avio_tell(ioc) < max_pos - 1; i++) { if (avio_r8(ioc) != AMF_DATA_TYPE_NUMBER) goto finish; current_array[0][i] = av_int2dbl(avio_rb64(ioc)); } if (times && filepositions) { // All done, exiting at a position allowing amf_parse_object // to finish parsing the object ret = 0; break; } } if (timeslen == fileposlen) { for(i = 0; i < timeslen; i++) av_add_index_entry(vstream, filepositions[i], times[i]*1000, 0, 0, AVINDEX_KEYFRAME); } else av_log(s, AV_LOG_WARNING, \"Invalid keyframes object, skipping.\\n\"); finish: av_freep(×); av_freep(&filepositions); avio_seek(ioc, initial_pos, SEEK_SET); return ret; }"} {"target": 0, "idx": 10659, "func": "void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){ int i; InternalBuffer *buf, *last; AVCodecInternal *avci = s->internal; assert(s->codec_type == AVMEDIA_TYPE_VIDEO); assert(pic->type==FF_BUFFER_TYPE_INTERNAL); assert(avci->buffer_count); if (avci->buffer) { buf = NULL; /* avoids warning */ for (i = 0; i < avci->buffer_count; i++) { //just 3-5 checks so is not worth to optimize buf = &avci->buffer[i]; if (buf->data[0] == pic->data[0]) break; } assert(i < avci->buffer_count); avci->buffer_count--; last = &avci->buffer[avci->buffer_count]; FFSWAP(InternalBuffer, *buf, *last); } for (i = 0; i < AV_NUM_DATA_POINTERS; i++) { pic->data[i]=NULL; // pic->base[i]=NULL; } //printf(\"R%X\\n\", pic->opaque); if(s->debug&FF_DEBUG_BUFFERS) av_log(s, AV_LOG_DEBUG, \"default_release_buffer called on pic %p, %d \" \"buffers used\\n\", pic, avci->buffer_count); }"} {"target": 1, "idx": 10662, "func": "static void iv_Decode_Chunk(Indeo3DecodeContext *s, uint8_t *cur, uint8_t *ref, int width, int height, const uint8_t *buf1, int cb_offset, const uint8_t *hdr, const uint8_t *buf2, int min_width_160) { uint8_t bit_buf; unsigned int bit_pos, lv, lv1, lv2; int *width_tbl, width_tbl_arr[10]; const signed char *ref_vectors; uint8_t *cur_frm_pos, *ref_frm_pos, *cp, *cp2; uint32_t *cur_lp, *ref_lp; const uint32_t *correction_lp[2], *correctionloworder_lp[2], *correctionhighorder_lp[2]; uint8_t *correction_type_sp[2]; struct ustr strip_tbl[20], *strip; int i, j, k, lp1, lp2, flag1, cmd, blks_width, blks_height, region_160_width, rle_v1, rle_v2, rle_v3; unsigned short res; bit_buf = 0; ref_vectors = NULL; width_tbl = width_tbl_arr + 1; i = (width < 0 ? 
width + 3 : width)/4; for(j = -1; j < 8; j++) width_tbl[j] = i * j; strip = strip_tbl; for(region_160_width = 0; region_160_width < (width - min_width_160); region_160_width += min_width_160); strip->ypos = strip->xpos = 0; for(strip->width = min_width_160; width > strip->width; strip->width *= 2); strip->height = height; strip->split_direction = 0; strip->split_flag = 0; strip->usl7 = 0; bit_pos = 0; rle_v1 = rle_v2 = rle_v3 = 0; while(strip >= strip_tbl) { if(bit_pos <= 0) { bit_pos = 8; bit_buf = *buf1++; } bit_pos -= 2; cmd = (bit_buf >> bit_pos) & 0x03; if(cmd == 0) { strip++; if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) { av_log(s->avctx, AV_LOG_WARNING, \"out of range strip\\n\"); break; } memcpy(strip, strip-1, sizeof(*strip)); strip->split_flag = 1; strip->split_direction = 0; strip->height = (strip->height > 8 ? ((strip->height+8)>>4)<<3 : 4); continue; } else if(cmd == 1) { strip++; if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) { av_log(s->avctx, AV_LOG_WARNING, \"out of range strip\\n\"); break; } memcpy(strip, strip-1, sizeof(*strip)); strip->split_flag = 1; strip->split_direction = 1; strip->width = (strip->width > 8 ? ((strip->width+8)>>4)<<3 : 4); continue; } else if(cmd == 2) { if(strip->usl7 == 0) { strip->usl7 = 1; ref_vectors = NULL; continue; } } else if(cmd == 3) { if(strip->usl7 == 0) { strip->usl7 = 1; ref_vectors = (const signed char*)buf2 + (*buf1 * 2); buf1++; continue; } } cur_frm_pos = cur + width * strip->ypos + strip->xpos; if((blks_width = strip->width) < 0) blks_width += 3; blks_width >>= 2; blks_height = strip->height; if(ref_vectors != NULL) { ref_frm_pos = ref + (ref_vectors[0] + strip->ypos) * width + ref_vectors[1] + strip->xpos; } else ref_frm_pos = cur_frm_pos - width_tbl[4]; if(cmd == 2) { if(bit_pos <= 0) { bit_pos = 8; bit_buf = *buf1++; } bit_pos -= 2; cmd = (bit_buf >> bit_pos) & 0x03; if(cmd == 0 || ref_vectors != NULL) { for(lp1 = 0; lp1 < blks_width; lp1++) { for(i = 0, j = 0; i < blks_height; i++, j += width_tbl[1]) ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j]; cur_frm_pos += 4; ref_frm_pos += 4; } } else if(cmd != 1) return; } else { k = *buf1 >> 4; j = *buf1 & 0x0f; buf1++; lv = j + cb_offset; if((lv - 8) <= 7 && (k == 0 || k == 3 || k == 10)) { cp2 = s->ModPred + ((lv - 8) << 7); cp = ref_frm_pos; for(i = 0; i < blks_width << 2; i++) { int v = *cp >> 1; *(cp++) = cp2[v]; } } if(k == 1 || k == 4) { lv = (hdr[j] & 0xf) + cb_offset; correction_type_sp[0] = s->corrector_type + (lv << 8); correction_lp[0] = correction + (lv << 8); lv = (hdr[j] >> 4) + cb_offset; correction_lp[1] = correction + (lv << 8); correction_type_sp[1] = s->corrector_type + (lv << 8); } else { correctionloworder_lp[0] = correctionloworder_lp[1] = correctionloworder + (lv << 8); correctionhighorder_lp[0] = correctionhighorder_lp[1] = correctionhighorder + (lv << 8); correction_type_sp[0] = correction_type_sp[1] = s->corrector_type + (lv << 8); correction_lp[0] = correction_lp[1] = correction + (lv << 8); } switch(k) { case 1: case 0: /********** CASE 0 **********/ for( ; blks_height > 0; blks_height -= 4) { for(lp1 = 0; lp1 < blks_width; lp1++) { for(lp2 = 0; lp2 < 4; ) { k = *buf1++; cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2]; ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2]; if ((uint8_t *)cur_lp >= cur_end-3) break; switch(correction_type_sp[0][k]) { case 0: *cur_lp = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1); lp2++; break; case 1: res = ((av_le2ne16(((unsigned short *)(ref_lp))[0]) >> 1) + 
correction_lp[lp2 & 0x01][*buf1]) << 1; ((unsigned short *)cur_lp)[0] = av_le2ne16(res); res = ((av_le2ne16(((unsigned short *)(ref_lp))[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1; ((unsigned short *)cur_lp)[1] = av_le2ne16(res); buf1++; lp2++; break; case 2: if(lp2 == 0) { for(i = 0, j = 0; i < 2; i++, j += width_tbl[1]) cur_lp[j] = ref_lp[j]; lp2 += 2; } break; case 3: if(lp2 < 2) { for(i = 0, j = 0; i < (3 - lp2); i++, j += width_tbl[1]) cur_lp[j] = ref_lp[j]; lp2 = 3; } break; case 8: if(lp2 == 0) { RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) if(rle_v1 == 1 || ref_vectors != NULL) { for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) cur_lp[j] = ref_lp[j]; } RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) break; } else { rle_v1 = 1; rle_v2 = *buf1 - 1; } case 5: LP2_CHECK(buf1,rle_v3,lp2) case 4: for(i = 0, j = 0; i < (4 - lp2); i++, j += width_tbl[1]) cur_lp[j] = ref_lp[j]; lp2 = 4; break; case 7: if(rle_v3 != 0) rle_v3 = 0; else { buf1--; rle_v3 = 1; } case 6: if(ref_vectors != NULL) { for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) cur_lp[j] = ref_lp[j]; } lp2 = 4; break; case 9: lv1 = *buf1++; lv = (lv1 & 0x7F) << 1; lv += (lv << 8); lv += (lv << 16); for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) cur_lp[j] = lv; LV1_CHECK(buf1,rle_v3,lv1,lp2) break; default: return; } } cur_frm_pos += 4; ref_frm_pos += 4; } cur_frm_pos += ((width - blks_width) * 4); ref_frm_pos += ((width - blks_width) * 4); } break; case 4: case 3: /********** CASE 3 **********/ if(ref_vectors != NULL) return; flag1 = 1; for( ; blks_height > 0; blks_height -= 8) { for(lp1 = 0; lp1 < blks_width; lp1++) { for(lp2 = 0; lp2 < 4; ) { k = *buf1++; cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2]; ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1]; switch(correction_type_sp[lp2 & 0x01][k]) { case 0: cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1); if(lp2 > 0 || flag1 == 0 || strip->ypos != 0) cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE; else cur_lp[0] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1); lp2++; break; case 1: res = ((av_le2ne16(((unsigned short *)ref_lp)[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1; ((unsigned short *)cur_lp)[width_tbl[2]] = av_le2ne16(res); res = ((av_le2ne16(((unsigned short *)ref_lp)[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1; ((unsigned short *)cur_lp)[width_tbl[2]+1] = av_le2ne16(res); if(lp2 > 0 || flag1 == 0 || strip->ypos != 0) cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE; else cur_lp[0] = cur_lp[width_tbl[1]]; buf1++; lp2++; break; case 2: if(lp2 == 0) { for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) cur_lp[j] = *ref_lp; lp2 += 2; } break; case 3: if(lp2 < 2) { for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) cur_lp[j] = *ref_lp; lp2 = 3; } break; case 6: lp2 = 4; break; case 7: if(rle_v3 != 0) rle_v3 = 0; else { buf1--; rle_v3 = 1; } lp2 = 4; break; case 8: if(lp2 == 0) { RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) if(rle_v1 == 1) { for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) cur_lp[j] = ref_lp[j]; } RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) break; } else { rle_v2 = (*buf1) - 1; rle_v1 = 1; } case 5: LP2_CHECK(buf1,rle_v3,lp2) case 4: for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) cur_lp[j] = *ref_lp; lp2 = 4; break; case 9: av_log(s->avctx, AV_LOG_ERROR, \"UNTESTED.\\n\"); lv1 = *buf1++; lv = (lv1 & 0x7F) << 1; lv += (lv << 8); lv += (lv << 16); for(i = 0, j = 0; i < 4; i++, 
j += width_tbl[1]) cur_lp[j] = lv; LV1_CHECK(buf1,rle_v3,lv1,lp2) break; default: return; } } cur_frm_pos += 4; } cur_frm_pos += (((width * 2) - blks_width) * 4); flag1 = 0; } break; case 10: /********** CASE 10 **********/ if(ref_vectors == NULL) { flag1 = 1; for( ; blks_height > 0; blks_height -= 8) { for(lp1 = 0; lp1 < blks_width; lp1 += 2) { for(lp2 = 0; lp2 < 4; ) { k = *buf1++; cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2]; ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1]; lv1 = ref_lp[0]; lv2 = ref_lp[1]; if(lp2 == 0 && flag1 != 0) { #if HAVE_BIGENDIAN lv1 = lv1 & 0xFF00FF00; lv1 = (lv1 >> 8) | lv1; lv2 = lv2 & 0xFF00FF00; lv2 = (lv2 >> 8) | lv2; #else lv1 = lv1 & 0x00FF00FF; lv1 = (lv1 << 8) | lv1; lv2 = lv2 & 0x00FF00FF; lv2 = (lv2 << 8) | lv2; #endif } switch(correction_type_sp[lp2 & 0x01][k]) { case 0: cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1); cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(lv2) >> 1) + correctionhighorder_lp[lp2 & 0x01][k]) << 1); if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) { cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE; cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE; } else { cur_lp[0] = cur_lp[width_tbl[1]]; cur_lp[1] = cur_lp[width_tbl[1]+1]; } lp2++; break; case 1: cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][*buf1]) << 1); cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(lv2) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1); if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) { cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE; cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE; } else { cur_lp[0] = cur_lp[width_tbl[1]]; cur_lp[1] = cur_lp[width_tbl[1]+1]; } buf1++; lp2++; break; case 2: if(lp2 == 0) { if(flag1 != 0) { for(i = 0, j = width_tbl[1]; i < 3; i++, j += width_tbl[1]) { cur_lp[j] = lv1; cur_lp[j+1] = lv2; } cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE; cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE; } else { for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) { cur_lp[j] = lv1; cur_lp[j+1] = lv2; } } lp2 += 2; } break; case 3: if(lp2 < 2) { if(lp2 == 0 && flag1 != 0) { for(i = 0, j = width_tbl[1]; i < 5; i++, j += width_tbl[1]) { cur_lp[j] = lv1; cur_lp[j+1] = lv2; } cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE; cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE; } else { for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) { cur_lp[j] = lv1; cur_lp[j+1] = lv2; } } lp2 = 3; } break; case 8: if(lp2 == 0) { RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) if(rle_v1 == 1) { if(flag1 != 0) { for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) { cur_lp[j] = lv1; cur_lp[j+1] = lv2; } cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE; cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE; } else { for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) { cur_lp[j] = lv1; cur_lp[j+1] = lv2; } } } RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) break; } else { rle_v1 = 1; rle_v2 = (*buf1) - 1; } case 5: LP2_CHECK(buf1,rle_v3,lp2) case 4: if(lp2 == 0 && flag1 != 0) { for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) { cur_lp[j] = lv1; 
cur_lp[j+1] = lv2; } cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE; cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE; } else { for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) { cur_lp[j] = lv1; cur_lp[j+1] = lv2; } } lp2 = 4; break; case 6: lp2 = 4; break; case 7: if(lp2 == 0) { if(rle_v3 != 0) rle_v3 = 0; else { buf1--; rle_v3 = 1; } lp2 = 4; } break; case 9: av_log(s->avctx, AV_LOG_ERROR, \"UNTESTED.\\n\"); lv1 = *buf1; lv = (lv1 & 0x7F) << 1; lv += (lv << 8); lv += (lv << 16); for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) cur_lp[j] = lv; LV1_CHECK(buf1,rle_v3,lv1,lp2) break; default: return; } } cur_frm_pos += 8; } cur_frm_pos += (((width * 2) - blks_width) * 4); flag1 = 0; } } else { for( ; blks_height > 0; blks_height -= 8) { for(lp1 = 0; lp1 < blks_width; lp1 += 2) { for(lp2 = 0; lp2 < 4; ) { k = *buf1++; cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2]; ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2]; switch(correction_type_sp[lp2 & 0x01][k]) { case 0: lv1 = correctionloworder_lp[lp2 & 0x01][k]; lv2 = correctionhighorder_lp[lp2 & 0x01][k]; cur_lp[0] = av_le2ne32(((av_le2ne32(ref_lp[0]) >> 1) + lv1) << 1); cur_lp[1] = av_le2ne32(((av_le2ne32(ref_lp[1]) >> 1) + lv2) << 1); cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1); cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1); lp2++; break; case 1: lv1 = correctionloworder_lp[lp2 & 0x01][*buf1++]; lv2 = correctionloworder_lp[lp2 & 0x01][k]; cur_lp[0] = av_le2ne32(((av_le2ne32(ref_lp[0]) >> 1) + lv1) << 1); cur_lp[1] = av_le2ne32(((av_le2ne32(ref_lp[1]) >> 1) + lv2) << 1); cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1); cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1); lp2++; break; case 2: if(lp2 == 0) { for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) { cur_lp[j] = ref_lp[j]; cur_lp[j+1] = ref_lp[j+1]; } lp2 += 2; } break; case 3: if(lp2 < 2) { for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) { cur_lp[j] = ref_lp[j]; cur_lp[j+1] = ref_lp[j+1]; } lp2 = 3; } break; case 8: if(lp2 == 0) { RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) { ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j]; ((uint32_t *)cur_frm_pos)[j+1] = ((uint32_t *)ref_frm_pos)[j+1]; } RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) break; } else { rle_v1 = 1; rle_v2 = (*buf1) - 1; } case 5: case 7: LP2_CHECK(buf1,rle_v3,lp2) case 6: case 4: for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) { cur_lp[j] = ref_lp[j]; cur_lp[j+1] = ref_lp[j+1]; } lp2 = 4; break; case 9: av_log(s->avctx, AV_LOG_ERROR, \"UNTESTED.\\n\"); lv1 = *buf1; lv = (lv1 & 0x7F) << 1; lv += (lv << 8); lv += (lv << 16); for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)cur_frm_pos)[j+1] = lv; LV1_CHECK(buf1,rle_v3,lv1,lp2) break; default: return; } } cur_frm_pos += 8; ref_frm_pos += 8; } cur_frm_pos += (((width * 2) - blks_width) * 4); ref_frm_pos += (((width * 2) - blks_width) * 4); } } break; case 11: /********** CASE 11 **********/ if(ref_vectors == NULL) return; for( ; blks_height > 0; blks_height -= 8) { for(lp1 = 0; lp1 < blks_width; lp1++) { for(lp2 = 0; lp2 < 4; ) { k = *buf1++; cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2]; ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2]; switch(correction_type_sp[lp2 & 
0x01][k]) { case 0: cur_lp[0] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1); cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1); lp2++; break; case 1: lv1 = (unsigned short)(correction_lp[lp2 & 0x01][*buf1++]); lv2 = (unsigned short)(correction_lp[lp2 & 0x01][k]); res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[0]) >> 1) + lv1) << 1); ((unsigned short *)cur_lp)[0] = av_le2ne16(res); res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[1]) >> 1) + lv2) << 1); ((unsigned short *)cur_lp)[1] = av_le2ne16(res); res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[width_tbl[2]]) >> 1) + lv1) << 1); ((unsigned short *)cur_lp)[width_tbl[2]] = av_le2ne16(res); res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[width_tbl[2]+1]) >> 1) + lv2) << 1); ((unsigned short *)cur_lp)[width_tbl[2]+1] = av_le2ne16(res); lp2++; break; case 2: if(lp2 == 0) { for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) cur_lp[j] = ref_lp[j]; lp2 += 2; } break; case 3: if(lp2 < 2) { for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) cur_lp[j] = ref_lp[j]; lp2 = 3; } break; case 8: if(lp2 == 0) { RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) cur_lp[j] = ref_lp[j]; RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) break; } else { rle_v1 = 1; rle_v2 = (*buf1) - 1; } case 5: case 7: LP2_CHECK(buf1,rle_v3,lp2) case 4: case 6: for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) cur_lp[j] = ref_lp[j]; lp2 = 4; break; case 9: av_log(s->avctx, AV_LOG_ERROR, \"UNTESTED.\\n\"); lv1 = *buf1++; lv = (lv1 & 0x7F) << 1; lv += (lv << 8); lv += (lv << 16); for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) cur_lp[j] = lv; LV1_CHECK(buf1,rle_v3,lv1,lp2) break; default: return; } } cur_frm_pos += 4; ref_frm_pos += 4; } cur_frm_pos += (((width * 2) - blks_width) * 4); ref_frm_pos += (((width * 2) - blks_width) * 4); } break; default: return; } } for( ; strip >= strip_tbl; strip--) { if(strip->split_flag != 0) { strip->split_flag = 0; strip->usl7 = (strip-1)->usl7; if(strip->split_direction) { strip->xpos += strip->width; strip->width = (strip-1)->width - strip->width; if(region_160_width <= strip->xpos && width < strip->width + strip->xpos) strip->width = width - strip->xpos; } else { strip->ypos += strip->height; strip->height = (strip-1)->height - strip->height; } break; } } } }"} {"target": 1, "idx": 10673, "func": "static int bethsoftvid_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; BethsoftvidContext * vid = avctx->priv_data; char block_type; uint8_t * dst; uint8_t * frame_end; int remaining = avctx->width; // number of bytes remaining on a line const int wrap_to_next_line = vid->frame.linesize[0] - avctx->width; int code; int yoffset; if (avctx->reget_buffer(avctx, &vid->frame)) { av_log(avctx, AV_LOG_ERROR, \"reget_buffer() failed\\n\"); return -1; } dst = vid->frame.data[0]; frame_end = vid->frame.data[0] + vid->frame.linesize[0] * avctx->height; switch(block_type = *buf++){ case PALETTE_BLOCK: return set_palette(&vid->frame, buf, buf_size); case VIDEO_YOFF_P_FRAME: yoffset = bytestream_get_le16(&buf); if(yoffset >= avctx->height) return -1; dst += vid->frame.linesize[0] * yoffset; } // main code while((code = *buf++)){ int length = code & 0x7f; // copy any bytes starting at the current position, and ending at the frame width while(length > remaining){ 
if(code < 0x80) bytestream_get_buffer(&buf, dst, remaining); else if(block_type == VIDEO_I_FRAME) memset(dst, buf[0], remaining); length -= remaining; // decrement the number of bytes to be copied dst += remaining + wrap_to_next_line; // skip over extra bytes at end of frame remaining = avctx->width; if(dst == frame_end) goto end; } // copy any remaining bytes after / if line overflows if(code < 0x80) bytestream_get_buffer(&buf, dst, length); else if(block_type == VIDEO_I_FRAME) memset(dst, *buf++, length); remaining -= length; dst += length; } end: *data_size = sizeof(AVFrame); *(AVFrame*)data = vid->frame; return buf_size; }"} {"target": 1, "idx": 10679, "func": "static int get_S2prot(CPUARMState *env, int s2ap, int xn) { int prot = 0; if (s2ap & 1) { prot |= PAGE_READ; } if (s2ap & 2) { prot |= PAGE_WRITE; } if (!xn) { prot |= PAGE_EXEC; } return prot; }"} {"target": 1, "idx": 10683, "func": "void qvirtio_pci_foreach(QPCIBus *bus, uint16_t device_type, void (*func)(QVirtioDevice *d, void *data), void *data) { QVirtioPCIForeachData d = { .func = func, .device_type = device_type, .user_data = data }; qpci_device_foreach(bus, PCI_VENDOR_ID_REDHAT_QUMRANET, -1, qvirtio_pci_foreach_callback, &d); }"} {"target": 1, "idx": 10685, "func": "static void gen_slbmfee(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); #else if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } gen_helper_load_slb_esid(cpu_gpr[rS(ctx->opcode)], cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif }"} {"target": 1, "idx": 10692, "func": "static int qcow2_make_empty(BlockDriverState *bs) { BDRVQcow2State *s = bs->opaque; uint64_t offset, end_offset; int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); int l1_clusters, ret = 0; l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); if (s->qcow_version >= 3 && !s->snapshots && 3 + l1_clusters <= s->refcount_block_size && s->crypt_method_header != QCOW_CRYPT_LUKS) { /* The following function only works for qcow2 v3 images (it requires * the dirty flag) and only as long as there are no snapshots (because * it completely empties the image). Furthermore, the L1 table and three * additional clusters (image header, refcount table, one refcount * block) have to fit inside one refcount block. It cannot be used * for LUKS (yet) as it throws away the LUKS header cluster(s) */ return make_completely_empty(bs); } /* This fallback code simply discards every active cluster; this is slow, * but works in all cases */ end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; for (offset = 0; offset < end_offset; offset += step) { /* As this function is generally used after committing an external * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the * default action for this kind of discard is to pass the discard, * which will ideally result in an actually smaller image file, as * is probably desired. 
*/ ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), QCOW2_DISCARD_SNAPSHOT, true); if (ret < 0) { break; } } return ret; }"} {"target": 0, "idx": 10722, "func": "static VncBasicInfoList *qmp_query_server_entry(QIOChannelSocket *ioc, bool websocket, VncBasicInfoList *prev) { VncBasicInfoList *list; VncBasicInfo *info; Error *err = NULL; SocketAddress *addr; addr = qio_channel_socket_get_local_address(ioc, &err); if (!addr) { error_free(err); return prev; } info = g_new0(VncBasicInfo, 1); vnc_init_basic_info(addr, info, &err); qapi_free_SocketAddress(addr); if (err) { qapi_free_VncBasicInfo(info); error_free(err); return prev; } info->websocket = websocket; list = g_new0(VncBasicInfoList, 1); list->value = info; list->next = prev; return list; }"} {"target": 1, "idx": 10728, "func": "uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip) { const ARMCPRegInfo *ri = rip; return ri->readfn(env, ri); }"} {"target": 1, "idx": 10729, "func": "static NetSocketState *net_socket_fd_init_dgram(VLANState *vlan, int fd, int is_connected) { struct sockaddr_in saddr; int newfd; socklen_t saddr_len; NetSocketState *s; /* fd passed: multicast: \"learn\" dgram_dst address from bound address and save it * Because this may be \"shared\" socket from a \"master\" process, datagrams would be recv() * by ONLY ONE process: we must \"clone\" this dgram socket --jjo */ if (is_connected) { if (getsockname(fd, (struct sockaddr *) &saddr, &saddr_len) == 0) { /* must be bound */ if (saddr.sin_addr.s_addr==0) { fprintf(stderr, \"qemu: error: init_dgram: fd=%d unbound, cannot setup multicast dst addr\\n\", fd); return NULL; } /* clone dgram socket */ newfd = net_socket_mcast_create(&saddr); if (newfd < 0) { /* error already reported by net_socket_mcast_create() */ close(fd); return NULL; } /* clone newfd to fd, close newfd */ dup2(newfd, fd); close(newfd); } else { fprintf(stderr, \"qemu: error: init_dgram: fd=%d failed getsockname(): %s\\n\", fd, strerror(errno)); return NULL; } } s = qemu_mallocz(sizeof(NetSocketState)); if (!s) return NULL; s->fd = fd; s->vc = qemu_new_vlan_client(vlan, net_socket_receive_dgram, s); qemu_set_fd_handler(s->fd, net_socket_send_dgram, NULL, s); /* mcast: save bound address as dst */ if (is_connected) s->dgram_dst=saddr; snprintf(s->vc->info_str, sizeof(s->vc->info_str), \"socket: fd=%d (%s mcast=%s:%d)\", fd, is_connected? 
\"cloned\" : \"\", inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); return s; }"} {"target": 1, "idx": 10736, "func": "static int filter_frame(AVFilterLink *link, AVFilterBufferRef *picref) { AVFilterContext *ctx = link->dst; YADIFContext *yadif = ctx->priv; av_assert0(picref); if (picref->video->h < 3 || picref->video->w < 3) { av_log(ctx, AV_LOG_ERROR, \"Video of less than 3 columns or lines is not supported\\n\"); return AVERROR(EINVAL); } if (yadif->frame_pending) return_frame(ctx, 1); if (yadif->prev) avfilter_unref_buffer(yadif->prev); yadif->prev = yadif->cur; yadif->cur = yadif->next; yadif->next = picref; if (!yadif->cur) return 0; if (yadif->auto_enable && !yadif->cur->video->interlaced) { yadif->out = avfilter_ref_buffer(yadif->cur, ~AV_PERM_WRITE); if (!yadif->out) return AVERROR(ENOMEM); avfilter_unref_bufferp(&yadif->prev); if (yadif->out->pts != AV_NOPTS_VALUE) yadif->out->pts *= 2; return ff_filter_frame(ctx->outputs[0], yadif->out); } if (!yadif->prev && !(yadif->prev = avfilter_ref_buffer(yadif->cur, ~AV_PERM_WRITE))) return AVERROR(ENOMEM); yadif->out = ff_get_video_buffer(ctx->outputs[0], PERM_RWP, link->w, link->h); if (!yadif->out) return AVERROR(ENOMEM); avfilter_copy_buffer_ref_props(yadif->out, yadif->cur); yadif->out->video->interlaced = 0; if (yadif->out->pts != AV_NOPTS_VALUE) yadif->out->pts *= 2; return return_frame(ctx, 0); }"} {"target": 0, "idx": 10739, "func": "void ff_put_h264_qpel16_mc23_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_midv_qrt_16w_msa(src - (2 * stride) - 2, stride, dst, stride, 16, 1); }"} {"target": 0, "idx": 10751, "func": "static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){ int i; // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264) CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*21*2); //(width + edge + align)*interlaced*MBsize*tolerance s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21; //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer() CHECKED_ALLOCZ(s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t)) s->me.temp= s->me.scratchpad; s->rd_scratchpad= s->me.scratchpad; s->b_scratchpad= s->me.scratchpad; s->obmc_scratchpad= s->me.scratchpad + 16; if (s->encoding) { CHECKED_ALLOCZ(s->me.map , ME_MAP_SIZE*sizeof(uint32_t)) CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t)) if(s->avctx->noise_reduction){ CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int)) } } CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM)) s->block= s->blocks[0]; for(i=0;i<12;i++){ s->pblocks[i] = &s->block[i]; } return 0; fail: return -1; //free() through MPV_common_end() }"} {"target": 0, "idx": 10762, "func": "void ff_put_h264_qpel4_mc31_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_hv_qrt_4w_msa(src - 2, src - (stride * 2) + sizeof(uint8_t), stride, dst, stride, 4); }"} {"target": 0, "idx": 10778, "func": "yuv2rgb48_1_c_template(SwsContext *c, const uint16_t *buf0, const uint16_t *ubuf0, const uint16_t *ubuf1, const uint16_t *vbuf0, const uint16_t *vbuf1, const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y, enum PixelFormat target) { int i; if (uvalpha < 2048) { for (i = 0; i < (dstW >> 1); i++) { int Y1 = buf0[i * 2] >> 7; int Y2 = buf0[i * 2 + 1] >> 7; int U = ubuf1[i] >> 7; int V = vbuf1[i] >> 7; const uint8_t *r = (const uint8_t *) c->table_rV[V], *g = (const uint8_t *)(c->table_gU[U] + c->table_gV[V]), *b = (const 
uint8_t *) c->table_bU[U]; dest[ 0] = dest[ 1] = r_b[Y1]; dest[ 2] = dest[ 3] = g[Y1]; dest[ 4] = dest[ 5] = b_r[Y1]; dest[ 6] = dest[ 7] = r_b[Y2]; dest[ 8] = dest[ 9] = g[Y2]; dest[10] = dest[11] = b_r[Y2]; dest += 12; } } else { for (i = 0; i < (dstW >> 1); i++) { int Y1 = buf0[i * 2] >> 7; int Y2 = buf0[i * 2 + 1] >> 7; int U = (ubuf0[i] + ubuf1[i]) >> 8; int V = (vbuf0[i] + vbuf1[i]) >> 8; const uint8_t *r = (const uint8_t *) c->table_rV[V], *g = (const uint8_t *)(c->table_gU[U] + c->table_gV[V]), *b = (const uint8_t *) c->table_bU[U]; dest[ 0] = dest[ 1] = r_b[Y1]; dest[ 2] = dest[ 3] = g[Y1]; dest[ 4] = dest[ 5] = b_r[Y1]; dest[ 6] = dest[ 7] = r_b[Y2]; dest[ 8] = dest[ 9] = g[Y2]; dest[10] = dest[11] = b_r[Y2]; dest += 12; } } }"} {"target": 1, "idx": 10783, "func": "int net_init_tap(QemuOpts *opts, Monitor *mon, const char *name, VLANState *vlan) { TAPState *s; int fd, vnet_hdr = 0; if (qemu_opt_get(opts, \"fd\")) { if (qemu_opt_get(opts, \"ifname\") || qemu_opt_get(opts, \"script\") || qemu_opt_get(opts, \"downscript\") || qemu_opt_get(opts, \"vnet_hdr\")) { error_report(\"ifname=, script=, downscript= and vnet_hdr= is invalid with fd=\"); return -1; } fd = net_handle_fd_param(mon, qemu_opt_get(opts, \"fd\")); if (fd == -1) { return -1; } fcntl(fd, F_SETFL, O_NONBLOCK); vnet_hdr = tap_probe_vnet_hdr(fd); } else { if (!qemu_opt_get(opts, \"script\")) { qemu_opt_set(opts, \"script\", DEFAULT_NETWORK_SCRIPT); } if (!qemu_opt_get(opts, \"downscript\")) { qemu_opt_set(opts, \"downscript\", DEFAULT_NETWORK_DOWN_SCRIPT); } fd = net_tap_init(opts, &vnet_hdr); if (fd == -1) { return -1; } } s = net_tap_fd_init(vlan, \"tap\", name, fd, vnet_hdr); if (!s) { close(fd); return -1; } if (tap_set_sndbuf(s->fd, opts) < 0) { return -1; } if (qemu_opt_get(opts, \"fd\")) { snprintf(s->nc.info_str, sizeof(s->nc.info_str), \"fd=%d\", fd); } else { const char *ifname, *script, *downscript; ifname = qemu_opt_get(opts, \"ifname\"); script = qemu_opt_get(opts, \"script\"); downscript = qemu_opt_get(opts, \"downscript\"); snprintf(s->nc.info_str, sizeof(s->nc.info_str), \"ifname=%s,script=%s,downscript=%s\", ifname, script, downscript); if (strcmp(downscript, \"no\") != 0) { snprintf(s->down_script, sizeof(s->down_script), \"%s\", downscript); snprintf(s->down_script_arg, sizeof(s->down_script_arg), \"%s\", ifname); } } if (qemu_opt_get_bool(opts, \"vhost\", !!qemu_opt_get(opts, \"vhostfd\") || qemu_opt_get_bool(opts, \"vhostforce\", false))) { int vhostfd, r; bool force = qemu_opt_get_bool(opts, \"vhostforce\", false); if (qemu_opt_get(opts, \"vhostfd\")) { r = net_handle_fd_param(mon, qemu_opt_get(opts, \"vhostfd\")); if (r == -1) { return -1; } vhostfd = r; } else { vhostfd = -1; } s->vhost_net = vhost_net_init(&s->nc, vhostfd, force); if (!s->vhost_net) { error_report(\"vhost-net requested but could not be initialized\"); return -1; } } else if (qemu_opt_get(opts, \"vhostfd\")) { error_report(\"vhostfd= is not valid without vhost\"); return -1; } return 0; }"} {"target": 1, "idx": 10803, "func": "static int get_uint64_equal(QEMUFile *f, void *pv, size_t size) { uint64_t *v = pv; uint64_t v2; qemu_get_be64s(f, &v2); if (*v == v2) { return 0; } return -EINVAL; }"} {"target": 1, "idx": 10813, "func": "void spapr_drc_detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp) { trace_spapr_drc_detach(spapr_drc_index(drc)); if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) { trace_spapr_drc_awaiting_isolated(spapr_drc_index(drc)); drc->awaiting_release = true; return; } if (spapr_drc_type(drc) 
!= SPAPR_DR_CONNECTOR_TYPE_PCI && drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) { trace_spapr_drc_awaiting_unusable(spapr_drc_index(drc)); drc->awaiting_release = true; return; } if (drc->awaiting_allocation) { drc->awaiting_release = true; trace_spapr_drc_awaiting_allocation(spapr_drc_index(drc)); return; } spapr_drc_release(drc); }"} {"target": 1, "idx": 10815, "func": "static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base, struct vhost_log *log) { int fds[VHOST_MEMORY_MAX_NREGIONS]; size_t fd_num = 0; bool shmfd = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_LOG_SHMFD); VhostUserMsg msg = { .request = VHOST_USER_SET_LOG_BASE, .flags = VHOST_USER_VERSION, .u64 = base, .size = sizeof(m.u64), }; if (shmfd && log->fd != -1) { fds[fd_num++] = log->fd; } vhost_user_write(dev, &msg, fds, fd_num); if (shmfd) { msg.size = 0; if (vhost_user_read(dev, &msg) < 0) { return 0; } if (msg.request != VHOST_USER_SET_LOG_BASE) { error_report(\"Received unexpected msg type. \" \"Expected %d received %d\", VHOST_USER_SET_LOG_BASE, msg.request); return -1; } } return 0; }"} {"target": 1, "idx": 10820, "func": "static void *vmstate_base_addr(void *opaque, VMStateField *field, bool alloc) { void *base_addr = opaque + field->offset; if (field->flags & VMS_POINTER) { if (alloc && (field->flags & VMS_ALLOC)) { gsize size = 0; if (field->flags & VMS_VBUFFER) { size = vmstate_size(opaque, field); } else { int n_elems = vmstate_n_elems(opaque, field); if (n_elems) { size = n_elems * field->size; } } if (size) { *((void **)base_addr + field->start) = g_malloc(size); } } base_addr = *(void **)base_addr + field->start; } return base_addr; }"} {"target": 1, "idx": 10828, "func": "static void add_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels, ptrdiff_t line_size) { int i; /* read the pixels */ for (i = 0; i < 8; i++) { pixels[0] = av_clip_uint8(pixels[0] + block[0]); pixels[1] = av_clip_uint8(pixels[1] + block[1]); pixels[2] = av_clip_uint8(pixels[2] + block[2]); pixels[3] = av_clip_uint8(pixels[3] + block[3]); pixels[4] = av_clip_uint8(pixels[4] + block[4]); pixels[5] = av_clip_uint8(pixels[5] + block[5]); pixels[6] = av_clip_uint8(pixels[6] + block[6]); pixels[7] = av_clip_uint8(pixels[7] + block[7]); pixels += line_size; block += 8; } }"} {"target": 0, "idx": 10835, "func": "const char *avio_enum_protocols(void **opaque, int output) { URLProtocol *p; *opaque = ffurl_protocol_next(*opaque); if (!(p = *opaque)) return NULL; if ((output && p->url_write) || (!output && p->url_read)) return p->name; return avio_enum_protocols(opaque, output); }"} {"target": 1, "idx": 10840, "func": "static int read_highpass(AVCodecContext *avctx, uint8_t *ptr, int plane, AVFrame *frame) { PixletContext *ctx = avctx->priv_data; ptrdiff_t stride = frame->linesize[plane] / 2; int i, ret; for (i = 0; i < ctx->levels * 3; i++) { int32_t a = bytestream2_get_be32(&ctx->gb); int32_t b = bytestream2_get_be32(&ctx->gb); int32_t c = bytestream2_get_be32(&ctx->gb); int32_t d = bytestream2_get_be32(&ctx->gb); int16_t *dest = (int16_t *)frame->data[plane] + ctx->band[plane][i + 1].x + stride * ctx->band[plane][i + 1].y; unsigned size = ctx->band[plane][i + 1].size; uint32_t magic; magic = bytestream2_get_be32(&ctx->gb); if (magic != 0xDEADBEEF) { av_log(avctx, AV_LOG_ERROR, \"wrong magic number: 0x%08\"PRIX32 \" for plane %d, band %d\\n\", magic, plane, i); } ret = read_high_coeffs(avctx, ptr + bytestream2_tell(&ctx->gb), dest, size, c, (b >= FFABS(a)) ? 
b : a, d, ctx->band[plane][i + 1].width, stride); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"error in highpass coefficients for plane %d, band %d\\n\", plane, i); return ret; } bytestream2_skip(&ctx->gb, ret); } return 0; }"} {"target": 1, "idx": 10843, "func": "static int vp9_superframe_split_filter(AVBSFContext *ctx, AVPacket *out) { VP9SFSplitContext *s = ctx->priv_data; AVPacket *in; int i, j, ret, marker; int is_superframe = !!s->buffer_pkt; if (!s->buffer_pkt) { ret = ff_bsf_get_packet(ctx, &s->buffer_pkt); if (ret < 0) return ret; in = s->buffer_pkt; marker = in->data[in->size - 1]; if ((marker & 0xe0) == 0xc0) { int length_size = 1 + ((marker >> 3) & 0x3); int nb_frames = 1 + (marker & 0x7); int idx_size = 2 + nb_frames * length_size; if (in->size >= idx_size && in->data[in->size - idx_size] == marker) { GetByteContext bc; int total_size = 0; bytestream2_init(&bc, in->data + in->size + 1 - idx_size, nb_frames * length_size); for (i = 0; i < nb_frames; i++) { int frame_size = 0; for (j = 0; j < length_size; j++) frame_size |= bytestream2_get_byte(&bc) << (j * 8); total_size += frame_size; if (total_size > in->size - idx_size) { av_log(ctx, AV_LOG_ERROR, \"Invalid frame size in a superframe: %d\\n\", frame_size); ret = AVERROR(EINVAL); goto fail; } s->sizes[i] = frame_size; } s->nb_frames = nb_frames; s->next_frame = 0; s->next_frame_offset = 0; is_superframe = 1; } } } if (is_superframe) { GetBitContext gb; int profile, invisible = 0; ret = av_packet_ref(out, s->buffer_pkt); if (ret < 0) goto fail; out->data += s->next_frame_offset; out->size = s->sizes[s->next_frame]; s->next_frame_offset += out->size; s->next_frame++; if (s->next_frame >= s->nb_frames) av_packet_free(&s->buffer_pkt); ret = init_get_bits8(&gb, out->data, out->size); if (ret < 0) goto fail; get_bits(&gb, 2); // frame_marker profile = get_bits1(&gb); profile |= get_bits1(&gb) << 1; if (profile == 3) get_bits1(&gb); if (!get_bits1(&gb)) { get_bits1(&gb); invisible = !get_bits1(&gb); } if (invisible) out->pts = AV_NOPTS_VALUE; } else { av_packet_move_ref(out, s->buffer_pkt); av_packet_free(&s->buffer_pkt); } return 0; fail: av_packet_free(&s->buffer_pkt); return ret; }"} {"target": 1, "idx": 10860, "func": "static int coroutine_fn v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid, V9fsString *old_name, int32_t newdirfid, V9fsString *new_name) { int err = 0; V9fsState *s = pdu->s; V9fsFidState *newdirfidp = NULL, *olddirfidp = NULL; olddirfidp = get_fid(pdu, olddirfid); if (olddirfidp == NULL) { err = -ENOENT; goto out; } if (newdirfid != -1) { newdirfidp = get_fid(pdu, newdirfid); if (newdirfidp == NULL) { err = -ENOENT; goto out; } } else { newdirfidp = get_fid(pdu, olddirfid); } err = v9fs_co_renameat(pdu, &olddirfidp->path, old_name, &newdirfidp->path, new_name); if (err < 0) { goto out; } if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) { /* Only for path based fid we need to do the below fixup */ v9fs_fix_fid_paths(pdu, &olddirfidp->path, old_name, &newdirfidp->path, new_name); } out: if (olddirfidp) { put_fid(pdu, olddirfidp); } if (newdirfidp) { put_fid(pdu, newdirfidp); } return err; }"} {"target": 1, "idx": 10862, "func": "static int decode_bytes(const uint8_t *input, uint8_t *out, int bytes) { int i, off; uint32_t c; const uint32_t *buf; uint32_t *output = (uint32_t *)out; off = (intptr_t)input & 3; buf = (const uint32_t *)(input - off); c = av_be2ne32((0x537F6103 >> (off * 8)) | (0x537F6103 << (32 - (off * 8)))); bytes += 3 + off; for (i = 0; i < bytes / 4; i++) output[i] = c ^ buf[i]; if (off) 
avpriv_request_sample(NULL, \"Offset of %d\", off); return off; }"} {"target": 0, "idx": 10867, "func": "static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ SnowContext *s = avctx->priv_data; CABACContext * const c= &s->c; AVFrame *pict = data; const int width= s->avctx->width; const int height= s->avctx->height; int used_count= 0; int log2_threshold, level, orientation, plane_index, i; ff_init_cabac_encoder(c, buf, buf_size); ff_init_cabac_states(c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64); s->input_picture = *pict; memset(s->header_state, 0, sizeof(s->header_state)); s->keyframe=avctx->gop_size==0 || avctx->frame_number % avctx->gop_size == 0; pict->pict_type= s->keyframe ? FF_I_TYPE : FF_P_TYPE; if(pict->quality){ s->qlog= rint(QROOT*log(pict->quality / (float)FF_QP2LAMBDA)/log(2)); //<64 >60 s->qlog += 61; }else{ s->qlog= LOSSLESS_QLOG; } for(i=0; i<s->mb_band.stride * s->mb_band.height; i++){ s->mb_band.buf[i]= s->keyframe; } frame_start(s); if(pict->pict_type == P_TYPE){ int block_width = (width +15)>>4; int block_height= (height+15)>>4; int stride= s->current_picture.linesize[0]; uint8_t *src_plane= s->input_picture.data[0]; int src_stride= s->input_picture.linesize[0]; int x,y; assert(s->current_picture.data[0]); assert(s->last_picture.data[0]); s->m.avctx= s->avctx; s->m.current_picture.data[0]= s->current_picture.data[0]; s->m. last_picture.data[0]= s-> last_picture.data[0]; s->m. new_picture.data[0]= s-> input_picture.data[0]; s->m.current_picture_ptr= &s->m.current_picture; s->m. last_picture_ptr= &s->m. last_picture; s->m.linesize= s->m. last_picture.linesize[0]= s->m. new_picture.linesize[0]= s->m.current_picture.linesize[0]= stride; s->m.width = width; s->m.height= height; s->m.mb_width = block_width; s->m.mb_height= block_height; s->m.mb_stride= s->m.mb_width+1; s->m.b8_stride= 2*s->m.mb_width+1; s->m.f_code=1; s->m.pict_type= pict->pict_type; s->m.me_method= s->avctx->me_method; s->m.me.scene_change_score=0; s->m.flags= s->avctx->flags; s->m.quarter_sample= (s->avctx->flags & CODEC_FLAG_QPEL)!=0; s->m.out_format= FMT_H263; s->m.unrestricted_mv= 1; s->m.lambda= pict->quality * 3/2; //FIXME bug somewhere else s->m.qscale= (s->m.lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7); s->m.lambda2= (s->m.lambda*s->m.lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT; if(!s->motion_val8){ s->motion_val8 = av_mallocz(s->m.b8_stride*block_height*2*2*sizeof(int16_t)); s->motion_val16= av_mallocz(s->m.mb_stride*block_height*2*sizeof(int16_t)); } s->m.mb_type= s->mb_type; //dummies, to avoid segfaults s->m.current_picture.mb_mean = s->mb_mean; s->m.current_picture.mb_var = (int16_t*)s->dummy; s->m.current_picture.mc_mb_var= (int16_t*)s->dummy; s->m.current_picture.mb_type = s->dummy; s->m.current_picture.motion_val[0]= s->motion_val8; s->m.p_mv_table= s->motion_val16; s->m.dsp= s->dsp; //move ff_init_me(&s->m); s->m.me.pre_pass=1; s->m.me.dia_size= s->avctx->pre_dia_size; s->m.first_slice_line=1; for(y= block_height-1; y >= 0; y--) { uint8_t src[stride*16]; s->m.new_picture.data[0]= src - y*16*stride; //ugly s->m.mb_y= y; for(i=0; i<16 && i + 16*y<height; i++) memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width); for(x=block_width-1; x >=0 ;x--) { s->m.mb_x= x; ff_init_block_index(&s->m); ff_update_block_index(&s->m); ff_pre_estimate_p_frame_motion(&s->m, x, y); } s->m.first_slice_line=0; } s->m.me.pre_pass=0; s->m.me.dia_size= s->avctx->dia_size; s->m.first_slice_line=1; for (y = 0; y < block_height; y++) { uint8_t src[stride*16]; s->m.new_picture.data[0]= src - y*16*stride; //ugly s->m.mb_y= y; assert(width <= 
stride); assert(width <= 16*block_width); for(i=0; i<16 && i + 16*y<height; i++) memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width); for(x=0; x<block_width; x++){ int mb_xy= x + y*(s->mb_band.stride); s->m.mb_x= x; ff_init_block_index(&s->m); ff_update_block_index(&s->m); ff_estimate_p_frame_motion(&s->m, x, y); s->mb_band .buf[mb_xy]= (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTER) ? 0 : 2; s->mv_band[0].buf[mb_xy]= s->motion_val16[x + y*s->m.mb_stride][0]; s->mv_band[1].buf[mb_xy]= s->motion_val16[x + y*s->m.mb_stride][1]; if(s->mb_band .buf[x + y*(s->mb_band.stride)]==2 && 0){ int dc0=128, dc1=128, dc, dc2, dir; int offset= (s->avctx->flags & CODEC_FLAG_QPEL) ? 64 : 32; dc =s->mb_mean[x + y *s->m.mb_stride ]; if(x) dc0=s->mb_mean[x + y *s->m.mb_stride - 1]; if(y) dc1=s->mb_mean[x + (y-1)*s->m.mb_stride ]; dc2= (dc0+dc1)>>1; #if 0 if (ABS(dc0 - dc) < ABS(dc1 - dc) && ABS(dc0 - dc) < ABS(dc2 - dc)) dir= 1; else if(ABS(dc0 - dc) >=ABS(dc1 - dc) && ABS(dc1 - dc) < ABS(dc2 - dc)) dir=-1; else dir=0; #endif if(ABS(dc0 - dc) < ABS(dc1 - dc) && x){ s->mv_band[0].buf[mb_xy]= s->mv_band[0].buf[x + y*(s->mb_band.stride)-1] - offset; s->mv_band[1].buf[mb_xy]= s->mv_band[1].buf[x + y*(s->mb_band.stride)-1]; s->mb_mean[x + y *s->m.mb_stride ]= dc0; }else if(y){ s->mv_band[0].buf[mb_xy]= s->mv_band[0].buf[x + (y-1)*(s->mb_band.stride)]; s->mv_band[1].buf[mb_xy]= s->mv_band[1].buf[x + (y-1)*(s->mb_band.stride)] - offset; s->mb_mean[x + y *s->m.mb_stride ]= dc1; } } // s->mb_band .buf[x + y*(s->mb_band.stride)]=1; //FIXME intra only test } s->m.first_slice_line=0; } assert(s->m.pict_type == P_TYPE); if(s->m.me.scene_change_score > s->avctx->scenechange_threshold){ s->m.pict_type= pict->pict_type =I_TYPE; for(i=0; i<s->mb_band.stride * s->mb_band.height; i++){ s->mb_band.buf[i]= 1; s->mv_band[0].buf[i]= s->mv_band[1].buf[i]= 0; } //printf(\"Scene change detected, encoding as I Frame %d %d\\n\", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum); } } s->m.first_slice_line=1; s->qbias= pict->pict_type == P_TYPE ? 2 : 0; encode_header(s); decorrelate(s, &s->mb_band , s->mb_band .buf, s->mb_band .stride, 0, 1); decorrelate(s, &s->mv_band[0], s->mv_band[0].buf, s->mv_band[0].stride, 0, 1); decorrelate(s, &s->mv_band[1], s->mv_band[1].buf, s->mv_band[1].stride, 0 ,1); encode_subband(s, &s->mb_band , s->mb_band .buf, NULL, s->mb_band .stride, 0); encode_subband(s, &s->mv_band[0], s->mv_band[0].buf, NULL, s->mv_band[0].stride, 0); encode_subband(s, &s->mv_band[1], s->mv_band[1].buf, NULL, s->mv_band[1].stride, 0); //FIXME avoid this correlate(s, &s->mb_band , s->mb_band .buf, s->mb_band .stride, 1, 1); correlate(s, &s->mv_band[0], s->mv_band[0].buf, s->mv_band[0].stride, 1, 1); correlate(s, &s->mv_band[1], s->mv_band[1].buf, s->mv_band[1].stride, 1, 1); for(plane_index=0; plane_index<3; plane_index++){ Plane *p= &s->plane[plane_index]; int w= p->width; int h= p->height; int x, y; int bits= put_bits_count(&s->c.pb); //FIXME optimize if(pict->data[plane_index]) //FIXME gray hack for(y=0; y<h; y++){ for(x=0; x<w; x++){ s->spatial_dwt_buffer[y*w + x]= pict->data[plane_index][y*pict->linesize[plane_index] + x]<<8; } } predict_plane(s, s->spatial_dwt_buffer, plane_index, 0); if(s->qlog == LOSSLESS_QLOG){ for(y=0; y<h; y++){ for(x=0; x<w; x++){ s->spatial_dwt_buffer[y*w + x]= (s->spatial_dwt_buffer[y*w + x] + 127)>>8; } } } ff_spatial_dwt(s->spatial_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count); for(level=0; level<s->spatial_decomposition_count; level++){ for(orientation=level ? 
1 : 0; orientation<4; orientation++){ SubBand *b= &p->band[level][orientation]; quantize(s, b, b->buf, b->stride, s->qbias); if(orientation==0) decorrelate(s, b, b->buf, b->stride, pict->pict_type == P_TYPE, 0); encode_subband(s, b, b->buf, b->parent ? b->parent->buf : NULL, b->stride, orientation); assert(b->parent==NULL || b->parent->stride == b->stride*2); if(orientation==0) correlate(s, b, b->buf, b->stride, 1, 0); } } // av_log(NULL, AV_LOG_DEBUG, \"plane:%d bits:%d\\n\", plane_index, put_bits_count(&s->c.pb) - bits); for(level=0; level<s->spatial_decomposition_count; level++){ for(orientation=level ? 1 : 0; orientation<4; orientation++){ SubBand *b= &p->band[level][orientation]; dequantize(s, b, b->buf, b->stride); } } ff_spatial_idwt(s->spatial_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count); if(s->qlog == LOSSLESS_QLOG){ for(y=0; y<h; y++){ for(x=0; x<w; x++){ s->spatial_dwt_buffer[y*w + x]<<=8; } } } predict_plane(s, s->spatial_dwt_buffer, plane_index, 1); //FIXME optimize for(y=0; y<h; y++){ for(x=0; x<w; x++){ int v= (s->spatial_dwt_buffer[y*w + x]+128)>>8; if(v&(~255)) v= ~(v>>31); s->current_picture.data[plane_index][y*s->current_picture.linesize[plane_index] + x]= v; } } if(s->avctx->flags&CODEC_FLAG_PSNR){ int64_t error= 0; if(pict->data[plane_index]) //FIXME gray hack for(y=0; y<h; y++){ for(x=0; x<w; x++){ int d= s->current_picture.data[plane_index][y*s->current_picture.linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x]; error += d*d; } } s->avctx->error[plane_index] += error; s->avctx->error[3] += error; } } if(s->last_picture.data[0]) avctx->release_buffer(avctx, &s->last_picture); emms_c(); return put_cabac_terminate(c, 1); }"} {"target": 0, "idx": 10889, "func": "static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu, sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { sPAPRPHBState *sphb; sPAPRPHBClass *spc; uint32_t addr, option; uint64_t buid; int ret; if ((nargs != 4) || (nret != 1)) { goto param_error_exit; } buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2); addr = rtas_ld(args, 0); option = rtas_ld(args, 3); sphb = find_phb(spapr, buid); if (!sphb) { goto param_error_exit; } spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb); if (!spc->eeh_set_option) { goto param_error_exit; } ret = spc->eeh_set_option(sphb, addr, option); rtas_st(rets, 0, ret); return; param_error_exit: rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); }"} {"target": 0, "idx": 10897, "func": "static bool nvic_user_access_ok(NVICState *s, hwaddr offset) { /* Return true if unprivileged access to this register is permitted. */ switch (offset) { case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */ return s->cpu->env.v7m.ccr & R_V7M_CCR_USERSETMPEND_MASK; default: /* All other user accesses cause a BusFault unconditionally */ return false; } }"} {"target": 1, "idx": 10912, "func": "static void kvm_arm_gic_realize(DeviceState *dev, Error **errp) { int i; GICState *s = KVM_ARM_GIC(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s); Error *local_err = NULL; int ret; kgc->parent_realize(dev, &local_err); if (local_err) { error_propagate(errp, local_err); i = s->num_irq - GIC_INTERNAL; /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU. * GPIO array layout is thus: * [0..N-1] SPIs * [N..N+31] PPIs for CPU 0 * [N+32..N+63] PPIs for CPU 1 * ... 
*/ i += (GIC_INTERNAL * s->num_cpu); qdev_init_gpio_in(dev, kvm_arm_gic_set_irq, i); /* We never use our outbound IRQ/FIQ lines but provide them so that * we maintain the same interface as the non-KVM GIC. */ for (i = 0; i < s->num_cpu; i++) { sysbus_init_irq(sbd, &s->parent_irq[i]); for (i = 0; i < s->num_cpu; i++) { sysbus_init_irq(sbd, &s->parent_fiq[i]); /* Try to create the device via the device control API */ s->dev_fd = -1; ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V2, false); if (ret >= 0) { s->dev_fd = ret; } else if (ret != -ENODEV && ret != -ENOTSUP) { error_setg_errno(errp, -ret, \"error creating in-kernel VGIC\"); if (kvm_gic_supports_attr(s, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) { uint32_t numirqs = s->num_irq; kvm_gic_access(s, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, 0, &numirqs, 1); /* Tell the kernel to complete VGIC initialization now */ if (kvm_gic_supports_attr(s, KVM_DEV_ARM_VGIC_GRP_CTRL, KVM_DEV_ARM_VGIC_CTRL_INIT)) { kvm_gic_access(s, KVM_DEV_ARM_VGIC_GRP_CTRL, KVM_DEV_ARM_VGIC_CTRL_INIT, 0, 0, 1); /* Distributor */ memory_region_init_reservation(&s->iomem, OBJECT(s), \"kvm-gic_dist\", 0x1000); sysbus_init_mmio(sbd, &s->iomem); kvm_arm_register_device(&s->iomem, (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) | KVM_VGIC_V2_ADDR_TYPE_DIST, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V2_ADDR_TYPE_DIST, s->dev_fd); /* CPU interface for current core. Unlike arm_gic, we don't * provide the \"interface for core #N\" memory regions, because * cores with a VGIC don't have those. */ memory_region_init_reservation(&s->cpuiomem[0], OBJECT(s), \"kvm-gic_cpu\", 0x1000); sysbus_init_mmio(sbd, &s->cpuiomem[0]); kvm_arm_register_device(&s->cpuiomem[0], (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) | KVM_VGIC_V2_ADDR_TYPE_CPU, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V2_ADDR_TYPE_CPU, s->dev_fd);"} {"target": 0, "idx": 10928, "func": "void inter_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y) { int x_off = mb_x << 4, y_off = mb_y << 4; int width = 16*s->mb_width, height = 16*s->mb_height; AVFrame *ref = s->framep[mb->ref_frame]; VP56mv *bmv = mb->bmv; switch (mb->partitioning) { case VP8_SPLITMVMODE_NONE: vp8_mc_part(s, dst, ref, x_off, y_off, 0, 0, 16, 16, width, height, &mb->mv); break; case VP8_SPLITMVMODE_4x4: { int x, y; VP56mv uvmv; /* Y */ for (y = 0; y < 4; y++) { for (x = 0; x < 4; x++) { vp8_mc(s, 1, dst[0] + 4*y*s->linesize + x*4, ref->data[0], &bmv[4*y + x], 4*x + x_off, 4*y + y_off, 4, 4, width, height, s->linesize, s->put_pixels_tab[2]); } } /* U/V */ x_off >>= 1; y_off >>= 1; width >>= 1; height >>= 1; for (y = 0; y < 2; y++) { for (x = 0; x < 2; x++) { uvmv.x = mb->bmv[ 2*y * 4 + 2*x ].x + mb->bmv[ 2*y * 4 + 2*x+1].x + mb->bmv[(2*y+1) * 4 + 2*x ].x + mb->bmv[(2*y+1) * 4 + 2*x+1].x; uvmv.y = mb->bmv[ 2*y * 4 + 2*x ].y + mb->bmv[ 2*y * 4 + 2*x+1].y + mb->bmv[(2*y+1) * 4 + 2*x ].y + mb->bmv[(2*y+1) * 4 + 2*x+1].y; uvmv.x = (uvmv.x + 2 + (uvmv.x >> (INT_BIT-1))) >> 2; uvmv.y = (uvmv.y + 2 + (uvmv.y >> (INT_BIT-1))) >> 2; if (s->profile == 3) { uvmv.x &= ~7; uvmv.y &= ~7; } vp8_mc(s, 0, dst[1] + 4*y*s->uvlinesize + x*4, ref->data[1], &uvmv, 4*x + x_off, 4*y + y_off, 4, 4, width, height, s->uvlinesize, s->put_pixels_tab[2]); vp8_mc(s, 0, dst[2] + 4*y*s->uvlinesize + x*4, ref->data[2], &uvmv, 4*x + x_off, 4*y + y_off, 4, 4, width, height, s->uvlinesize, s->put_pixels_tab[2]); } } break; } case VP8_SPLITMVMODE_16x8: vp8_mc_part(s, dst, ref, x_off, y_off, 0, 0, 16, 8, width, height, &bmv[0]); vp8_mc_part(s, dst, ref, x_off, y_off, 0, 8, 16, 8, 
width, height, &bmv[1]); break; case VP8_SPLITMVMODE_8x16: vp8_mc_part(s, dst, ref, x_off, y_off, 0, 0, 8, 16, width, height, &bmv[0]); vp8_mc_part(s, dst, ref, x_off, y_off, 8, 0, 8, 16, width, height, &bmv[1]); break; case VP8_SPLITMVMODE_8x8: vp8_mc_part(s, dst, ref, x_off, y_off, 0, 0, 8, 8, width, height, &bmv[0]); vp8_mc_part(s, dst, ref, x_off, y_off, 8, 0, 8, 8, width, height, &bmv[1]); vp8_mc_part(s, dst, ref, x_off, y_off, 0, 8, 8, 8, width, height, &bmv[2]); vp8_mc_part(s, dst, ref, x_off, y_off, 8, 8, 8, 8, width, height, &bmv[3]); break; } }"} {"target": 0, "idx": 10932, "func": "static int64_t cpu_get_icount_locked(void) { int64_t icount; CPUState *cpu = current_cpu; icount = timers_state.qemu_icount; if (cpu) { if (!cpu_can_do_io(cpu)) { fprintf(stderr, \"Bad clock read\\n\"); } icount -= (cpu->icount_decr.u16.low + cpu->icount_extra); } return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount); }"} {"target": 0, "idx": 10938, "func": "static void sigp_start(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg.host_ptr; if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) { si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; return; } s390_cpu_set_state(CPU_STATE_OPERATING, cpu); si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; }"} {"target": 0, "idx": 10941, "func": "static void pchip_write(void *opaque, hwaddr addr, uint64_t v32, unsigned size) { TyphoonState *s = opaque; uint64_t val, oldval; if (addr & 4) { val = v32 << 32 | s->latch_tmp; addr ^= 4; } else { s->latch_tmp = v32; return; } switch (addr) { case 0x0000: /* WSBA0: Window Space Base Address Register. */ s->pchip.win[0].base_addr = val; break; case 0x0040: /* WSBA1 */ s->pchip.win[1].base_addr = val; break; case 0x0080: /* WSBA2 */ s->pchip.win[2].base_addr = val; break; case 0x00c0: /* WSBA3 */ s->pchip.win[3].base_addr = val; break; case 0x0100: /* WSM0: Window Space Mask Register. */ s->pchip.win[0].mask = val; break; case 0x0140: /* WSM1 */ s->pchip.win[1].mask = val; break; case 0x0180: /* WSM2 */ s->pchip.win[2].mask = val; break; case 0x01c0: /* WSM3 */ s->pchip.win[3].mask = val; break; case 0x0200: /* TBA0: Translated Base Address Register. */ s->pchip.win[0].translated_base_pfn = val >> 10; break; case 0x0240: /* TBA1 */ s->pchip.win[1].translated_base_pfn = val >> 10; break; case 0x0280: /* TBA2 */ s->pchip.win[2].translated_base_pfn = val >> 10; break; case 0x02c0: /* TBA3 */ s->pchip.win[3].translated_base_pfn = val >> 10; break; case 0x0300: /* PCTL: Pchip Control Register. */ oldval = s->pchip.ctl; oldval &= ~0x00001cff0fc7ffull; /* RW fields */ oldval |= val & 0x00001cff0fc7ffull; s->pchip.ctl = oldval; break; case 0x0340: /* PLAT: Pchip Master Latency Register. */ break; case 0x03c0: /* PERROR: Pchip Error Register. */ break; case 0x0400: /* PERRMASK: Pchip Error Mask Register. */ break; case 0x0440: /* PERRSET: Pchip Error Set Register. */ break; case 0x0480: /* TLBIV: Translation Buffer Invalidate Virtual Register. */ break; case 0x04c0: /* TLBIA: Translation Buffer Invalidate All Register (WO). 
*/ break; case 0x0500: /* PMONCTL */ case 0x0540: /* PMONCNT */ case 0x0800: /* SPRST */ break; default: cpu_unassigned_access(current_cpu, addr, true, false, 0, size); return; } }"} {"target": 1, "idx": 10956, "func": "static void vga_precise_update_retrace_info(VGAState *s) { int htotal_chars; int hretr_start_char; int hretr_skew_chars; int hretr_end_char; int vtotal_lines; int vretr_start_line; int vretr_end_line; int div2, sldiv2, dots; int clocking_mode; int clock_sel; const int hz[] = {25175000, 28322000, 25175000, 25175000}; int64_t chars_per_sec; struct vga_precise_retrace *r = &s->retrace_info.precise; htotal_chars = s->cr[0x00] + 5; hretr_start_char = s->cr[0x04]; hretr_skew_chars = (s->cr[0x05] >> 5) & 3; hretr_end_char = s->cr[0x05] & 0x1f; vtotal_lines = (s->cr[0x06] | (((s->cr[0x07] & 1) | ((s->cr[0x07] >> 4) & 2)) << 8)) + 2 ; vretr_start_line = s->cr[0x10] | ((((s->cr[0x07] >> 2) & 1) | ((s->cr[0x07] >> 6) & 2)) << 8) ; vretr_end_line = s->cr[0x11] & 0xf; div2 = (s->cr[0x17] >> 2) & 1; sldiv2 = (s->cr[0x17] >> 3) & 1; clocking_mode = (s->sr[0x01] >> 3) & 1; clock_sel = (s->msr >> 2) & 3; dots = (s->msr & 1) ? 8 : 9; chars_per_sec = hz[clock_sel] / dots; htotal_chars <<= clocking_mode; r->total_chars = vtotal_lines * htotal_chars; if (r->freq) { r->ticks_per_char = ticks_per_sec / (r->total_chars * r->freq); } else { r->ticks_per_char = ticks_per_sec / chars_per_sec; } r->vstart = vretr_start_line; r->vend = r->vstart + vretr_end_line + 1; r->hstart = hretr_start_char + hretr_skew_chars; r->hend = r->hstart + hretr_end_char + 1; r->htotal = htotal_chars; #if 0 printf ( \"hz=%f\\n\" \"htotal = %d\\n\" \"hretr_start = %d\\n\" \"hretr_skew = %d\\n\" \"hretr_end = %d\\n\" \"vtotal = %d\\n\" \"vretr_start = %d\\n\" \"vretr_end = %d\\n\" \"div2 = %d sldiv2 = %d\\n\" \"clocking_mode = %d\\n\" \"clock_sel = %d %d\\n\" \"dots = %d\\n\" \"ticks/char = %lld\\n\" \"\\n\", (double) ticks_per_sec / (r->ticks_per_char * r->total_chars), htotal_chars, hretr_start_char, hretr_skew_chars, hretr_end_char, vtotal_lines, vretr_start_line, vretr_end_line, div2, sldiv2, clocking_mode, clock_sel, hz[clock_sel], dots, r->ticks_per_char ); #endif }"} {"target": 1, "idx": 10960, "func": "int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, const char *filename, void *logctx, unsigned int offset, unsigned int max_probe_size) { AVProbeData pd = { filename ? filename : \"\", NULL, -offset }; uint8_t *buf = NULL; uint8_t *mime_type; int ret = 0, probe_size, buf_offset = 0; int score = 0; if (!max_probe_size) { max_probe_size = PROBE_BUF_MAX; } else if (max_probe_size > PROBE_BUF_MAX) { max_probe_size = PROBE_BUF_MAX; } else if (max_probe_size < PROBE_BUF_MIN) { av_log(logctx, AV_LOG_ERROR, \"Specified probe size value %u cannot be < %u\\n\", max_probe_size, PROBE_BUF_MIN); return AVERROR(EINVAL); } if (offset >= max_probe_size) { return AVERROR(EINVAL); } if (!*fmt && pb->av_class && av_opt_get(pb, \"mime_type\", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) { if (!av_strcasecmp(mime_type, \"audio/aacp\")) { *fmt = av_find_input_format(\"aac\"); } av_freep(&mime_type); } for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt; probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) { if (probe_size < offset) { continue; } score = probe_size < max_probe_size ? 
AVPROBE_SCORE_RETRY : 0; /* read probe data */ if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0) return ret; if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) { /* fail if error was not end of file, otherwise, lower score */ if (ret != AVERROR_EOF) { av_free(buf); return ret; } score = 0; ret = 0; /* error was end of file, nothing read */ } buf_offset += ret; pd.buf_size = buf_offset - offset; pd.buf = &buf[offset]; memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE); /* guess file format */ *fmt = av_probe_input_format2(&pd, 1, &score); if(*fmt){ if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration av_log(logctx, AV_LOG_WARNING, \"Format %s detected only with low score of %d, misdetection possible!\\n\", (*fmt)->name, score); }else av_log(logctx, AV_LOG_DEBUG, \"Format %s probed with size=%d and score=%d\\n\", (*fmt)->name, probe_size, score); #if 0 FILE *f = fopen(\"probestat.tmp\", \"ab\"); fprintf(f, \"probe_size:%d format:%s score:%d filename:%s\\n\", probe_size, (*fmt)->name, score, filename); fclose(f); #endif } } if (!*fmt) { av_free(buf); return AVERROR_INVALIDDATA; } /* rewind. reuse probe buffer to avoid seeking */ ret = ffio_rewind_with_probe_data(pb, &buf, pd.buf_size); return ret < 0 ? ret : score; }"} {"target": 1, "idx": 10964, "func": "static ssize_t drop_sync(QIOChannel *ioc, size_t size) { ssize_t ret = 0; char small[1024]; char *buffer; buffer = sizeof(small) < size ? small : g_malloc(MIN(65536, size)); while (size > 0) { ssize_t count = read_sync(ioc, buffer, MIN(65536, size)); if (count <= 0) { goto cleanup; } assert(count <= size); size -= count; ret += count; } cleanup: if (buffer != small) { g_free(buffer); } return ret; }"} {"target": 1, "idx": 10965, "func": "void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz) { VirtQueueElement *elem; VirtQueueElementOld data; int i; qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld)); /* TODO: teach all callers that this can fail, and return failure instead * of asserting here. * When we do, we might be able to re-enable NDEBUG below. */ #ifdef NDEBUG #error building with NDEBUG is not supported #endif assert(ARRAY_SIZE(data.in_addr) >= data.in_num); assert(ARRAY_SIZE(data.out_addr) >= data.out_num); elem = virtqueue_alloc_element(sz, data.out_num, data.in_num); elem->index = data.index; for (i = 0; i < elem->in_num; i++) { elem->in_addr[i] = data.in_addr[i]; } for (i = 0; i < elem->out_num; i++) { elem->out_addr[i] = data.out_addr[i]; } for (i = 0; i < elem->in_num; i++) { /* Base is overwritten by virtqueue_map. */ elem->in_sg[i].iov_base = 0; elem->in_sg[i].iov_len = data.in_sg[i].iov_len; } for (i = 0; i < elem->out_num; i++) { /* Base is overwritten by virtqueue_map. 
*/ elem->out_sg[i].iov_base = 0; elem->out_sg[i].iov_len = data.out_sg[i].iov_len; } virtqueue_map(vdev, elem); return elem; }"} {"target": 1, "idx": 10970, "func": "static void mirror_read_complete(void *opaque, int ret) { MirrorOp *op = opaque; MirrorBlockJob *s = op->s; if (ret < 0) { BlockDriverState *source = s->common.bs; BlockErrorAction action; bdrv_set_dirty(source, op->sector_num, op->nb_sectors); action = mirror_error_action(s, true, -ret); if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) { s->ret = ret; } mirror_iteration_done(op, ret); return; } bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors, mirror_write_complete, op); }"} {"target": 0, "idx": 10986, "func": "static BlockDriverAIOCB *qcow_aio_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { BDRVQcowState *s = bs->opaque; QCowAIOCB *acb; s->cluster_cache_offset = -1; /* disable compressed cache */ acb = qemu_aio_get(bs, cb, opaque); if (!acb) return NULL; acb->hd_aiocb = NULL; acb->sector_num = sector_num; acb->qiov = qiov; if (qiov->niov > 1) { acb->buf = acb->orig_buf = qemu_memalign(512, qiov->size); qemu_iovec_to_buffer(qiov, acb->buf); } else { acb->buf = (uint8_t *)qiov->iov->iov_base; } acb->nb_sectors = nb_sectors; acb->n = 0; qcow_aio_write_cb(acb, 0); return &acb->common; }"} {"target": 0, "idx": 11006, "func": "static uint32_t vmsvga_bios_read(void *opaque, uint32_t address) { printf(\"%s: what are we supposed to return?\\n\", __FUNCTION__); return 0xcafe; }"} {"target": 0, "idx": 11019, "func": "static int mov_write_ms_tag(AVIOContext *pb, MOVTrack *track) { int64_t pos = avio_tell(pb); avio_wb32(pb, 0); avio_wl32(pb, track->tag); // store it byteswapped track->enc->codec_tag = av_bswap16(track->tag >> 16); ff_put_wav_header(pb, track->enc, 0); return update_size(pb, pos); }"} {"target": 0, "idx": 11023, "func": "static int copy_to_pbr(DCAXllDecoder *s, uint8_t *data, int size, int delay) { if (size > DCA_XLL_PBR_BUFFER_MAX) return AVERROR(ENOSPC); if (!s->pbr_buffer && !(s->pbr_buffer = av_malloc(DCA_XLL_PBR_BUFFER_MAX + DCA_BUFFER_PADDING_SIZE))) return AVERROR(ENOMEM); memcpy(s->pbr_buffer, data, size); s->pbr_length = size; s->pbr_delay = delay; return 0; }"} {"target": 0, "idx": 11027, "func": "static void sigill_handler (int sig) { if (!canjump) { signal (sig, SIG_DFL); raise (sig); } canjump = 0; siglongjmp (jmpbuf, 1); }"} {"target": 1, "idx": 11055, "func": "static void v9fs_lcreate(void *opaque) { int32_t dfid, flags, mode; gid_t gid; ssize_t err = 0; ssize_t offset = 7; V9fsString name; V9fsFidState *fidp; struct stat stbuf; V9fsQID qid; int32_t iounit; V9fsPDU *pdu = opaque; pdu_unmarshal(pdu, offset, \"dsddd\", &dfid, &name, &flags, &mode, &gid); fidp = get_fid(pdu, dfid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } flags = get_dotl_openflags(pdu->s, flags); err = v9fs_co_open2(pdu, fidp, &name, gid, flags | O_CREAT, mode, &stbuf); if (err < 0) { goto out; } fidp->fid_type = P9_FID_FILE; fidp->open_flags = flags; if (flags & O_EXCL) { /* * We let the host file system do O_EXCL check * We should not reclaim such fd */ fidp->flags |= FID_NON_RECLAIMABLE; } iounit = get_iounit(pdu, &fidp->path); stat_to_qid(&stbuf, &qid); offset += pdu_marshal(pdu, offset, \"Qd\", &qid, iounit); err = offset; out: put_fid(pdu, fidp); out_nofid: trace_v9fs_lcreate_return(pdu->tag, pdu->id, qid.type, qid.version, qid.path, iounit); complete_pdu(pdu->s, pdu, err); v9fs_string_free(&name); }"} {"target": 0, 
"idx": 11066, "func": "static int hevc_handle_packet(AVFormatContext *ctx, PayloadContext *rtp_hevc_ctx, AVStream *st, AVPacket *pkt, uint32_t *timestamp, const uint8_t *buf, int len, uint16_t seq, int flags) { const uint8_t *rtp_pl = buf; int tid, lid, nal_type; int first_fragment, last_fragment, fu_type; uint8_t new_nal_header[2]; int res = 0; /* sanity check for size of input packet: 1 byte payload at least */ if (len < RTP_HEVC_PAYLOAD_HEADER_SIZE + 1) { av_log(ctx, AV_LOG_ERROR, \"Too short RTP/HEVC packet, got %d bytes\\n\", len); return AVERROR_INVALIDDATA; } /* decode the HEVC payload header according to section 4 of draft version 6: 0 1 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |F| Type | LayerId | TID | +-------------+-----------------+ Forbidden zero (F): 1 bit NAL unit type (Type): 6 bits NUH layer ID (LayerId): 6 bits NUH temporal ID plus 1 (TID): 3 bits */ nal_type = (buf[0] >> 1) & 0x3f; lid = ((buf[0] << 5) & 0x20) | ((buf[1] >> 3) & 0x1f); tid = buf[1] & 0x07; /* sanity check for correct layer ID */ if (lid) { /* future scalable or 3D video coding extensions */ avpriv_report_missing_feature(ctx, \"Multi-layer HEVC coding\\n\"); return AVERROR_PATCHWELCOME; } /* sanity check for correct temporal ID */ if (!tid) { av_log(ctx, AV_LOG_ERROR, \"Illegal temporal ID in RTP/HEVC packet\\n\"); return AVERROR_INVALIDDATA; } /* sanity check for correct NAL unit type */ if (nal_type > 50) { av_log(ctx, AV_LOG_ERROR, \"Unsupported (HEVC) NAL type (%d)\\n\", nal_type); return AVERROR_INVALIDDATA; } switch (nal_type) { /* aggregated packets (AP) */ case 48: /* pass the HEVC payload header */ buf += RTP_HEVC_PAYLOAD_HEADER_SIZE; len -= RTP_HEVC_PAYLOAD_HEADER_SIZE; /* pass the HEVC DONL field */ if (rtp_hevc_ctx->using_donl_field) { buf += RTP_HEVC_DONL_FIELD_SIZE; len -= RTP_HEVC_DONL_FIELD_SIZE; } /* fall-through */ /* video parameter set (VPS) */ case 32: /* sequence parameter set (SPS) */ case 33: /* picture parameter set (PPS) */ case 34: /* supplemental enhancement information (SEI) */ case 39: /* single NAL unit packet */ default: /* sanity check for size of input packet: 1 byte payload at least */ if (len < 1) { av_log(ctx, AV_LOG_ERROR, \"Too short RTP/HEVC packet, got %d bytes of NAL unit type %d\\n\", len, nal_type); return AVERROR_INVALIDDATA; } /* create A/V packet */ if ((res = av_new_packet(pkt, sizeof(start_sequence) + len)) < 0) return res; /* A/V packet: copy start sequence */ memcpy(pkt->data, start_sequence, sizeof(start_sequence)); /* A/V packet: copy NAL unit data */ memcpy(pkt->data + sizeof(start_sequence), buf, len); break; /* fragmentation unit (FU) */ case 49: /* pass the HEVC payload header */ buf += RTP_HEVC_PAYLOAD_HEADER_SIZE; len -= RTP_HEVC_PAYLOAD_HEADER_SIZE; if (len < 1) return AVERROR_INVALIDDATA; /* decode the FU header 0 1 2 3 4 5 6 7 +-+-+-+-+-+-+-+-+ |S|E| FuType | +---------------+ Start fragment (S): 1 bit End fragment (E): 1 bit FuType: 6 bits */ first_fragment = buf[0] & 0x80; last_fragment = buf[0] & 0x40; fu_type = buf[0] & 0x3f; /* pass the HEVC FU header */ buf += RTP_HEVC_FU_HEADER_SIZE; len -= RTP_HEVC_FU_HEADER_SIZE; /* pass the HEVC DONL field */ if (rtp_hevc_ctx->using_donl_field) { buf += RTP_HEVC_DONL_FIELD_SIZE; len -= RTP_HEVC_DONL_FIELD_SIZE; } av_dlog(ctx, \" FU type %d with %d bytes\\n\", fu_type, len); if (len > 0) { new_nal_header[0] = (rtp_pl[0] & 0x81) | (fu_type << 1); new_nal_header[1] = rtp_pl[1]; /* start fragment vs. 
subsequent fragments */ if (first_fragment) { if (!last_fragment) { /* create A/V packet which is big enough */ if ((res = av_new_packet(pkt, sizeof(start_sequence) + sizeof(new_nal_header) + len)) < 0) return res; /* A/V packet: copy start sequence */ memcpy(pkt->data, start_sequence, sizeof(start_sequence)); /* A/V packet: copy new NAL header */ memcpy(pkt->data + sizeof(start_sequence), new_nal_header, sizeof(new_nal_header)); /* A/V packet: copy NAL unit data */ memcpy(pkt->data + sizeof(start_sequence) + sizeof(new_nal_header), buf, len); } else { av_log(ctx, AV_LOG_ERROR, \"Illegal combination of S and E bit in RTP/HEVC packet\\n\"); res = AVERROR_INVALIDDATA; } } else { /* create A/V packet */ if ((res = av_new_packet(pkt, len)) < 0) return res; /* A/V packet: copy NAL unit data */ memcpy(pkt->data, buf, len); } } else { /* sanity check for size of input packet: 1 byte payload at least */ av_log(ctx, AV_LOG_ERROR, \"Too short RTP/HEVC packet, got %d bytes of NAL unit type %d\\n\", len, nal_type); res = AVERROR_INVALIDDATA; } break; /* PACI packet */ case 50: /* Temporal scalability control information (TSCI) */ avpriv_report_missing_feature(ctx, \"PACI packets for RTP/HEVC\\n\"); res = AVERROR_PATCHWELCOME; break; } pkt->stream_index = st->index; return res; }"} {"target": 1, "idx": 11069, "func": "static void openpic_cpu_write_internal(void *opaque, hwaddr addr, uint32_t val, int idx) { OpenPICState *opp = opaque; IRQ_src_t *src; IRQ_dst_t *dst; int s_IRQ, n_IRQ; DPRINTF(\"%s: cpu %d addr \" TARGET_FMT_plx \" <= %08x\\n\", __func__, idx, addr, val); if (idx < 0) { return; } if (addr & 0xF) return; dst = &opp->dst[idx]; addr &= 0xFF0; switch (addr) { case 0x40: /* IPIDR */ case 0x50: case 0x60: case 0x70: idx = (addr - 0x40) >> 4; /* we use IDE as mask which CPUs to deliver the IPI to still. */ write_IRQreg_ide(opp, opp->irq_ipi0 + idx, opp->src[opp->irq_ipi0 + idx].ide | val); openpic_set_irq(opp, opp->irq_ipi0 + idx, 1); openpic_set_irq(opp, opp->irq_ipi0 + idx, 0); break; case 0x80: /* PCTP */ dst->pctp = val & 0x0000000F; break; case 0x90: /* WHOAMI */ /* Read-only register */ break; case 0xA0: /* PIAC */ /* Read-only register */ break; case 0xB0: /* PEOI */ DPRINTF(\"PEOI\\n\"); s_IRQ = IRQ_get_next(opp, &dst->servicing); IRQ_resetbit(&dst->servicing, s_IRQ); dst->servicing.next = -1; /* Set up next servicing IRQ */ s_IRQ = IRQ_get_next(opp, &dst->servicing); /* Check queued interrupts. 
*/ n_IRQ = IRQ_get_next(opp, &dst->raised); src = &opp->src[n_IRQ]; if (n_IRQ != -1 && (s_IRQ == -1 || IPVP_PRIORITY(src->ipvp) > dst->servicing.priority)) { DPRINTF(\"Raise OpenPIC INT output cpu %d irq %d\\n\", idx, n_IRQ); openpic_irq_raise(opp, idx, src); } break; default: break; } }"} {"target": 1, "idx": 11075, "func": "int kvm_init(void) { static const char upgrade_note[] = \"Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\\n\" \"(see http://sourceforge.net/projects/kvm).\\n\"; KVMState *s; const KVMCapabilityInfo *missing_cap; int ret; int i; s = g_malloc0(sizeof(KVMState)); #ifdef KVM_CAP_SET_GUEST_DEBUG QTAILQ_INIT(&s->kvm_sw_breakpoints); #endif for (i = 0; i < ARRAY_SIZE(s->slots); i++) { s->slots[i].slot = i; } s->vmfd = -1; s->fd = qemu_open(\"/dev/kvm\", O_RDWR); if (s->fd == -1) { fprintf(stderr, \"Could not access KVM kernel module: %m\\n\"); ret = -errno; goto err; } ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); if (ret < KVM_API_VERSION) { if (ret > 0) { ret = -EINVAL; } fprintf(stderr, \"kvm version too old\\n\"); goto err; } if (ret > KVM_API_VERSION) { ret = -EINVAL; fprintf(stderr, \"kvm version not supported\\n\"); goto err; } s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0); if (s->vmfd < 0) { #ifdef TARGET_S390X fprintf(stderr, \"Please add the 'switch_amode' kernel parameter to \" \"your host kernel command line\\n\"); #endif ret = s->vmfd; goto err; } missing_cap = kvm_check_extension_list(s, kvm_required_capabilites); if (!missing_cap) { missing_cap = kvm_check_extension_list(s, kvm_arch_required_capabilities); } if (missing_cap) { ret = -EINVAL; fprintf(stderr, \"kvm does not support %s\\n%s\", missing_cap->name, upgrade_note); goto err; } s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); s->broken_set_mem_region = 1; ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS); if (ret > 0) { s->broken_set_mem_region = 0; } #ifdef KVM_CAP_VCPU_EVENTS s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); #endif s->robust_singlestep = kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP); #ifdef KVM_CAP_DEBUGREGS s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); #endif #ifdef KVM_CAP_XSAVE s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE); #endif #ifdef KVM_CAP_XCRS s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS); #endif ret = kvm_arch_init(s); if (ret < 0) { goto err; } kvm_state = s; cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client); s->many_ioeventfds = kvm_check_many_ioeventfds(); cpu_interrupt_handler = kvm_handle_interrupt; return 0; err: if (s) { if (s->vmfd >= 0) { close(s->vmfd); } if (s->fd != -1) { close(s->fd); } } g_free(s); return ret; }"} {"target": 0, "idx": 11087, "func": "static int process_line(URLContext *h, char *line, int line_count, int *new_location) { HTTPContext *s = h->priv_data; char *tag, *p, *end; /* end of header */ if (line[0] == '\\0') { s->end_header = 1; return 0; } p = line; if (line_count == 0) { while (!isspace(*p) && *p != '\\0') p++; while (isspace(*p)) p++; s->http_code = strtol(p, &end, 10); av_dlog(NULL, \"http_code=%d\\n\", s->http_code); /* error codes are 4xx and 5xx, but regard 401 as a success, so we * don't abort until all headers have been parsed. 
*/ if (s->http_code >= 400 && s->http_code < 600 && (s->http_code != 401 || s->auth_state.auth_type != HTTP_AUTH_NONE) && (s->http_code != 407 || s->proxy_auth_state.auth_type != HTTP_AUTH_NONE)) { end += strspn(end, SPACE_CHARS); av_log(h, AV_LOG_WARNING, \"HTTP error %d %s\\n\", s->http_code, end); return -1; } } else { while (*p != '\\0' && *p != ':') p++; if (*p != ':') return 1; *p = '\\0'; tag = line; p++; while (isspace(*p)) p++; if (!av_strcasecmp(tag, \"Location\")) { av_strlcpy(s->location, p, sizeof(s->location)); *new_location = 1; } else if (!av_strcasecmp (tag, \"Content-Length\") && s->filesize == -1) { s->filesize = strtoll(p, NULL, 10); } else if (!av_strcasecmp (tag, \"Content-Range\")) { /* \"bytes $from-$to/$document_size\" */ const char *slash; if (!strncmp (p, \"bytes \", 6)) { p += 6; s->off = strtoll(p, NULL, 10); if ((slash = strchr(p, '/')) && strlen(slash) > 0) s->filesize = strtoll(slash+1, NULL, 10); } if (s->seekable == -1 && (!s->is_akamai || s->filesize != 2147483647)) h->is_streamed = 0; /* we _can_ in fact seek */ } else if (!av_strcasecmp(tag, \"Accept-Ranges\") && !strncmp(p, \"bytes\", 5) && s->seekable == -1) { h->is_streamed = 0; } else if (!av_strcasecmp (tag, \"Transfer-Encoding\") && !av_strncasecmp(p, \"chunked\", 7)) { s->filesize = -1; s->chunksize = 0; } else if (!av_strcasecmp (tag, \"WWW-Authenticate\")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp (tag, \"Authentication-Info\")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp (tag, \"Proxy-Authenticate\")) { ff_http_auth_handle_header(&s->proxy_auth_state, tag, p); } else if (!av_strcasecmp (tag, \"Connection\")) { if (!strcmp(p, \"close\")) s->willclose = 1; } else if (!av_strcasecmp (tag, \"Server\") && !av_strcasecmp (p, \"AkamaiGHost\")) { s->is_akamai = 1; } else if (!av_strcasecmp (tag, \"Content-Type\") && p) { av_free(s->mime_type); s->mime_type = av_strdup(p); } } return 1; }"} {"target": 1, "idx": 11095, "func": "g_malloc(size_t n_bytes) { void *mem; __coverity_negative_sink__(n_bytes); mem = malloc(n_bytes == 0 ? 
1 : n_bytes); if (!mem) __coverity_panic__(); return mem; }"} {"target": 0, "idx": 11124, "func": "void qemu_get_timedate(struct tm *tm, int offset) { time_t ti; time(&ti); ti += offset; if (rtc_date_offset == -1) { if (rtc_utc) gmtime_r(&ti, tm); else localtime_r(&ti, tm); } else { ti -= rtc_date_offset; gmtime_r(&ti, tm); } }"} {"target": 0, "idx": 11132, "func": "static uint64_t omap_lpg_read(void *opaque, target_phys_addr_t addr, unsigned size) { struct omap_lpg_s *s = (struct omap_lpg_s *) opaque; int offset = addr & OMAP_MPUI_REG_MASK; if (size != 1) { return omap_badwidth_read8(opaque, addr); } switch (offset) { case 0x00: /* LCR */ return s->control; case 0x04: /* PMR */ return s->power; } OMAP_BAD_REG(addr); return 0; }"} {"target": 0, "idx": 11151, "func": "static int gif_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data) { GIFContext *s = avctx->priv_data; AVFrame *pict = data; AVFrame *const p = (AVFrame *)&s->picture; uint8_t *outbuf_ptr = outbuf; *p = *pict; p->pict_type = FF_I_TYPE; p->key_frame = 1; gif_image_write_header(&outbuf_ptr, avctx->width, avctx->height, -1, (uint32_t *)pict->data[1]); gif_image_write_image(&outbuf_ptr, 0, 0, avctx->width, avctx->height, pict->data[0], pict->linesize[0], PIX_FMT_PAL8); return outbuf_ptr - outbuf; }"} {"target": 0, "idx": 11155, "func": "static int local_mksock(FsContext *ctx2, const char *path) { struct sockaddr_un addr; int s; addr.sun_family = AF_UNIX; snprintf(addr.sun_path, 108, \"%s\", rpath(ctx2, path)); s = socket(PF_UNIX, SOCK_STREAM, 0); if (s == -1) { return -1; } if (bind(s, (struct sockaddr *)&addr, sizeof(addr))) { close(s); return -1; } close(s); return 0; }"} {"target": 0, "idx": 11158, "func": "av_cold int avcodec_close(AVCodecContext *avctx) { int ret = ff_lock_avcodec(avctx); if (ret < 0) return ret; if (avcodec_is_open(avctx)) { FramePool *pool = avctx->internal->pool; int i; if (HAVE_THREADS && avctx->internal->frame_thread_encoder && avctx->thread_count > 1) { ff_unlock_avcodec(); ff_frame_thread_encoder_free(avctx); ff_lock_avcodec(avctx); } if (HAVE_THREADS && avctx->thread_opaque) ff_thread_free(avctx); if (avctx->codec && avctx->codec->close) avctx->codec->close(avctx); avctx->coded_frame = NULL; avctx->internal->byte_buffer_size = 0; av_freep(&avctx->internal->byte_buffer); if (!avctx->refcounted_frames) av_frame_unref(&avctx->internal->to_free); for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++) av_buffer_pool_uninit(&pool->pools[i]); av_freep(&avctx->internal->pool); av_freep(&avctx->internal); } if (avctx->priv_data && avctx->codec && avctx->codec->priv_class) av_opt_free(avctx->priv_data); av_opt_free(avctx); av_freep(&avctx->priv_data); if (av_codec_is_encoder(avctx->codec)) av_freep(&avctx->extradata); avctx->codec = NULL; avctx->active_thread_type = 0; ff_unlock_avcodec(); return 0; }"} {"target": 1, "idx": 11160, "func": "static void virtio_device_free_virtqueues(VirtIODevice *vdev) { int i; if (!vdev->vq) { return; } for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { VRingMemoryRegionCaches *caches; if (vdev->vq[i].vring.num == 0) { break; } caches = atomic_read(&vdev->vq[i].vring.caches); atomic_set(&vdev->vq[i].vring.caches, NULL); virtio_free_region_cache(caches); } g_free(vdev->vq); }"} {"target": 1, "idx": 11194, "func": "static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1) { MpegEncContext *s = &s1->mpeg_enc_ctx; s->full_pel[0] = s->full_pel[1] = 0; s->mpeg_f_code[0][0] = get_bits(&s->gb, 4); s->mpeg_f_code[0][1] = get_bits(&s->gb, 4); 
s->mpeg_f_code[1][0] = get_bits(&s->gb, 4); s->mpeg_f_code[1][1] = get_bits(&s->gb, 4); if (!s->pict_type && s1->mpeg_enc_ctx_allocated) { av_log(s->avctx, AV_LOG_ERROR, \"Missing picture start code, guessing missing values\\n\"); if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) { if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15) s->pict_type = AV_PICTURE_TYPE_I; else s->pict_type = AV_PICTURE_TYPE_P; } else s->pict_type = AV_PICTURE_TYPE_B; s->current_picture.f->pict_type = s->pict_type; s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; } s->intra_dc_precision = get_bits(&s->gb, 2); s->picture_structure = get_bits(&s->gb, 2); s->top_field_first = get_bits1(&s->gb); s->frame_pred_frame_dct = get_bits1(&s->gb); s->concealment_motion_vectors = get_bits1(&s->gb); s->q_scale_type = get_bits1(&s->gb); s->intra_vlc_format = get_bits1(&s->gb); s->alternate_scan = get_bits1(&s->gb); s->repeat_first_field = get_bits1(&s->gb); s->chroma_420_type = get_bits1(&s->gb); s->progressive_frame = get_bits1(&s->gb); if (s->progressive_sequence && !s->progressive_frame) { s->progressive_frame = 1; av_log(s->avctx, AV_LOG_ERROR, \"interlaced frame in progressive sequence, ignoring\\n\"); } if (s->picture_structure == 0 || (s->progressive_frame && s->picture_structure != PICT_FRAME)) { av_log(s->avctx, AV_LOG_ERROR, \"picture_structure %d invalid, ignoring\\n\", s->picture_structure); s->picture_structure = PICT_FRAME; } if (s->progressive_sequence && !s->frame_pred_frame_dct) av_log(s->avctx, AV_LOG_WARNING, \"invalid frame_pred_frame_dct\\n\"); if (s->picture_structure == PICT_FRAME) { s->first_field = 0; s->v_edge_pos = 16 * s->mb_height; } else { s->first_field ^= 1; s->v_edge_pos = 8 * s->mb_height; memset(s->mbskip_table, 0, s->mb_stride * s->mb_height); } if (s->alternate_scan) { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan); } else { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct); } /* composite display not parsed */ ff_dlog(s->avctx, \"intra_dc_precision=%d\\n\", s->intra_dc_precision); ff_dlog(s->avctx, \"picture_structure=%d\\n\", s->picture_structure); ff_dlog(s->avctx, \"top field first=%d\\n\", s->top_field_first); ff_dlog(s->avctx, \"repeat first field=%d\\n\", s->repeat_first_field); ff_dlog(s->avctx, \"conceal=%d\\n\", s->concealment_motion_vectors); ff_dlog(s->avctx, \"intra_vlc_format=%d\\n\", s->intra_vlc_format); ff_dlog(s->avctx, \"alternate_scan=%d\\n\", s->alternate_scan); ff_dlog(s->avctx, \"frame_pred_frame_dct=%d\\n\", s->frame_pred_frame_dct); ff_dlog(s->avctx, \"progressive_frame=%d\\n\", s->progressive_frame); }"} {"target": 1, "idx": 11197, "func": "static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int height, int delta, int list, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int src_x_offset, int src_y_offset, qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op, int pixel_shift, int chroma_idc) { const int mx = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8; int my = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8; const int luma_xy = (mx & 3) + ((my & 3) << 2); int offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize; uint8_t *src_y = pic->f.data[0] + offset; uint8_t *src_cb, *src_cr; int extra_width = 0; int 
extra_height = 0; int emu = 0; const int full_mx = mx >> 2; const int full_my = my >> 2; const int pic_width = 16 * h->mb_width; const int pic_height = 16 * h->mb_height >> MB_FIELD(h); int ysh; if (mx & 7) extra_width -= 3; if (my & 7) extra_height -= 3; if (full_mx < 0 - extra_width || full_my < 0 - extra_height || full_mx + 16 /*FIXME*/ > pic_width + extra_width || full_my + 16 /*FIXME*/ > pic_height + extra_height) { h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_y - (2 << pixel_shift) - 2 * h->mb_linesize, h->mb_linesize, 16 + 5, 16 + 5 /*FIXME*/, full_mx - 2, full_my - 2, pic_width, pic_height); src_y = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize; emu = 1; } qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); // FIXME try variable height perhaps? if (!square) qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize); if (CONFIG_GRAY && h->flags & CODEC_FLAG_GRAY) return; if (chroma_idc == 3 /* yuv444 */) { src_cb = pic->f.data[1] + offset; if (emu) { h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb - (2 << pixel_shift) - 2 * h->mb_linesize, h->mb_linesize, 16 + 5, 16 + 5 /*FIXME*/, full_mx - 2, full_my - 2, pic_width, pic_height); src_cb = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize; } qpix_op[luma_xy](dest_cb, src_cb, h->mb_linesize); // FIXME try variable height perhaps? if (!square) qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize); src_cr = pic->f.data[2] + offset; if (emu) { h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr - (2 << pixel_shift) - 2 * h->mb_linesize, h->mb_linesize, 16 + 5, 16 + 5 /*FIXME*/, full_mx - 2, full_my - 2, pic_width, pic_height); src_cr = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize; } qpix_op[luma_xy](dest_cr, src_cr, h->mb_linesize); // FIXME try variable height perhaps? 
if (!square) qpix_op[luma_xy](dest_cr + delta, src_cr + delta, h->mb_linesize); return; } ysh = 3 - (chroma_idc == 2 /* yuv422 */); if (chroma_idc == 1 /* yuv420 */ && MB_FIELD(h)) { // chroma offset when predicting from a field of opposite parity my += 2 * ((h->mb_y & 1) - (pic->reference - 1)); emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1); } src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) + (my >> ysh) * h->mb_uvlinesize; src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) + (my >> ysh) * h->mb_uvlinesize; if (emu) { h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb, h->mb_uvlinesize, 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh), pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */)); src_cb = h->edge_emu_buffer; } chroma_op(dest_cb, src_cb, h->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */), mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7); if (emu) { h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr, h->mb_uvlinesize, 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh), pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */)); src_cr = h->edge_emu_buffer; } chroma_op(dest_cr, src_cr, h->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */), mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7); }"} {"target": 1, "idx": 11241, "func": "static int av_cold libopus_encode_init(AVCodecContext *avctx) { LibopusEncContext *opus = avctx->priv_data; const uint8_t *channel_mapping; OpusMSEncoder *enc; int ret = OPUS_OK; int coupled_stream_count, header_size, frame_size; coupled_stream_count = opus_coupled_streams[avctx->channels - 1]; opus->stream_count = avctx->channels - coupled_stream_count; channel_mapping = libav_libopus_channel_map[avctx->channels - 1]; /* FIXME: Opus can handle up to 255 channels. However, the mapping for * anything greater than 8 is undefined. */ if (avctx->channels > 8) av_log(avctx, AV_LOG_WARNING, \"Channel layout undefined for %d channels.\\n\", avctx->channels); if (!avctx->bit_rate) { /* Sane default copied from opusenc */ avctx->bit_rate = 64000 * opus->stream_count + 32000 * coupled_stream_count; av_log(avctx, AV_LOG_WARNING, \"No bit rate set. Defaulting to %d bps.\\n\", avctx->bit_rate); } if (avctx->bit_rate < 500 || avctx->bit_rate > 256000 * avctx->channels) { av_log(avctx, AV_LOG_ERROR, \"The bit rate %d bps is unsupported. \" \"Please choose a value between 500 and %d.\\n\", avctx->bit_rate, 256000 * avctx->channels); return AVERROR(EINVAL); } frame_size = opus->opts.frame_duration * 48000 / 1000; switch (frame_size) { case 120: case 240: if (opus->opts.application != OPUS_APPLICATION_RESTRICTED_LOWDELAY) av_log(avctx, AV_LOG_WARNING, \"LPC mode cannot be used with a frame duration of less \" \"than 10ms. Enabling restricted low-delay mode.\\n\" \"Use a longer frame duration if this is not what you want.\\n\"); /* Frame sizes less than 10 ms can only use MDCT mode, so switching to * RESTRICTED_LOWDELAY avoids an unnecessary extra 2.5ms lookahead. */ opus->opts.application = OPUS_APPLICATION_RESTRICTED_LOWDELAY; case 480: case 960: case 1920: case 2880: opus->opts.packet_size = avctx->frame_size = frame_size * avctx->sample_rate / 48000; break; default: av_log(avctx, AV_LOG_ERROR, \"Invalid frame duration: %g.\\n\" \"Frame duration must be exactly one of: 2.5, 5, 10, 20, 40 or 60.\\n\", opus->opts.frame_duration); return AVERROR(EINVAL); } if (avctx->compression_level < 0 || avctx->compression_level > 10) { av_log(avctx, AV_LOG_WARNING, \"Compression level must be in the range 0 to 10. 
\" \"Defaulting to 10.\\n\"); opus->opts.complexity = 10; } else { opus->opts.complexity = avctx->compression_level; } if (avctx->cutoff) { switch (avctx->cutoff) { case 4000: opus->opts.max_bandwidth = OPUS_BANDWIDTH_NARROWBAND; break; case 6000: opus->opts.max_bandwidth = OPUS_BANDWIDTH_MEDIUMBAND; break; case 8000: opus->opts.max_bandwidth = OPUS_BANDWIDTH_WIDEBAND; break; case 12000: opus->opts.max_bandwidth = OPUS_BANDWIDTH_SUPERWIDEBAND; break; case 20000: opus->opts.max_bandwidth = OPUS_BANDWIDTH_FULLBAND; break; default: av_log(avctx, AV_LOG_WARNING, \"Invalid frequency cutoff: %d. Using default maximum bandwidth.\\n\" \"Cutoff frequency must be exactly one of: 4000, 6000, 8000, 12000 or 20000.\\n\", avctx->cutoff); avctx->cutoff = 0; } } enc = opus_multistream_encoder_create(avctx->sample_rate, avctx->channels, opus->stream_count, coupled_stream_count, channel_mapping, opus->opts.application, &ret); if (ret != OPUS_OK) { av_log(avctx, AV_LOG_ERROR, \"Failed to create encoder: %s\\n\", opus_strerror(ret)); return ff_opus_error_to_averror(ret); } ret = libopus_configure_encoder(avctx, enc, &opus->opts); if (ret != OPUS_OK) { ret = ff_opus_error_to_averror(ret); goto fail; } header_size = 19 + (avctx->channels > 2 ? 2 + avctx->channels : 0); avctx->extradata = av_malloc(header_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!avctx->extradata) { av_log(avctx, AV_LOG_ERROR, \"Failed to allocate extradata.\\n\"); ret = AVERROR(ENOMEM); goto fail; } avctx->extradata_size = header_size; opus->samples = av_mallocz(frame_size * avctx->channels * av_get_bytes_per_sample(avctx->sample_fmt)); if (!opus->samples) { av_log(avctx, AV_LOG_ERROR, \"Failed to allocate samples buffer.\\n\"); ret = AVERROR(ENOMEM); goto fail; } ret = opus_multistream_encoder_ctl(enc, OPUS_GET_LOOKAHEAD(&avctx->initial_padding)); if (ret != OPUS_OK) av_log(avctx, AV_LOG_WARNING, \"Unable to get number of lookahead samples: %s\\n\", opus_strerror(ret)); libopus_write_header(avctx, opus->stream_count, coupled_stream_count, opus_vorbis_channel_map[avctx->channels - 1]); ff_af_queue_init(avctx, &opus->afq); opus->enc = enc; return 0; fail: opus_multistream_encoder_destroy(enc); av_freep(&avctx->extradata); return ret; }"} {"target": 1, "idx": 11248, "func": "static int common_end(HYuvContext *s) { int i; for(i = 0; i < 3; i++) { av_freep(&s->temp[i]); } return 0; }"} {"target": 1, "idx": 11254, "func": "static int coroutine_fn raw_co_write_zeroes( BlockDriverState *bs, int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) { BDRVRawState *s = bs->opaque; if (!(flags & BDRV_REQ_MAY_UNMAP)) { return -ENOTSUP; } if (!s->discard_zeroes) { return -ENOTSUP; } return paio_submit_co(bs, s->fd, sector_num, NULL, nb_sectors, QEMU_AIO_DISCARD); }"} {"target": 0, "idx": 11261, "func": "static always_inline int dv_rl2vlc(int run, int l, uint32_t* vlc) { *vlc = dv_vlc_map[run][((uint16_t)l)&0x1ff].vlc; return dv_vlc_map[run][((uint16_t)l)&0x1ff].size; }"} {"target": 0, "idx": 11263, "func": "static int dma_buf_prepare(BMDMAState *bm, int is_write) { IDEState *s = bmdma_active_if(bm); struct { uint32_t addr; uint32_t size; } prd; int l, len; qemu_sglist_init(&s->sg, s->nsector / (TARGET_PAGE_SIZE/512) + 1); s->io_buffer_size = 0; for(;;) { if (bm->cur_prd_len == 0) { /* end of table (with a fail safe of one page) */ if (bm->cur_prd_last || (bm->cur_addr - bm->addr) >= 4096) return s->io_buffer_size != 0; cpu_physical_memory_read(bm->cur_addr, (uint8_t *)&prd, 8); bm->cur_addr += 8; prd.addr = le32_to_cpu(prd.addr); prd.size = 
le32_to_cpu(prd.size); len = prd.size & 0xfffe; if (len == 0) len = 0x10000; bm->cur_prd_len = len; bm->cur_prd_addr = prd.addr; bm->cur_prd_last = (prd.size & 0x80000000); } l = bm->cur_prd_len; if (l > 0) { qemu_sglist_add(&s->sg, bm->cur_prd_addr, l); bm->cur_prd_addr += l; bm->cur_prd_len -= l; s->io_buffer_size += l; } } return 1; }"} {"target": 0, "idx": 11264, "func": "static int usb_host_claim_interfaces(USBHostDevice *s, int configuration) { USBDevice *udev = USB_DEVICE(s); struct libusb_config_descriptor *conf; int rc, i; for (i = 0; i < USB_MAX_INTERFACES; i++) { udev->altsetting[i] = 0; } udev->ninterfaces = 0; udev->configuration = 0; if (configuration == 0) { /* address state - ignore */ return USB_RET_SUCCESS; } usb_host_detach_kernel(s); rc = libusb_get_active_config_descriptor(s->dev, &conf); if (rc != 0) { return USB_RET_STALL; } for (i = 0; i < conf->bNumInterfaces; i++) { trace_usb_host_claim_interface(s->bus_num, s->addr, configuration, i); rc = libusb_claim_interface(s->dh, i); usb_host_libusb_error(\"libusb_claim_interface\", rc); if (rc != 0) { return USB_RET_STALL; } s->ifs[i].claimed = true; } udev->ninterfaces = conf->bNumInterfaces; udev->configuration = configuration; libusb_free_config_descriptor(conf); return USB_RET_SUCCESS; }"} {"target": 0, "idx": 11270, "func": "static void term_completion(void) { int len, i, j, max_width, nb_cols; char *cmdline; nb_completions = 0; cmdline = qemu_malloc(term_cmd_buf_index + 1); if (!cmdline) return; memcpy(cmdline, term_cmd_buf, term_cmd_buf_index); cmdline[term_cmd_buf_index] = '\\0'; find_completion(cmdline); qemu_free(cmdline); /* no completion found */ if (nb_completions <= 0) return; if (nb_completions == 1) { len = strlen(completions[0]); for(i = completion_index; i < len; i++) { term_insert_char(completions[0][i]); } /* extra space for next argument. XXX: make it more generic */ if (len > 0 && completions[0][len - 1] != '/') term_insert_char(' '); } else { term_printf(\"\\n\"); max_width = 0; for(i = 0; i < nb_completions; i++) { len = strlen(completions[i]); if (len > max_width) max_width = len; } max_width += 2; if (max_width < 10) max_width = 10; else if (max_width > 80) max_width = 80; nb_cols = 80 / max_width; j = 0; for(i = 0; i < nb_completions; i++) { term_printf(\"%-*s\", max_width, completions[i]); if (++j == nb_cols || i == (nb_completions - 1)) { term_printf(\"\\n\"); j = 0; } } term_show_prompt2(); } }"} {"target": 1, "idx": 11288, "func": "static int parse_vdiname(BDRVSheepdogState *s, const char *filename, char *vdi, uint32_t *snapid, char *tag) { char *p, *q, *uri; const char *host_spec, *vdi_spec; int nr_sep, ret; strstart(filename, \"sheepdog:\", &filename); p = q = g_strdup(filename); /* count the number of separators */ nr_sep = 0; while (*p) { if (*p == ':') { nr_sep++; } p++; } p = q; /* use the first two tokens as host_spec. 
*/ if (nr_sep >= 2) { host_spec = p; p = strchr(p, ':'); p++; p = strchr(p, ':'); *p++ = '\\0'; } else { host_spec = \"\"; } vdi_spec = p; p = strchr(vdi_spec, ':'); if (p) { *p++ = '#'; } uri = g_strdup_printf(\"sheepdog://%s/%s\", host_spec, vdi_spec); ret = sd_parse_uri(s, uri, vdi, snapid, tag); g_free(q); g_free(uri); return ret; }"} {"target": 1, "idx": 11293, "func": "static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){ int x, y, x2, y2; const int width= f->avctx->width; const int height= f->avctx->height; uint16_t *dst= (uint16_t*)f->current_picture.data[0]; const int stride= f->current_picture.linesize[0]>>1; for(y=0; y>2) + 8*(y2>>2); dst[y2*stride+x2]= color[(bits>>index)&3]; } } dst+=16; } dst += 16 * stride - x; } return 0; }"} {"target": 1, "idx": 11295, "func": "static int apc_read_header(AVFormatContext *s) { AVIOContext *pb = s->pb; AVStream *st; avio_rl32(pb); /* CRYO */ avio_rl32(pb); /* _APC */ avio_rl32(pb); /* 1.20 */ st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_id = AV_CODEC_ID_ADPCM_IMA_APC; avio_rl32(pb); /* number of samples */ st->codecpar->sample_rate = avio_rl32(pb); /* initial predictor values for adpcm decoder */ if (ff_get_extradata(s, st->codecpar, pb, 2 * 4) < 0) return AVERROR(ENOMEM); if (avio_rl32(pb)) { st->codecpar->channels = 2; st->codecpar->channel_layout = AV_CH_LAYOUT_STEREO; } else { st->codecpar->channels = 1; st->codecpar->channel_layout = AV_CH_LAYOUT_MONO; } st->codecpar->bits_per_coded_sample = 4; st->codecpar->bit_rate = st->codecpar->bits_per_coded_sample * st->codecpar->channels * st->codecpar->sample_rate; st->codecpar->block_align = 1; return 0; }"} {"target": 0, "idx": 11309, "func": "static inline uint16_t vring_avail_idx(VirtQueue *vq) { hwaddr pa; pa = vq->vring.avail + offsetof(VRingAvail, idx); return virtio_lduw_phys(vq->vdev, pa); }"} {"target": 0, "idx": 11313, "func": "void cpu_x86_update_cr0(CPUX86State *env) { int pg_state, pe_state; #if defined(DEBUG_MMU) printf(\"CR0 update: CR0=0x%08x\\n\", env->cr[0]); #endif pg_state = env->cr[0] & CR0_PG_MASK; if (pg_state != last_pg_state) { tlb_flush(env); last_pg_state = pg_state; } /* update PE flag in hidden flags */ pe_state = (env->cr[0] & CR0_PE_MASK); env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT); /* ensure that ADDSEG is always set in real mode */ env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT); }"} {"target": 0, "idx": 11314, "func": "static void branch(DBDMA_channel *ch) { dbdma_cmd *current = &ch->current; ch->regs[DBDMA_CMDPTR_LO] = current->cmd_dep; ch->regs[DBDMA_STATUS] |= cpu_to_be32(BT); dbdma_cmdptr_load(ch); }"} {"target": 1, "idx": 11326, "func": "void *ff_png_zalloc(void *opaque, unsigned int items, unsigned int size) { if(items >= UINT_MAX / size) return NULL; return av_malloc(items * size); }"} {"target": 1, "idx": 11343, "func": "static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) { int current; if (s->qdev.type != TYPE_ROM) { return -1; } current = media_is_dvd(s) ? 
MMC_PROFILE_DVD_ROM : MMC_PROFILE_CD_ROM; memset(outbuf, 0, 40); stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ stw_be_p(&outbuf[6], current); /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ outbuf[10] = 0x03; /* persistent, current */ outbuf[11] = 8; /* two profiles */ stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); outbuf[14] = (current == MMC_PROFILE_DVD_ROM); stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); outbuf[18] = (current == MMC_PROFILE_CD_ROM); /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ stw_be_p(&outbuf[20], 1); outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ outbuf[23] = 8; stl_be_p(&outbuf[24], 1); /* SCSI */ outbuf[28] = 1; /* DBE = 1, mandatory */ /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ stw_be_p(&outbuf[32], 3); outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ outbuf[35] = 4; outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ /* TODO: Random readable, CD read, DVD read, drive serial number, power management */ return 40; }"} {"target": 1, "idx": 11349, "func": "static void rx_stop_cont_test(const QVirtioBus *bus, QVirtioDevice *dev, QGuestAllocator *alloc, QVirtQueue *vq, int socket) { uint64_t req_addr; uint32_t free_head; char test[] = \"TEST\"; char buffer[64]; int len = htonl(sizeof(test)); struct iovec iov[] = { { .iov_base = &len, .iov_len = sizeof(len), }, { .iov_base = test, .iov_len = sizeof(test), }, }; int ret; req_addr = guest_alloc(alloc, 64); free_head = qvirtqueue_add(vq, req_addr, 64, true, false); qvirtqueue_kick(bus, dev, vq, free_head); qmp(\"{ 'execute' : 'stop'}\"); ret = iov_send(socket, iov, 2, 0, sizeof(len) + sizeof(test)); g_assert_cmpint(ret, ==, sizeof(test) + sizeof(len)); /* We could check the status, but this command is more importantly to * ensure the packet data gets queued in QEMU, before we do 'cont'. */ qmp(\"{ 'execute' : 'query-status'}\"); qmp(\"{ 'execute' : 'cont'}\"); qvirtio_wait_queue_isr(bus, dev, vq, QVIRTIO_NET_TIMEOUT_US); memread(req_addr + VNET_HDR_SIZE, buffer, sizeof(test)); g_assert_cmpstr(buffer, ==, \"TEST\"); guest_free(alloc, req_addr); }"} {"target": 0, "idx": 11354, "func": "static inline void fill_caches(H264Context *h, int mb_type){ MpegEncContext * const s = &h->s; const int mb_xy= s->mb_x + s->mb_y*s->mb_stride; int topleft_xy, top_xy, topright_xy, left_xy[2]; int topleft_type, top_type, topright_type, left_type[2]; int left_block[4]; int i; //wow what a mess, why didnt they simplify the interlacing&intra stuff, i cant imagine that these complex rules are worth it if(h->sps.mb_aff){ //FIXME topleft_xy = 0; /* avoid warning */ top_xy = 0; /* avoid warning */ topright_xy = 0; /* avoid warning */ }else{ topleft_xy = mb_xy-1 - s->mb_stride; top_xy = mb_xy - s->mb_stride; topright_xy= mb_xy+1 - s->mb_stride; left_xy[0] = mb_xy-1; left_xy[1] = mb_xy-1; left_block[0]= 0; left_block[1]= 1; left_block[2]= 2; left_block[3]= 3; } topleft_type = h->slice_table[topleft_xy ] == h->slice_num ? s->current_picture.mb_type[topleft_xy] : 0; top_type = h->slice_table[top_xy ] == h->slice_num ? s->current_picture.mb_type[top_xy] : 0; topright_type= h->slice_table[topright_xy] == h->slice_num ? s->current_picture.mb_type[topright_xy]: 0; left_type[0] = h->slice_table[left_xy[0] ] == h->slice_num ? s->current_picture.mb_type[left_xy[0]] : 0; left_type[1] = h->slice_table[left_xy[1] ] == h->slice_num ? 
s->current_picture.mb_type[left_xy[1]] : 0; if(IS_INTRA(mb_type)){ h->topleft_samples_available= h->top_samples_available= h->left_samples_available= 0xFFFF; h->topright_samples_available= 0xEEEA; if(!IS_INTRA(top_type) && (top_type==0 || h->pps.constrained_intra_pred)){ h->topleft_samples_available= 0xB3FF; h->top_samples_available= 0x33FF; h->topright_samples_available= 0x26EA; } for(i=0; i<2; i++){ if(!IS_INTRA(left_type[i]) && (left_type[i]==0 || h->pps.constrained_intra_pred)){ h->topleft_samples_available&= 0xDF5F; h->left_samples_available&= 0x5F5F; } } if(!IS_INTRA(topleft_type) && (topleft_type==0 || h->pps.constrained_intra_pred)) h->topleft_samples_available&= 0x7FFF; if(!IS_INTRA(topright_type) && (topright_type==0 || h->pps.constrained_intra_pred)) h->topright_samples_available&= 0xFBFF; if(IS_INTRA4x4(mb_type)){ if(IS_INTRA4x4(top_type)){ h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode[top_xy][4]; h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode[top_xy][5]; h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode[top_xy][6]; h->intra4x4_pred_mode_cache[7+8*0]= h->intra4x4_pred_mode[top_xy][3]; }else{ int pred; if(IS_INTRA16x16(top_type) || (IS_INTER(top_type) && !h->pps.constrained_intra_pred)) pred= 2; else{ pred= -1; } h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode_cache[7+8*0]= pred; } for(i=0; i<2; i++){ if(IS_INTRA4x4(left_type[i])){ h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[0+2*i]]; h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[1+2*i]]; }else{ int pred; if(IS_INTRA16x16(left_type[i]) || (IS_INTER(left_type[i]) && !h->pps.constrained_intra_pred)) pred= 2; else{ pred= -1; } h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= pred; } } } } /* 0 . T T. T T T T 1 L . .L . . . . 2 L . .L . . . . 3 . T TL . . . . 4 L . .L . . . . 5 L . .. . . . . 
*/ //FIXME constraint_intra_pred & partitioning & nnz (lets hope this is just a typo in the spec) if(top_type){ h->non_zero_count_cache[4+8*0]= h->non_zero_count[top_xy][10]; h->non_zero_count_cache[5+8*0]= h->non_zero_count[top_xy][11]; h->non_zero_count_cache[6+8*0]= h->non_zero_count[top_xy][14]; h->non_zero_count_cache[7+8*0]= h->non_zero_count[top_xy][15]; h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][18]; h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][19]; h->non_zero_count_cache[1+8*3]= h->non_zero_count[top_xy][22]; h->non_zero_count_cache[2+8*3]= h->non_zero_count[top_xy][23]; }else{ h->non_zero_count_cache[4+8*0]= h->non_zero_count_cache[5+8*0]= h->non_zero_count_cache[6+8*0]= h->non_zero_count_cache[7+8*0]= h->non_zero_count_cache[1+8*0]= h->non_zero_count_cache[2+8*0]= h->non_zero_count_cache[1+8*3]= h->non_zero_count_cache[2+8*3]= 64; } if(left_type[0]){ h->non_zero_count_cache[3+8*1]= h->non_zero_count[left_xy[0]][5]; h->non_zero_count_cache[3+8*2]= h->non_zero_count[left_xy[0]][7]; h->non_zero_count_cache[0+8*1]= h->non_zero_count[left_xy[0]][17]; //FIXME left_block h->non_zero_count_cache[0+8*4]= h->non_zero_count[left_xy[0]][21]; }else{ h->non_zero_count_cache[3+8*1]= h->non_zero_count_cache[3+8*2]= h->non_zero_count_cache[0+8*1]= h->non_zero_count_cache[0+8*4]= 64; } if(left_type[1]){ h->non_zero_count_cache[3+8*3]= h->non_zero_count[left_xy[1]][13]; h->non_zero_count_cache[3+8*4]= h->non_zero_count[left_xy[1]][15]; h->non_zero_count_cache[0+8*2]= h->non_zero_count[left_xy[1]][19]; h->non_zero_count_cache[0+8*5]= h->non_zero_count[left_xy[1]][23]; }else{ h->non_zero_count_cache[3+8*3]= h->non_zero_count_cache[3+8*4]= h->non_zero_count_cache[0+8*2]= h->non_zero_count_cache[0+8*5]= 64; } #if 1 if(IS_INTER(mb_type)){ int list; for(list=0; list<2; list++){ if((!IS_8X8(mb_type)) && !USES_LIST(mb_type, list)){ /*if(!h->mv_cache_clean[list]){ memset(h->mv_cache [list], 0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all? memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t)); h->mv_cache_clean[list]= 1; }*/ continue; //FIXME direct mode ... } h->mv_cache_clean[list]= 0; if(IS_INTER(topleft_type)){ const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride; const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + h->b8_stride; *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy]; h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy]; }else{ *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0; h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? 
LIST_NOT_USED : PART_NOT_AVAILABLE; } if(IS_INTER(top_type)){ const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride; const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride; *(uint32_t*)h->mv_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 0]; *(uint32_t*)h->mv_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 1]; *(uint32_t*)h->mv_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 2]; *(uint32_t*)h->mv_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 3]; h->ref_cache[list][scan8[0] + 0 - 1*8]= h->ref_cache[list][scan8[0] + 1 - 1*8]= s->current_picture.ref_index[list][b8_xy + 0]; h->ref_cache[list][scan8[0] + 2 - 1*8]= h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1]; }else{ *(uint32_t*)h->mv_cache [list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mv_cache [list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mv_cache [list][scan8[0] + 2 - 1*8]= *(uint32_t*)h->mv_cache [list][scan8[0] + 3 - 1*8]= 0; *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101; } if(IS_INTER(topright_type)){ const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride; const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride; *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy]; h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy]; }else{ *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0; h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE; } //FIXME unify cleanup or sth if(IS_INTER(left_type[0])){ const int b_xy= h->mb2b_xy[left_xy[0]] + 3; const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1; *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0]]; *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1]]; h->ref_cache[list][scan8[0] - 1 + 0*8]= h->ref_cache[list][scan8[0] - 1 + 1*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0]>>1)]; }else{ *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 1*8]= 0; h->ref_cache[list][scan8[0] - 1 + 0*8]= h->ref_cache[list][scan8[0] - 1 + 1*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE; } if(IS_INTER(left_type[1])){ const int b_xy= h->mb2b_xy[left_xy[1]] + 3; const int b8_xy= h->mb2b8_xy[left_xy[1]] + 1; *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[2]]; *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[3]]; h->ref_cache[list][scan8[0] - 1 + 2*8]= h->ref_cache[list][scan8[0] - 1 + 3*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[2]>>1)]; }else{ *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 3*8]= 0; h->ref_cache[list][scan8[0] - 1 + 2*8]= h->ref_cache[list][scan8[0] - 1 + 3*8]= left_type[0] ? 
LIST_NOT_USED : PART_NOT_AVAILABLE; } h->ref_cache[list][scan8[5 ]+1] = h->ref_cache[list][scan8[7 ]+1] = h->ref_cache[list][scan8[13]+1] = //FIXME remove past 3 (init somewher else) h->ref_cache[list][scan8[4 ]] = h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE; *(uint32_t*)h->mv_cache [list][scan8[5 ]+1]= *(uint32_t*)h->mv_cache [list][scan8[7 ]+1]= *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewher else) *(uint32_t*)h->mv_cache [list][scan8[4 ]]= *(uint32_t*)h->mv_cache [list][scan8[12]]= 0; } //FIXME } #endif }"} {"target": 1, "idx": 11357, "func": "static void nbd_read(void *opaque) { NBDClient *client = opaque; if (client->recv_coroutine) { qemu_coroutine_enter(client->recv_coroutine, NULL); } else { qemu_coroutine_enter(qemu_coroutine_create(nbd_trip), client); } }"} {"target": 1, "idx": 11373, "func": "int qemu_eventfd(int *fds) { #if defined(SYS_eventfd) int ret; ret = syscall(SYS_eventfd, 0); if (ret >= 0) { fds[0] = fds[1] = ret; return 0; } else if (!(ret == -1 && errno == ENOSYS)) return ret; #endif return pipe(fds); }"} {"target": 1, "idx": 11384, "func": "int64_t ga_get_fd_handle(GAState *s, Error **errp) { int64_t handle; g_assert(s->pstate_filepath); /* we blacklist commands and avoid operations that potentially require * writing to disk when we're in a frozen state. this includes opening * new files, so we should never get here in that situation */ g_assert(!ga_is_frozen(s)); handle = s->pstate.fd_counter++; if (s->pstate.fd_counter < 0) { s->pstate.fd_counter = 0; } if (!write_persistent_state(&s->pstate, s->pstate_filepath)) { error_setg(errp, \"failed to commit persistent state to disk\"); } return handle; }"} {"target": 0, "idx": 11388, "func": "static void opt_input_file(const char *filename) { AVFormatContext *ic; AVFormatParameters params, *ap = &params; AVInputFormat *file_iformat = NULL; int err, i, ret, rfps, rfps_base; int64_t timestamp; if (last_asked_format) { if (!(file_iformat = av_find_input_format(last_asked_format))) { fprintf(stderr, \"Unknown input format: '%s'\\n\", last_asked_format); ffmpeg_exit(1); } last_asked_format = NULL; } if (!strcmp(filename, \"-\")) filename = \"pipe:\"; using_stdin |= !strncmp(filename, \"pipe:\", 5) || !strcmp(filename, \"/dev/stdin\"); /* get default parameters from command line */ ic = avformat_alloc_context(); if (!ic) { print_error(filename, AVERROR(ENOMEM)); ffmpeg_exit(1); } memset(ap, 0, sizeof(*ap)); ap->prealloced_context = 1; ap->sample_rate = audio_sample_rate; ap->channels = audio_channels; ap->time_base.den = frame_rate.num; ap->time_base.num = frame_rate.den; ap->width = frame_width; ap->height = frame_height; ap->pix_fmt = frame_pix_fmt; // ap->sample_fmt = audio_sample_fmt; //FIXME:not implemented in libavformat ap->channel = video_channel; ap->standard = video_standard; set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL); ic->video_codec_id = find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0, avcodec_opts[AVMEDIA_TYPE_VIDEO ]->strict_std_compliance); ic->audio_codec_id = find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0, avcodec_opts[AVMEDIA_TYPE_AUDIO ]->strict_std_compliance); ic->subtitle_codec_id= find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0, avcodec_opts[AVMEDIA_TYPE_SUBTITLE]->strict_std_compliance); ic->flags |= AVFMT_FLAG_NONBLOCK; if(pgmyuv_compatibility_hack) ic->video_codec_id= CODEC_ID_PGMYUV; /* open the input file with generic libav function */ err = av_open_input_file(&ic, filename, file_iformat, 0, ap); 
if (err < 0) { print_error(filename, err); ffmpeg_exit(1); } if(opt_programid) { int i, j; int found=0; for(i=0; i<ic->nb_streams; i++){ ic->streams[i]->discard= AVDISCARD_ALL; } for(i=0; i<ic->nb_programs; i++){ AVProgram *p= ic->programs[i]; if(p->id != opt_programid){ p->discard = AVDISCARD_ALL; }else{ found=1; for(j=0; j<p->nb_stream_indexes; j++){ ic->streams[p->stream_index[j]]->discard= AVDISCARD_DEFAULT; } } } if(!found){ fprintf(stderr, \"Specified program id not found\\n\"); ffmpeg_exit(1); } opt_programid=0; } ic->loop_input = loop_input; /* If not enough info to get the stream parameters, we decode the first frames to get it. (used in mpeg case for example) */ ret = av_find_stream_info(ic); if (ret < 0 && verbose >= 0) { fprintf(stderr, \"%s: could not find codec parameters\\n\", filename); av_close_input_file(ic); ffmpeg_exit(1); } timestamp = start_time; /* add the stream start time */ if (ic->start_time != AV_NOPTS_VALUE) timestamp += ic->start_time; /* if seeking requested, we execute it */ if (start_time != 0) { ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD); if (ret < 0) { fprintf(stderr, \"%s: could not seek to position %0.3f\\n\", filename, (double)timestamp / AV_TIME_BASE); } /* reset seek info */ start_time = 0; } /* update the current parameters so that they match the one of the input stream */ for(i=0;i<ic->nb_streams;i++) { AVStream *st = ic->streams[i]; AVCodecContext *dec = st->codec; avcodec_thread_init(dec, thread_count); input_codecs = grow_array(input_codecs, sizeof(*input_codecs), &nb_input_codecs, nb_input_codecs + 1); switch (dec->codec_type) { case AVMEDIA_TYPE_AUDIO: input_codecs[nb_input_codecs-1] = avcodec_find_decoder_by_name(audio_codec_name); set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO], AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM, input_codecs[nb_input_codecs-1]); //fprintf(stderr, \"\\nInput Audio channels: %d\", dec->channels); channel_layout = dec->channel_layout; audio_channels = dec->channels; audio_sample_rate = dec->sample_rate; audio_sample_fmt = dec->sample_fmt; if(audio_disable) st->discard= AVDISCARD_ALL; /* Note that av_find_stream_info can add more streams, and we * currently have no chance of setting up lowres decoding * early enough for them. 
*/ if (dec->lowres) audio_sample_rate >>= dec->lowres; break; case AVMEDIA_TYPE_VIDEO: input_codecs[nb_input_codecs-1] = avcodec_find_decoder_by_name(video_codec_name); set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM, input_codecs[nb_input_codecs-1]); frame_height = dec->height; frame_width = dec->width; if(ic->streams[i]->sample_aspect_ratio.num) frame_aspect_ratio=av_q2d(ic->streams[i]->sample_aspect_ratio); else frame_aspect_ratio=av_q2d(dec->sample_aspect_ratio); frame_aspect_ratio *= (float) dec->width / dec->height; frame_pix_fmt = dec->pix_fmt; rfps = ic->streams[i]->r_frame_rate.num; rfps_base = ic->streams[i]->r_frame_rate.den; if (dec->lowres) { dec->flags |= CODEC_FLAG_EMU_EDGE; frame_height >>= dec->lowres; frame_width >>= dec->lowres; } if(me_threshold) dec->debug |= FF_DEBUG_MV; if (dec->time_base.den != rfps*dec->ticks_per_frame || dec->time_base.num != rfps_base) { if (verbose >= 0) fprintf(stderr,\"\\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\\n\", i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num, (float)rfps / rfps_base, rfps, rfps_base); } /* update the current frame rate to match the stream frame rate */ frame_rate.num = rfps; frame_rate.den = rfps_base; if(video_disable) st->discard= AVDISCARD_ALL; else if(video_discard) st->discard= video_discard; break; case AVMEDIA_TYPE_DATA: break; case AVMEDIA_TYPE_SUBTITLE: input_codecs[nb_input_codecs-1] = avcodec_find_decoder_by_name(subtitle_codec_name); if(subtitle_disable) st->discard = AVDISCARD_ALL; break; case AVMEDIA_TYPE_ATTACHMENT: case AVMEDIA_TYPE_UNKNOWN: break; default: abort(); } } input_files[nb_input_files] = ic; input_files_ts_offset[nb_input_files] = input_ts_offset - (copy_ts ? 
0 : timestamp); /* dump the file content */ if (verbose >= 0) dump_format(ic, nb_input_files, filename, 0); nb_input_files++; video_channel = 0; av_freep(&video_codec_name); av_freep(&audio_codec_name); av_freep(&subtitle_codec_name); }"} {"target": 0, "idx": 11402, "func": "static void gen_load_fpr32h(TCGv_i32 t, int reg) { TCGv_i64 t64 = tcg_temp_new_i64(); tcg_gen_shri_i64(t64, fpu_f64[reg], 32); tcg_gen_trunc_i64_i32(t, t64); tcg_temp_free_i64(t64); }"} {"target": 0, "idx": 11414, "func": "void block_job_completed(BlockJob *job, int ret) { BlockDriverState *bs = job->bs; assert(bs->job == job); job->cb(job->opaque, ret); bs->job = NULL; g_free(job); bdrv_set_in_use(bs, 0); }"} {"target": 0, "idx": 11446, "func": "static int alloc_frame_buffer(MpegEncContext *s, Picture *pic) { int r; if (s->avctx->hwaccel) { assert(!pic->hwaccel_data_private); if (s->avctx->hwaccel->priv_data_size) { pic->hwaccel_data_private = av_malloc(s->avctx->hwaccel->priv_data_size); if (!pic->hwaccel_data_private) { av_log(s->avctx, AV_LOG_ERROR, \"alloc_frame_buffer() failed (hwaccel private data allocation)\\n\"); return -1; } } } r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic); if (r<0 || !pic->age || !pic->type || !pic->data[0]) { av_log(s->avctx, AV_LOG_ERROR, \"get_buffer() failed (%d %d %d %p)\\n\", r, pic->age, pic->type, pic->data[0]); av_freep(&pic->hwaccel_data_private); return -1; } if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) { av_log(s->avctx, AV_LOG_ERROR, \"get_buffer() failed (stride changed)\\n\"); free_frame_buffer(s, pic); return -1; } if (pic->linesize[1] != pic->linesize[2]) { av_log(s->avctx, AV_LOG_ERROR, \"get_buffer() failed (uv stride mismatch)\\n\"); free_frame_buffer(s, pic); return -1; } return 0; }"} {"target": 0, "idx": 11448, "func": "static int asf_parse_packet(AVFormatContext *s, AVIOContext *pb, AVPacket *pkt) { ASFContext *asf = s->priv_data; ASFStream *asf_st = 0; for (;;) { int ret; if (avio_feof(pb)) return AVERROR_EOF; if (asf->packet_size_left < FRAME_HEADER_SIZE || asf->packet_segments < 1 && asf->packet_time_start == 0) { int ret = asf->packet_size_left + asf->packet_padsize; assert(ret >= 0); /* fail safe */ avio_skip(pb, ret); asf->packet_pos = avio_tell(pb); if (asf->data_object_size != (uint64_t)-1 && (asf->packet_pos - asf->data_object_offset >= asf->data_object_size)) return AVERROR_EOF; /* Do not exceed the size of the data object */ return 1; } if (asf->packet_time_start == 0) { if (asf_read_frame_header(s, pb) < 0) { asf->packet_time_start = asf->packet_segments = 0; continue; } if (asf->stream_index < 0 || s->streams[asf->stream_index]->discard >= AVDISCARD_ALL || (!asf->packet_key_frame && (s->streams[asf->stream_index]->discard >= AVDISCARD_NONKEY || asf->streams[s->streams[asf->stream_index]->id].skip_to_key))) { asf->packet_time_start = 0; /* unhandled packet (should not happen) */ avio_skip(pb, asf->packet_frag_size); asf->packet_size_left -= asf->packet_frag_size; if (asf->stream_index < 0) av_log(s, AV_LOG_ERROR, \"ff asf skip %d (unknown stream)\\n\", asf->packet_frag_size); continue; } asf->asf_st = &asf->streams[s->streams[asf->stream_index]->id]; asf->asf_st->skip_to_key = 0; } asf_st = asf->asf_st; av_assert0(asf_st); if (!asf_st->frag_offset && asf->packet_frag_offset) { av_dlog(s, \"skipping asf data pkt with fragment offset for \" \"stream:%d, expected:%d but got %d from pkt)\\n\", asf->stream_index, asf_st->frag_offset, asf->packet_frag_offset); avio_skip(pb, asf->packet_frag_size); 
asf->packet_size_left -= asf->packet_frag_size; continue; } if (asf->packet_replic_size == 1) { // frag_offset is here used as the beginning timestamp asf->packet_frag_timestamp = asf->packet_time_start; asf->packet_time_start += asf->packet_time_delta; asf_st->packet_obj_size = asf->packet_frag_size = avio_r8(pb); asf->packet_size_left--; asf->packet_multi_size--; if (asf->packet_multi_size < asf_st->packet_obj_size) { asf->packet_time_start = 0; avio_skip(pb, asf->packet_multi_size); asf->packet_size_left -= asf->packet_multi_size; continue; } asf->packet_multi_size -= asf_st->packet_obj_size; } if (asf_st->pkt.size != asf_st->packet_obj_size || // FIXME is this condition sufficient? asf_st->frag_offset + asf->packet_frag_size > asf_st->pkt.size) { if (asf_st->pkt.data) { av_log(s, AV_LOG_INFO, \"freeing incomplete packet size %d, new %d\\n\", asf_st->pkt.size, asf_st->packet_obj_size); asf_st->frag_offset = 0; av_free_packet(&asf_st->pkt); } /* new packet */ av_new_packet(&asf_st->pkt, asf_st->packet_obj_size); asf_st->seq = asf->packet_seq; if (asf->ts_is_pts) { asf_st->pkt.pts = asf->packet_frag_timestamp - asf->hdr.preroll; } else asf_st->pkt.dts = asf->packet_frag_timestamp - asf->hdr.preroll; asf_st->pkt.stream_index = asf->stream_index; asf_st->pkt.pos = asf_st->packet_pos = asf->packet_pos; asf_st->pkt_clean = 0; if (asf_st->pkt.data && asf_st->palette_changed) { uint8_t *pal; pal = av_packet_new_side_data(&asf_st->pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE); if (!pal) { av_log(s, AV_LOG_ERROR, \"Cannot append palette to packet\\n\"); } else { memcpy(pal, asf_st->palette, AVPALETTE_SIZE); asf_st->palette_changed = 0; } } av_dlog(asf, \"new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\\n\", asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & AV_PKT_FLAG_KEY, s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO, asf_st->packet_obj_size); if (s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO) asf->packet_key_frame = 1; if (asf->packet_key_frame) asf_st->pkt.flags |= AV_PKT_FLAG_KEY; } /* read data */ av_dlog(asf, \"READ PACKET s:%d os:%d o:%d,%d l:%d DATA:%p\\n\", s->packet_size, asf_st->pkt.size, asf->packet_frag_offset, asf_st->frag_offset, asf->packet_frag_size, asf_st->pkt.data); asf->packet_size_left -= asf->packet_frag_size; if (asf->packet_size_left < 0) continue; if (asf->packet_frag_offset >= asf_st->pkt.size || asf->packet_frag_size > asf_st->pkt.size - asf->packet_frag_offset) { av_log(s, AV_LOG_ERROR, \"packet fragment position invalid %u,%u not in %u\\n\", asf->packet_frag_offset, asf->packet_frag_size, asf_st->pkt.size); continue; } if (asf->packet_frag_offset != asf_st->frag_offset && !asf_st->pkt_clean) { memset(asf_st->pkt.data + asf_st->frag_offset, 0, asf_st->pkt.size - asf_st->frag_offset); asf_st->pkt_clean = 1; } ret = avio_read(pb, asf_st->pkt.data + asf->packet_frag_offset, asf->packet_frag_size); if (ret != asf->packet_frag_size) { if (ret < 0 || asf->packet_frag_offset + ret == 0) return ret < 0 ? ret : AVERROR_EOF; if (asf_st->ds_span > 1) { // scrambling, we can either drop it completely or fill the remainder // TODO: should we fill the whole packet instead of just the current // fragment? 
memset(asf_st->pkt.data + asf->packet_frag_offset + ret, 0, asf->packet_frag_size - ret); ret = asf->packet_frag_size; } else { // no scrambling, so we can return partial packets av_shrink_packet(&asf_st->pkt, asf->packet_frag_offset + ret); } } if (s->key && s->keylen == 20) ff_asfcrypt_dec(s->key, asf_st->pkt.data + asf->packet_frag_offset, ret); asf_st->frag_offset += ret; /* test if whole packet is read */ if (asf_st->frag_offset == asf_st->pkt.size) { // workaround for macroshit radio DVR-MS files if (s->streams[asf->stream_index]->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO && asf_st->pkt.size > 100) { int i; for (i = 0; i < asf_st->pkt.size && !asf_st->pkt.data[i]; i++) ; if (i == asf_st->pkt.size) { av_log(s, AV_LOG_DEBUG, \"discarding ms fart\\n\"); asf_st->frag_offset = 0; av_free_packet(&asf_st->pkt); continue; } } /* return packet */ if (asf_st->ds_span > 1) { if (asf_st->pkt.size != asf_st->ds_packet_size * asf_st->ds_span) { av_log(s, AV_LOG_ERROR, \"pkt.size != ds_packet_size * ds_span (%d %d %d)\\n\", asf_st->pkt.size, asf_st->ds_packet_size, asf_st->ds_span); } else { /* packet descrambling */ AVBufferRef *buf = av_buffer_alloc(asf_st->pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); if (buf) { uint8_t *newdata = buf->data; int offset = 0; memset(newdata + asf_st->pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE); while (offset < asf_st->pkt.size) { int off = offset / asf_st->ds_chunk_size; int row = off / asf_st->ds_span; int col = off % asf_st->ds_span; int idx = row + col * asf_st->ds_packet_size / asf_st->ds_chunk_size; assert(offset + asf_st->ds_chunk_size <= asf_st->pkt.size); assert(idx + 1 <= asf_st->pkt.size / asf_st->ds_chunk_size); memcpy(newdata + offset, asf_st->pkt.data + idx * asf_st->ds_chunk_size, asf_st->ds_chunk_size); offset += asf_st->ds_chunk_size; } av_buffer_unref(&asf_st->pkt.buf); asf_st->pkt.buf = buf; asf_st->pkt.data = buf->data; } } } asf_st->frag_offset = 0; *pkt = asf_st->pkt; #if FF_API_DESTRUCT_PACKET FF_DISABLE_DEPRECATION_WARNINGS asf_st->pkt.destruct = NULL; FF_ENABLE_DEPRECATION_WARNINGS #endif asf_st->pkt.buf = 0; asf_st->pkt.size = 0; asf_st->pkt.data = 0; asf_st->pkt.side_data_elems = 0; asf_st->pkt.side_data = NULL; break; // packet completed } } return 0; }"} {"target": 1, "idx": 11450, "func": "static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread) { HEVCContext *s = avctxt->priv_data; int ctb_size = 1 << s->ps.sps->log2_ctb_size; int more_data = 1; int x_ctb = 0; int y_ctb = 0; int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs]; if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) { av_log(s->avctx, AV_LOG_ERROR, \"Impossible initial tile.\\n\"); return AVERROR_INVALIDDATA; } if (s->sh.dependent_slice_segment_flag) { int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1]; if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) { av_log(s->avctx, AV_LOG_ERROR, \"Previous slice segment missing\\n\"); return AVERROR_INVALIDDATA; } } while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) { int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts]; x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size; y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size; hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts); ff_hevc_cabac_init(s, ctb_addr_ts); hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size); s->deblock[ctb_addr_rs].beta_offset = 
s->sh.beta_offset; s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset; s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag; more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0); if (more_data < 0) { s->tab_slice_address[ctb_addr_rs] = -1; return more_data; } ctb_addr_ts++; ff_hevc_save_states(s, ctb_addr_ts); ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size); } if (x_ctb + ctb_size >= s->ps.sps->width && y_ctb + ctb_size >= s->ps.sps->height) ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size); return ctb_addr_ts; }"} {"target": 1, "idx": 11451, "func": "static void reset_packet_state(AVFormatContext *s) { ASFContext *asf = s->priv_data; int i; asf->state = PARSE_PACKET_HEADER; asf->offset = 0; asf->return_subpayload = 0; asf->sub_left = 0; asf->sub_header_offset = 0; asf->packet_offset = asf->first_packet_offset; asf->pad_len = 0; asf->rep_data_len = 0; asf->dts_delta = 0; asf->mult_sub_len = 0; asf->nb_mult_left = 0; asf->nb_sub = 0; asf->prop_flags = 0; asf->sub_dts = 0; asf->dts = 0; for (i = 0; i < asf->nb_streams; i++) { ASFPacket *pkt = &asf->asf_st[i]->pkt; pkt->size_left = 0; pkt->data_size = 0; pkt->duration = 0; pkt->flags = 0; pkt->dts = 0; pkt->duration = 0; av_free_packet(&pkt->avpkt); av_init_packet(&pkt->avpkt); } }"} {"target": 1, "idx": 11457, "func": "static int nbd_parse_offset_hole_payload(NBDStructuredReplyChunk *chunk, uint8_t *payload, uint64_t orig_offset, QEMUIOVector *qiov, Error **errp) { uint64_t offset; uint32_t hole_size; if (chunk->length != sizeof(offset) + sizeof(hole_size)) { error_setg(errp, \"Protocol error: invalid payload for \" \"NBD_REPLY_TYPE_OFFSET_HOLE\"); return -EINVAL; } offset = payload_advance64(&payload); hole_size = payload_advance32(&payload); if (offset < orig_offset || hole_size > qiov->size || offset > orig_offset + qiov->size - hole_size) { error_setg(errp, \"Protocol error: server sent chunk exceeding requested\" \" region\"); return -EINVAL; } qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size); return 0; }"} {"target": 0, "idx": 11469, "func": "static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size, const uint8_t * buf, int buf_size) { AVSContext *h = avctx->priv_data; MpegEncContext *s = &h->s; int input_size; const uint8_t *buf_end; const uint8_t *buf_ptr; AVFrame *picture = data; uint32_t stc = -1; s->avctx = avctx; if (buf_size == 0) { if(!s->low_delay && h->DPB[0].data[0]) { *data_size = sizeof(AVPicture); *picture = *(AVFrame *) &h->DPB[0]; } return 0; } buf_ptr = buf; buf_end = buf + buf_size; for(;;) { buf_ptr = ff_find_start_code(buf_ptr,buf_end, &stc); if(stc & 0xFFFFFE00) return FFMAX(0, buf_ptr - buf - s->parse_context.last_index); input_size = (buf_end - buf_ptr)*8; switch(stc) { case CAVS_START_CODE: init_get_bits(&s->gb, buf_ptr, input_size); decode_seq_header(h); break; case PIC_I_START_CODE: if(!h->got_keyframe) { if(h->DPB[0].data[0]) avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]); if(h->DPB[1].data[0]) avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]); h->got_keyframe = 1; } case PIC_PB_START_CODE: *data_size = 0; if(!h->got_keyframe) break; init_get_bits(&s->gb, buf_ptr, input_size); h->stc = stc; if(decode_pic(h)) break; *data_size = sizeof(AVPicture); if(h->pic_type != FF_B_TYPE) { if(h->DPB[1].data[0]) { *picture = *(AVFrame *) &h->DPB[1]; } else { *data_size = 0; } } else *picture = *(AVFrame *) &h->picture; break; case EXT_START_CODE: //mpeg_decode_extension(avctx,buf_ptr, input_size); break; case USER_START_CODE: 
//mpeg_decode_user_data(avctx,buf_ptr, input_size); break; default: if (stc >= SLICE_MIN_START_CODE && stc <= SLICE_MAX_START_CODE) { init_get_bits(&s->gb, buf_ptr, input_size); decode_slice_header(h, &s->gb); } break; } } }"} {"target": 1, "idx": 11492, "func": "AVEvalExpr * ff_parse(const char *s, const char * const *const_name, double (**func1)(void *, double), const char **func1_name, double (**func2)(void *, double, double), const char **func2_name, const char **error){ Parser p; AVEvalExpr * e; char w[strlen(s) + 1], * wp = w; while (*s) if (!isspace(*s++)) *wp++ = s[-1]; *wp++ = 0; p.stack_index=100; p.s= w; p.const_name = const_name; p.func1 = func1; p.func1_name = func1_name; p.func2 = func2; p.func2_name = func2_name; p.error= error; e = parse_expr(&p); if (!verify_expr(e)) { ff_eval_free(e); return NULL; } return e; }"} {"target": 1, "idx": 11497, "func": "static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf, RDMAControlHeader *head) { int ret = 0; RDMAWorkRequestData *wr = &rdma->wr_data[RDMA_WRID_CONTROL]; struct ibv_send_wr *bad_wr; struct ibv_sge sge = { .addr = (uint64_t)(wr->control), .length = head->len + sizeof(RDMAControlHeader), .lkey = wr->control_mr->lkey, }; struct ibv_send_wr send_wr = { .wr_id = RDMA_WRID_SEND_CONTROL, .opcode = IBV_WR_SEND, .send_flags = IBV_SEND_SIGNALED, .sg_list = &sge, .num_sge = 1, }; DDDPRINTF(\"CONTROL: sending %s..\\n\", control_desc[head->type]); /* * We don't actually need to do a memcpy() in here if we used * the \"sge\" properly, but since we're only sending control messages * (not RAM in a performance-critical path), then its OK for now. * * The copy makes the RDMAControlHeader simpler to manipulate * for the time being. */ assert(head->len <= RDMA_CONTROL_MAX_BUFFER - sizeof(*head)); memcpy(wr->control, head, sizeof(RDMAControlHeader)); control_to_network((void *) wr->control); if (buf) { memcpy(wr->control + sizeof(RDMAControlHeader), buf, head->len); } ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr); if (ret > 0) { fprintf(stderr, \"Failed to use post IB SEND for control!\\n\"); return -ret; } ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL); if (ret < 0) { fprintf(stderr, \"rdma migration: send polling control error!\\n\"); } return ret; }"} {"target": 1, "idx": 11498, "func": "static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp) { struct AHCIPCIState *d; int sata_cap_offset; uint8_t *sata_cap; d = ICH_AHCI(dev); ahci_realize(&d->ahci, DEVICE(dev), pci_get_address_space(dev), 6); pci_config_set_prog_interface(dev->config, AHCI_PROGMODE_MAJOR_REV_1); dev->config[PCI_CACHE_LINE_SIZE] = 0x08; /* Cache line size */ dev->config[PCI_LATENCY_TIMER] = 0x00; /* Latency timer */ pci_config_set_interrupt_pin(dev->config, 1); /* XXX Software should program this register */ dev->config[0x90] = 1 << 6; /* Address Map Register - AHCI mode */ d->ahci.irq = pci_allocate_irq(dev); pci_register_bar(dev, ICH9_IDP_BAR, PCI_BASE_ADDRESS_SPACE_IO, &d->ahci.idp); pci_register_bar(dev, ICH9_MEM_BAR, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->ahci.mem); sata_cap_offset = pci_add_capability2(dev, PCI_CAP_ID_SATA, ICH9_SATA_CAP_OFFSET, SATA_CAP_SIZE, errp); if (sata_cap_offset < 0) { return; } sata_cap = dev->config + sata_cap_offset; pci_set_word(sata_cap + SATA_CAP_REV, 0x10); pci_set_long(sata_cap + SATA_CAP_BAR, (ICH9_IDP_BAR + 0x4) | (ICH9_IDP_INDEX_LOG2 << 4)); d->ahci.idp_offset = ICH9_IDP_INDEX; /* Although the AHCI 1.3 specification states that the first capability * should be PMCAP, the Intel ICH9 data sheet 
specifies that the ICH9 * AHCI device puts the MSI capability first, pointing to 0x80. */ msi_init(dev, ICH9_MSI_CAP_OFFSET, 1, true, false); }"} {"target": 1, "idx": 11504, "func": "static void parse_cmdline(const char *cmdline, int *pnb_args, char **args) { const char *p; int nb_args, ret; char buf[1024]; p = cmdline; nb_args = 0; for (;;) { while (qemu_isspace(*p)) { p++; } if (*p == '\\0') { break; } if (nb_args >= MAX_ARGS) { break; } ret = get_str(buf, sizeof(buf), &p); args[nb_args] = g_strdup(buf); nb_args++; if (ret < 0) { break; } } *pnb_args = nb_args; }"} {"target": 1, "idx": 11517, "func": "static void mpc8544ds_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { PCIBus *pci_bus; CPUState *env; uint64_t elf_entry; uint64_t elf_lowaddr; target_phys_addr_t entry=0; target_phys_addr_t loadaddr=UIMAGE_LOAD_BASE; target_long kernel_size=0; target_ulong dt_base=DTB_LOAD_BASE; target_ulong initrd_base=INITRD_LOAD_BASE; target_long initrd_size=0; void *fdt; int i=0; unsigned int pci_irq_nrs[4] = {1, 2, 3, 4}; qemu_irq *irqs, *mpic, *pci_irqs; SerialState * serial[2]; /* Setup CPU */ env = cpu_ppc_init(\"e500v2_v30\"); if (!env) { fprintf(stderr, \"Unable to initialize CPU!\\n\"); exit(1); } /* Fixup Memory size on a alignment boundary */ ram_size &= ~(RAM_SIZES_ALIGN - 1); /* Register Memory */ cpu_register_physical_memory(0, ram_size, qemu_ram_alloc(ram_size)); /* MPIC */ irqs = qemu_mallocz(sizeof(qemu_irq) * OPENPIC_OUTPUT_NB); irqs[OPENPIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPCE500_INPUT_INT]; irqs[OPENPIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPCE500_INPUT_CINT]; mpic = mpic_init(MPC8544_MPIC_REGS_BASE, 1, &irqs, NULL); /* Serial */ if (serial_hds[0]) serial[0] = serial_mm_init(MPC8544_SERIAL0_REGS_BASE, 0, mpic[12+26], 399193, serial_hds[0], 1); if (serial_hds[1]) serial[0] = serial_mm_init(MPC8544_SERIAL1_REGS_BASE, 0, mpic[12+26], 399193, serial_hds[0], 1); /* PCI */ pci_irqs = qemu_malloc(sizeof(qemu_irq) * 4); pci_irqs[0] = mpic[pci_irq_nrs[0]]; pci_irqs[1] = mpic[pci_irq_nrs[1]]; pci_irqs[2] = mpic[pci_irq_nrs[2]]; pci_irqs[3] = mpic[pci_irq_nrs[3]]; pci_bus = ppce500_pci_init(pci_irqs, MPC8544_PCI_REGS_BASE); if (!pci_bus) printf(\"couldn't create PCI controller!\\n\"); isa_mmio_init(MPC8544_PCI_IO, MPC8544_PCI_IOLEN); if (pci_bus) { /* Register network interfaces. */ for (i = 0; i < nb_nics; i++) { pci_nic_init(&nd_table[i], \"virtio\", NULL); } } /* Load kernel. */ if (kernel_filename) { kernel_size = load_uimage(kernel_filename, &entry, &loadaddr, NULL); if (kernel_size < 0) { kernel_size = load_elf(kernel_filename, 0, &elf_entry, &elf_lowaddr, NULL, 1, ELF_MACHINE, 0); entry = elf_entry; loadaddr = elf_lowaddr; } /* XXX try again as binary */ if (kernel_size < 0) { fprintf(stderr, \"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } } /* Load initrd. */ if (initrd_filename) { initrd_size = load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); if (initrd_size < 0) { fprintf(stderr, \"qemu: could not load initial ram disk '%s'\\n\", initrd_filename); exit(1); } } /* If we're loading a kernel directly, we must load the device tree too. */ if (kernel_filename) { fdt = mpc8544_load_device_tree(dt_base, ram_size, initrd_base, initrd_size, kernel_cmdline); if (fdt == NULL) { fprintf(stderr, \"couldn't load device tree\\n\"); exit(1); } /* Set initial guest state. 
*/ env->gpr[1] = (16<<20) - 8; env->gpr[3] = dt_base; env->nip = entry; /* XXX we currently depend on KVM to create some initial TLB entries. */ } if (kvm_enabled()) kvmppc_init(); return; }"} {"target": 0, "idx": 11528, "func": "static void read_packets(WriterContext *w, AVFormatContext *fmt_ctx) { int i, ret = 0; int64_t cur_ts = fmt_ctx->start_time; if (read_intervals_nb == 0) { ReadInterval interval = (ReadInterval) { .has_start = 0, .has_end = 0 }; ret = read_interval_packets(w, fmt_ctx, &interval, &cur_ts); } else { for (i = 0; i < read_intervals_nb; i++) { ret = read_interval_packets(w, fmt_ctx, &read_intervals[i], &cur_ts); if (ret < 0) break; } } }"} {"target": 1, "idx": 11549, "func": "static void vga_update_memory_access(VGACommonState *s) { hwaddr base, offset, size; if (s->legacy_address_space == NULL) { return; } if (s->has_chain4_alias) { memory_region_del_subregion(s->legacy_address_space, &s->chain4_alias); object_unparent(OBJECT(&s->chain4_alias)); s->has_chain4_alias = false; s->plane_updated = 0xf; } if ((s->sr[VGA_SEQ_PLANE_WRITE] & VGA_SR02_ALL_PLANES) == VGA_SR02_ALL_PLANES && s->sr[VGA_SEQ_MEMORY_MODE] & VGA_SR04_CHN_4M) { offset = 0; switch ((s->gr[VGA_GFX_MISC] >> 2) & 3) { case 0: base = 0xa0000; size = 0x20000; break; case 1: base = 0xa0000; size = 0x10000; offset = s->bank_offset; break; case 2: base = 0xb0000; size = 0x8000; break; case 3: default: base = 0xb8000; size = 0x8000; break; } assert(offset + size <= s->vram_size); memory_region_init_alias(&s->chain4_alias, memory_region_owner(&s->vram), \"vga.chain4\", &s->vram, offset, size); memory_region_add_subregion_overlap(s->legacy_address_space, base, &s->chain4_alias, 2); s->has_chain4_alias = true; } }"} {"target": 0, "idx": 11570, "func": "int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep) { int ret, count = 0; const char *dummy_shorthand = NULL; char key_buf[68], *value; const char *key; if (!opts) return 0; if (!shorthand) shorthand = &dummy_shorthand; while (*opts) { if ((ret = get_key(&opts, key_val_sep, key_buf, sizeof(key_buf))) < 0) { if (*shorthand) { key = *(shorthand++); } else { av_log(ctx, AV_LOG_ERROR, \"No option name near '%s'\\n\", opts); return AVERROR(EINVAL); } } else { key = key_buf; while (*shorthand) /* discard all remaining shorthand */ shorthand++; } if (!(value = av_get_token(&opts, pairs_sep))) return AVERROR(ENOMEM); if (*opts && strchr(pairs_sep, *opts)) opts++; av_log(ctx, AV_LOG_DEBUG, \"Setting '%s' to value '%s'\\n\", key, value); if ((ret = av_opt_set(ctx, key, value, 0)) < 0) { if (ret == AVERROR_OPTION_NOT_FOUND) av_log(ctx, AV_LOG_ERROR, \"Option '%s' not found\\n\", key); av_free(value); return ret; } av_free(value); count++; } return count; }"} {"target": 0, "idx": 11595, "func": "void helper_sysexit(void) { int cpl; cpl = env->hflags & HF_CPL_MASK; if (env->sysenter_cs == 0 || cpl != 0) { raise_exception_err(EXCP0D_GPF, 0); } cpu_x86_set_cpl(env, 3); cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); ESP = ECX; EIP = EDX; #ifdef USE_KQEMU if (kqemu_is_ok(env)) { env->exception_index = -1; cpu_loop_exit(); } #endif }"} {"target": 0, 
"idx": 11597, "func": "static inline void gen_efsnabs(DisasContext *ctx) { if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_APU); return; } tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); }"} {"target": 0, "idx": 11611, "func": "static int pte64_check(struct mmu_ctx_hash64 *ctx, target_ulong pte0, target_ulong pte1, int h, int rwx) { target_ulong mmask; int access, ret, pp; ret = -1; /* Check validity and table match */ if ((pte0 & HPTE64_V_VALID) && (h == !!(pte0 & HPTE64_V_SECONDARY))) { bool nx; /* Check vsid & api */ mmask = PTE64_CHECK_MASK; pp = (pte1 & HPTE64_R_PP) | ((pte1 & HPTE64_R_PP0) >> 61); /* No execute if either noexec or guarded bits set */ nx = (pte1 & HPTE64_R_N) || (pte1 & HPTE64_R_G); if (HPTE64_V_COMPARE(pte0, ctx->ptem)) { if (ctx->raddr != (hwaddr)-1ULL) { /* all matches should have equal RPN, WIMG & PP */ if ((ctx->raddr & mmask) != (pte1 & mmask)) { qemu_log(\"Bad RPN/WIMG/PP\\n\"); return -3; } } /* Compute access rights */ access = ppc_hash64_pp_check(ctx->key, pp, nx); /* Keep the matching PTE informations */ ctx->raddr = pte1; ctx->prot = access; ret = ppc_hash64_check_prot(ctx->prot, rwx); if (ret == 0) { /* Access granted */ LOG_MMU(\"PTE access granted !\\n\"); } else { /* Access right violation */ LOG_MMU(\"PTE access rejected\\n\"); } } } return ret; }"} {"target": 0, "idx": 11612, "func": "static uint64_t bmdma_addr_read(void *opaque, target_phys_addr_t addr, unsigned width) { BMDMAState *bm = opaque; uint32_t mask = (1ULL << (width * 8)) - 1; uint64_t data; data = (bm->addr >> (addr * 8)) & mask; #ifdef DEBUG_IDE printf(\"%s: 0x%08x\\n\", __func__, (unsigned)*data); #endif return data; }"} {"target": 0, "idx": 11613, "func": "static void qerror_abort(const QError *qerr, const char *fmt, ...) { va_list ap; fprintf(stderr, \"qerror: bad call in function '%s':\\n\", qerr->func); fprintf(stderr, \"qerror: -> \"); va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); fprintf(stderr, \"\\nqerror: call at %s:%d\\n\", qerr->file, qerr->linenr); abort(); }"} {"target": 0, "idx": 11616, "func": "static void gen_nabs(DisasContext *ctx) { int l1 = gen_new_label(); int l2 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], 0, l1); tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); gen_set_label(l2); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); }"} {"target": 0, "idx": 11628, "func": "static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id, hwaddr addr, uint8_t am) { VTDIOTLBPageInvInfo info; assert(am <= VTD_MAMV); info.domain_id = domain_id; info.gfn = addr >> VTD_PAGE_SHIFT_4K; info.mask = ~((1 << am) - 1); g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info); }"} {"target": 1, "idx": 11657, "func": "static void sh_serial_write(void *opaque, hwaddr offs, uint64_t val, unsigned size) { sh_serial_state *s = opaque; unsigned char ch; #ifdef DEBUG_SERIAL printf(\"sh_serial: write offs=0x%02x val=0x%02x\\n\", offs, val); #endif switch(offs) { case 0x00: /* SMR */ s->smr = val & ((s->feat & SH_SERIAL_FEAT_SCIF) ? 0x7b : 0xff); return; case 0x04: /* BRR */ s->brr = val; return; case 0x08: /* SCR */ /* TODO : For SH7751, SCIF mask should be 0xfb. */ s->scr = val & ((s->feat & SH_SERIAL_FEAT_SCIF) ? 
0xfa : 0xff); if (!(val & (1 << 5))) s->flags |= SH_SERIAL_FLAG_TEND; if ((s->feat & SH_SERIAL_FEAT_SCIF) && s->txi) { qemu_set_irq(s->txi, val & (1 << 7)); } if (!(val & (1 << 6))) { qemu_set_irq(s->rxi, 0); } return; case 0x0c: /* FTDR / TDR */ if (s->chr) { ch = val; qemu_chr_fe_write(s->chr, &ch, 1); } s->dr = val; s->flags &= ~SH_SERIAL_FLAG_TDE; return; #if 0 case 0x14: /* FRDR / RDR */ ret = 0; break; #endif } if (s->feat & SH_SERIAL_FEAT_SCIF) { switch(offs) { case 0x10: /* FSR */ if (!(val & (1 << 6))) s->flags &= ~SH_SERIAL_FLAG_TEND; if (!(val & (1 << 5))) s->flags &= ~SH_SERIAL_FLAG_TDE; if (!(val & (1 << 4))) s->flags &= ~SH_SERIAL_FLAG_BRK; if (!(val & (1 << 1))) s->flags &= ~SH_SERIAL_FLAG_RDF; if (!(val & (1 << 0))) s->flags &= ~SH_SERIAL_FLAG_DR; if (!(val & (1 << 1)) || !(val & (1 << 0))) { if (s->rxi) { qemu_set_irq(s->rxi, 0); } } return; case 0x18: /* FCR */ s->fcr = val; switch ((val >> 6) & 3) { case 0: s->rtrg = 1; break; case 1: s->rtrg = 4; break; case 2: s->rtrg = 8; break; case 3: s->rtrg = 14; break; } if (val & (1 << 1)) { sh_serial_clear_fifo(s); s->sr &= ~(1 << 1); } return; case 0x20: /* SPTR */ s->sptr = val & 0xf3; return; case 0x24: /* LSR */ return; } } else { switch(offs) { #if 0 case 0x0c: ret = s->dr; break; case 0x10: ret = 0; break; #endif case 0x1c: s->sptr = val & 0x8f; return; } } fprintf(stderr, \"sh_serial: unsupported write to 0x%02\" HWADDR_PRIx \"\\n\", offs); abort(); }"} {"target": 0, "idx": 11683, "func": "void qmp_x_input_send_event(bool has_console, int64_t console, InputEventList *events, Error **errp) { InputEventList *e; QemuConsole *con; con = NULL; if (has_console) { con = qemu_console_lookup_by_index(console); if (!con) { error_setg(errp, \"console %\" PRId64 \" not found\", console); return; } } if (!runstate_is_running() && !runstate_check(RUN_STATE_SUSPENDED)) { error_setg(errp, \"VM not running\"); return; } for (e = events; e != NULL; e = e->next) { InputEvent *event = e->value; if (!qemu_input_find_handler(1 << event->kind, con)) { error_setg(errp, \"Input handler not found for \" \"event type %s\", InputEventKind_lookup[event->kind]); return; } } for (e = events; e != NULL; e = e->next) { InputEvent *event = e->value; qemu_input_event_send(con, event); } qemu_input_event_sync(); }"} {"target": 0, "idx": 11687, "func": "static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) { uint8_t event_code, media_status; media_status = 0; if (s->tray_open) { media_status = MS_TRAY_OPEN; } else if (bdrv_is_inserted(s->qdev.conf.bs)) { media_status = MS_MEDIA_PRESENT; } /* Event notification descriptor */ event_code = MEC_NO_CHANGE; if (media_status != MS_TRAY_OPEN) { if (s->media_event) { event_code = MEC_NEW_MEDIA; s->media_event = false; } else if (s->eject_request) { event_code = MEC_EJECT_REQUESTED; s->eject_request = false; } } outbuf[0] = event_code; outbuf[1] = media_status; /* These fields are reserved, just clear them. 
*/ outbuf[2] = 0; outbuf[3] = 0; return 4; }"} {"target": 0, "idx": 11688, "func": "static av_cold int targa_encode_init(AVCodecContext *avctx) { avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) return AVERROR(ENOMEM); avctx->coded_frame->key_frame = 1; avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; return 0; }"} {"target": 0, "idx": 11691, "func": "void qapi_dealloc_visitor_cleanup(QapiDeallocVisitor *v) { g_free(v); }"} {"target": 0, "idx": 11692, "func": "static void imx25_pdk_init(MachineState *machine) { IMX25PDK *s = g_new0(IMX25PDK, 1); unsigned int ram_size; unsigned int alias_offset; int i; object_initialize(&s->soc, sizeof(s->soc), TYPE_FSL_IMX25); object_property_add_child(OBJECT(machine), \"soc\", OBJECT(&s->soc), &error_abort); object_property_set_bool(OBJECT(&s->soc), true, \"realized\", &error_fatal); /* We need to initialize our memory */ if (machine->ram_size > (FSL_IMX25_SDRAM0_SIZE + FSL_IMX25_SDRAM1_SIZE)) { error_report(\"WARNING: RAM size \" RAM_ADDR_FMT \" above max supported, \" \"reduced to %x\", machine->ram_size, FSL_IMX25_SDRAM0_SIZE + FSL_IMX25_SDRAM1_SIZE); machine->ram_size = FSL_IMX25_SDRAM0_SIZE + FSL_IMX25_SDRAM1_SIZE; } memory_region_allocate_system_memory(&s->ram, NULL, \"imx25.ram\", machine->ram_size); memory_region_add_subregion(get_system_memory(), FSL_IMX25_SDRAM0_ADDR, &s->ram); /* initialize the alias memory if any */ for (i = 0, ram_size = machine->ram_size, alias_offset = 0; (i < 2) && ram_size; i++) { unsigned int size; static const struct { hwaddr addr; unsigned int size; } ram[2] = { { FSL_IMX25_SDRAM0_ADDR, FSL_IMX25_SDRAM0_SIZE }, { FSL_IMX25_SDRAM1_ADDR, FSL_IMX25_SDRAM1_SIZE }, }; size = MIN(ram_size, ram[i].size); ram_size -= size; if (size < ram[i].size) { memory_region_init_alias(&s->ram_alias, NULL, \"ram.alias\", &s->ram, alias_offset, ram[i].size - size); memory_region_add_subregion(get_system_memory(), ram[i].addr + size, &s->ram_alias); } alias_offset += ram[i].size; } imx25_pdk_binfo.ram_size = machine->ram_size; imx25_pdk_binfo.kernel_filename = machine->kernel_filename; imx25_pdk_binfo.kernel_cmdline = machine->kernel_cmdline; imx25_pdk_binfo.initrd_filename = machine->initrd_filename; imx25_pdk_binfo.loader_start = FSL_IMX25_SDRAM0_ADDR; imx25_pdk_binfo.board_id = 1771, imx25_pdk_binfo.nb_cpus = 1; /* * We test explicitly for qtest here as it is not done (yet?) in * arm_load_kernel(). Without this the \"make check\" command would * fail. */ if (!qtest_enabled()) { arm_load_kernel(&s->soc.cpu, &imx25_pdk_binfo); } else { /* * This I2C device doesn't exist on the real board. * We add it here (only on qtest usage) to be able to do a bit * of simple qtest. See \"make check\" for details. 
*/ i2c_create_slave((I2CBus *)qdev_get_child_bus(DEVICE(&s->soc.i2c[0]), \"i2c-bus.0\"), \"ds1338\", 0x68); } }"} {"target": 0, "idx": 11693, "func": "static void gd_ungrab_pointer(GtkDisplayState *s) { #if GTK_CHECK_VERSION(3, 0, 0) GdkDisplay *display = gtk_widget_get_display(s->drawing_area); GdkDeviceManager *mgr = gdk_display_get_device_manager(display); GList *devices = gdk_device_manager_list_devices(mgr, GDK_DEVICE_TYPE_MASTER); GList *tmp = devices; while (tmp) { GdkDevice *dev = tmp->data; if (gdk_device_get_source(dev) == GDK_SOURCE_MOUSE) { gdk_device_ungrab(dev, GDK_CURRENT_TIME); } tmp = tmp->next; } g_list_free(devices); #else gdk_pointer_ungrab(GDK_CURRENT_TIME); #endif }"} {"target": 1, "idx": 11696, "func": "void machine_register_compat_props(MachineState *machine) { MachineClass *mc = MACHINE_GET_CLASS(machine); int i; GlobalProperty *p; if (!mc->compat_props) { return; } for (i = 0; i < mc->compat_props->len; i++) { p = g_array_index(mc->compat_props, GlobalProperty *, i); qdev_prop_register_global(p); } }"} {"target": 1, "idx": 11723, "func": "static int y216_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *pic = data; const uint16_t *src = (uint16_t *)avpkt->data; uint16_t *y, *u, *v, aligned_width = FFALIGN(avctx->width, 4); int i, j, ret; if (avpkt->size < 4 * avctx->height * aligned_width) { av_log(avctx, AV_LOG_ERROR, \"Insufficient input data.\\n\"); return AVERROR(EINVAL); } if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) return ret; pic->key_frame = 1; pic->pict_type = AV_PICTURE_TYPE_I; y = (uint16_t *)pic->data[0]; u = (uint16_t *)pic->data[1]; v = (uint16_t *)pic->data[2]; for (i = 0; i < avctx->height; i++) { for (j = 0; j < avctx->width >> 1; j++) { u[ j ] = src[4 * j ] << 2 | src[4 * j ] >> 14; y[2 * j ] = src[4 * j + 1] << 2 | src[4 * j + 1] >> 14; v[ j ] = src[4 * j + 2] << 2 | src[4 * j + 2] >> 14; y[2 * j + 1] = src[4 * j + 3] << 2 | src[4 * j + 3] >> 14; } y += pic->linesize[0] >> 1; u += pic->linesize[1] >> 1; v += pic->linesize[2] >> 1; src += aligned_width << 1; } *got_frame = 1; return avpkt->size; }"} {"target": 1, "idx": 11731, "func": "void do_addo (void) { T2 = T0; T0 += T1; if (likely(!((T2 ^ T1 ^ (-1)) & (T2 ^ T0) & (1 << 31)))) { xer_ov = 0; } else { xer_so = 1; xer_ov = 1; } }"} {"target": 1, "idx": 11732, "func": "static void nbd_coroutine_start(NbdClientSession *s, struct nbd_request *request) { /* Poor man semaphore. The free_sema is locked when no other request * can be accepted, and unlocked after receiving one reply. */ if (s->in_flight >= MAX_NBD_REQUESTS - 1) { qemu_co_mutex_lock(&s->free_sema); assert(s->in_flight < MAX_NBD_REQUESTS); } s->in_flight++; /* s->recv_coroutine[i] is set as soon as we get the send_lock. 
*/ }"} {"target": 1, "idx": 11735, "func": "void vga_init(VGAState *s) { int vga_io_memory; register_savevm(\"vga\", 0, 2, vga_save, vga_load, s); register_ioport_write(0x3c0, 16, 1, vga_ioport_write, s); register_ioport_write(0x3b4, 2, 1, vga_ioport_write, s); register_ioport_write(0x3d4, 2, 1, vga_ioport_write, s); register_ioport_write(0x3ba, 1, 1, vga_ioport_write, s); register_ioport_write(0x3da, 1, 1, vga_ioport_write, s); register_ioport_read(0x3c0, 16, 1, vga_ioport_read, s); register_ioport_read(0x3b4, 2, 1, vga_ioport_read, s); register_ioport_read(0x3d4, 2, 1, vga_ioport_read, s); register_ioport_read(0x3ba, 1, 1, vga_ioport_read, s); register_ioport_read(0x3da, 1, 1, vga_ioport_read, s); s->bank_offset = 0; #ifdef CONFIG_BOCHS_VBE s->vbe_regs[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID0; s->vbe_bank_mask = ((s->vram_size >> 16) - 1); #if defined (TARGET_I386) register_ioport_read(0x1ce, 1, 2, vbe_ioport_read_index, s); register_ioport_read(0x1cf, 1, 2, vbe_ioport_read_data, s); register_ioport_write(0x1ce, 1, 2, vbe_ioport_write_index, s); register_ioport_write(0x1cf, 1, 2, vbe_ioport_write_data, s); /* old Bochs IO ports */ register_ioport_read(0xff80, 1, 2, vbe_ioport_read_index, s); register_ioport_read(0xff81, 1, 2, vbe_ioport_read_data, s); register_ioport_write(0xff80, 1, 2, vbe_ioport_write_index, s); register_ioport_write(0xff81, 1, 2, vbe_ioport_write_data, s); #else register_ioport_read(0x1ce, 1, 2, vbe_ioport_read_index, s); register_ioport_read(0x1d0, 1, 2, vbe_ioport_read_data, s); register_ioport_write(0x1ce, 1, 2, vbe_ioport_write_index, s); register_ioport_write(0x1d0, 1, 2, vbe_ioport_write_data, s); #endif #endif /* CONFIG_BOCHS_VBE */ vga_io_memory = cpu_register_io_memory(0, vga_mem_read, vga_mem_write, s); cpu_register_physical_memory(isa_mem_base + 0x000a0000, 0x20000, vga_io_memory); }"} {"target": 1, "idx": 11745, "func": "void audio_decode_example(const char *outfilename, const char *filename) { AVCodec *codec; AVCodecContext *c= NULL; int out_size, size, len; FILE *f, *outfile; uint8_t *outbuf; uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE], *inbuf_ptr; printf(\"Audio decoding\\n\"); /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */ memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE); /* find the mpeg audio decoder */ codec = avcodec_find_decoder(CODEC_ID_MP2); if (!codec) { fprintf(stderr, \"codec not found\\n\"); exit(1); } c= avcodec_alloc_context(); /* open it */ if (avcodec_open(c, codec) < 0) { fprintf(stderr, \"could not open codec\\n\"); exit(1); } outbuf = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); f = fopen(filename, \"r\"); if (!f) { fprintf(stderr, \"could not open %s\\n\", filename); exit(1); } outfile = fopen(outfilename, \"w\"); if (!outfile) { free(c); exit(1); } /* decode until eof */ inbuf_ptr = inbuf; for(;;) { size = fread(inbuf, 1, INBUF_SIZE, f); if (size == 0) break; inbuf_ptr = inbuf; while (size > 0) { len = avcodec_decode_audio(c, (short *)outbuf, &out_size, inbuf_ptr, size); if (len < 0) { fprintf(stderr, \"Error while decoding\\n\"); exit(1); } if (out_size > 0) { /* if a frame has been decoded, output it */ fwrite(outbuf, 1, out_size, outfile); } size -= len; inbuf_ptr += len; } } fclose(outfile); fclose(f); free(outbuf); avcodec_close(c); free(c); }"} {"target": 0, "idx": 11825, "func": "static int nprobe(AVFormatContext *s, uint8_t *enc_header, const uint8_t *n_val) { OMAContext *oc = s->priv_data; uint32_t pos, taglen, datalen; struct AVDES av_des; if (!enc_header || 
!n_val) return -1; pos = OMA_ENC_HEADER_SIZE + oc->k_size; if (!memcmp(&enc_header[pos], \"EKB \", 4)) pos += 32; if (AV_RB32(&enc_header[pos]) != oc->rid) av_log(s, AV_LOG_DEBUG, \"Mismatching RID\\n\"); taglen = AV_RB32(&enc_header[pos+32]); datalen = AV_RB32(&enc_header[pos+36]) >> 4; pos += 44 + taglen; av_des_init(&av_des, n_val, 192, 1); while (datalen-- > 0) { av_des_crypt(&av_des, oc->r_val, &enc_header[pos], 2, NULL, 1); kset(s, oc->r_val, NULL, 16); if (!rprobe(s, enc_header, oc->r_val)) return 0; pos += 16; } return -1; }"} {"target": 0, "idx": 11841, "func": "static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd) { s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT; s->io_buffer_index = 0; s->io_buffer_size = 0; s->dma_cmd = dma_cmd; switch (dma_cmd) { case IDE_DMA_READ: block_acct_start(bdrv_get_stats(s->bs), &s->acct, s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); break; case IDE_DMA_WRITE: block_acct_start(bdrv_get_stats(s->bs), &s->acct, s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE); break; default: break; } ide_start_dma(s, ide_dma_cb); }"} {"target": 0, "idx": 11847, "func": "void s390_feat_bitmap_to_ascii(const S390FeatBitmap bitmap, void *opaque, void (*fn)(const char *name, void *opaque)) { S390Feat feat; feat = find_first_bit(bitmap, S390_FEAT_MAX); while (feat < S390_FEAT_MAX) { fn(s390_feat_def(feat)->name, opaque); feat = find_next_bit(bitmap, S390_FEAT_MAX, feat + 1); }; }"} {"target": 1, "idx": 11863, "func": "static uint64_t hpet_get_ticks(void) { uint64_t ticks; ticks = ns_to_ticks(qemu_get_clock(vm_clock) + hpet_statep->hpet_offset); return ticks; }"} {"target": 0, "idx": 11871, "func": "static void compute_pkt_fields(AVFormatContext *s, AVStream *st, AVCodecParserContext *pc, AVPacket *pkt) { int num, den, presentation_delayed, delay, i; int64_t offset; if (s->flags & AVFMT_FLAG_NOFILLIN) return; if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE) pkt->dts= AV_NOPTS_VALUE; if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE) //FIXME Set low_delay = 0 when has_b_frames = 1 st->codec->has_b_frames = 1; /* do we have a video B-frame ? */ delay= st->codec->has_b_frames; presentation_delayed = 0; /* XXX: need has_b_frame, but cannot get it if the codec is not initialized */ if (delay && pc && pc->pict_type != FF_B_TYPE) presentation_delayed = 1; if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63 /*&& pkt->dts-(1LL<pts_wrap_bits) < pkt->pts*/){ pkt->dts -= 1LL<pts_wrap_bits; } // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg) // we take the conservative approach and discard both // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly. 
if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){ av_log(s, AV_LOG_WARNING, \"invalid dts/pts combination\\n\"); pkt->dts= pkt->pts= AV_NOPTS_VALUE; } if (pkt->duration == 0) { compute_frame_duration(&num, &den, st, pc, pkt); if (den && num) { pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN); if(pkt->duration != 0 && s->packet_buffer) update_initial_durations(s, st, pkt); } } /* correct timestamps with byte offset if demuxers only have timestamps on packet boundaries */ if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){ /* this will estimate bitrate based on this frame's duration and size */ offset = av_rescale(pc->offset, pkt->duration, pkt->size); if(pkt->pts != AV_NOPTS_VALUE) pkt->pts += offset; if(pkt->dts != AV_NOPTS_VALUE) pkt->dts += offset; } if (pc && pc->dts_sync_point >= 0) { // we have synchronization info from the parser int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num; if (den > 0) { int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den; if (pkt->dts != AV_NOPTS_VALUE) { // got DTS from the stream, update reference timestamp st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den; pkt->pts = pkt->dts + pc->pts_dts_delta * num / den; } else if (st->reference_dts != AV_NOPTS_VALUE) { // compute DTS based on reference timestamp pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den; pkt->pts = pkt->dts + pc->pts_dts_delta * num / den; } if (pc->dts_sync_point > 0) st->reference_dts = pkt->dts; // new reference } } /* This may be redundant, but it should not hurt. */ if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts) presentation_delayed = 1; // av_log(NULL, AV_LOG_DEBUG, \"IN delayed:%d pts:%\"PRId64\", dts:%\"PRId64\" cur_dts:%\"PRId64\" st:%d pc:%p\\n\", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc); /* interpolate PTS and DTS if they are not present */ //We skip H264 currently because delay and has_b_frames are not reliably set if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){ if (presentation_delayed) { /* DTS = decompression timestamp */ /* PTS = presentation timestamp */ if (pkt->dts == AV_NOPTS_VALUE) pkt->dts = st->last_IP_pts; update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); if (pkt->dts == AV_NOPTS_VALUE) pkt->dts = st->cur_dts; /* this is tricky: the dts must be incremented by the duration of the frame we are displaying, i.e. 
the last I- or P-frame */ if (st->last_IP_duration == 0) st->last_IP_duration = pkt->duration; if(pkt->dts != AV_NOPTS_VALUE) st->cur_dts = pkt->dts + st->last_IP_duration; st->last_IP_duration = pkt->duration; st->last_IP_pts= pkt->pts; /* cannot compute PTS if not present (we can compute it only by knowing the future */ } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){ if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){ int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts); int64_t new_diff= FFABS(st->cur_dts - pkt->pts); if(old_diff < new_diff && old_diff < (pkt->duration>>3)){ pkt->pts += pkt->duration; // av_log(NULL, AV_LOG_DEBUG, \"id:%d old:%\"PRId64\" new:%\"PRId64\" dur:%d cur:%\"PRId64\" size:%d\\n\", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size); } } /* presentation is not delayed : PTS and DTS are the same */ if(pkt->pts == AV_NOPTS_VALUE) pkt->pts = pkt->dts; update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts); if(pkt->pts == AV_NOPTS_VALUE) pkt->pts = st->cur_dts; pkt->dts = pkt->pts; if(pkt->pts != AV_NOPTS_VALUE) st->cur_dts = pkt->pts + pkt->duration; } } if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){ st->pts_buffer[0]= pkt->pts; for(i=0; ipts_buffer[i] > st->pts_buffer[i+1]; i++) FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); if(pkt->dts == AV_NOPTS_VALUE) pkt->dts= st->pts_buffer[0]; if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet } if(pkt->dts > st->cur_dts) st->cur_dts = pkt->dts; } // av_log(NULL, AV_LOG_ERROR, \"OUTdelayed:%d/%d pts:%\"PRId64\", dts:%\"PRId64\" cur_dts:%\"PRId64\"\\n\", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts); /* update flags */ if(is_intra_only(st->codec)) pkt->flags |= AV_PKT_FLAG_KEY; else if (pc) { pkt->flags = 0; /* keyframe computation */ if (pc->key_frame == 1) pkt->flags |= AV_PKT_FLAG_KEY; else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE) pkt->flags |= AV_PKT_FLAG_KEY; } if (pc) pkt->convergence_duration = pc->convergence_duration; }"} {"target": 0, "idx": 11882, "func": "static void avc_biwgt_8width_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height, int32_t log2_denom, int32_t src_weight, int32_t dst_weight, int32_t offset_in) { uint8_t cnt; v16i8 src_wgt, dst_wgt, wgt; v16i8 src0, src1, src2, src3; v16i8 dst0, dst1, dst2, dst3; v8i16 temp0, temp1, temp2, temp3; v8i16 denom, offset, add_val; int32_t val = 128 * (src_weight + dst_weight); offset_in = ((offset_in + 1) | 1) << log2_denom; src_wgt = __msa_fill_b(src_weight); dst_wgt = __msa_fill_b(dst_weight); offset = __msa_fill_h(offset_in); denom = __msa_fill_h(log2_denom + 1); add_val = __msa_fill_h(val); offset += add_val; wgt = __msa_ilvev_b(dst_wgt, src_wgt); for (cnt = height / 4; cnt--;) { LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3); src += (4 * src_stride); LOAD_4VECS_SB(dst, dst_stride, dst0, dst1, dst2, dst3); XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128); XORI_B_4VECS_SB(dst0, dst1, dst2, dst3, dst0, dst1, dst2, dst3, 128); ILVR_B_4VECS_SH(src0, src1, src2, src3, dst0, dst1, dst2, dst3, temp0, temp1, temp2, temp3); temp0 = __msa_dpadd_s_h(offset, wgt, (v16i8) temp0); temp1 = __msa_dpadd_s_h(offset, wgt, (v16i8) temp1); temp2 = __msa_dpadd_s_h(offset, wgt, (v16i8) temp2); temp3 = __msa_dpadd_s_h(offset, wgt, (v16i8) temp3); 
SRA_4VECS(temp0, temp1, temp2, temp3, temp0, temp1, temp2, temp3, denom); temp0 = CLIP_UNSIGNED_CHAR_H(temp0); temp1 = CLIP_UNSIGNED_CHAR_H(temp1); temp2 = CLIP_UNSIGNED_CHAR_H(temp2); temp3 = CLIP_UNSIGNED_CHAR_H(temp3); PCKEV_B_STORE_8_BYTES_4(temp0, temp1, temp2, temp3, dst, dst_stride); dst += 4 * dst_stride; } }"} {"target": 0, "idx": 11883, "func": "static void add_input_streams(OptionsContext *o, AVFormatContext *ic) { int i; char *next, *codec_tag = NULL; for (i = 0; i < ic->nb_streams; i++) { AVStream *st = ic->streams[i]; AVCodecContext *dec = st->codec; InputStream *ist = av_mallocz(sizeof(*ist)); char *framerate = NULL; if (!ist) exit(1); GROW_ARRAY(input_streams, nb_input_streams); input_streams[nb_input_streams - 1] = ist; ist->st = st; ist->file_index = nb_input_files; ist->discard = 1; st->discard = AVDISCARD_ALL; ist->ts_scale = 1.0; MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st); MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, ic, st); if (codec_tag) { uint32_t tag = strtol(codec_tag, &next, 0); if (*next) tag = AV_RL32(codec_tag); st->codec->codec_tag = tag; } ist->dec = choose_decoder(o, ic, st); ist->opts = filter_codec_opts(o->g->codec_opts, ist->st->codec->codec_id, ic, st, ist->dec); ist->reinit_filters = -1; MATCH_PER_STREAM_OPT(reinit_filters, i, ist->reinit_filters, ic, st); ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE; switch (dec->codec_type) { case AVMEDIA_TYPE_VIDEO: if(!ist->dec) ist->dec = avcodec_find_decoder(dec->codec_id); if (dec->lowres) { dec->flags |= CODEC_FLAG_EMU_EDGE; } ist->resample_height = dec->height; ist->resample_width = dec->width; ist->resample_pix_fmt = dec->pix_fmt; MATCH_PER_STREAM_OPT(frame_rates, str, framerate, ic, st); if (framerate && av_parse_video_rate(&ist->framerate, framerate) < 0) { av_log(NULL, AV_LOG_ERROR, \"Error parsing framerate %s.\\n\", framerate); exit(1); } ist->top_field_first = -1; MATCH_PER_STREAM_OPT(top_field_first, i, ist->top_field_first, ic, st); break; case AVMEDIA_TYPE_AUDIO: ist->guess_layout_max = INT_MAX; MATCH_PER_STREAM_OPT(guess_layout_max, i, ist->guess_layout_max, ic, st); guess_input_channel_layout(ist); ist->resample_sample_fmt = dec->sample_fmt; ist->resample_sample_rate = dec->sample_rate; ist->resample_channels = dec->channels; ist->resample_channel_layout = dec->channel_layout; break; case AVMEDIA_TYPE_DATA: case AVMEDIA_TYPE_SUBTITLE: if(!ist->dec) ist->dec = avcodec_find_decoder(dec->codec_id); MATCH_PER_STREAM_OPT(fix_sub_duration, i, ist->fix_sub_duration, ic, st); break; case AVMEDIA_TYPE_ATTACHMENT: case AVMEDIA_TYPE_UNKNOWN: break; default: abort(); } } }"} {"target": 0, "idx": 11895, "func": "static av_cold int asink_init(AVFilterContext *ctx, void *opaque) { BufferSinkContext *buf = ctx->priv; AVABufferSinkParams *params = opaque; if (params && params->sample_fmts) { buf->sample_fmts = ff_copy_int_list(params->sample_fmts); if (!buf->sample_fmts) return AVERROR(ENOMEM); } if (params && params->sample_rates) { buf->sample_rates = ff_copy_int_list(params->sample_rates); if (!buf->sample_rates) return AVERROR(ENOMEM); } if (params && (params->channel_layouts || params->channel_counts)) { if (params->all_channel_counts) { av_log(ctx, AV_LOG_ERROR, \"Conflicting all_channel_counts and list in parameters\\n\"); return AVERROR(EINVAL); } buf->channel_layouts = concat_channels_lists(params->channel_layouts, params->channel_counts); if (!buf->channel_layouts) return AVERROR(ENOMEM); } if (params) buf->all_channel_counts = params->all_channel_counts; return 
common_init(ctx); }"} {"target": 0, "idx": 11899, "func": "FWCfgState *pc_memory_init(PCMachineState *pcms, MemoryRegion *system_memory, MemoryRegion *rom_memory, MemoryRegion **ram_memory, PcGuestInfo *guest_info) { int linux_boot, i; MemoryRegion *ram, *option_rom_mr; MemoryRegion *ram_below_4g, *ram_above_4g; FWCfgState *fw_cfg; MachineState *machine = MACHINE(pcms); assert(machine->ram_size == pcms->below_4g_mem_size + pcms->above_4g_mem_size); linux_boot = (machine->kernel_filename != NULL); /* Allocate RAM. We allocate it as a single memory region and use * aliases to address portions of it, mostly for backwards compatibility * with older qemus that used qemu_ram_alloc(). */ ram = g_malloc(sizeof(*ram)); memory_region_allocate_system_memory(ram, NULL, \"pc.ram\", machine->ram_size); *ram_memory = ram; ram_below_4g = g_malloc(sizeof(*ram_below_4g)); memory_region_init_alias(ram_below_4g, NULL, \"ram-below-4g\", ram, 0, pcms->below_4g_mem_size); memory_region_add_subregion(system_memory, 0, ram_below_4g); e820_add_entry(0, pcms->below_4g_mem_size, E820_RAM); if (pcms->above_4g_mem_size > 0) { ram_above_4g = g_malloc(sizeof(*ram_above_4g)); memory_region_init_alias(ram_above_4g, NULL, \"ram-above-4g\", ram, pcms->below_4g_mem_size, pcms->above_4g_mem_size); memory_region_add_subregion(system_memory, 0x100000000ULL, ram_above_4g); e820_add_entry(0x100000000ULL, pcms->above_4g_mem_size, E820_RAM); } if (!guest_info->has_reserved_memory && (machine->ram_slots || (machine->maxram_size > machine->ram_size))) { MachineClass *mc = MACHINE_GET_CLASS(machine); error_report(\"\\\"-memory 'slots|maxmem'\\\" is not supported by: %s\", mc->name); exit(EXIT_FAILURE); } /* initialize hotplug memory address space */ if (guest_info->has_reserved_memory && (machine->ram_size < machine->maxram_size)) { ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size; if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) { error_report(\"unsupported amount of memory slots: %\"PRIu64, machine->ram_slots); exit(EXIT_FAILURE); } if (QEMU_ALIGN_UP(machine->maxram_size, TARGET_PAGE_SIZE) != machine->maxram_size) { error_report(\"maximum memory size must by aligned to multiple of \" \"%d bytes\", TARGET_PAGE_SIZE); exit(EXIT_FAILURE); } pcms->hotplug_memory.base = ROUND_UP(0x100000000ULL + pcms->above_4g_mem_size, 1ULL << 30); if (pcms->enforce_aligned_dimm) { /* size hotplug region assuming 1G page max alignment per slot */ hotplug_mem_size += (1ULL << 30) * machine->ram_slots; } if ((pcms->hotplug_memory.base + hotplug_mem_size) < hotplug_mem_size) { error_report(\"unsupported amount of maximum memory: \" RAM_ADDR_FMT, machine->maxram_size); exit(EXIT_FAILURE); } memory_region_init(&pcms->hotplug_memory.mr, OBJECT(pcms), \"hotplug-memory\", hotplug_mem_size); memory_region_add_subregion(system_memory, pcms->hotplug_memory.base, &pcms->hotplug_memory.mr); } /* Initialize PC system firmware */ pc_system_firmware_init(rom_memory, guest_info->isapc_ram_fw); option_rom_mr = g_malloc(sizeof(*option_rom_mr)); memory_region_init_ram(option_rom_mr, NULL, \"pc.rom\", PC_ROM_SIZE, &error_abort); vmstate_register_ram_global(option_rom_mr); memory_region_add_subregion_overlap(rom_memory, PC_ROM_MIN_VGA, option_rom_mr, 1); fw_cfg = bochs_bios_init(); rom_set_fw(fw_cfg); if (guest_info->has_reserved_memory && pcms->hotplug_memory.base) { uint64_t *val = g_malloc(sizeof(*val)); uint64_t res_mem_end = pcms->hotplug_memory.base + memory_region_size(&pcms->hotplug_memory.mr); *val = cpu_to_le64(ROUND_UP(res_mem_end, 0x1ULL << 30)); 
fw_cfg_add_file(fw_cfg, \"etc/reserved-memory-end\", val, sizeof(*val)); } if (linux_boot) { load_linux(pcms, fw_cfg); } for (i = 0; i < nb_option_roms; i++) { rom_add_option(option_rom[i].name, option_rom[i].bootindex); } guest_info->fw_cfg = fw_cfg; return fw_cfg; }"} {"target": 0, "idx": 11912, "func": "static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *iov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque, int is_write) { VectorTranslationState *s = qemu_mallocz(sizeof(*s)); BlockDriverAIOCB *aiocb = qemu_aio_get(bs, cb, opaque); s->this_aiocb = aiocb; s->iov = iov; s->bounce = qemu_memalign(512, nb_sectors * 512); s->is_write = is_write; if (is_write) { qemu_iovec_to_buffer(s->iov, s->bounce); s->aiocb = bdrv_aio_write(bs, sector_num, s->bounce, nb_sectors, bdrv_aio_rw_vector_cb, s); } else { s->aiocb = bdrv_aio_read(bs, sector_num, s->bounce, nb_sectors, bdrv_aio_rw_vector_cb, s); } return aiocb; }"} {"target": 0, "idx": 11914, "func": "static void pc_machine_set_max_ram_below_4g(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { PCMachineState *pcms = PC_MACHINE(obj); Error *error = NULL; uint64_t value; visit_type_size(v, name, &value, &error); if (error) { error_propagate(errp, error); return; } if (value > (1ULL << 32)) { error_setg(&error, \"Machine option 'max-ram-below-4g=%\"PRIu64 \"' expects size less than or equal to 4G\", value); error_propagate(errp, error); return; } if (value < (1ULL << 20)) { error_report(\"Warning: small max_ram_below_4g(%\"PRIu64 \") less than 1M. BIOS may not work..\", value); } pcms->max_ram_below_4g = value; }"} {"target": 1, "idx": 11927, "func": "static int rv30_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst) { int i, j, k; for(i = 0; i < 4; i++, dst += r->intra_types_stride - 4){ for(j = 0; j < 4; j+= 2){ int code = svq3_get_ue_golomb(gb) << 1; if(code >= 81*2){ av_log(r->s.avctx, AV_LOG_ERROR, \"Incorrect intra prediction code\\n\"); return -1; } for(k = 0; k < 2; k++){ int A = dst[-r->intra_types_stride] + 1; int B = dst[-1] + 1; *dst++ = rv30_itype_from_context[A * 90 + B * 9 + rv30_itype_code[code + k]]; if(dst[-1] == 9){ av_log(r->s.avctx, AV_LOG_ERROR, \"Incorrect intra prediction mode\\n\"); return -1; } } } } return 0; }"} {"target": 1, "idx": 11930, "func": "static int stdio_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size) { QEMUFileStdio *s = opaque; FILE *fp = s->stdio_file; int bytes; do { clearerr(fp); bytes = fread(buf, 1, size, fp); } while ((bytes == 0) && ferror(fp) && (errno == EINTR)); return bytes; }"} {"target": 0, "idx": 11941, "func": "static av_cold int set_channel_info(AC3EncodeContext *s, int channels, int64_t *channel_layout) { int ch_layout; if (channels < 1 || channels > AC3_MAX_CHANNELS) return AVERROR(EINVAL); if ((uint64_t)*channel_layout > 0x7FF) return AVERROR(EINVAL); ch_layout = *channel_layout; if (!ch_layout) ch_layout = avcodec_guess_channel_layout(channels, CODEC_ID_AC3, NULL); if (av_get_channel_layout_nb_channels(ch_layout) != channels) return AVERROR(EINVAL); s->lfe_on = !!(ch_layout & AV_CH_LOW_FREQUENCY); s->channels = channels; s->fbw_channels = channels - s->lfe_on; s->lfe_channel = s->lfe_on ? 
s->fbw_channels : -1; if (s->lfe_on) ch_layout -= AV_CH_LOW_FREQUENCY; switch (ch_layout) { case AV_CH_LAYOUT_MONO: s->channel_mode = AC3_CHMODE_MONO; break; case AV_CH_LAYOUT_STEREO: s->channel_mode = AC3_CHMODE_STEREO; break; case AV_CH_LAYOUT_SURROUND: s->channel_mode = AC3_CHMODE_3F; break; case AV_CH_LAYOUT_2_1: s->channel_mode = AC3_CHMODE_2F1R; break; case AV_CH_LAYOUT_4POINT0: s->channel_mode = AC3_CHMODE_3F1R; break; case AV_CH_LAYOUT_QUAD: case AV_CH_LAYOUT_2_2: s->channel_mode = AC3_CHMODE_2F2R; break; case AV_CH_LAYOUT_5POINT0: case AV_CH_LAYOUT_5POINT0_BACK: s->channel_mode = AC3_CHMODE_3F2R; break; default: return AVERROR(EINVAL); } s->has_center = (s->channel_mode & 0x01) && s->channel_mode != AC3_CHMODE_MONO; s->has_surround = s->channel_mode & 0x04; s->channel_map = ff_ac3_enc_channel_map[s->channel_mode][s->lfe_on]; *channel_layout = ch_layout; if (s->lfe_on) *channel_layout |= AV_CH_LOW_FREQUENCY; return 0; }"} {"target": 0, "idx": 11945, "func": "static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index, Node *nodes, MemoryRegionSection *sections) { PhysPageEntry *p; int i; for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) { if (lp.ptr == PHYS_MAP_NODE_NIL) { return §ions[PHYS_SECTION_UNASSIGNED]; } p = nodes[lp.ptr]; lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)]; } return §ions[lp.ptr]; }"} {"target": 0, "idx": 11947, "func": "static int ram_save_block(QEMUFile *f, bool last_stage) { RAMBlock *block = last_seen_block; ram_addr_t offset = last_offset; int bytes_sent = -1; MemoryRegion *mr; ram_addr_t current_addr; if (!block) block = QTAILQ_FIRST(&ram_list.blocks); do { mr = block->mr; if (migration_bitmap_test_and_reset_dirty(mr, offset)) { uint8_t *p; int cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0; p = memory_region_get_ram_ptr(mr) + offset; if (is_dup_page(p)) { acct_info.dup_pages++; save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS); qemu_put_byte(f, *p); bytes_sent = 1; } else if (migrate_use_xbzrle()) { current_addr = block->offset + offset; bytes_sent = save_xbzrle_page(f, p, current_addr, block, offset, cont, last_stage); if (!last_stage) { p = get_cached_data(XBZRLE.cache, current_addr); } } /* either we didn't send yet (we may have had XBZRLE overflow) */ if (bytes_sent == -1) { save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE); qemu_put_buffer(f, p, TARGET_PAGE_SIZE); bytes_sent = TARGET_PAGE_SIZE; acct_info.norm_pages++; } /* if page is unmodified, continue to the next */ if (bytes_sent != 0) { last_sent_block = block; break; } } offset += TARGET_PAGE_SIZE; if (offset >= block->length) { offset = 0; block = QTAILQ_NEXT(block, next); if (!block) block = QTAILQ_FIRST(&ram_list.blocks); } } while (block != last_seen_block || offset != last_offset); last_seen_block = block; last_offset = offset; return bytes_sent; }"} {"target": 0, "idx": 11955, "func": "static int os_host_main_loop_wait(int64_t timeout) { int ret; static int spin_counter; glib_pollfds_fill(&timeout); /* If the I/O thread is very busy or we are incorrectly busy waiting in * the I/O thread, this can lead to starvation of the BQL such that the * VCPU threads never run. To make sure we can detect the later case, * print a message to the screen. If we run into this condition, create * a fake timeout in order to give the VCPU threads a chance to run. 
*/ if (!timeout && (spin_counter > MAX_MAIN_LOOP_SPIN)) { static bool notified; if (!notified && !qtest_driver()) { fprintf(stderr, \"main-loop: WARNING: I/O thread spun for %d iterations\\n\", MAX_MAIN_LOOP_SPIN); notified = true; } timeout = SCALE_MS; } if (timeout) { spin_counter = 0; qemu_mutex_unlock_iothread(); } else { spin_counter++; } ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout); if (timeout) { qemu_mutex_lock_iothread(); } glib_pollfds_poll(); return ret; }"} {"target": 0, "idx": 11963, "func": "static void qemu_kill_report(void) { if (!qtest_driver() && shutdown_signal != -1) { fprintf(stderr, \"qemu: terminating on signal %d\", shutdown_signal); if (shutdown_pid == 0) { /* This happens for eg ^C at the terminal, so it's worth * avoiding printing an odd message in that case. */ fputc('\\n', stderr); } else { fprintf(stderr, \" from pid \" FMT_pid \"\\n\", shutdown_pid); } shutdown_signal = -1; } }"} {"target": 0, "idx": 11965, "func": "static void term_bol(void) { term_cmd_buf_index = 0; }"} {"target": 0, "idx": 11968, "func": "static void imx_serial_write(void *opaque, target_phys_addr_t offset, uint64_t value, unsigned size) { IMXSerialState *s = (IMXSerialState *)opaque; unsigned char ch; DPRINTF(\"write(offset=%x, value = %x) to %s\\n\", offset >> 2, (unsigned int)value, s->chr ? s->chr->label : \"NODEV\"); switch (offset >> 2) { case 0x10: /* UTXD */ ch = value; if (s->ucr2 & UCR2_TXEN) { if (s->chr) { qemu_chr_fe_write(s->chr, &ch, 1); } s->usr1 &= ~USR1_TRDY; imx_update(s); s->usr1 |= USR1_TRDY; imx_update(s); } break; case 0x20: /* UCR1 */ s->ucr1 = value & 0xffff; DPRINTF(\"write(ucr1=%x)\\n\", (unsigned int)value); imx_update(s); break; case 0x21: /* UCR2 */ /* * Only a few bits in control register 2 are implemented as yet. * If it's intended to use a real serial device as a back-end, this * register will have to be implemented more fully. 
*/ if (!(value & UCR2_SRST)) { imx_serial_reset(s); imx_update(s); value |= UCR2_SRST; } if (value & UCR2_RXEN) { if (!(s->ucr2 & UCR2_RXEN)) { qemu_chr_accept_input(s->chr); } } s->ucr2 = value & 0xffff; break; case 0x25: /* USR1 */ value &= USR1_AWAKE | USR1_AIRINT | USR1_DTRD | USR1_AGTIM | USR1_FRAMERR | USR1_ESCF | USR1_RTSD | USR1_PARTYER; s->usr1 &= ~value; break; case 0x26: /* USR2 */ /* * Writing 1 to some bits clears them; all other * values are ignored */ value &= USR2_ADET | USR2_DTRF | USR2_IDLE | USR2_ACST | USR2_RIDELT | USR2_IRINT | USR2_WAKE | USR2_DCDDELT | USR2_RTSF | USR2_BRCD | USR2_ORE; s->usr2 &= ~value; break; /* * Linux expects to see what it writes to these registers * We don't currently alter the baud rate */ case 0x29: /* UBIR */ s->ubrc = value & 0xffff; break; case 0x2a: /* UBMR */ s->ubmr = value & 0xffff; break; case 0x2c: /* One ms reg */ s->onems = value & 0xffff; break; case 0x24: /* FIFO control register */ s->ufcr = value & 0xffff; break; case 0x22: /* UCR3 */ s->ucr3 = value & 0xffff; break; case 0x2d: /* UTS1 */ case 0x23: /* UCR4 */ IPRINTF(\"Unimplemented Register %x written to\\n\", offset >> 2); /* TODO */ break; default: IPRINTF(\"imx_serial_write: Bad offset 0x%x\\n\", (int)offset); } }"} {"target": 0, "idx": 11981, "func": "static int cinepak_decode (CinepakContext *s) { uint8_t *eod = (s->data + s->size); int i, result, strip_size, frame_flags, num_strips; int y0 = 0; int encoded_buf_size; /* if true, Cinepak data is from a Sega FILM/CPK file */ int sega_film_data = 0; if (s->size < 10) return -1; frame_flags = s->data[0]; num_strips = BE_16 (&s->data[8]); encoded_buf_size = ((s->data[1] << 16) | BE_16 (&s->data[2])); if (encoded_buf_size != s->size) sega_film_data = 1; if (sega_film_data) s->data += 12; else s->data += 10; if (num_strips > MAX_STRIPS) num_strips = MAX_STRIPS; for (i=0; i < num_strips; i++) { if ((s->data + 12) > eod) return -1; s->strips[i].id = BE_16 (s->data); s->strips[i].y1 = y0; s->strips[i].x1 = 0; s->strips[i].y2 = y0 + BE_16 (&s->data[8]); s->strips[i].x2 = s->avctx->width; strip_size = BE_16 (&s->data[2]) - 12; s->data += 12; strip_size = ((s->data + strip_size) > eod) ? 
(eod - s->data) : strip_size; if ((i > 0) && !(frame_flags & 0x01)) { memcpy (s->strips[i].v4_codebook, s->strips[i-1].v4_codebook, sizeof(s->strips[i].v4_codebook)); memcpy (s->strips[i].v1_codebook, s->strips[i-1].v1_codebook, sizeof(s->strips[i].v1_codebook)); } result = cinepak_decode_strip (s, &s->strips[i], s->data, strip_size); if (result != 0) return result; s->data += strip_size; y0 = s->strips[i].y2; } return 0; }"} {"target": 1, "idx": 11984, "func": "int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res) { BDRVQcowState *s = bs->opaque; int64_t size; int nb_clusters, refcount1, refcount2, i; QCowSnapshot *sn; uint16_t *refcount_table; int ret; size = bdrv_getlength(bs->file); nb_clusters = size_to_clusters(s, size); refcount_table = qemu_mallocz(nb_clusters * sizeof(uint16_t)); /* header */ inc_refcounts(bs, res, refcount_table, nb_clusters, 0, s->cluster_size); /* current L1 table */ ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, s->l1_table_offset, s->l1_size, 1); if (ret < 0) { return ret; } /* snapshots */ for(i = 0; i < s->nb_snapshots; i++) { sn = s->snapshots + i; ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, sn->l1_table_offset, sn->l1_size, 0); if (ret < 0) { return ret; } } inc_refcounts(bs, res, refcount_table, nb_clusters, s->snapshots_offset, s->snapshots_size); /* refcount data */ inc_refcounts(bs, res, refcount_table, nb_clusters, s->refcount_table_offset, s->refcount_table_size * sizeof(uint64_t)); for(i = 0; i < s->refcount_table_size; i++) { uint64_t offset, cluster; offset = s->refcount_table[i]; cluster = offset >> s->cluster_bits; /* Refcount blocks are cluster aligned */ if (offset & (s->cluster_size - 1)) { fprintf(stderr, \"ERROR refcount block %d is not \" \"cluster aligned; refcount table entry corrupted\\n\", i); res->corruptions++; continue; } if (cluster >= nb_clusters) { fprintf(stderr, \"ERROR refcount block %d is outside image\\n\", i); res->corruptions++; continue; } if (offset != 0) { inc_refcounts(bs, res, refcount_table, nb_clusters, offset, s->cluster_size); if (refcount_table[cluster] != 1) { fprintf(stderr, \"ERROR refcount block %d refcount=%d\\n\", i, refcount_table[cluster]); res->corruptions++; } } } /* compare ref counts */ for(i = 0; i < nb_clusters; i++) { refcount1 = get_refcount(bs, i); if (refcount1 < 0) { fprintf(stderr, \"Can't get refcount for cluster %d: %s\\n\", i, strerror(-refcount1)); res->check_errors++; continue; } refcount2 = refcount_table[i]; if (refcount1 != refcount2) { fprintf(stderr, \"%s cluster %d refcount=%d reference=%d\\n\", refcount1 < refcount2 ? \"ERROR\" : \"Leaked\", i, refcount1, refcount2); if (refcount1 < refcount2) { res->corruptions++; } else { res->leaks++; } } } qemu_free(refcount_table); return 0; }"} {"target": 1, "idx": 12004, "func": "static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val) { TCGv tmp; tmp = new_tmp(); tcg_gen_movi_i32(tmp, val); return gen_set_psr(s, mask, spsr, tmp); }"} {"target": 0, "idx": 12006, "func": "int configure_filtergraph(FilterGraph *fg) { AVFilterInOut *inputs, *outputs, *cur; int ret, i, simple = !fg->graph_desc; const char *graph_desc = simple ? 
fg->outputs[0]->ost->avfilter : fg->graph_desc; avfilter_graph_free(&fg->graph); if (!(fg->graph = avfilter_graph_alloc())) return AVERROR(ENOMEM); if (simple) { OutputStream *ost = fg->outputs[0]->ost; char args[512]; AVDictionaryEntry *e = NULL; args[0] = 0; while ((e = av_dict_get(ost->sws_dict, \"\", e, AV_DICT_IGNORE_SUFFIX))) { av_strlcatf(args, sizeof(args), \"%s=%s:\", e->key, e->value); } if (strlen(args)) args[strlen(args)-1] = 0; fg->graph->scale_sws_opts = av_strdup(args); args[0] = 0; while ((e = av_dict_get(ost->swr_opts, \"\", e, AV_DICT_IGNORE_SUFFIX))) { av_strlcatf(args, sizeof(args), \"%s=%s:\", e->key, e->value); } if (strlen(args)) args[strlen(args)-1] = 0; av_opt_set(fg->graph, \"aresample_swr_opts\", args, 0); args[0] = '\\0'; while ((e = av_dict_get(fg->outputs[0]->ost->resample_opts, \"\", e, AV_DICT_IGNORE_SUFFIX))) { av_strlcatf(args, sizeof(args), \"%s=%s:\", e->key, e->value); } if (strlen(args)) args[strlen(args) - 1] = '\\0'; fg->graph->resample_lavr_opts = av_strdup(args); e = av_dict_get(ost->encoder_opts, \"threads\", NULL, 0); if (e) av_opt_set(fg->graph, \"threads\", e->value, 0); } if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0) return ret; if (simple && (!inputs || inputs->next || !outputs || outputs->next)) { const char *num_inputs; const char *num_outputs; if (!outputs) { num_outputs = \"0\"; } else if (outputs->next) { num_outputs = \">1\"; } else { num_outputs = \"1\"; } if (!inputs) { num_inputs = \"0\"; } else if (inputs->next) { num_inputs = \">1\"; } else { num_inputs = \"1\"; } av_log(NULL, AV_LOG_ERROR, \"Simple filtergraph '%s' was expected \" \"to have exactly 1 input and 1 output.\" \" However, it had %s input(s) and %s output(s).\" \" Please adjust, or use a complex filtergraph (-filter_complex) instead.\\n\", graph_desc, num_inputs, num_outputs); return AVERROR(EINVAL); } for (cur = inputs, i = 0; cur; cur = cur->next, i++) if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) { avfilter_inout_free(&inputs); avfilter_inout_free(&outputs); return ret; } avfilter_inout_free(&inputs); for (cur = outputs, i = 0; cur; cur = cur->next, i++) configure_output_filter(fg, fg->outputs[i], cur); avfilter_inout_free(&outputs); if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0) return ret; fg->reconfiguration = 1; for (i = 0; i < fg->nb_outputs; i++) { OutputStream *ost = fg->outputs[i]->ost; if (!ost->enc) { /* identical to the same check in ffmpeg.c, needed because complex filter graphs are initialized earlier */ av_log(NULL, AV_LOG_ERROR, \"Encoder (codec %s) not found for output stream #%d:%d\\n\", avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index); return AVERROR(EINVAL); } if (ost && ost->enc->type == AVMEDIA_TYPE_AUDIO && !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) av_buffersink_set_frame_size(ost->filter->filter, ost->enc_ctx->frame_size); } return 0; }"} {"target": 0, "idx": 12030, "func": "static void vhost_ccw_scsi_realize(VirtioCcwDevice *ccw_dev, Error **errp) { VHostSCSICcw *dev = VHOST_SCSI_CCW(ccw_dev); DeviceState *vdev = DEVICE(&dev->vdev); Error *err = NULL; qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus)); object_property_set_bool(OBJECT(vdev), true, \"realized\", &err); if (err) { error_propagate(errp, err); } }"} {"target": 0, "idx": 12041, "func": "ssize_t nbd_send_request(int csock, struct nbd_request *request) { uint8_t buf[4 + 4 + 8 + 8 + 4]; cpu_to_be32w((uint32_t*)buf, NBD_REQUEST_MAGIC); cpu_to_be32w((uint32_t*)(buf + 4), request->type); 
cpu_to_be64w((uint64_t*)(buf + 8), request->handle); cpu_to_be64w((uint64_t*)(buf + 16), request->from); cpu_to_be32w((uint32_t*)(buf + 24), request->len); TRACE(\"Sending request to client: \" \"{ .from = %\" PRIu64\", .len = %u, .handle = %\" PRIu64\", .type=%i}\", request->from, request->len, request->handle, request->type); if (write_sync(csock, buf, sizeof(buf)) != sizeof(buf)) { LOG(\"writing to socket failed\"); errno = EINVAL; return -1; } return 0; }"} {"target": 0, "idx": 12049, "func": "static void dmg_close(BlockDriverState *bs) { BDRVDMGState *s = bs->opaque; close(s->fd); if(s->n_chunks>0) { free(s->types); free(s->offsets); free(s->lengths); free(s->sectors); free(s->sectorcounts); } free(s->compressed_chunk); free(s->uncompressed_chunk); inflateEnd(&s->zstream); }"} {"target": 0, "idx": 12053, "func": "static void lance_mem_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { SysBusPCNetState *d = opaque; trace_lance_mem_writew(addr, val & 0xffff); pcnet_ioport_writew(&d->state, addr, val & 0xffff); }"} {"target": 1, "idx": 12055, "func": "static int flv_write_packet(AVFormatContext *s, AVPacket *pkt) { AVIOContext *pb = s->pb; AVCodecContext *enc = s->streams[pkt->stream_index]->codec; FLVContext *flv = s->priv_data; FLVStreamContext *sc = s->streams[pkt->stream_index]->priv_data; unsigned ts; int size = pkt->size; uint8_t *data = NULL; int flags = 0, flags_size; if (enc->codec_id == AV_CODEC_ID_VP6F || enc->codec_id == AV_CODEC_ID_VP6A || enc->codec_id == AV_CODEC_ID_AAC) flags_size = 2; else if (enc->codec_id == AV_CODEC_ID_H264) flags_size = 5; else flags_size = 1; if (flv->delay == AV_NOPTS_VALUE) flv->delay = -pkt->dts; if (pkt->dts < -flv->delay) { av_log(s, AV_LOG_WARNING, \"Packets are not in the proper order with respect to DTS\\n\"); return AVERROR(EINVAL); } ts = pkt->dts + flv->delay; // add delay to force positive dts if (s->event_flags & AVSTREAM_EVENT_FLAG_METADATA_UPDATED) { write_metadata(s, ts); s->event_flags &= ~AVSTREAM_EVENT_FLAG_METADATA_UPDATED; } switch (enc->codec_type) { case AVMEDIA_TYPE_VIDEO: avio_w8(pb, FLV_TAG_TYPE_VIDEO); flags = enc->codec_tag; if (flags == 0) { av_log(s, AV_LOG_ERROR, \"video codec %X not compatible with flv\\n\", enc->codec_id); return -1; } flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER; break; case AVMEDIA_TYPE_AUDIO: flags = get_audio_flags(s, enc); assert(size); avio_w8(pb, FLV_TAG_TYPE_AUDIO); break; case AVMEDIA_TYPE_DATA: avio_w8(pb, FLV_TAG_TYPE_META); break; default: return AVERROR(EINVAL); } if (enc->codec_id == AV_CODEC_ID_H264) /* check if extradata looks like MP4 */ if (enc->extradata_size > 0 && *(uint8_t*)enc->extradata != 1) if (ff_avc_parse_nal_units_buf(pkt->data, &data, &size) < 0) return -1; /* check Speex packet duration */ if (enc->codec_id == AV_CODEC_ID_SPEEX && ts - sc->last_ts > 160) av_log(s, AV_LOG_WARNING, \"Warning: Speex stream has more than \" \"8 frames per packet. 
Adobe Flash \" \"Player cannot handle this!\\n\"); if (sc->last_ts < ts) sc->last_ts = ts; avio_wb24(pb, size + flags_size); avio_wb24(pb, ts); avio_w8(pb, (ts >> 24) & 0x7F); // timestamps are 32 bits _signed_ avio_wb24(pb, flv->reserved); if (enc->codec_type == AVMEDIA_TYPE_DATA) { int data_size; int64_t metadata_size_pos = avio_tell(pb); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, \"onTextData\"); avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY); avio_wb32(pb, 2); put_amf_string(pb, \"type\"); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, \"Text\"); put_amf_string(pb, \"text\"); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, pkt->data); put_amf_string(pb, \"\"); avio_w8(pb, AMF_END_OF_OBJECT); /* write total size of tag */ data_size = avio_tell(pb) - metadata_size_pos; avio_seek(pb, metadata_size_pos - 10, SEEK_SET); avio_wb24(pb, data_size); avio_seek(pb, data_size + 10 - 3, SEEK_CUR); avio_wb32(pb, data_size + 11); } else { avio_w8(pb,flags); if (enc->codec_id == AV_CODEC_ID_VP6F || enc->codec_id == AV_CODEC_ID_VP6A) { if (enc->extradata_size) avio_w8(pb, enc->extradata[0]); else avio_w8(pb, ((FFALIGN(enc->width, 16) - enc->width) << 4) | (FFALIGN(enc->height, 16) - enc->height)); } else if (enc->codec_id == AV_CODEC_ID_AAC) avio_w8(pb, 1); // AAC raw else if (enc->codec_id == AV_CODEC_ID_H264) { avio_w8(pb, 1); // AVC NALU avio_wb24(pb, pkt->pts - pkt->dts); } avio_write(pb, data ? data : pkt->data, size); avio_wb32(pb, size + flags_size + 11); // previous tag size flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration); } av_free(data); return pb->error; }"} {"target": 0, "idx": 12056, "func": "matroska_probe (AVProbeData *p) { uint64_t total = 0; int len_mask = 0x80, size = 1, n = 1; uint8_t probe_data[] = { 'm', 'a', 't', 'r', 'o', 's', 'k', 'a' }; if (p->buf_size < 5) return 0; /* ebml header? */ if ((p->buf[0] << 24 | p->buf[1] << 16 | p->buf[2] << 8 | p->buf[3]) != EBML_ID_HEADER) return 0; /* length of header */ total = p->buf[4]; while (size <= 8 && !(total & len_mask)) { size++; len_mask >>= 1; } if (size > 8) return 0; total &= (len_mask - 1); while (n < size) total = (total << 8) | p->buf[4 + n++]; /* does the probe data contain the whole header? */ if (p->buf_size < 4 + size + total) return 0; /* the header must contain the document type 'matroska'. For now, * we don't parse the whole header but simply check for the * availability of that array of characters inside the header. * Not fully fool-proof, but good enough. */ for (n = 4 + size; n <= 4 + size + total - sizeof(probe_data); n++) if (!memcmp (&p->buf[n], probe_data, sizeof(probe_data))) return AVPROBE_SCORE_MAX; return 0; }"} {"target": 1, "idx": 12068, "func": "static int siff_read_packet(AVFormatContext *s, AVPacket *pkt) { SIFFContext *c = s->priv_data; int size; if (c->has_video){ if (c->cur_frame >= c->frames) return AVERROR_EOF; if (c->curstrm == -1){ c->pktsize = avio_rl32(s->pb) - 4; c->flags = avio_rl16(s->pb); c->gmcsize = (c->flags & VB_HAS_GMC) ? 4 : 0; if (c->gmcsize) avio_read(s->pb, c->gmc, c->gmcsize); c->sndsize = (c->flags & VB_HAS_AUDIO) ? 
avio_rl32(s->pb): 0; c->curstrm = !!(c->flags & VB_HAS_AUDIO); } if (!c->curstrm){ size = c->pktsize - c->sndsize - c->gmcsize - 2; size = ffio_limit(s->pb, size); if(size < 0 || c->pktsize < c->sndsize) return AVERROR_INVALIDDATA; if (av_new_packet(pkt, size + c->gmcsize + 2) < 0) return AVERROR(ENOMEM); AV_WL16(pkt->data, c->flags); if (c->gmcsize) memcpy(pkt->data + 2, c->gmc, c->gmcsize); avio_read(s->pb, pkt->data + 2 + c->gmcsize, size); pkt->stream_index = 0; c->curstrm = -1; }else{ if ((size = av_get_packet(s->pb, pkt, c->sndsize - 4)) < 0) return AVERROR(EIO); pkt->stream_index = 1; pkt->duration = size; c->curstrm = 0; } if(!c->cur_frame || c->curstrm) pkt->flags |= AV_PKT_FLAG_KEY; if (c->curstrm == -1) c->cur_frame++; }else{ size = av_get_packet(s->pb, pkt, c->block_align); if(!size) return AVERROR_EOF; if(size < 0) return AVERROR(EIO); pkt->duration = size; } return pkt->size; }"} {"target": 1, "idx": 12082, "func": "static void dequantization_float(int x, int y, Jpeg2000Cblk *cblk, Jpeg2000Component *comp, Jpeg2000T1Context *t1, Jpeg2000Band *band) { int i, j, idx; float *datap = &comp->data[(comp->coord[0][1] - comp->coord[0][0]) * y + x]; for (j = 0; j < (cblk->coord[1][1] - cblk->coord[1][0]); ++j) for (i = 0; i < (cblk->coord[0][1] - cblk->coord[0][0]); ++i) { idx = (comp->coord[0][1] - comp->coord[0][0]) * j + i; datap[idx] = (float)(t1->data[j][i]) * band->f_stepsize; } }"} {"target": 1, "idx": 12093, "func": "device_init(usb_host_register_devices) USBDevice *usb_host_device_open(const char *devname) { struct USBAutoFilter filter = { 0, 0, 0, 0 }; USBDevice *dev; USBHostDevice *s; char *p; dev = usb_create(NULL /* FIXME */, \"USB Host Device\"); s = DO_UPCAST(USBHostDevice, dev, dev); if (strstr(devname, \"auto:\")) { if (parse_filter(devname+5, &filter) < 0) goto fail; } else { if ((p = strchr(devname, '.'))) { filter.bus_num = strtoul(devname, NULL, 0); filter.addr = strtoul(devname, NULL, 0); } else if ((p = strchr(devname, ':'))) { filter.vendor_id = strtoul(devname, NULL, 16); filter.product_id = strtoul(devname, NULL, 16); } else { goto fail; } } qdev_prop_set_uint32(&dev->qdev, \"bus\", filter.bus_num); qdev_prop_set_uint32(&dev->qdev, \"addr\", filter.addr); qdev_prop_set_uint32(&dev->qdev, \"vendorid\", filter.vendor_id); qdev_prop_set_uint32(&dev->qdev, \"productid\", filter.product_id); qdev_init(&dev->qdev); return dev; fail: qdev_free(&dev->qdev); return NULL; }"} {"target": 1, "idx": 12094, "func": "void qemu_error_internal(const char *file, int linenr, const char *func, const char *fmt, ...) 
{ va_list va; QError *qerror; assert(qemu_error_sink != NULL); va_start(va, fmt); qerror = qerror_from_info(file, linenr, func, fmt, &va); va_end(va); switch (qemu_error_sink->dest) { case ERR_SINK_FILE: qerror_print(qerror); QDECREF(qerror); break; case ERR_SINK_MONITOR: assert(qemu_error_sink->mon->error == NULL); qemu_error_sink->mon->error = qerror; break; } }"} {"target": 0, "idx": 12098, "func": "av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) { dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_c; dsp->vc1_inv_trans_4x8 = vc1_inv_trans_4x8_c; dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_c; dsp->vc1_inv_trans_4x4 = vc1_inv_trans_4x4_c; dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_c; dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_c; dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_c; dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_c; dsp->vc1_h_overlap = vc1_h_overlap_c; dsp->vc1_v_overlap = vc1_v_overlap_c; dsp->vc1_v_loop_filter4 = vc1_v_loop_filter4_c; dsp->vc1_h_loop_filter4 = vc1_h_loop_filter4_c; dsp->vc1_v_loop_filter8 = vc1_v_loop_filter8_c; dsp->vc1_h_loop_filter8 = vc1_h_loop_filter8_c; dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_c; dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_c; dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_pixels8x8_c; dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_c; dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_c; dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_c; dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_c; dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_c; dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_c; dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_c; dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_c; dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_c; dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_c; dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_c; dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_c; dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_c; dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_c; dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_c; dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_pixels8x8_c; dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_c; dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_c; dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_c; dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_c; dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_c; dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_c; dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_c; dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_c; dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_c; dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_c; dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_c; dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_c; dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_c; dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_c; dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_c; dsp->put_no_rnd_vc1_chroma_pixels_tab[0]= put_no_rnd_vc1_chroma_mc8_c; dsp->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_no_rnd_vc1_chroma_mc8_c; if (HAVE_ALTIVEC) ff_vc1dsp_init_altivec(dsp); if (HAVE_MMX) ff_vc1dsp_init_mmx(dsp); }"} {"target": 0, "idx": 12101, "func": "static void intra_predict_vert_8x8_msa(uint8_t *src, uint8_t *dst, int32_t dst_stride) { uint32_t row; uint32_t src_data1, src_data2; src_data1 = LW(src); src_data2 = LW(src + 4); for (row = 8; row--;) { SW(src_data1, 
dst); SW(src_data2, (dst + 4)); dst += dst_stride; } }"} {"target": 0, "idx": 12105, "func": "static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index, uint64_t *buid, hwaddr *pio, hwaddr *mmio, unsigned n_dma, uint32_t *liobns, Error **errp) { const uint64_t base_buid = 0x800000020000000ULL; const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */ const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */ const hwaddr pio_offset = 0x80000000; /* 2 GiB */ const uint32_t max_index = 255; const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */ uint64_t ram_top = MACHINE(spapr)->ram_size; hwaddr phb0_base, phb_base; int i; /* Do we have hotpluggable memory? */ if (MACHINE(spapr)->maxram_size > ram_top) { /* Can't just use maxram_size, because there may be an * alignment gap between normal and hotpluggable memory * regions */ ram_top = spapr->hotplug_memory.base + memory_region_size(&spapr->hotplug_memory.mr); } phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment); if (index > max_index) { error_setg(errp, \"\\\"index\\\" for PAPR PHB is too large (max %u)\", max_index); return; } *buid = base_buid + index; for (i = 0; i < n_dma; ++i) { liobns[i] = SPAPR_PCI_LIOBN(index, i); } phb_base = phb0_base + index * phb_spacing; *pio = phb_base + pio_offset; *mmio = phb_base + mmio_offset; }"} {"target": 0, "idx": 12109, "func": "int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, int64_t pos, int size) { QEMUIOVector qiov; struct iovec iov = { .iov_base = (void *) buf, .iov_len = size, }; qemu_iovec_init_external(&qiov, &iov, 1); return bdrv_writev_vmstate(bs, &qiov, pos); }"} {"target": 0, "idx": 12117, "func": "static int swr_convert_internal(struct SwrContext *s, AudioData *out, int out_count, AudioData *in , int in_count){ AudioData *postin, *midbuf, *preout; int ret/*, in_max*/; AudioData preout_tmp, midbuf_tmp; if(s->full_convert){ av_assert0(!s->resample); swri_audio_convert(s->full_convert, out, in, in_count); return out_count; } // in_max= out_count*(int64_t)s->in_sample_rate / s->out_sample_rate + resample_filter_taps; // in_count= FFMIN(in_count, in_in + 2 - s->hist_buffer_count); if((ret=swri_realloc_audio(&s->postin, in_count))<0) return ret; if(s->resample_first){ av_assert0(s->midbuf.ch_count == s->used_ch_count); if((ret=swri_realloc_audio(&s->midbuf, out_count))<0) return ret; }else{ av_assert0(s->midbuf.ch_count == s->out.ch_count); if((ret=swri_realloc_audio(&s->midbuf, in_count))<0) return ret; } if((ret=swri_realloc_audio(&s->preout, out_count))<0) return ret; postin= &s->postin; midbuf_tmp= s->midbuf; midbuf= &midbuf_tmp; preout_tmp= s->preout; preout= &preout_tmp; if(s->int_sample_fmt == s-> in_sample_fmt && s->in.planar && !s->channel_map) postin= in; if(s->resample_first ? !s->resample : !s->rematrix) midbuf= postin; if(s->resample_first ? 
!s->rematrix : !s->resample) preout= midbuf; if(s->int_sample_fmt == s->out_sample_fmt && s->out.planar && !(s->out_sample_fmt==AV_SAMPLE_FMT_S32P && (s->dither.output_sample_bits&31))){ if(preout==in){ out_count= FFMIN(out_count, in_count); //TODO check at the end if this is needed or redundant av_assert0(s->in.planar); //we only support planar internally so it has to be, we support copying non planar though copy(out, in, out_count); return out_count; } else if(preout==postin) preout= midbuf= postin= out; else if(preout==midbuf) preout= midbuf= out; else preout= out; } if(in != postin){ swri_audio_convert(s->in_convert, postin, in, in_count); } if(s->resample_first){ if(postin != midbuf) out_count= resample(s, midbuf, out_count, postin, in_count); if(midbuf != preout) swri_rematrix(s, preout, midbuf, out_count, preout==out); }else{ if(postin != midbuf) swri_rematrix(s, midbuf, postin, in_count, midbuf==out); if(midbuf != preout) out_count= resample(s, preout, out_count, midbuf, in_count); } if(preout != out && out_count){ AudioData *conv_src = preout; if(s->dither.method){ int ch; int dither_count= FFMAX(out_count, 1<<16); if (preout == in) { conv_src = &s->dither.temp; if((ret=swri_realloc_audio(&s->dither.temp, dither_count))<0) return ret; } if((ret=swri_realloc_audio(&s->dither.noise, dither_count))<0) return ret; if(ret) for(ch=0; ch<s->dither.noise.ch_count; ch++) swri_get_dither(s, s->dither.noise.ch[ch], s->dither.noise.count, 12345678913579<<ch, s->dither.noise.fmt); av_assert0(s->dither.noise.ch_count == preout->ch_count); if(s->dither.noise_pos + out_count > s->dither.noise.count) s->dither.noise_pos = 0; if (s->dither.method < SWR_DITHER_NS){ if (s->mix_2_1_simd) { int len1= out_count&~15; int off = len1 * preout->bps; if(len1) for(ch=0; ch<preout->ch_count; ch++) s->mix_2_1_simd(conv_src->ch[ch], preout->ch[ch], s->dither.noise.ch[ch] + s->dither.noise.bps * s->dither.noise_pos, s->native_simd_one, 0, 0, len1); if(out_count != len1) for(ch=0; ch<preout->ch_count; ch++) s->mix_2_1_f(conv_src->ch[ch] + off, preout->ch[ch] + off, s->dither.noise.ch[ch] + s->dither.noise.bps * s->dither.noise_pos + off + len1, s->native_one, 0, 0, out_count - len1); } else { for(ch=0; ch<preout->ch_count; ch++) s->mix_2_1_f(conv_src->ch[ch], preout->ch[ch], s->dither.noise.ch[ch] + s->dither.noise.bps * s->dither.noise_pos, s->native_one, 0, 0, out_count); } } else { switch(s->int_sample_fmt) { case AV_SAMPLE_FMT_S16P :swri_noise_shaping_int16(s, conv_src, preout, &s->dither.noise, out_count); break; case AV_SAMPLE_FMT_S32P :swri_noise_shaping_int32(s, conv_src, preout, &s->dither.noise, out_count); break; case AV_SAMPLE_FMT_FLTP :swri_noise_shaping_float(s, conv_src, preout, &s->dither.noise, out_count); break; case AV_SAMPLE_FMT_DBLP :swri_noise_shaping_double(s,conv_src, preout, &s->dither.noise, out_count); break; } } s->dither.noise_pos += out_count; } //FIXME packed doesn't need more than 1 chan here!
swri_audio_convert(s->out_convert, out, conv_src, out_count); } return out_count; }"} {"target": 0, "idx": 12121, "func": "void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD) { CPU_DoubleU farg1, farg2; uint32_t ret = 0; farg1.ll = arg1; farg2.ll = arg2; if (unlikely(float64_is_nan(farg1.d) || float64_is_nan(farg2.d))) { ret = 0x01UL; } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) { ret = 0x08UL; } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) { ret = 0x04UL; } else { ret = 0x02UL; } env->fpscr &= ~(0x0F << FPSCR_FPRF); env->fpscr |= ret << FPSCR_FPRF; env->crf[crfD] = ret; if (unlikely (ret == 0x01UL)) { if (float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d)) { /* sNaN comparison */ fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXVC); } else { /* qNaN comparison */ fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC); } } }"} {"target": 0, "idx": 12123, "func": "int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) { BDRVQcowState *s = bs->opaque; int64_t size, i, highest_cluster; int nb_clusters, refcount1, refcount2; QCowSnapshot *sn; uint16_t *refcount_table; int ret; size = bdrv_getlength(bs->file); nb_clusters = size_to_clusters(s, size); refcount_table = g_malloc0(nb_clusters * sizeof(uint16_t)); /* header */ inc_refcounts(bs, res, refcount_table, nb_clusters, 0, s->cluster_size); /* current L1 table */ ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, s->l1_table_offset, s->l1_size, 1); if (ret < 0) { goto fail; } /* snapshots */ for(i = 0; i < s->nb_snapshots; i++) { sn = s->snapshots + i; ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, sn->l1_table_offset, sn->l1_size, 0); if (ret < 0) { goto fail; } } inc_refcounts(bs, res, refcount_table, nb_clusters, s->snapshots_offset, s->snapshots_size); /* refcount data */ inc_refcounts(bs, res, refcount_table, nb_clusters, s->refcount_table_offset, s->refcount_table_size * sizeof(uint64_t)); for(i = 0; i < s->refcount_table_size; i++) { uint64_t offset, cluster; offset = s->refcount_table[i]; cluster = offset >> s->cluster_bits; /* Refcount blocks are cluster aligned */ if (offset & (s->cluster_size - 1)) { fprintf(stderr, \"ERROR refcount block %\" PRId64 \" is not \" \"cluster aligned; refcount table entry corrupted\\n\", i); res->corruptions++; continue; } if (cluster >= nb_clusters) { fprintf(stderr, \"ERROR refcount block %\" PRId64 \" is outside image\\n\", i); res->corruptions++; continue; } if (offset != 0) { inc_refcounts(bs, res, refcount_table, nb_clusters, offset, s->cluster_size); if (refcount_table[cluster] != 1) { fprintf(stderr, \"ERROR refcount block %\" PRId64 \" refcount=%d\\n\", i, refcount_table[cluster]); res->corruptions++; } } } /* compare ref counts */ for (i = 0, highest_cluster = 0; i < nb_clusters; i++) { refcount1 = get_refcount(bs, i); if (refcount1 < 0) { fprintf(stderr, \"Can't get refcount for cluster %\" PRId64 \": %s\\n\", i, strerror(-refcount1)); res->check_errors++; continue; } refcount2 = refcount_table[i]; if (refcount1 > 0 || refcount2 > 0) { highest_cluster = i; } if (refcount1 != refcount2) { /* Check if we're allowed to fix the mismatch */ int *num_fixed = NULL; if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) { num_fixed = &res->leaks_fixed; } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) { num_fixed = &res->corruptions_fixed; } fprintf(stderr, \"%s cluster %\" PRId64 \" refcount=%d reference=%d\\n\", num_fixed != NULL ? 
\"Repairing\" : refcount1 < refcount2 ? \"ERROR\" : \"Leaked\", i, refcount1, refcount2); if (num_fixed) { ret = update_refcount(bs, i << s->cluster_bits, 1, refcount2 - refcount1); if (ret >= 0) { (*num_fixed)++; continue; } } /* And if we couldn't, print an error */ if (refcount1 < refcount2) { res->corruptions++; } else { res->leaks++; } } } res->image_end_offset = (highest_cluster + 1) * s->cluster_size; ret = 0; fail: g_free(refcount_table); return ret; }"} {"target": 0, "idx": 12132, "func": "void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, Error **errp) { AioContext *old_context; AioContext *new_context; BlockDriverState *bs; bs = bdrv_find_node(node_name); if (!bs) { error_setg(errp, \"Cannot find node %s\", node_name); return; } /* If we want to allow more extreme test scenarios this guard could be * removed. For now it protects against accidents. */ if (bdrv_has_blk(bs)) { error_setg(errp, \"Node %s is in use\", node_name); return; } if (iothread->type == QTYPE_QSTRING) { IOThread *obj = iothread_by_id(iothread->u.s); if (!obj) { error_setg(errp, \"Cannot find iothread %s\", iothread->u.s); return; } new_context = iothread_get_aio_context(obj); } else { new_context = qemu_get_aio_context(); } old_context = bdrv_get_aio_context(bs); aio_context_acquire(old_context); bdrv_set_aio_context(bs, new_context); aio_context_release(old_context); }"} {"target": 0, "idx": 12137, "func": "static void test_dealloc_partial(void) { static const char text[] = \"don't leak me\"; UserDefTwo *ud2 = NULL; Error *err = NULL; /* create partial object */ { QDict *ud2_dict; QmpInputVisitor *qiv; ud2_dict = qdict_new(); qdict_put_obj(ud2_dict, \"string0\", QOBJECT(qstring_from_str(text))); qiv = qmp_input_visitor_new(QOBJECT(ud2_dict), false); visit_type_UserDefTwo(qmp_input_get_visitor(qiv), NULL, &ud2, &err); qmp_input_visitor_cleanup(qiv); QDECREF(ud2_dict); } /* verify partial success */ assert(ud2 != NULL); assert(ud2->string0 != NULL); assert(strcmp(ud2->string0, text) == 0); assert(ud2->dict1 == NULL); /* confirm & release construction error */ error_free_or_abort(&err); /* tear down partial object */ qapi_free_UserDefTwo(ud2); }"} {"target": 0, "idx": 12139, "func": "size_t tcg_code_size(void) { unsigned int i; size_t total; qemu_mutex_lock(®ion.lock); total = region.agg_size_full; for (i = 0; i < n_tcg_ctxs; i++) { const TCGContext *s = tcg_ctxs[i]; size_t size; size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer; g_assert(size <= s->code_gen_buffer_size); total += size; } qemu_mutex_unlock(®ion.lock); return total; }"} {"target": 0, "idx": 12148, "func": "static inline abi_long host_to_target_stat64(void *cpu_env, abi_ulong target_addr, struct stat *host_st) { #ifdef TARGET_ARM if (((CPUARMState *)cpu_env)->eabi) { struct target_eabi_stat64 *target_st; if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) return -TARGET_EFAULT; memset(target_st, 0, sizeof(struct target_eabi_stat64)); __put_user(host_st->st_dev, &target_st->st_dev); __put_user(host_st->st_ino, &target_st->st_ino); #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO __put_user(host_st->st_ino, &target_st->__st_ino); #endif __put_user(host_st->st_mode, &target_st->st_mode); __put_user(host_st->st_nlink, &target_st->st_nlink); __put_user(host_st->st_uid, &target_st->st_uid); __put_user(host_st->st_gid, &target_st->st_gid); __put_user(host_st->st_rdev, &target_st->st_rdev); __put_user(host_st->st_size, &target_st->st_size); __put_user(host_st->st_blksize, &target_st->st_blksize); 
__put_user(host_st->st_blocks, &target_st->st_blocks); __put_user(host_st->st_atime, &target_st->target_st_atime); __put_user(host_st->st_mtime, &target_st->target_st_mtime); __put_user(host_st->st_ctime, &target_st->target_st_ctime); unlock_user_struct(target_st, target_addr, 1); } else #endif { #if TARGET_LONG_BITS == 64 struct target_stat *target_st; #else struct target_stat64 *target_st; #endif if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) return -TARGET_EFAULT; memset(target_st, 0, sizeof(*target_st)); __put_user(host_st->st_dev, &target_st->st_dev); __put_user(host_st->st_ino, &target_st->st_ino); #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO __put_user(host_st->st_ino, &target_st->__st_ino); #endif __put_user(host_st->st_mode, &target_st->st_mode); __put_user(host_st->st_nlink, &target_st->st_nlink); __put_user(host_st->st_uid, &target_st->st_uid); __put_user(host_st->st_gid, &target_st->st_gid); __put_user(host_st->st_rdev, &target_st->st_rdev); /* XXX: better use of kernel struct */ __put_user(host_st->st_size, &target_st->st_size); __put_user(host_st->st_blksize, &target_st->st_blksize); __put_user(host_st->st_blocks, &target_st->st_blocks); __put_user(host_st->st_atime, &target_st->target_st_atime); __put_user(host_st->st_mtime, &target_st->target_st_mtime); __put_user(host_st->st_ctime, &target_st->target_st_ctime); unlock_user_struct(target_st, target_addr, 1); } return 0; }"} {"target": 1, "idx": 12166, "func": "static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm, struct image_info *info) { abi_ulong stack_base, size, error; int i; /* Create enough stack to hold everything. If we don't use * it for args, we'll use it for something else... */ size = x86_stack_size; if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) size = MAX_ARG_PAGES*TARGET_PAGE_SIZE; error = target_mmap(0, size + qemu_host_page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (error == -1) { perror(\"stk mmap\"); exit(-1); } /* we reserve one extra page at the top of the stack as guard */ target_mprotect(error + size, qemu_host_page_size, PROT_NONE); stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE; p += stack_base; for (i = 0 ; i < MAX_ARG_PAGES ; i++) { if (bprm->page[i]) { info->rss++; /* FIXME - check return value of memcpy_to_target() for failure */ memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE); free(bprm->page[i]); } stack_base += TARGET_PAGE_SIZE; } return p; }"} {"target": 1, "idx": 12194, "func": "void xen_map_cache_init(void) { unsigned long size; struct rlimit rlimit_as; mapcache = g_malloc0(sizeof (MapCache)); QTAILQ_INIT(&mapcache->locked_entries); mapcache->last_address_index = -1; getrlimit(RLIMIT_AS, &rlimit_as); if (rlimit_as.rlim_max < MCACHE_MAX_SIZE) { rlimit_as.rlim_cur = rlimit_as.rlim_max; } else { rlimit_as.rlim_cur = MCACHE_MAX_SIZE; } setrlimit(RLIMIT_AS, &rlimit_as); mapcache->max_mcache_size = rlimit_as.rlim_cur; mapcache->nr_buckets = (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) + (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >> (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)); size = mapcache->nr_buckets * sizeof (MapCacheEntry); size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1); DPRINTF(\"%s, nr_buckets = %lx size %lu\\n\", __func__, mapcache->nr_buckets, size); mapcache->entry = g_malloc0(size); }"} {"target": 1, "idx": 12203, "func": "static BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size, const char *filename) { int score_max = 0, score; BlockDriver *drv = NULL, *d; QLIST_FOREACH(d, 
&bdrv_drivers, list) { if (d->bdrv_probe) { score = d->bdrv_probe(buf, buf_size, filename); if (score > score_max) { score_max = score; drv = d; } } } return drv; }"} {"target": 1, "idx": 12211, "func": "static void do_change(const char *device, const char *target) { if (strcmp(device, \"vnc\") == 0) { do_change_vnc(target); } else { do_change_block(device, target); } }"} {"target": 1, "idx": 12212, "func": "static int asf_parse_packet(AVFormatContext *s, AVIOContext *pb, AVPacket *pkt) { ASFContext *asf = s->priv_data; ASFStream *asf_st = 0; for (;;) { int ret; if (url_feof(pb)) return AVERROR_EOF; if (asf->packet_size_left < FRAME_HEADER_SIZE) { int ret = asf->packet_size_left + asf->packet_padsize; assert(ret >= 0); /* fail safe */ avio_skip(pb, ret); asf->packet_pos = avio_tell(pb); if (asf->data_object_size != (uint64_t)-1 && (asf->packet_pos - asf->data_object_offset >= asf->data_object_size)) return AVERROR_EOF; /* Do not exceed the size of the data object */ return 1; } if (asf->packet_time_start == 0) { if (asf_read_frame_header(s, pb) < 0) { asf->packet_time_start = asf->packet_segments = 0; continue; } if (asf->stream_index < 0 || s->streams[asf->stream_index]->discard >= AVDISCARD_ALL || (!asf->packet_key_frame && (s->streams[asf->stream_index]->discard >= AVDISCARD_NONKEY || asf->streams[s->streams[asf->stream_index]->id].skip_to_key))) { asf->packet_time_start = 0; /* unhandled packet (should not happen) */ avio_skip(pb, asf->packet_frag_size); asf->packet_size_left -= asf->packet_frag_size; if (asf->stream_index < 0) av_log(s, AV_LOG_ERROR, \"ff asf skip %d (unknown stream)\\n\", asf->packet_frag_size); continue; } asf->asf_st = &asf->streams[s->streams[asf->stream_index]->id]; asf->asf_st->skip_to_key = 0; } asf_st = asf->asf_st; av_assert0(asf_st); if (asf->packet_replic_size == 1) { // frag_offset is here used as the beginning timestamp asf->packet_frag_timestamp = asf->packet_time_start; asf->packet_time_start += asf->packet_time_delta; asf_st->packet_obj_size = asf->packet_frag_size = avio_r8(pb); asf->packet_size_left--; asf->packet_multi_size--; if (asf->packet_multi_size < asf_st->packet_obj_size) { asf->packet_time_start = 0; avio_skip(pb, asf->packet_multi_size); asf->packet_size_left -= asf->packet_multi_size; continue; } asf->packet_multi_size -= asf_st->packet_obj_size; } if (asf_st->pkt.size != asf_st->packet_obj_size || // FIXME is this condition sufficient? 
asf_st->frag_offset + asf->packet_frag_size > asf_st->pkt.size) { if (asf_st->pkt.data) { av_log(s, AV_LOG_INFO, \"freeing incomplete packet size %d, new %d\\n\", asf_st->pkt.size, asf_st->packet_obj_size); asf_st->frag_offset = 0; av_free_packet(&asf_st->pkt); } /* new packet */ av_new_packet(&asf_st->pkt, asf_st->packet_obj_size); asf_st->seq = asf->packet_seq; asf_st->pkt.dts = asf->packet_frag_timestamp - asf->hdr.preroll; asf_st->pkt.stream_index = asf->stream_index; asf_st->pkt.pos = asf_st->packet_pos = asf->packet_pos; if (asf_st->pkt.data && asf_st->palette_changed) { uint8_t *pal; pal = av_packet_new_side_data(&asf_st->pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE); if (!pal) { av_log(s, AV_LOG_ERROR, \"Cannot append palette to packet\\n\"); } else { memcpy(pal, asf_st->palette, AVPALETTE_SIZE); asf_st->palette_changed = 0; } } av_dlog(asf, \"new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\\n\", asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & AV_PKT_FLAG_KEY, s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO, asf_st->packet_obj_size); if (s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO) asf->packet_key_frame = 1; if (asf->packet_key_frame) asf_st->pkt.flags |= AV_PKT_FLAG_KEY; } /* read data */ av_dlog(asf, \"READ PACKET s:%d os:%d o:%d,%d l:%d DATA:%p\\n\", s->packet_size, asf_st->pkt.size, asf->packet_frag_offset, asf_st->frag_offset, asf->packet_frag_size, asf_st->pkt.data); asf->packet_size_left -= asf->packet_frag_size; if (asf->packet_size_left < 0) continue; if (asf->packet_frag_offset >= asf_st->pkt.size || asf->packet_frag_size > asf_st->pkt.size - asf->packet_frag_offset) { av_log(s, AV_LOG_ERROR, \"packet fragment position invalid %u,%u not in %u\\n\", asf->packet_frag_offset, asf->packet_frag_size, asf_st->pkt.size); continue; } if (asf->packet_frag_offset != asf_st->frag_offset && !asf_st->pkt_clean) { memset(asf_st->pkt.data + asf_st->frag_offset, 0, asf_st->pkt.size - asf_st->frag_offset); asf_st->pkt_clean = 1; } ret = avio_read(pb, asf_st->pkt.data + asf->packet_frag_offset, asf->packet_frag_size); if (ret != asf->packet_frag_size) { if (ret < 0 || asf->packet_frag_offset + ret == 0) return ret < 0 ? ret : AVERROR_EOF; if (asf_st->ds_span > 1) { // scrambling, we can either drop it completely or fill the remainder // TODO: should we fill the whole packet instead of just the current // fragment? 
memset(asf_st->pkt.data + asf->packet_frag_offset + ret, 0, asf->packet_frag_size - ret); ret = asf->packet_frag_size; } else { // no scrambling, so we can return partial packets av_shrink_packet(&asf_st->pkt, asf->packet_frag_offset + ret); } } if (s->key && s->keylen == 20) ff_asfcrypt_dec(s->key, asf_st->pkt.data + asf->packet_frag_offset, ret); asf_st->frag_offset += ret; /* test if whole packet is read */ if (asf_st->frag_offset == asf_st->pkt.size) { // workaround for macroshit radio DVR-MS files if (s->streams[asf->stream_index]->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO && asf_st->pkt.size > 100) { int i; for (i = 0; i < asf_st->pkt.size && !asf_st->pkt.data[i]; i++) ; if (i == asf_st->pkt.size) { av_log(s, AV_LOG_DEBUG, \"discarding ms fart\\n\"); asf_st->frag_offset = 0; av_free_packet(&asf_st->pkt); continue; } } /* return packet */ if (asf_st->ds_span > 1) { if (asf_st->pkt.size != asf_st->ds_packet_size * asf_st->ds_span) { av_log(s, AV_LOG_ERROR, \"pkt.size != ds_packet_size * ds_span (%d %d %d)\\n\", asf_st->pkt.size, asf_st->ds_packet_size, asf_st->ds_span); } else { /* packet descrambling */ AVBufferRef *buf = av_buffer_alloc(asf_st->pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); if (buf) { uint8_t *newdata = buf->data; int offset = 0; memset(newdata + asf_st->pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE); while (offset < asf_st->pkt.size) { int off = offset / asf_st->ds_chunk_size; int row = off / asf_st->ds_span; int col = off % asf_st->ds_span; int idx = row + col * asf_st->ds_packet_size / asf_st->ds_chunk_size; assert(offset + asf_st->ds_chunk_size <= asf_st->pkt.size); assert(idx + 1 <= asf_st->pkt.size / asf_st->ds_chunk_size); memcpy(newdata + offset, asf_st->pkt.data + idx * asf_st->ds_chunk_size, asf_st->ds_chunk_size); offset += asf_st->ds_chunk_size; } av_buffer_unref(&asf_st->pkt.buf); asf_st->pkt.buf = buf; asf_st->pkt.data = buf->data; } } } asf_st->frag_offset = 0; *pkt = asf_st->pkt; #if FF_API_DESTRUCT_PACKET FF_DISABLE_DEPRECATION_WARNINGS asf_st->pkt.destruct = NULL; FF_ENABLE_DEPRECATION_WARNINGS #endif asf_st->pkt.buf = 0; asf_st->pkt.size = 0; asf_st->pkt.data = 0; asf_st->pkt.side_data_elems = 0; asf_st->pkt.side_data = NULL; break; // packet completed } } return 0; }"} {"target": 1, "idx": 12225, "func": "static void rc4030_unrealize(DeviceState *dev, Error **errp) { rc4030State *s = RC4030(dev); int i; timer_free(s->periodic_timer); address_space_destroy(&s->dma_as); object_unparent(OBJECT(&s->dma_tt)); object_unparent(OBJECT(&s->dma_tt_alias)); object_unparent(OBJECT(&s->dma_mr)); for (i = 0; i < MAX_TL_ENTRIES; ++i) { memory_region_del_subregion(&s->dma_mr, &s->dma_mrs[i]); object_unparent(OBJECT(&s->dma_mrs[i])); } }"} {"target": 1, "idx": 12231, "func": "static int flic_decode_frame_8BPP(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { FlicDecodeContext *s = (FlicDecodeContext *)avctx->priv_data; int stream_ptr = 0; int stream_ptr_after_color_chunk; int pixel_ptr; int palette_ptr; unsigned char palette_idx1; unsigned char palette_idx2; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j; int color_packets; int color_changes; int color_shift; unsigned char r, g, b; int lines; int compressed_lines; int starting_line; signed short line_packets; int y_ptr; signed char byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; int pixel_limit; s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if 
(avctx->reget_buffer(avctx, &s->frame) < 0) { av_log(avctx, AV_LOG_ERROR, \"reget_buffer() failed\\n\"); return -1; } pixels = s->frame.data[0]; pixel_limit = s->avctx->height * s->frame.linesize[0]; frame_size = LE_32(&buf[stream_ptr]); stream_ptr += 6; /* skip the magic number */ num_chunks = LE_16(&buf[stream_ptr]); stream_ptr += 10; /* skip padding */ frame_size -= 16; /* iterate through the chunks */ while ((frame_size > 0) && (num_chunks > 0)) { chunk_size = LE_32(&buf[stream_ptr]); stream_ptr += 4; chunk_type = LE_16(&buf[stream_ptr]); stream_ptr += 2; switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: stream_ptr_after_color_chunk = stream_ptr + chunk_size - 6; /* check special case: If this file is from the Magic Carpet * game and uses 6-bit colors even though it reports 256-color * chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during * initialization) */ if ((chunk_type == FLI_256_COLOR) && (s->fli_type != FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE)) color_shift = 0; else color_shift = 2; /* set up the palette */ color_packets = LE_16(&buf[stream_ptr]); stream_ptr += 2; palette_ptr = 0; for (i = 0; i < color_packets; i++) { /* first byte is how many colors to skip */ palette_ptr += buf[stream_ptr++]; /* next byte indicates how many entries to change */ color_changes = buf[stream_ptr++]; /* if there are 0 color changes, there are actually 256 */ if (color_changes == 0) color_changes = 256; for (j = 0; j < color_changes; j++) { unsigned int entry; /* wrap around, for good measure */ if ((unsigned)palette_ptr >= 256) palette_ptr = 0; r = buf[stream_ptr++] << color_shift; g = buf[stream_ptr++] << color_shift; b = buf[stream_ptr++] << color_shift; entry = (r << 16) | (g << 8) | b; if (s->palette[palette_ptr] != entry) s->new_palette = 1; s->palette[palette_ptr++] = entry; } } /* color chunks sometimes have weird 16-bit alignment issues; * therefore, take the hardline approach and set the stream_ptr * to the value calculated w.r.t. 
the size specified by the color * chunk header */ stream_ptr = stream_ptr_after_color_chunk; break; case FLI_DELTA: y_ptr = 0; compressed_lines = LE_16(&buf[stream_ptr]); stream_ptr += 2; while (compressed_lines > 0) { line_packets = LE_16(&buf[stream_ptr]); stream_ptr += 2; if (line_packets < 0) { line_packets = -line_packets; y_ptr += line_packets * s->frame.linesize[0]; } else { compressed_lines--; pixel_ptr = y_ptr; pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = buf[stream_ptr++]; pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = buf[stream_ptr++]; if (byte_run < 0) { byte_run = -byte_run; palette_idx1 = buf[stream_ptr++]; palette_idx2 = buf[stream_ptr++]; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++, pixel_countdown -= 2) { pixels[pixel_ptr++] = palette_idx1; pixels[pixel_ptr++] = palette_idx2; } } else { CHECK_PIXEL_PTR(byte_run * 2); for (j = 0; j < byte_run * 2; j++, pixel_countdown--) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; } } } y_ptr += s->frame.linesize[0]; } } break; case FLI_LC: /* line compressed */ starting_line = LE_16(&buf[stream_ptr]); stream_ptr += 2; y_ptr = 0; y_ptr += starting_line * s->frame.linesize[0]; compressed_lines = LE_16(&buf[stream_ptr]); stream_ptr += 2; while (compressed_lines > 0) { pixel_ptr = y_ptr; pixel_countdown = s->avctx->width; line_packets = buf[stream_ptr++]; if (line_packets > 0) { for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = buf[stream_ptr++]; pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = buf[stream_ptr++]; if (byte_run > 0) { CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; } } else { byte_run = -byte_run; palette_idx1 = buf[stream_ptr++]; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { pixels[pixel_ptr++] = palette_idx1; } } } } y_ptr += s->frame.linesize[0]; compressed_lines--; } break; case FLI_BLACK: /* set the whole frame to color 0 (which is usually black) */ memset(pixels, 0, s->frame.linesize[0] * s->avctx->height); break; case FLI_BRUN: /* Byte run compression: This chunk type only occurs in the first * FLI frame and it will update the entire frame. 
*/ y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ stream_ptr++; pixel_countdown = s->avctx->width; while (pixel_countdown > 0) { byte_run = buf[stream_ptr++]; if (byte_run > 0) { palette_idx1 = buf[stream_ptr++]; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d)\\n\", pixel_countdown); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d)\\n\", pixel_countdown); } } } y_ptr += s->frame.linesize[0]; } break; case FLI_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 > s->avctx->width * s->avctx->height) { av_log(avctx, AV_LOG_ERROR, \"In chunk FLI_COPY : source data (%d bytes) \" \\ \"bigger than image, skipping chunk\\n\", chunk_size - 6); stream_ptr += chunk_size - 6; } else { for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height; y_ptr += s->frame.linesize[0]) { memcpy(&pixels[y_ptr], &buf[stream_ptr], s->avctx->width); stream_ptr += s->avctx->width; } } break; case FLI_MINI: /* some sort of a thumbnail? disregard this chunk... */ stream_ptr += chunk_size - 6; break; default: av_log(avctx, AV_LOG_ERROR, \"Unrecognized chunk type: %d\\n\", chunk_type); break; } frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1, possibly); if it doesn't, issue a warning */ if ((stream_ptr != buf_size) && (stream_ptr != buf_size - 1)) av_log(avctx, AV_LOG_ERROR, \"Processed FLI chunk where chunk size = %d \" \\ \"and final chunk ptr = %d\\n\", buf_size, stream_ptr); /* make the palette available on the way out */ memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); if (s->new_palette) { s->frame.palette_has_changed = 1; s->new_palette = 0; } *data_size=sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; }"} {"target": 1, "idx": 12259, "func": "static ExitStatus trans_fop_dew_0e(DisasContext *ctx, uint32_t insn, const DisasInsn *di) { unsigned rt = extract32(insn, 0, 5); unsigned ra = assemble_ra64(insn); return do_fop_dew(ctx, rt, ra, di->f_dew); }"} {"target": 0, "idx": 12298, "func": "static void check_watchpoint(int offset, int len_mask, int flags) { CPUArchState *env = cpu_single_env; target_ulong pc, cs_base; TranslationBlock *tb; target_ulong vaddr; CPUWatchpoint *wp; int cpu_flags; if (env->watchpoint_hit) { /* We re-entered the check after replacing the TB. Now raise * the debug interrupt so that is will trigger after the * current instruction. 
*/ cpu_interrupt(env, CPU_INTERRUPT_DEBUG); return; } vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset; QTAILQ_FOREACH(wp, &env->watchpoints, entry) { if ((vaddr == (wp->vaddr & len_mask) || (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) { wp->flags |= BP_WATCHPOINT_HIT; if (!env->watchpoint_hit) { env->watchpoint_hit = wp; tb = tb_find_pc(env->mem_io_pc); if (!tb) { cpu_abort(env, \"check_watchpoint: could not find TB for \" \"pc=%p\", (void *)env->mem_io_pc); } cpu_restore_state(tb, env, env->mem_io_pc); tb_phys_invalidate(tb, -1); if (wp->flags & BP_STOP_BEFORE_ACCESS) { env->exception_index = EXCP_DEBUG; cpu_loop_exit(env); } else { cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); tb_gen_code(env, pc, cs_base, cpu_flags, 1); cpu_resume_from_signal(env, NULL); } } } else { wp->flags &= ~BP_WATCHPOINT_HIT; } } }"} {"target": 0, "idx": 12308, "func": "BalloonInfo *qmp_query_balloon(Error **errp) { BalloonInfo *info; if (kvm_enabled() && !kvm_has_sync_mmu()) { error_set(errp, QERR_KVM_MISSING_CAP, \"synchronous MMU\", \"balloon\"); return NULL; } info = g_malloc0(sizeof(*info)); if (qemu_balloon_status(info) == 0) { error_set(errp, QERR_DEVICE_NOT_ACTIVE, \"balloon\"); qapi_free_BalloonInfo(info); return NULL; } return info; }"} {"target": 0, "idx": 12310, "func": "static void pc_init1(MemoryRegion *system_memory, MemoryRegion *system_io, ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, int pci_enabled, int kvmclock_enabled) { int i; ram_addr_t below_4g_mem_size, above_4g_mem_size; PCIBus *pci_bus; ISABus *isa_bus; PCII440FXState *i440fx_state; int piix3_devfn = -1; qemu_irq *cpu_irq; qemu_irq *gsi; qemu_irq *i8259; qemu_irq *smi_irq; GSIState *gsi_state; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; BusState *idebus[MAX_IDE_BUS]; ISADevice *rtc_state; ISADevice *floppy; MemoryRegion *ram_memory; MemoryRegion *pci_memory; MemoryRegion *rom_memory; DeviceState *icc_bridge; FWCfgState *fw_cfg = NULL; if (xen_enabled() && xen_hvm_init() != 0) { fprintf(stderr, \"xen hardware virtual machine initialisation failed\\n\"); exit(1); } icc_bridge = qdev_create(NULL, TYPE_ICC_BRIDGE); object_property_add_child(qdev_get_machine(), \"icc-bridge\", OBJECT(icc_bridge), NULL); pc_cpus_init(cpu_model, icc_bridge); pc_acpi_init(\"acpi-dsdt.aml\"); if (kvm_enabled() && kvmclock_enabled) { kvmclock_create(); } if (ram_size >= 0xe0000000 ) { above_4g_mem_size = ram_size - 0xe0000000; below_4g_mem_size = 0xe0000000; } else { above_4g_mem_size = 0; below_4g_mem_size = ram_size; } if (pci_enabled) { pci_memory = g_new(MemoryRegion, 1); memory_region_init(pci_memory, \"pci\", INT64_MAX); rom_memory = pci_memory; } else { pci_memory = NULL; rom_memory = system_memory; } /* allocate ram and load rom/bios */ if (!xen_enabled()) { fw_cfg = pc_memory_init(system_memory, kernel_filename, kernel_cmdline, initrd_filename, below_4g_mem_size, above_4g_mem_size, rom_memory, &ram_memory); } gsi_state = g_malloc0(sizeof(*gsi_state)); if (kvm_irqchip_in_kernel()) { kvm_pc_setup_irq_routing(pci_enabled); gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state, GSI_NUM_PINS); } else { gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS); } if (pci_enabled) { pci_bus = i440fx_init(&i440fx_state, &piix3_devfn, &isa_bus, gsi, system_memory, system_io, ram_size, below_4g_mem_size, 0x100000000ULL - below_4g_mem_size, 0x100000000ULL + above_4g_mem_size, (sizeof(hwaddr) == 4 ? 
0 : ((uint64_t)1 << 62)), pci_memory, ram_memory); } else { pci_bus = NULL; i440fx_state = NULL; isa_bus = isa_bus_new(NULL, system_io); no_hpet = 1; } isa_bus_irqs(isa_bus, gsi); if (kvm_irqchip_in_kernel()) { i8259 = kvm_i8259_init(isa_bus); } else if (xen_enabled()) { i8259 = xen_interrupt_controller_init(); } else { cpu_irq = pc_allocate_cpu_irq(); i8259 = i8259_init(isa_bus, cpu_irq[0]); } for (i = 0; i < ISA_NUM_IRQS; i++) { gsi_state->i8259_irq[i] = i8259[i]; } if (pci_enabled) { ioapic_init_gsi(gsi_state, \"i440fx\"); } qdev_init_nofail(icc_bridge); pc_register_ferr_irq(gsi[13]); pc_vga_init(isa_bus, pci_enabled ? pci_bus : NULL); if (xen_enabled()) { pci_create_simple(pci_bus, -1, \"xen-platform\"); } /* init basic PC hardware */ pc_basic_device_init(isa_bus, gsi, &rtc_state, &floppy, xen_enabled()); pc_nic_init(isa_bus, pci_bus); ide_drive_get(hd, MAX_IDE_BUS); if (pci_enabled) { PCIDevice *dev; if (xen_enabled()) { dev = pci_piix3_xen_ide_init(pci_bus, hd, piix3_devfn + 1); } else { dev = pci_piix3_ide_init(pci_bus, hd, piix3_devfn + 1); } idebus[0] = qdev_get_child_bus(&dev->qdev, \"ide.0\"); idebus[1] = qdev_get_child_bus(&dev->qdev, \"ide.1\"); } else { for(i = 0; i < MAX_IDE_BUS; i++) { ISADevice *dev; dev = isa_ide_init(isa_bus, ide_iobase[i], ide_iobase2[i], ide_irq[i], hd[MAX_IDE_DEVS * i], hd[MAX_IDE_DEVS * i + 1]); idebus[i] = qdev_get_child_bus(DEVICE(dev), \"ide.0\"); } } pc_cmos_init(below_4g_mem_size, above_4g_mem_size, boot_device, floppy, idebus[0], idebus[1], rtc_state); if (pci_enabled && usb_enabled(false)) { pci_create_simple(pci_bus, piix3_devfn + 2, \"piix3-usb-uhci\"); } if (pci_enabled && acpi_enabled) { i2c_bus *smbus; smi_irq = qemu_allocate_irqs(pc_acpi_smi_interrupt, x86_env_get_cpu(first_cpu), 1); /* TODO: Populate SPD eeprom data. 
*/ smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, gsi[9], *smi_irq, kvm_enabled(), fw_cfg); smbus_eeprom_init(smbus, 8, NULL, 0); } if (pci_enabled) { pc_pci_device_init(pci_bus); } if (has_pvpanic) { pvpanic_init(isa_bus); } }"} {"target": 0, "idx": 12317, "func": "static void test_visitor_out_alternate(TestOutputVisitorData *data, const void *unused) { QObject *arg; UserDefAlternate *tmp; QDict *qdict; tmp = g_new0(UserDefAlternate, 1); tmp->type = QTYPE_QINT; tmp->u.i = 42; visit_type_UserDefAlternate(data->ov, NULL, &tmp, &error_abort); arg = visitor_get(data); g_assert(qobject_type(arg) == QTYPE_QINT); g_assert_cmpint(qint_get_int(qobject_to_qint(arg)), ==, 42); qapi_free_UserDefAlternate(tmp); visitor_reset(data); tmp = g_new0(UserDefAlternate, 1); tmp->type = QTYPE_QSTRING; tmp->u.s = g_strdup(\"hello\"); visit_type_UserDefAlternate(data->ov, NULL, &tmp, &error_abort); arg = visitor_get(data); g_assert(qobject_type(arg) == QTYPE_QSTRING); g_assert_cmpstr(qstring_get_str(qobject_to_qstring(arg)), ==, \"hello\"); qapi_free_UserDefAlternate(tmp); visitor_reset(data); tmp = g_new0(UserDefAlternate, 1); tmp->type = QTYPE_QDICT; tmp->u.udfu.integer = 1; tmp->u.udfu.string = g_strdup(\"str\"); tmp->u.udfu.enum1 = ENUM_ONE_VALUE1; tmp->u.udfu.u.value1.boolean = true; visit_type_UserDefAlternate(data->ov, NULL, &tmp, &error_abort); arg = visitor_get(data); g_assert_cmpint(qobject_type(arg), ==, QTYPE_QDICT); qdict = qobject_to_qdict(arg); g_assert_cmpint(qdict_size(qdict), ==, 4); g_assert_cmpint(qdict_get_int(qdict, \"integer\"), ==, 1); g_assert_cmpstr(qdict_get_str(qdict, \"string\"), ==, \"str\"); g_assert_cmpstr(qdict_get_str(qdict, \"enum1\"), ==, \"value1\"); g_assert_cmpint(qdict_get_bool(qdict, \"boolean\"), ==, true); qapi_free_UserDefAlternate(tmp); }"} {"target": 0, "idx": 12322, "func": "static int qcow2_discard_refcount_block(BlockDriverState *bs, uint64_t discard_block_offs) { BDRVQcow2State *s = bs->opaque; uint64_t refblock_offs = get_refblock_offset(s, discard_block_offs); uint64_t cluster_index = discard_block_offs >> s->cluster_bits; uint32_t block_index = cluster_index & (s->refcount_block_size - 1); void *refblock; int ret; assert(discard_block_offs != 0); ret = qcow2_cache_get(bs, s->refcount_block_cache, refblock_offs, &refblock); if (ret < 0) { return ret; } if (s->get_refcount(refblock, block_index) != 1) { qcow2_signal_corruption(bs, true, -1, -1, \"Invalid refcount:\" \" refblock offset %#\" PRIx64 \", reftable index %u\" \", block offset %#\" PRIx64 \", refcount %#\" PRIx64, refblock_offs, offset_to_reftable_index(s, discard_block_offs), discard_block_offs, s->get_refcount(refblock, block_index)); qcow2_cache_put(bs, s->refcount_block_cache, &refblock); return -EINVAL; } s->set_refcount(refblock, block_index, 0); qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache, refblock); qcow2_cache_put(bs, s->refcount_block_cache, &refblock); if (cluster_index < s->free_cluster_index) { s->free_cluster_index = cluster_index; } refblock = qcow2_cache_is_table_offset(bs, s->refcount_block_cache, discard_block_offs); if (refblock) { /* discard refblock from the cache if refblock is cached */ qcow2_cache_discard(bs, s->refcount_block_cache, refblock); } update_refcount_discard(bs, discard_block_offs, s->cluster_size); return 0; }"} {"target": 0, "idx": 12325, "func": "static void test_qemu_strtoul_full_correct(void) { const char *str = \"123\"; unsigned long res = 999; int err; err = qemu_strtoul(str, NULL, 0, &res); g_assert_cmpint(err, ==, 0); 
g_assert_cmpint(res, ==, 123); }"} {"target": 0, "idx": 12351, "func": "int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, const char *filename, void *logctx, unsigned int offset, unsigned int max_probe_size) { AVProbeData pd = { filename ? filename : \"\" }; uint8_t *buf = NULL; uint8_t *mime_type; int ret = 0, probe_size, buf_offset = 0; int score = 0; if (!max_probe_size) max_probe_size = PROBE_BUF_MAX; else if (max_probe_size < PROBE_BUF_MIN) { av_log(logctx, AV_LOG_ERROR, \"Specified probe size value %u cannot be < %u\\n\", max_probe_size, PROBE_BUF_MIN); return AVERROR(EINVAL); } if (offset >= max_probe_size) return AVERROR(EINVAL); #ifdef FF_API_PROBE_MIME if (pb->av_class) av_opt_get(pb, \"mime_type\", AV_OPT_SEARCH_CHILDREN, &pd.mime_type); #endif #if !FF_API_PROBE_MIME if (!*fmt && pb->av_class && av_opt_get(pb, \"mime_type\", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) { if (!av_strcasecmp(mime_type, \"audio/aacp\")) { *fmt = av_find_input_format(\"aac\"); } av_freep(&mime_type); } #endif for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt; probe_size = FFMIN(probe_size << 1, FFMAX(max_probe_size, probe_size + 1))) { score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0; /* Read probe data. */ if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0) goto fail; if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) { /* Fail if error was not end of file, otherwise, lower score. */ if (ret != AVERROR_EOF) goto fail; score = 0; ret = 0; /* error was end of file, nothing read */ } buf_offset += ret; if (buf_offset < offset) continue; pd.buf_size = buf_offset - offset; pd.buf = &buf[offset]; memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE); /* Guess file format. */ *fmt = av_probe_input_format2(&pd, 1, &score); if (*fmt) { /* This can only be true in the last iteration. */ if (score <= AVPROBE_SCORE_RETRY) { av_log(logctx, AV_LOG_WARNING, \"Format %s detected only with low score of %d, \" \"misdetection possible!\\n\", (*fmt)->name, score); } else av_log(logctx, AV_LOG_DEBUG, \"Format %s probed with size=%d and score=%d\\n\", (*fmt)->name, probe_size, score); #if 0 FILE *f = fopen(\"probestat.tmp\", \"ab\"); fprintf(f, \"probe_size:%d format:%s score:%d filename:%s\\n\", probe_size, (*fmt)->name, score, filename); fclose(f); #endif } } if (!*fmt) ret = AVERROR_INVALIDDATA; fail: /* Rewind. Reuse probe buffer to avoid seeking. */ if (ret >= 0) ret = ffio_rewind_with_probe_data(pb, &buf, buf_offset); #ifdef FF_API_PROBE_MIME av_free(pd.mime_type); #endif return ret < 0 ? ret : score; }"} {"target": 0, "idx": 12364, "func": "static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, const AVFrame *reference) { int i, mb_x, mb_y; uint8_t *data[MAX_COMPONENTS]; const uint8_t *reference_data[MAX_COMPONENTS]; int linesize[MAX_COMPONENTS]; GetBitContext mb_bitmask_gb; if (mb_bitmask) init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height); if (s->flipped && s->avctx->flags & CODEC_FLAG_EMU_EDGE) { av_log(s->avctx, AV_LOG_ERROR, \"Can not flip image with CODEC_FLAG_EMU_EDGE set!\\n\"); s->flipped = 0; } if (s->flipped && s->avctx->lowres) { av_log(s->avctx, AV_LOG_ERROR, \"Can not flip image with lowres\\n\"); s->flipped = 0; } for (i = 0; i < nb_components; i++) { int c = s->comp_index[i]; data[c] = s->picture_ptr->data[c]; reference_data[c] = reference ? 
reference->data[c] : NULL; linesize[c] = s->linesize[c]; s->coefs_finished[c] |= 1; if (s->flipped) { // picture should be flipped upside-down for this codec int offset = (linesize[c] * (s->v_scount[i] * (8 * s->mb_height - ((s->height / s->v_max) & 7)) - 1)); data[c] += offset; reference_data[c] += offset; linesize[c] *= -1; } } for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb); if (s->restart_interval && !s->restart_count) s->restart_count = s->restart_interval; if (get_bits_left(&s->gb) < 0) { av_log(s->avctx, AV_LOG_ERROR, \"overread %d\\n\", -get_bits_left(&s->gb)); return -1; } for (i = 0; i < nb_components; i++) { uint8_t *ptr; int n, h, v, x, y, c, j; int block_offset; n = s->nb_blocks[i]; c = s->comp_index[i]; h = s->h_scount[i]; v = s->v_scount[i]; x = 0; y = 0; for (j = 0; j < n; j++) { block_offset = ((linesize[c] * (v * mb_y + y) * 8) + (h * mb_x + x) * 8); if (s->interlaced && s->bottom_field) block_offset += linesize[c] >> 1; ptr = data[c] + block_offset; if (!s->progressive) { if (copy_mb) copy_block8(ptr, reference_data[c] + block_offset, linesize[c], linesize[c], 8); else { s->dsp.clear_block(s->block); if (decode_block(s, s->block, i, s->dc_index[i], s->ac_index[i], s->quant_matrixes[s->quant_index[c]]) < 0) { av_log(s->avctx, AV_LOG_ERROR, \"error y=%d x=%d\\n\", mb_y, mb_x); return -1; } s->dsp.idct_put(ptr, linesize[c], s->block); } } else { int block_idx = s->block_stride[c] * (v * mb_y + y) + (h * mb_x + x); DCTELEM *block = s->blocks[c][block_idx]; if (Ah) block[0] += get_bits1(&s->gb) * s->quant_matrixes[s->quant_index[c]][0] << Al; else if (decode_dc_progressive(s, block, i, s->dc_index[i], s->quant_matrixes[s->quant_index[c]], Al) < 0) { av_log(s->avctx, AV_LOG_ERROR, \"error y=%d x=%d\\n\", mb_y, mb_x); return -1; } } // av_log(s->avctx, AV_LOG_DEBUG, \"mb: %d %d processed\\n\", // mb_y, mb_x); // av_log(NULL, AV_LOG_DEBUG, \"%d %d %d %d %d %d %d %d \\n\", // mb_x, mb_y, x, y, c, s->bottom_field, // (v * mb_y + y) * 8, (h * mb_x + x) * 8); if (++x == h) { x = 0; y++; } } } if (s->restart_interval) { s->restart_count--; if(s->restart_count == 0 && s->avctx->codec_id == CODEC_ID_THP){ align_get_bits(&s->gb); for (i = 0; i < nb_components; i++) /* reset dc */ s->last_dc[i] = 1024; } i = 8 + ((-get_bits_count(&s->gb)) & 7); /* skip RSTn */ if (show_bits(&s->gb, i) == (1 << i) - 1) { int pos = get_bits_count(&s->gb); align_get_bits(&s->gb); while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF) skip_bits(&s->gb, 8); if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) { for (i = 0; i < nb_components; i++) /* reset dc */ s->last_dc[i] = 1024; } else skip_bits_long(&s->gb, pos - get_bits_count(&s->gb)); } } } } return 0; }"} {"target": 0, "idx": 12373, "func": "static void arm_cpu_register_types(void) { int i; type_register_static(&arm_cpu_type_info); for (i = 0; i < ARRAY_SIZE(arm_cpus); i++) { cpu_register(&arm_cpus[i]); } }"} {"target": 0, "idx": 12375, "func": "static void gpio_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct gpio_state_t *s = opaque; D(printf(\"%s %x=%x\\n\", __func__, addr, (unsigned)value)); addr >>= 2; switch (addr) { case RW_PA_DOUT: /* Decode nand pins. */ s->nand->ale = !!(value & (1 << 6)); s->nand->cle = !!(value & (1 << 5)); s->nand->ce = !!(value & (1 << 4)); s->regs[addr] = value; break; case RW_PD_DOUT: /* Temp sensor clk. 
*/ if ((s->regs[addr] ^ value) & 2) tempsensor_clkedge(&s->tempsensor, !!(value & 2), !!(value & 16)); s->regs[addr] = value; break; default: s->regs[addr] = value; break; } }"} {"target": 0, "idx": 12379, "func": "static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size) { XilinxAXIEnet *s = qemu_get_nic_opaque(nc); static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; static const unsigned char sa_ipmcast[3] = {0x01, 0x00, 0x52}; uint32_t app[6] = {0}; int promisc = s->fmi & (1 << 31); int unicast, broadcast, multicast, ip_multicast = 0; uint32_t csum32; uint16_t csum16; int i; DENET(qemu_log(\"%s: %zd bytes\\n\", __func__, size)); unicast = ~buf[0] & 0x1; broadcast = memcmp(buf, sa_bcast, 6) == 0; multicast = !unicast && !broadcast; if (multicast && (memcmp(sa_ipmcast, buf, sizeof sa_ipmcast) == 0)) { ip_multicast = 1; } /* Jumbo or vlan sizes ? */ if (!(s->rcw[1] & RCW1_JUM)) { if (size > 1518 && size <= 1522 && !(s->rcw[1] & RCW1_VLAN)) { return size; } } /* Basic Address filters. If you want to use the extended filters you'll generally have to place the ethernet mac into promiscuous mode to avoid the basic filtering from dropping most frames. */ if (!promisc) { if (unicast) { if (!enet_match_addr(buf, s->uaw[0], s->uaw[1])) { return size; } } else { if (broadcast) { /* Broadcast. */ if (s->regs[R_RAF] & RAF_BCAST_REJ) { return size; } } else { int drop = 1; /* Multicast. */ if (s->regs[R_RAF] & RAF_MCAST_REJ) { return size; } for (i = 0; i < 4; i++) { if (enet_match_addr(buf, s->maddr[i][0], s->maddr[i][1])) { drop = 0; break; } } if (drop) { return size; } } } } /* Extended mcast filtering enabled? */ if (axienet_newfunc_enabled(s) && axienet_extmcf_enabled(s)) { if (unicast) { if (!enet_match_addr(buf, s->ext_uaw[0], s->ext_uaw[1])) { return size; } } else { if (broadcast) { /* Broadcast. ??? */ if (s->regs[R_RAF] & RAF_BCAST_REJ) { return size; } } else { int idx, bit; /* Multicast. */ if (!memcmp(buf, sa_ipmcast, 3)) { return size; } idx = (buf[4] & 0x7f) << 8; idx |= buf[5]; bit = 1 << (idx & 0x1f); idx >>= 5; if (!(s->ext_mtable[idx] & bit)) { return size; } } } } if (size < 12) { s->regs[R_IS] |= IS_RX_REJECT; enet_update_irq(s); return -1; } if (size > (s->c_rxmem - 4)) { size = s->c_rxmem - 4; } memcpy(s->rxmem, buf, size); memset(s->rxmem + size, 0, 4); /* Clear the FCS. */ if (s->rcw[1] & RCW1_FCS) { size += 4; /* fcs is inband. */ } app[0] = 5 << 28; csum32 = net_checksum_add(size - 14, (uint8_t *)s->rxmem + 14); /* Fold it once. */ csum32 = (csum32 & 0xffff) + (csum32 >> 16); /* And twice to get rid of possible carries. */ csum16 = (csum32 & 0xffff) + (csum32 >> 16); app[3] = csum16; app[4] = size & 0xffff; s->stats.rx_bytes += size; s->stats.rx++; if (multicast) { s->stats.rx_mcast++; app[2] |= 1 | (ip_multicast << 1); } else if (broadcast) { s->stats.rx_bcast++; app[2] |= 1 << 3; } /* Good frame. 
*/ app[2] |= 1 << 6; s->rxsize = size; s->rxpos = 0; s->rxapp = g_memdup(app, sizeof(app)); axienet_eth_rx_notify(s); enet_update_irq(s); return size; }"} {"target": 0, "idx": 12386, "func": "static int get_physical_address_code(CPUState *env, target_phys_addr_t *physical, int *prot, target_ulong address, int mmu_idx) { unsigned int i; uint64_t context; int is_user = (mmu_idx == MMU_USER_IDX || mmu_idx == MMU_USER_SECONDARY_IDX); if ((env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0) { /* IMMU disabled */ *physical = ultrasparc_truncate_physical(address); *prot = PAGE_EXEC; return 0; } if (env->tl == 0) { /* PRIMARY context */ context = env->dmmu.mmu_primary_context & 0x1fff; } else { /* NUCLEUS context */ context = 0; } for (i = 0; i < 64; i++) { // ctx match, vaddr match, valid? if (ultrasparc_tag_match(&env->itlb[i], address, context, physical)) { // access ok? if ((env->itlb[i].tte & 0x4) && is_user) { if (env->immu.sfsr) /* Fault status register */ env->immu.sfsr = 2; /* overflow (not read before another fault) */ env->immu.sfsr |= (is_user << 3) | 1; env->exception_index = TT_TFAULT; #ifdef DEBUG_MMU printf(\"TFAULT at 0x%\" PRIx64 \"\\n\", address); #endif return 1; } *prot = PAGE_EXEC; TTE_SET_USED(env->itlb[i].tte); return 0; } } #ifdef DEBUG_MMU printf(\"TMISS at 0x%\" PRIx64 \"\\n\", address); #endif /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ env->immu.tag_access = (address & ~0x1fffULL) | context; env->exception_index = TT_TMISS; return 1; }"} {"target": 0, "idx": 12396, "func": "static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, uint8_t *dest, long dstW, long dstY) { long dummy=0; switch(c->dstFormat) { #ifdef HAVE_MMX case IMGFMT_BGR32: { asm volatile( YSCALEYUV2RGBX WRITEBGR32(%4, %5, %%REGa) :: \"r\" (&c->redDither), \"m\" (dummy), \"m\" (dummy), \"m\" (dummy), \"r\" (dest), \"m\" (dstW) : \"%\"REG_a, \"%\"REG_d, \"%\"REG_S ); } break; case IMGFMT_BGR24: { asm volatile( YSCALEYUV2RGBX \"lea (%%\"REG_a\", %%\"REG_a\", 2), %%\"REG_b\"\\n\\t\" //FIXME optimize \"add %4, %%\"REG_b\" \\n\\t\" WRITEBGR24(%%REGb, %5, %%REGa) :: \"r\" (&c->redDither), \"m\" (dummy), \"m\" (dummy), \"m\" (dummy), \"r\" (dest), \"m\" (dstW) : \"%\"REG_a, \"%\"REG_b, \"%\"REG_d, \"%\"REG_S //FIXME ebx ); } break; case IMGFMT_BGR15: { asm volatile( YSCALEYUV2RGBX /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP \"paddusb \"MANGLE(b5Dither)\", %%mm2\\n\\t\" \"paddusb \"MANGLE(g5Dither)\", %%mm4\\n\\t\" \"paddusb \"MANGLE(r5Dither)\", %%mm5\\n\\t\" #endif WRITEBGR15(%4, %5, %%REGa) :: \"r\" (&c->redDither), \"m\" (dummy), \"m\" (dummy), \"m\" (dummy), \"r\" (dest), \"m\" (dstW) : \"%\"REG_a, \"%\"REG_d, \"%\"REG_S ); } break; case IMGFMT_BGR16: { asm volatile( YSCALEYUV2RGBX /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP \"paddusb \"MANGLE(b5Dither)\", %%mm2\\n\\t\" \"paddusb \"MANGLE(g6Dither)\", %%mm4\\n\\t\" \"paddusb \"MANGLE(r5Dither)\", %%mm5\\n\\t\" #endif WRITEBGR16(%4, %5, %%REGa) :: \"r\" (&c->redDither), \"m\" (dummy), \"m\" (dummy), \"m\" (dummy), \"r\" (dest), \"m\" (dstW) : \"%\"REG_a, \"%\"REG_d, \"%\"REG_S ); } break; case IMGFMT_YUY2: { asm volatile( YSCALEYUV2PACKEDX /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ \"psraw $3, %%mm3 \\n\\t\" \"psraw $3, %%mm4 \\n\\t\" \"psraw $3, %%mm1 \\n\\t\" \"psraw $3, %%mm7 \\n\\t\" WRITEYUY2(%4, %5, %%REGa) :: \"r\" (&c->redDither), \"m\" (dummy), \"m\" (dummy), \"m\" (dummy), \"r\" (dest), \"m\" 
(dstW) : \"%\"REG_a, \"%\"REG_d, \"%\"REG_S ); } break; #endif default: #ifdef HAVE_ALTIVEC /* The following list of supported dstFormat values should match what's found in the body of altivec_yuv2packedX() */ if(c->dstFormat==IMGFMT_ABGR || c->dstFormat==IMGFMT_BGRA || c->dstFormat==IMGFMT_BGR24 || c->dstFormat==IMGFMT_RGB24 || c->dstFormat==IMGFMT_RGBA || c->dstFormat==IMGFMT_ARGB) altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize, chrFilter, chrSrc, chrFilterSize, dest, dstW, dstY); else #endif yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize, chrFilter, chrSrc, chrFilterSize, dest, dstW, dstY); break; } }"} {"target": 0, "idx": 12397, "func": "static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc) { int32_t *filterPos = c->hLumFilterPos; int16_t *filter = c->hLumFilter; void *mmx2FilterCode= c->lumMmx2FilterCode; int i; #if defined(PIC) DECLARE_ALIGNED(8, uint64_t, ebxsave); #endif __asm__ volatile( #if defined(PIC) \"mov %%\"REG_b\", %5 \\n\\t\" #endif \"pxor %%mm7, %%mm7 \\n\\t\" \"mov %0, %%\"REG_c\" \\n\\t\" \"mov %1, %%\"REG_D\" \\n\\t\" \"mov %2, %%\"REG_d\" \\n\\t\" \"mov %3, %%\"REG_b\" \\n\\t\" \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\" // i PREFETCH\" (%%\"REG_c\") \\n\\t\" PREFETCH\" 32(%%\"REG_c\") \\n\\t\" PREFETCH\" 64(%%\"REG_c\") \\n\\t\" #if ARCH_X86_64 #define CALL_MMX2_FILTER_CODE \\ \"movl (%%\"REG_b\"), %%esi \\n\\t\"\\ \"call *%4 \\n\\t\"\\ \"movl (%%\"REG_b\", %%\"REG_a\"), %%esi \\n\\t\"\\ \"add %%\"REG_S\", %%\"REG_c\" \\n\\t\"\\ \"add %%\"REG_a\", %%\"REG_D\" \\n\\t\"\\ \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\"\\ #else #define CALL_MMX2_FILTER_CODE \\ \"movl (%%\"REG_b\"), %%esi \\n\\t\"\\ \"call *%4 \\n\\t\"\\ \"addl (%%\"REG_b\", %%\"REG_a\"), %%\"REG_c\" \\n\\t\"\\ \"add %%\"REG_a\", %%\"REG_D\" \\n\\t\"\\ \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\"\\ #endif /* ARCH_X86_64 */ CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE #if defined(PIC) \"mov %5, %%\"REG_b\" \\n\\t\" #endif :: \"m\" (src), \"m\" (dst), \"m\" (filter), \"m\" (filterPos), \"m\" (mmx2FilterCode) #if defined(PIC) ,\"m\" (ebxsave) #endif : \"%\"REG_a, \"%\"REG_c, \"%\"REG_d, \"%\"REG_S, \"%\"REG_D #if !defined(PIC) ,\"%\"REG_b #endif ); for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128; }"} {"target": 0, "idx": 12399, "func": "int avfilter_parse_graph(AVFilterGraph *graph, const char *filters, AVFilterInOut *openLinks, AVClass *log_ctx) { int index = 0; char chr = 0; int pad = 0; AVFilterInOut *currInputs = NULL; do { AVFilterContext *filter; filters += consume_whitespace(filters); pad = parse_inputs(&filters, &currInputs, &openLinks, log_ctx); if(pad < 0) goto fail; if(!(filter = parse_filter(&filters, graph, index, log_ctx))) goto fail; if(filter->input_count == 1 && !currInputs && !index) { // First input can be ommitted if it is \"[in]\" const char *tmp = \"[in]\"; pad = parse_inputs(&tmp, &currInputs, &openLinks, log_ctx); if(pad < 0) goto fail; } if(link_filter_inouts(filter, &currInputs, &openLinks, log_ctx) < 0) goto fail; pad = parse_outputs(&filters, &currInputs, &openLinks, log_ctx); if(pad < 0) goto fail; filters += consume_whitespace(filters); chr = *filters++; if(chr == ';' && currInputs) { av_log(log_ctx, AV_LOG_ERROR, \"Could not find a output to link when parsing \\\"%s\\\"\\n\", filters - 1); goto fail; } index++; } while(chr == ',' || chr == ';'); if(openLinks 
&& !strcmp(openLinks->name, \"out\") && currInputs) { // Last output can be ommitted if it is \"[out]\" const char *tmp = \"[out]\"; if(parse_outputs(&tmp, &currInputs, &openLinks, log_ctx) < 0) goto fail; } return 0; fail: avfilter_destroy_graph(graph); free_inout(openLinks); free_inout(currInputs); return -1; }"} {"target": 0, "idx": 12418, "func": "static av_cold int qtrle_encode_init(AVCodecContext *avctx) { QtrleEncContext *s = avctx->priv_data; if (av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0) { return -1; } s->avctx=avctx; switch (avctx->pix_fmt) { case AV_PIX_FMT_RGB555BE: s->pixel_size = 2; break; case AV_PIX_FMT_RGB24: s->pixel_size = 3; break; case AV_PIX_FMT_ARGB: s->pixel_size = 4; break; default: av_log(avctx, AV_LOG_ERROR, \"Unsupported colorspace.\\n\"); break; } avctx->bits_per_coded_sample = s->pixel_size*8; s->rlecode_table = av_mallocz(s->avctx->width); s->skip_table = av_mallocz(s->avctx->width); s->length_table = av_mallocz((s->avctx->width + 1)*sizeof(int)); if (!s->skip_table || !s->length_table || !s->rlecode_table) { av_log(avctx, AV_LOG_ERROR, \"Error allocating memory.\\n\"); return -1; } if (avpicture_alloc(&s->previous_frame, avctx->pix_fmt, avctx->width, avctx->height) < 0) { av_log(avctx, AV_LOG_ERROR, \"Error allocating picture\\n\"); return -1; } s->max_buf_size = s->avctx->width*s->avctx->height*s->pixel_size*2 /* image base material */ + 15 /* header + footer */ + s->avctx->height*2 /* skip code+rle end */ + s->avctx->width/MAX_RLE_BULK + 1 /* rle codes */; avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) { qtrle_encode_end(avctx); return AVERROR(ENOMEM); } return 0; }"} {"target": 0, "idx": 12419, "func": "static inline void check_for_slice(AVSContext *h) { GetBitContext *gb = &h->s.gb; int align; if(h->mbx) return; align = (-get_bits_count(gb)) & 7; if((show_bits_long(gb,24+align) & 0xFFFFFF) == 0x000001) { skip_bits_long(gb,24+align); h->stc = get_bits(gb,8); decode_slice_header(h,gb); } }"} {"target": 1, "idx": 12426, "func": "ISADevice *isa_create_simple(const char *name) { ISADevice *dev; dev = isa_create(name); if (qdev_init(&dev->qdev) != 0) { return NULL; } return dev; }"} {"target": 0, "idx": 12440, "func": "envlist_create(void) { envlist_t *envlist; if ((envlist = malloc(sizeof (*envlist))) == NULL) return (NULL); LIST_INIT(&envlist->el_entries); envlist->el_count = 0; return (envlist); }"} {"target": 0, "idx": 12446, "func": "void qemu_fopen_ops_buffered(MigrationState *migration_state) { QEMUFileBuffered *s; s = g_malloc0(sizeof(*s)); s->migration_state = migration_state; s->xfer_limit = migration_state->bandwidth_limit / 10; s->migration_state->complete = false; s->file = qemu_fopen_ops(s, &buffered_file_ops); migration_state->file = s->file; qemu_thread_create(&s->thread, buffered_file_thread, s, QEMU_THREAD_DETACHED); }"} {"target": 0, "idx": 12485, "func": "static int nbd_negotiate_handle_export_name(NBDClient *client, uint32_t length) { int rc = -EINVAL; char name[256]; /* Client sends: [20 .. 
xx] export name (length bytes) */ TRACE(\"Checking length\"); if (length > 255) { LOG(\"Bad length received\"); goto fail; } if (nbd_negotiate_read(client->ioc, name, length) != length) { LOG(\"read failed\"); goto fail; } name[length] = '\\0'; TRACE(\"Client requested export '%s'\", name); client->exp = nbd_export_find(name); if (!client->exp) { LOG(\"export not found\"); goto fail; } QTAILQ_INSERT_TAIL(&client->exp->clients, client, next); nbd_export_get(client->exp); rc = 0; fail: return rc; }"} {"target": 1, "idx": 12518, "func": "void av_bitstream_filter_close(AVBitStreamFilterContext *bsfc){ if(bsfc->filter->close) bsfc->filter->close(bsfc); av_freep(&bsfc->priv_data); av_parser_close(bsfc->parser); av_free(bsfc); }"} {"target": 1, "idx": 12520, "func": "static void mov_text_cleanup_ftab(MovTextContext *m) { int i; for(i = 0; i < m->count_f; i++) { av_freep(&m->ftab[i]->font); av_freep(&m->ftab[i]); } av_freep(&m->ftab); }"} {"target": 1, "idx": 12522, "func": "static inline bool extended_addresses_enabled(CPUARMState *env) { return arm_feature(env, ARM_FEATURE_LPAE) && (env->cp15.c2_control & (1 << 31)); }"} {"target": 0, "idx": 12542, "func": "static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type) { const int mb_xy = sl->mb_xy; int top_xy, left_xy[LEFT_MBS]; int top_type, left_type[LEFT_MBS]; uint8_t *nnz; uint8_t *nnz_cache; top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl)); /* Wow, what a mess, why didn't they simplify the interlacing & intra * stuff, I can't imagine that these complex rules are worth it. */ left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1; if (FRAME_MBAFF(h)) { const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]); const int curr_mb_field_flag = IS_INTERLACED(mb_type); if (sl->mb_y & 1) { if (left_mb_field_flag != curr_mb_field_flag) left_xy[LTOP] -= h->mb_stride; } else { if (curr_mb_field_flag) top_xy += h->mb_stride & (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1); if (left_mb_field_flag != curr_mb_field_flag) left_xy[LBOT] += h->mb_stride; } } sl->top_mb_xy = top_xy; sl->left_mb_xy[LTOP] = left_xy[LTOP]; sl->left_mb_xy[LBOT] = left_xy[LBOT]; { /* For sufficiently low qp, filtering wouldn't do anything. * This is a conservative estimate: could also check beta_offset * and more accurate chroma_qp. 
*/ int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice int qp = h->cur_pic.qscale_table[mb_xy]; if (qp <= qp_thresh && (left_xy[LTOP] < 0 || ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) && (top_xy < 0 || ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) { if (!FRAME_MBAFF(h)) return 1; if ((left_xy[LTOP] < 0 || ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) && (top_xy < h->mb_stride || ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh)) return 1; } } top_type = h->cur_pic.mb_type[top_xy]; left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]]; left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]]; if (sl->deblocking_filter == 2) { if (h->slice_table[top_xy] != sl->slice_num) top_type = 0; if (h->slice_table[left_xy[LBOT]] != sl->slice_num) left_type[LTOP] = left_type[LBOT] = 0; } else { if (h->slice_table[top_xy] == 0xFFFF) top_type = 0; if (h->slice_table[left_xy[LBOT]] == 0xFFFF) left_type[LTOP] = left_type[LBOT] = 0; } sl->top_type = top_type; sl->left_type[LTOP] = left_type[LTOP]; sl->left_type[LBOT] = left_type[LBOT]; if (IS_INTRA(mb_type)) return 0; fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy, top_type, left_type, mb_xy, 0); if (sl->list_count == 2) fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy, top_type, left_type, mb_xy, 1); nnz = h->non_zero_count[mb_xy]; nnz_cache = sl->non_zero_count_cache; AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]); AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]); AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]); AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]); sl->cbp = h->cbp_table[mb_xy]; if (top_type) { nnz = h->non_zero_count[top_xy]; AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]); } if (left_type[LTOP]) { nnz = h->non_zero_count[left_xy[LTOP]]; nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4]; nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4]; nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4]; nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4]; } /* CAVLC 8x8dct requires NNZ values for residual decoding that differ * from what the loop filter needs */ if (!CABAC(h) && h->pps.transform_8x8_mode) { if (IS_8x8DCT(top_type)) { nnz_cache[4 + 8 * 0] = nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12; nnz_cache[6 + 8 * 0] = nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12; } if (IS_8x8DCT(left_type[LTOP])) { nnz_cache[3 + 8 * 1] = nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF } if (IS_8x8DCT(left_type[LBOT])) { nnz_cache[3 + 8 * 3] = nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF } if (IS_8x8DCT(mb_type)) { nnz_cache[scan8[0]] = nnz_cache[scan8[1]] = nnz_cache[scan8[2]] = nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12; nnz_cache[scan8[0 + 4]] = nnz_cache[scan8[1 + 4]] = nnz_cache[scan8[2 + 4]] = nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12; nnz_cache[scan8[0 + 8]] = nnz_cache[scan8[1 + 8]] = nnz_cache[scan8[2 + 8]] = nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12; nnz_cache[scan8[0 + 12]] = nnz_cache[scan8[1 + 12]] = nnz_cache[scan8[2 + 12]] = nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12; } } return 0; }"} {"target": 1, "idx": 12567, "func": "uint64_t migrate_max_downtime(void) { return max_downtime; }"} {"target": 1, "idx": 12579, "func": "static int h264_parse(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { H264ParseContext *p = s->priv_data; ParseContext *pc = 
&p->pc; int next; if (!p->got_first) { p->got_first = 1; if (avctx->extradata_size) { ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size, &p->ps, &p->is_avc, &p->nal_length_size, avctx->err_recognition, avctx); } } if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) { next = buf_size; } else { next = h264_find_frame_end(p, buf, buf_size, avctx); if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) { *poutbuf = NULL; *poutbuf_size = 0; return buf_size; } if (next < 0 && next != END_NOT_FOUND) { av_assert1(pc->last_index + next >= 0); h264_find_frame_end(p, &pc->buffer[pc->last_index + next], -next, avctx); // update state } } parse_nal_units(s, avctx, buf, buf_size); if (avctx->framerate.num) avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1})); if (p->sei.picture_timing.cpb_removal_delay >= 0) { s->dts_sync_point = p->sei.buffering_period.present; s->dts_ref_dts_delta = p->sei.picture_timing.cpb_removal_delay; s->pts_dts_delta = p->sei.picture_timing.dpb_output_delay; } else { s->dts_sync_point = INT_MIN; s->dts_ref_dts_delta = INT_MIN; s->pts_dts_delta = INT_MIN; } if (s->flags & PARSER_FLAG_ONCE) { s->flags &= PARSER_FLAG_COMPLETE_FRAMES; } if (s->dts_sync_point >= 0) { int64_t den = avctx->time_base.den * avctx->pkt_timebase.num; if (den > 0) { int64_t num = avctx->time_base.num * avctx->pkt_timebase.den; if (s->dts != AV_NOPTS_VALUE) { // got DTS from the stream, update reference timestamp p->reference_dts = s->dts - av_rescale(s->dts_ref_dts_delta, num, den); } else if (p->reference_dts != AV_NOPTS_VALUE) { // compute DTS based on reference timestamp s->dts = p->reference_dts + av_rescale(s->dts_ref_dts_delta, num, den); } if (p->reference_dts != AV_NOPTS_VALUE && s->pts == AV_NOPTS_VALUE) s->pts = s->dts + av_rescale(s->pts_dts_delta, num, den); if (s->dts_sync_point > 0) p->reference_dts = s->dts; // new reference } } *poutbuf = buf; *poutbuf_size = buf_size; return next; }"} {"target": 1, "idx": 12614, "func": "static int flv_read_packet(AVFormatContext *s, AVPacket *pkt) { FLVContext *flv = s->priv_data; int ret, i, type, size, flags; int stream_type=-1; int64_t next, pos; int64_t dts, pts = AV_NOPTS_VALUE; AVStream *st = NULL; for(;;avio_skip(s->pb, 4)){ /* pkt size is repeated at end. 
skip it */ pos = avio_tell(s->pb); type = avio_r8(s->pb); size = avio_rb24(s->pb); dts = avio_rb24(s->pb); dts |= avio_r8(s->pb) << 24; av_dlog(s, \"type:%d, size:%d, dts:%\"PRId64\"\\n\", type, size, dts); if (url_feof(s->pb)) return AVERROR_EOF; avio_skip(s->pb, 3); /* stream id, always 0 */ flags = 0; if(size == 0) continue; next= size + avio_tell(s->pb); if (type == FLV_TAG_TYPE_AUDIO) { stream_type=FLV_STREAM_TYPE_AUDIO; flags = avio_r8(s->pb); size--; } else if (type == FLV_TAG_TYPE_VIDEO) { stream_type=FLV_STREAM_TYPE_VIDEO; flags = avio_r8(s->pb); size--; if ((flags & 0xf0) == 0x50) /* video info / command frame */ goto skip; } else if (type == FLV_TAG_TYPE_META) { if (size > 13+1+4 && dts == 0) { // Header-type metadata stuff flv_read_metabody(s, next); goto skip; } else if (dts != 0) { // Script-data \"special\" metadata frames - don't skip stream_type=FLV_STREAM_TYPE_DATA; } else { goto skip; } } else { av_log(s, AV_LOG_DEBUG, \"skipping flv packet: type %d, size %d, flags %d\\n\", type, size, flags); skip: avio_seek(s->pb, next, SEEK_SET); continue; } /* skip empty data packets */ if (!size) continue; /* now find stream */ for(i=0;inb_streams;i++) { st = s->streams[i]; if (st->id == stream_type) break; } if(i == s->nb_streams){ av_log(s, AV_LOG_WARNING, \"Stream discovered after head already parsed\\n\"); st= create_stream(s, stream_type); s->ctx_flags &= ~AVFMTCTX_NOHEADER; } av_dlog(s, \"%d %X %d \\n\", stream_type, flags, st->discard); if( (st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || (stream_type == FLV_STREAM_TYPE_AUDIO))) ||(st->discard >= AVDISCARD_BIDIR && ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && (stream_type == FLV_STREAM_TYPE_VIDEO))) || st->discard >= AVDISCARD_ALL ){ avio_seek(s->pb, next, SEEK_SET); continue; } if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY) av_add_index_entry(st, pos, dts, size, 0, AVINDEX_KEYFRAME); break; } // if not streamed and no duration from metadata then seek to end to find the duration from the timestamps if(s->pb->seekable && (!s->duration || s->duration==AV_NOPTS_VALUE)){ int size; const int64_t pos= avio_tell(s->pb); const int64_t fsize= avio_size(s->pb); avio_seek(s->pb, fsize-4, SEEK_SET); size= avio_rb32(s->pb); avio_seek(s->pb, fsize-3-size, SEEK_SET); if(size == avio_rb24(s->pb) + 11){ uint32_t ts = avio_rb24(s->pb); ts |= avio_r8(s->pb) << 24; s->duration = ts * (int64_t)AV_TIME_BASE / 1000; } avio_seek(s->pb, pos, SEEK_SET); } if(stream_type == FLV_STREAM_TYPE_AUDIO){ if(!st->codec->channels || !st->codec->sample_rate || !st->codec->bits_per_coded_sample) { st->codec->channels = (flags & FLV_AUDIO_CHANNEL_MASK) == FLV_STEREO ? 2 : 1; st->codec->sample_rate = (44100 << ((flags & FLV_AUDIO_SAMPLERATE_MASK) >> FLV_AUDIO_SAMPLERATE_OFFSET) >> 3); st->codec->bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 
16 : 8; } if(!st->codec->codec_id){ flv_set_audio_codec(s, st, flags & FLV_AUDIO_CODECID_MASK); } } else if(stream_type == FLV_STREAM_TYPE_VIDEO) { size -= flv_set_video_codec(s, st, flags & FLV_VIDEO_CODECID_MASK); } if (st->codec->codec_id == CODEC_ID_AAC || st->codec->codec_id == CODEC_ID_H264 || st->codec->codec_id == CODEC_ID_MPEG4) { int type = avio_r8(s->pb); size--; if (st->codec->codec_id == CODEC_ID_H264 || st->codec->codec_id == CODEC_ID_MPEG4) { int32_t cts = (avio_rb24(s->pb)+0xff800000)^0xff800000; // sign extension pts = dts + cts; if (cts < 0) { // dts are wrong flv->wrong_dts = 1; av_log(s, AV_LOG_WARNING, \"negative cts, previous timestamps might be wrong\\n\"); } if (flv->wrong_dts) dts = AV_NOPTS_VALUE; } if (type == 0) { if ((ret = flv_get_extradata(s, st, size)) < 0) return ret; if (st->codec->codec_id == CODEC_ID_AAC) { MPEG4AudioConfig cfg; ff_mpeg4audio_get_config(&cfg, st->codec->extradata, st->codec->extradata_size); st->codec->channels = cfg.channels; if (cfg.ext_sample_rate) st->codec->sample_rate = cfg.ext_sample_rate; else st->codec->sample_rate = cfg.sample_rate; av_dlog(s, \"mp4a config channels %d sample rate %d\\n\", st->codec->channels, st->codec->sample_rate); } ret = AVERROR(EAGAIN); goto leave; } } /* skip empty data packets */ if (!size) { ret = AVERROR(EAGAIN); goto leave; } ret= av_get_packet(s->pb, pkt, size); if (ret < 0) { return AVERROR(EIO); } /* note: we need to modify the packet size here to handle the last packet */ pkt->size = ret; pkt->dts = dts; pkt->pts = pts == AV_NOPTS_VALUE ? dts : pts; pkt->stream_index = st->index; if ( stream_type == FLV_STREAM_TYPE_AUDIO || ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY) || stream_type == FLV_STREAM_TYPE_DATA) pkt->flags |= AV_PKT_FLAG_KEY; leave: avio_skip(s->pb, 4); return ret; }"} {"target": 0, "idx": 12628, "func": "static void fill_note(struct memelfnote *note, const char *name, int type, unsigned int sz, void *data) { unsigned int namesz; namesz = strlen(name) + 1; note->name = name; note->namesz = namesz; note->namesz_rounded = roundup(namesz, sizeof (int32_t)); note->type = type; note->datasz = roundup(sz, sizeof (int32_t));; note->data = data; /* * We calculate rounded up note size here as specified by * ELF document. 
*/ note->notesz = sizeof (struct elf_note) + note->namesz_rounded + note->datasz; }"} {"target": 0, "idx": 12648, "func": "int bdrv_snapshot_goto(BlockDriverState *bs, const char *snapshot_id) { BlockDriver *drv = bs->drv; int ret, open_ret; if (!drv) return -ENOMEDIUM; if (drv->bdrv_snapshot_goto) return drv->bdrv_snapshot_goto(bs, snapshot_id); if (bs->file) { drv->bdrv_close(bs); ret = bdrv_snapshot_goto(bs->file, snapshot_id); open_ret = drv->bdrv_open(bs, NULL, bs->open_flags); if (open_ret < 0) { bdrv_delete(bs->file); bs->drv = NULL; return open_ret; } return ret; } return -ENOTSUP; }"} {"target": 0, "idx": 12651, "func": "static int MPA_encode_init(AVCodecContext *avctx) { MpegAudioContext *s = avctx->priv_data; int freq = avctx->sample_rate; int bitrate = avctx->bit_rate; int channels = avctx->channels; int i, v, table; float a; if (channels > 2) return -1; bitrate = bitrate / 1000; s->nb_channels = channels; s->freq = freq; s->bit_rate = bitrate * 1000; avctx->frame_size = MPA_FRAME_SIZE; /* encoding freq */ s->lsf = 0; for(i=0;i<3;i++) { if (mpa_freq_tab[i] == freq) break; if ((mpa_freq_tab[i] / 2) == freq) { s->lsf = 1; break; } } if (i == 3){ av_log(avctx, AV_LOG_ERROR, \"Sampling rate %d is not allowed in mp2\\n\", freq); return -1; } s->freq_index = i; /* encoding bitrate & frequency */ for(i=0;i<15;i++) { if (mpa_bitrate_tab[s->lsf][1][i] == bitrate) break; } if (i == 15){ av_log(avctx, AV_LOG_ERROR, \"bitrate %d is not allowed in mp2\\n\", bitrate); return -1; } s->bitrate_index = i; /* compute total header size & pad bit */ a = (float)(bitrate * 1000 * MPA_FRAME_SIZE) / (freq * 8.0); s->frame_size = ((int)a) * 8; /* frame fractional size to compute padding */ s->frame_frac = 0; s->frame_frac_incr = (int)((a - floor(a)) * 65536.0); /* select the right allocation table */ table = l2_select_table(bitrate, s->nb_channels, freq, s->lsf); /* number of used subbands */ s->sblimit = sblimit_table[table]; s->alloc_table = alloc_tables[table]; #ifdef DEBUG av_log(avctx, AV_LOG_DEBUG, \"%d kb/s, %d Hz, frame_size=%d bits, table=%d, padincr=%x\\n\", bitrate, freq, s->frame_size, table, s->frame_frac_incr); #endif for(i=0;inb_channels;i++) s->samples_offset[i] = 0; for(i=0;i<257;i++) { int v; v = mpa_enwindow[i]; #if WFRAC_BITS != 16 v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS); #endif filter_bank[i] = v; if ((i & 63) != 0) v = -v; if (i != 0) filter_bank[512 - i] = v; } for(i=0;i<64;i++) { v = (int)(pow(2.0, (3 - i) / 3.0) * (1 << 20)); if (v <= 0) v = 1; scale_factor_table[i] = v; #ifdef USE_FLOATS scale_factor_inv_table[i] = pow(2.0, -(3 - i) / 3.0) / (float)(1 << 20); #else #define P 15 scale_factor_shift[i] = 21 - P - (i / 3); scale_factor_mult[i] = (1 << P) * pow(2.0, (i % 3) / 3.0); #endif } for(i=0;i<128;i++) { v = i - 64; if (v <= -3) v = 0; else if (v < 0) v = 1; else if (v == 0) v = 2; else if (v < 3) v = 3; else v = 4; scale_diff_table[i] = v; } for(i=0;i<17;i++) { v = quant_bits[i]; if (v < 0) v = -v; else v = v * 3; total_quant_bits[i] = 12 * v; } avctx->coded_frame= avcodec_alloc_frame(); avctx->coded_frame->key_frame= 1; return 0; }"} {"target": 0, "idx": 12664, "func": "static void lsi_execute_script(LSIState *s) { uint32_t insn; uint32_t addr, addr_high; int opcode; int insn_processed = 0; s->istat1 |= LSI_ISTAT1_SRUN; again: insn_processed++; insn = read_dword(s, s->dsp); addr = read_dword(s, s->dsp + 4); addr_high = 0; DPRINTF(\"SCRIPTS dsp=%08x opcode %08x arg %08x\\n\", s->dsp, insn, addr); s->dsps = addr; s->dcmd = insn >> 24; s->dsp += 8; switch 
(insn >> 30) { case 0: /* Block move. */ if (s->sist1 & LSI_SIST1_STO) { DPRINTF(\"Delayed select timeout\\n\"); lsi_stop_script(s); break; } s->dbc = insn & 0xffffff; s->rbc = s->dbc; if (insn & (1 << 29)) { /* Indirect addressing. */ addr = read_dword(s, addr); } else if (insn & (1 << 28)) { uint32_t buf[2]; int32_t offset; /* Table indirect addressing. */ offset = sxt24(addr); cpu_physical_memory_read(s->dsa + offset, (uint8_t *)buf, 8); /* byte count is stored in bits 0:23 only */ s->dbc = cpu_to_le32(buf[0]) & 0xffffff; s->rbc = s->dbc; addr = cpu_to_le32(buf[1]); /* 40-bit DMA, upper addr bits [39:32] stored in first DWORD of * table, bits [31:24] */ if (lsi_dma_40bit(s)) addr_high = cpu_to_le32(buf[0]) >> 24; } if ((s->sstat1 & PHASE_MASK) != ((insn >> 24) & 7)) { DPRINTF(\"Wrong phase got %d expected %d\\n\", s->sstat1 & PHASE_MASK, (insn >> 24) & 7); lsi_script_scsi_interrupt(s, LSI_SIST0_MA, 0); break; } s->dnad = addr; s->dnad64 = addr_high; /* ??? Set ESA. */ s->ia = s->dsp - 8; switch (s->sstat1 & 0x7) { case PHASE_DO: s->waiting = 2; lsi_do_dma(s, 1); if (s->waiting) s->waiting = 3; break; case PHASE_DI: s->waiting = 2; s->current_dma_len = s->dbc; lsi_do_dma(s, 0); if (s->waiting) s->waiting = 3; break; case PHASE_CMD: lsi_do_command(s); break; case PHASE_ST: lsi_do_status(s); break; case PHASE_MO: lsi_do_msgout(s); break; case PHASE_MI: lsi_do_msgin(s); break; default: BADF(\"Unimplemented phase %d\\n\", s->sstat1 & PHASE_MASK); exit(1); } s->dfifo = s->dbc & 0xff; s->ctest5 = (s->ctest5 & 0xfc) | ((s->dbc >> 8) & 3); s->sbc = s->dbc; s->rbc -= s->dbc; s->ua = addr + s->dbc; break; case 1: /* IO or Read/Write instruction. */ opcode = (insn >> 27) & 7; if (opcode < 5) { uint32_t id; if (insn & (1 << 25)) { id = read_dword(s, s->dsa + sxt24(insn)); } else { id = addr; } id = (id >> 16) & 0xf; if (insn & (1 << 26)) { addr = s->dsp + sxt24(addr); } s->dnad = addr; switch (opcode) { case 0: /* Select */ s->sdid = id; if (s->current_dma_len && (s->ssid & 0xf) == id) { DPRINTF(\"Already reselected by target %d\\n\", id); break; } s->sstat0 |= LSI_SSTAT0_WOA; s->scntl1 &= ~LSI_SCNTL1_IARB; if (id >= LSI_MAX_DEVS || !s->scsi_dev[id]) { DPRINTF(\"Selected absent target %d\\n\", id); lsi_script_scsi_interrupt(s, 0, LSI_SIST1_STO); lsi_disconnect(s); break; } DPRINTF(\"Selected target %d%s\\n\", id, insn & (1 << 3) ? \" ATN\" : \"\"); /* ??? Linux drivers compain when this is set. Maybe it only applies in low-level mode (unimplemented). lsi_script_scsi_interrupt(s, LSI_SIST0_CMP, 0); */ s->current_dev = s->scsi_dev[id]; s->current_tag = id << 8; s->scntl1 |= LSI_SCNTL1_CON; if (insn & (1 << 3)) { s->socl |= LSI_SOCL_ATN; } lsi_set_phase(s, PHASE_MO); break; case 1: /* Disconnect */ DPRINTF(\"Wait Disconect\\n\"); s->scntl1 &= ~LSI_SCNTL1_CON; break; case 2: /* Wait Reselect */ lsi_wait_reselect(s); break; case 3: /* Set */ DPRINTF(\"Set%s%s%s%s\\n\", insn & (1 << 3) ? \" ATN\" : \"\", insn & (1 << 6) ? \" ACK\" : \"\", insn & (1 << 9) ? \" TM\" : \"\", insn & (1 << 10) ? \" CC\" : \"\"); if (insn & (1 << 3)) { s->socl |= LSI_SOCL_ATN; lsi_set_phase(s, PHASE_MO); } if (insn & (1 << 9)) { BADF(\"Target mode not implemented\\n\"); exit(1); } if (insn & (1 << 10)) s->carry = 1; break; case 4: /* Clear */ DPRINTF(\"Clear%s%s%s%s\\n\", insn & (1 << 3) ? \" ATN\" : \"\", insn & (1 << 6) ? \" ACK\" : \"\", insn & (1 << 9) ? \" TM\" : \"\", insn & (1 << 10) ? 
\" CC\" : \"\"); if (insn & (1 << 3)) { s->socl &= ~LSI_SOCL_ATN; } if (insn & (1 << 10)) s->carry = 0; break; } } else { uint8_t op0; uint8_t op1; uint8_t data8; int reg; int operator; #ifdef DEBUG_LSI static const char *opcode_names[3] = {\"Write\", \"Read\", \"Read-Modify-Write\"}; static const char *operator_names[8] = {\"MOV\", \"SHL\", \"OR\", \"XOR\", \"AND\", \"SHR\", \"ADD\", \"ADC\"}; #endif reg = ((insn >> 16) & 0x7f) | (insn & 0x80); data8 = (insn >> 8) & 0xff; opcode = (insn >> 27) & 7; operator = (insn >> 24) & 7; DPRINTF(\"%s reg 0x%x %s data8=0x%02x sfbr=0x%02x%s\\n\", opcode_names[opcode - 5], reg, operator_names[operator], data8, s->sfbr, (insn & (1 << 23)) ? \" SFBR\" : \"\"); op0 = op1 = 0; switch (opcode) { case 5: /* From SFBR */ op0 = s->sfbr; op1 = data8; break; case 6: /* To SFBR */ if (operator) op0 = lsi_reg_readb(s, reg); op1 = data8; break; case 7: /* Read-modify-write */ if (operator) op0 = lsi_reg_readb(s, reg); if (insn & (1 << 23)) { op1 = s->sfbr; } else { op1 = data8; } break; } switch (operator) { case 0: /* move */ op0 = op1; break; case 1: /* Shift left */ op1 = op0 >> 7; op0 = (op0 << 1) | s->carry; s->carry = op1; break; case 2: /* OR */ op0 |= op1; break; case 3: /* XOR */ op0 ^= op1; break; case 4: /* AND */ op0 &= op1; break; case 5: /* SHR */ op1 = op0 & 1; op0 = (op0 >> 1) | (s->carry << 7); s->carry = op1; break; case 6: /* ADD */ op0 += op1; s->carry = op0 < op1; break; case 7: /* ADC */ op0 += op1 + s->carry; if (s->carry) s->carry = op0 <= op1; else s->carry = op0 < op1; break; } switch (opcode) { case 5: /* From SFBR */ case 7: /* Read-modify-write */ lsi_reg_writeb(s, reg, op0); break; case 6: /* To SFBR */ s->sfbr = op0; break; } } break; case 2: /* Transfer Control. */ { int cond; int jmp; if ((insn & 0x002e0000) == 0) { DPRINTF(\"NOP\\n\"); break; } if (s->sist1 & LSI_SIST1_STO) { DPRINTF(\"Delayed select timeout\\n\"); lsi_stop_script(s); break; } cond = jmp = (insn & (1 << 19)) != 0; if (cond == jmp && (insn & (1 << 21))) { DPRINTF(\"Compare carry %d\\n\", s->carry == jmp); cond = s->carry != 0; } if (cond == jmp && (insn & (1 << 17))) { DPRINTF(\"Compare phase %d %c= %d\\n\", (s->sstat1 & PHASE_MASK), jmp ? '=' : '!', ((insn >> 24) & 7)); cond = (s->sstat1 & PHASE_MASK) == ((insn >> 24) & 7); } if (cond == jmp && (insn & (1 << 18))) { uint8_t mask; mask = (~insn >> 8) & 0xff; DPRINTF(\"Compare data 0x%x & 0x%x %c= 0x%x\\n\", s->sfbr, mask, jmp ? '=' : '!', insn & mask); cond = (s->sfbr & mask) == (insn & mask); } if (cond == jmp) { if (insn & (1 << 23)) { /* Relative address. */ addr = s->dsp + sxt24(addr); } switch ((insn >> 27) & 7) { case 0: /* Jump */ DPRINTF(\"Jump to 0x%08x\\n\", addr); s->dsp = addr; break; case 1: /* Call */ DPRINTF(\"Call 0x%08x\\n\", addr); s->temp = s->dsp; s->dsp = addr; break; case 2: /* Return */ DPRINTF(\"Return to 0x%08x\\n\", s->temp); s->dsp = s->temp; break; case 3: /* Interrupt */ DPRINTF(\"Interrupt 0x%08x\\n\", s->dsps); if ((insn & (1 << 20)) != 0) { s->istat0 |= LSI_ISTAT0_INTF; lsi_update_irq(s); } else { lsi_script_dma_interrupt(s, LSI_DSTAT_SIR); } break; default: DPRINTF(\"Illegal transfer control\\n\"); lsi_script_dma_interrupt(s, LSI_DSTAT_IID); break; } } else { DPRINTF(\"Control condition failed\\n\"); } } break; case 3: if ((insn & (1 << 29)) == 0) { /* Memory move. */ uint32_t dest; /* ??? The docs imply the destination address is loaded into the TEMP register. However the Linux drivers rely on the value being presrved. 
*/ dest = read_dword(s, s->dsp); s->dsp += 4; lsi_memcpy(s, dest, addr, insn & 0xffffff); } else { uint8_t data[7]; int reg; int n; int i; if (insn & (1 << 28)) { addr = s->dsa + sxt24(addr); } n = (insn & 7); reg = (insn >> 16) & 0xff; if (insn & (1 << 24)) { cpu_physical_memory_read(addr, data, n); DPRINTF(\"Load reg 0x%x size %d addr 0x%08x = %08x\\n\", reg, n, addr, *(int *)data); for (i = 0; i < n; i++) { lsi_reg_writeb(s, reg + i, data[i]); } } else { DPRINTF(\"Store reg 0x%x size %d addr 0x%08x\\n\", reg, n, addr); for (i = 0; i < n; i++) { data[i] = lsi_reg_readb(s, reg + i); } cpu_physical_memory_write(addr, data, n); } } } if (insn_processed > 10000 && !s->waiting) { /* Some windows drivers make the device spin waiting for a memory location to change. If we have been executed a lot of code then assume this is the case and force an unexpected device disconnect. This is apparently sufficient to beat the drivers into submission. */ if (!(s->sien0 & LSI_SIST0_UDC)) fprintf(stderr, \"inf. loop with UDC masked\\n\"); lsi_script_scsi_interrupt(s, LSI_SIST0_UDC, 0); lsi_disconnect(s); } else if (s->istat1 & LSI_ISTAT1_SRUN && !s->waiting) { if (s->dcntl & LSI_DCNTL_SSM) { lsi_script_dma_interrupt(s, LSI_DSTAT_SSI); } else { goto again; } } DPRINTF(\"SCRIPTS execution stopped\\n\"); }"} {"target": 0, "idx": 12666, "func": "static void kqemu_update_cpuid(CPUState *env) { int critical_features_mask, features, ext_features, ext_features_mask; uint32_t eax, ebx, ecx, edx; /* the following features are kept identical on the host and target cpus because they are important for user code. Strictly speaking, only SSE really matters because the OS must support it if the user code uses it. */ critical_features_mask = CPUID_CMOV | CPUID_CX8 | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_SEP; ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR; if (!is_cpuid_supported()) { features = 0; ext_features = 0; } else { cpuid(1, eax, ebx, ecx, edx); features = edx; ext_features = ecx; } #ifdef __x86_64__ /* NOTE: on x86_64 CPUs, SYSENTER is not supported in compatibility mode, so in order to have the best performances it is better not to use it */ features &= ~CPUID_SEP; #endif env->cpuid_features = (env->cpuid_features & ~critical_features_mask) | (features & critical_features_mask); env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) | (ext_features & ext_features_mask); /* XXX: we could update more of the target CPUID state so that the non accelerated code sees exactly the same CPU features as the accelerated code */ }"} {"target": 0, "idx": 12671, "func": "static GenericList *qapi_dealloc_next_list(Visitor *v, GenericList **listp, size_t size) { GenericList *list = *listp; QapiDeallocVisitor *qov = to_qov(v); StackEntry *e = QTAILQ_FIRST(&qov->stack); if (e && e->is_list_head) { e->is_list_head = false; return list; } if (list) { list = list->next; g_free(*listp); return list; } return NULL; }"} {"target": 0, "idx": 12674, "func": "static void qbool_destroy_obj(QObject *obj) { assert(obj != NULL); g_free(qobject_to_qbool(obj)); }"} {"target": 0, "idx": 12680, "func": "static void frame_thread_free(AVCodecContext *avctx, int thread_count) { FrameThreadContext *fctx = avctx->thread_opaque; AVCodec *codec = avctx->codec; int i; park_frame_worker_threads(fctx, thread_count); if (fctx->prev_thread) update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0); fctx->die = 1; for (i = 0; i < thread_count; i++) { PerThreadContext *p = &fctx->threads[i]; 
pthread_mutex_lock(&p->mutex); pthread_cond_signal(&p->input_cond); pthread_mutex_unlock(&p->mutex); pthread_join(p->thread, NULL); if (codec->close) codec->close(p->avctx); avctx->codec = NULL; release_delayed_buffers(p); } for (i = 0; i < thread_count; i++) { PerThreadContext *p = &fctx->threads[i]; avcodec_default_free_buffers(p->avctx); pthread_mutex_destroy(&p->mutex); pthread_mutex_destroy(&p->progress_mutex); pthread_cond_destroy(&p->input_cond); pthread_cond_destroy(&p->progress_cond); pthread_cond_destroy(&p->output_cond); av_freep(&p->avpkt.data); if (i) av_freep(&p->avctx->priv_data); av_freep(&p->avctx); } av_freep(&fctx->threads); pthread_mutex_destroy(&fctx->buffer_mutex); av_freep(&avctx->thread_opaque); }"} {"target": 0, "idx": 12685, "func": "static QemuOpt *qemu_opt_find(QemuOpts *opts, const char *name) { QemuOpt *opt; TAILQ_FOREACH(opt, &opts->head, next) { if (strcmp(opt->name, name) != 0) continue; return opt; } return NULL; }"} {"target": 0, "idx": 12702, "func": "uint64_t helper_cmptun (uint64_t a, uint64_t b) { float64 fa, fb; fa = t_to_float64(a); fb = t_to_float64(b); if (float64_is_quiet_nan(fa) || float64_is_quiet_nan(fb)) return 0x4000000000000000ULL; else return 0; }"} {"target": 0, "idx": 12718, "func": "static av_cold int mpc8_decode_init(AVCodecContext * avctx) { int i; MPCContext *c = avctx->priv_data; GetBitContext gb; static int vlc_initialized = 0; static VLC_TYPE band_table[542][2]; static VLC_TYPE q1_table[520][2]; static VLC_TYPE q9up_table[524][2]; static VLC_TYPE scfi0_table[1 << MPC8_SCFI0_BITS][2]; static VLC_TYPE scfi1_table[1 << MPC8_SCFI1_BITS][2]; static VLC_TYPE dscf0_table[560][2]; static VLC_TYPE dscf1_table[598][2]; static VLC_TYPE q3_0_table[512][2]; static VLC_TYPE q3_1_table[516][2]; static VLC_TYPE codes_table[5708][2]; if(avctx->extradata_size < 2){ av_log(avctx, AV_LOG_ERROR, \"Too small extradata size (%i)!\\n\", avctx->extradata_size); return -1; } memset(c->oldDSCF, 0, sizeof(c->oldDSCF)); av_lfg_init(&c->rnd, 0xDEADBEEF); dsputil_init(&c->dsp, avctx); ff_mpc_init(); init_get_bits(&gb, avctx->extradata, 16); skip_bits(&gb, 3);//sample rate c->maxbands = get_bits(&gb, 5) + 1; skip_bits(&gb, 4);//channels c->MSS = get_bits1(&gb); c->frames = 1 << (get_bits(&gb, 3) * 2); if(vlc_initialized) return 0; av_log(avctx, AV_LOG_DEBUG, \"Initing VLC\\n\"); band_vlc.table = band_table; band_vlc.table_allocated = 542; init_vlc(&band_vlc, MPC8_BANDS_BITS, MPC8_BANDS_SIZE, mpc8_bands_bits, 1, 1, mpc8_bands_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); q1_vlc.table = q1_table; q1_vlc.table_allocated = 520; init_vlc(&q1_vlc, MPC8_Q1_BITS, MPC8_Q1_SIZE, mpc8_q1_bits, 1, 1, mpc8_q1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); q9up_vlc.table = q9up_table; q9up_vlc.table_allocated = 524; init_vlc(&q9up_vlc, MPC8_Q9UP_BITS, MPC8_Q9UP_SIZE, mpc8_q9up_bits, 1, 1, mpc8_q9up_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); scfi_vlc[0].table = scfi0_table; scfi_vlc[0].table_allocated = 1 << MPC8_SCFI0_BITS; init_vlc(&scfi_vlc[0], MPC8_SCFI0_BITS, MPC8_SCFI0_SIZE, mpc8_scfi0_bits, 1, 1, mpc8_scfi0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); scfi_vlc[1].table = scfi1_table; scfi_vlc[1].table_allocated = 1 << MPC8_SCFI1_BITS; init_vlc(&scfi_vlc[1], MPC8_SCFI1_BITS, MPC8_SCFI1_SIZE, mpc8_scfi1_bits, 1, 1, mpc8_scfi1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); dscf_vlc[0].table = dscf0_table; dscf_vlc[0].table_allocated = 560; init_vlc(&dscf_vlc[0], MPC8_DSCF0_BITS, MPC8_DSCF0_SIZE, mpc8_dscf0_bits, 1, 1, mpc8_dscf0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); dscf_vlc[1].table = dscf1_table; 
dscf_vlc[1].table_allocated = 598; init_vlc(&dscf_vlc[1], MPC8_DSCF1_BITS, MPC8_DSCF1_SIZE, mpc8_dscf1_bits, 1, 1, mpc8_dscf1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC); q3_vlc[0].table = q3_0_table; q3_vlc[0].table_allocated = 512; init_vlc_sparse(&q3_vlc[0], MPC8_Q3_BITS, MPC8_Q3_SIZE, mpc8_q3_bits, 1, 1, mpc8_q3_codes, 1, 1, mpc8_q3_syms, 1, 1, INIT_VLC_USE_NEW_STATIC); q3_vlc[1].table = q3_1_table; q3_vlc[1].table_allocated = 516; init_vlc_sparse(&q3_vlc[1], MPC8_Q4_BITS, MPC8_Q4_SIZE, mpc8_q4_bits, 1, 1, mpc8_q4_codes, 1, 1, mpc8_q4_syms, 1, 1, INIT_VLC_USE_NEW_STATIC); for(i = 0; i < 2; i++){ res_vlc[i].table = &codes_table[vlc_offsets[0+i]]; res_vlc[i].table_allocated = vlc_offsets[1+i] - vlc_offsets[0+i]; init_vlc(&res_vlc[i], MPC8_RES_BITS, MPC8_RES_SIZE, &mpc8_res_bits[i], 1, 1, &mpc8_res_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); q2_vlc[i].table = &codes_table[vlc_offsets[2+i]]; q2_vlc[i].table_allocated = vlc_offsets[3+i] - vlc_offsets[2+i]; init_vlc(&q2_vlc[i], MPC8_Q2_BITS, MPC8_Q2_SIZE, &mpc8_q2_bits[i], 1, 1, &mpc8_q2_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[0][i].table = &codes_table[vlc_offsets[4+i]]; quant_vlc[0][i].table_allocated = vlc_offsets[5+i] - vlc_offsets[4+i]; init_vlc(&quant_vlc[0][i], MPC8_Q5_BITS, MPC8_Q5_SIZE, &mpc8_q5_bits[i], 1, 1, &mpc8_q5_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[1][i].table = &codes_table[vlc_offsets[6+i]]; quant_vlc[1][i].table_allocated = vlc_offsets[7+i] - vlc_offsets[6+i]; init_vlc(&quant_vlc[1][i], MPC8_Q6_BITS, MPC8_Q6_SIZE, &mpc8_q6_bits[i], 1, 1, &mpc8_q6_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[2][i].table = &codes_table[vlc_offsets[8+i]]; quant_vlc[2][i].table_allocated = vlc_offsets[9+i] - vlc_offsets[8+i]; init_vlc(&quant_vlc[2][i], MPC8_Q7_BITS, MPC8_Q7_SIZE, &mpc8_q7_bits[i], 1, 1, &mpc8_q7_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); quant_vlc[3][i].table = &codes_table[vlc_offsets[10+i]]; quant_vlc[3][i].table_allocated = vlc_offsets[11+i] - vlc_offsets[10+i]; init_vlc(&quant_vlc[3][i], MPC8_Q8_BITS, MPC8_Q8_SIZE, &mpc8_q8_bits[i], 1, 1, &mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); } vlc_initialized = 1; avctx->sample_fmt = SAMPLE_FMT_S16; avctx->channel_layout = (avctx->channels==2) ? CH_LAYOUT_STEREO : CH_LAYOUT_MONO; return 0; }"} {"target": 0, "idx": 12727, "func": "Aml *aml_arg(int pos) { Aml *var; uint8_t op = 0x68 /* ARG0 op */ + pos; assert(pos <= 6); var = aml_opcode(op); return var; }"} {"target": 0, "idx": 12745, "func": "static int vnc_display_listen_addr(VncDisplay *vd, SocketAddressLegacy *addr, const char *name, QIOChannelSocket ***lsock, guint **lsock_tag, size_t *nlsock, Error **errp) { QIODNSResolver *resolver = qio_dns_resolver_get_instance(); SocketAddressLegacy **rawaddrs = NULL; size_t nrawaddrs = 0; Error *listenerr = NULL; bool listening = false; size_t i; if (qio_dns_resolver_lookup_sync(resolver, addr, &nrawaddrs, &rawaddrs, errp) < 0) { return -1; } for (i = 0; i < nrawaddrs; i++) { QIOChannelSocket *sioc = qio_channel_socket_new(); qio_channel_set_name(QIO_CHANNEL(sioc), name); if (qio_channel_socket_listen_sync( sioc, rawaddrs[i], listenerr == NULL ? 
&listenerr : NULL) < 0) { object_unref(OBJECT(sioc)); continue; } listening = true; (*nlsock)++; *lsock = g_renew(QIOChannelSocket *, *lsock, *nlsock); *lsock_tag = g_renew(guint, *lsock_tag, *nlsock); (*lsock)[*nlsock - 1] = sioc; (*lsock_tag)[*nlsock - 1] = 0; } for (i = 0; i < nrawaddrs; i++) { qapi_free_SocketAddressLegacy(rawaddrs[i]); } g_free(rawaddrs); if (listenerr) { if (!listening) { error_propagate(errp, listenerr); return -1; } else { error_free(listenerr); } } for (i = 0; i < *nlsock; i++) { (*lsock_tag)[i] = qio_channel_add_watch( QIO_CHANNEL((*lsock)[i]), G_IO_IN, vnc_listen_io, vd, NULL); } return 0; }"} {"target": 0, "idx": 12762, "func": "static void OPLWriteReg(FM_OPL *OPL, int r, int v) { OPL_CH *CH; int slot; int block_fnum; switch(r&0xe0) { case 0x00: /* 00-1f:control */ switch(r&0x1f) { case 0x01: /* wave selector enable */ if(OPL->type&OPL_TYPE_WAVESEL) { OPL->wavesel = v&0x20; if(!OPL->wavesel) { /* preset compatible mode */ int c; for(c=0;cmax_ch;c++) { OPL->P_CH[c].SLOT[SLOT1].wavetable = &SIN_TABLE[0]; OPL->P_CH[c].SLOT[SLOT2].wavetable = &SIN_TABLE[0]; } } } return; case 0x02: /* Timer 1 */ OPL->T[0] = (256-v)*4; break; case 0x03: /* Timer 2 */ OPL->T[1] = (256-v)*16; return; case 0x04: /* IRQ clear / mask and Timer enable */ if(v&0x80) { /* IRQ flag clear */ OPL_STATUS_RESET(OPL,0x7f); } else { /* set IRQ mask ,timer enable*/ UINT8 st1 = v&1; UINT8 st2 = (v>>1)&1; /* IRQRST,T1MSK,t2MSK,EOSMSK,BRMSK,x,ST2,ST1 */ OPL_STATUS_RESET(OPL,v&0x78); OPL_STATUSMASK_SET(OPL,((~v)&0x78)|0x01); /* timer 2 */ if(OPL->st[1] != st2) { double interval = st2 ? (double)OPL->T[1]*OPL->TimerBase : 0.0; OPL->st[1] = st2; if (OPL->TimerHandler) (OPL->TimerHandler)(OPL->TimerParam+1,interval); } /* timer 1 */ if(OPL->st[0] != st1) { double interval = st1 ? 
(double)OPL->T[0]*OPL->TimerBase : 0.0; OPL->st[0] = st1; if (OPL->TimerHandler) (OPL->TimerHandler)(OPL->TimerParam+0,interval); } } return; #if BUILD_Y8950 case 0x06: /* Key Board OUT */ if(OPL->type&OPL_TYPE_KEYBOARD) { if(OPL->keyboardhandler_w) OPL->keyboardhandler_w(OPL->keyboard_param,v); else LOG(LOG_WAR,(\"OPL:write unmapped KEYBOARD port\\n\")); } return; case 0x07: /* DELTA-T control : START,REC,MEMDATA,REPT,SPOFF,x,x,RST */ if(OPL->type&OPL_TYPE_ADPCM) YM_DELTAT_ADPCM_Write(OPL->deltat,r-0x07,v); return; case 0x08: /* MODE,DELTA-T : CSM,NOTESEL,x,x,smpl,da/ad,64k,rom */ OPL->mode = v; v&=0x1f; /* for DELTA-T unit */ case 0x09: /* START ADD */ case 0x0a: case 0x0b: /* STOP ADD */ case 0x0c: case 0x0d: /* PRESCALE */ case 0x0e: case 0x0f: /* ADPCM data */ case 0x10: /* DELTA-N */ case 0x11: /* DELTA-N */ case 0x12: /* EG-CTRL */ if(OPL->type&OPL_TYPE_ADPCM) YM_DELTAT_ADPCM_Write(OPL->deltat,r-0x07,v); return; #if 0 case 0x15: /* DAC data */ case 0x16: case 0x17: /* SHIFT */ return; case 0x18: /* I/O CTRL (Direction) */ if(OPL->type&OPL_TYPE_IO) OPL->portDirection = v&0x0f; return; case 0x19: /* I/O DATA */ if(OPL->type&OPL_TYPE_IO) { OPL->portLatch = v; if(OPL->porthandler_w) OPL->porthandler_w(OPL->port_param,v&OPL->portDirection); } return; case 0x1a: /* PCM data */ return; #endif #endif } break; case 0x20: /* am,vib,ksr,eg type,mul */ slot = slot_array[r&0x1f]; if(slot == -1) return; set_mul(OPL,slot,v); return; case 0x40: slot = slot_array[r&0x1f]; if(slot == -1) return; set_ksl_tl(OPL,slot,v); return; case 0x60: slot = slot_array[r&0x1f]; if(slot == -1) return; set_ar_dr(OPL,slot,v); return; case 0x80: slot = slot_array[r&0x1f]; if(slot == -1) return; set_sl_rr(OPL,slot,v); return; case 0xa0: switch(r) { case 0xbd: /* amsep,vibdep,r,bd,sd,tom,tc,hh */ { UINT8 rkey = OPL->rythm^v; OPL->ams_table = &AMS_TABLE[v&0x80 ? AMS_ENT : 0]; OPL->vib_table = &VIB_TABLE[v&0x40 ? 
VIB_ENT : 0]; OPL->rythm = v&0x3f; if(OPL->rythm&0x20) { #if 0 usrintf_showmessage(\"OPL Rythm mode select\"); #endif /* BD key on/off */ if(rkey&0x10) { if(v&0x10) { OPL->P_CH[6].op1_out[0] = OPL->P_CH[6].op1_out[1] = 0; OPL_KEYON(&OPL->P_CH[6].SLOT[SLOT1]); OPL_KEYON(&OPL->P_CH[6].SLOT[SLOT2]); } else { OPL_KEYOFF(&OPL->P_CH[6].SLOT[SLOT1]); OPL_KEYOFF(&OPL->P_CH[6].SLOT[SLOT2]); } } /* SD key on/off */ if(rkey&0x08) { if(v&0x08) OPL_KEYON(&OPL->P_CH[7].SLOT[SLOT2]); else OPL_KEYOFF(&OPL->P_CH[7].SLOT[SLOT2]); }/* TAM key on/off */ if(rkey&0x04) { if(v&0x04) OPL_KEYON(&OPL->P_CH[8].SLOT[SLOT1]); else OPL_KEYOFF(&OPL->P_CH[8].SLOT[SLOT1]); } /* TOP-CY key on/off */ if(rkey&0x02) { if(v&0x02) OPL_KEYON(&OPL->P_CH[8].SLOT[SLOT2]); else OPL_KEYOFF(&OPL->P_CH[8].SLOT[SLOT2]); } /* HH key on/off */ if(rkey&0x01) { if(v&0x01) OPL_KEYON(&OPL->P_CH[7].SLOT[SLOT1]); else OPL_KEYOFF(&OPL->P_CH[7].SLOT[SLOT1]); } } } return; } /* keyon,block,fnum */ if( (r&0x0f) > 8) return; CH = &OPL->P_CH[r&0x0f]; if(!(r&0x10)) { /* a0-a8 */ block_fnum = (CH->block_fnum&0x1f00) | v; } else { /* b0-b8 */ int keyon = (v>>5)&1; block_fnum = ((v&0x1f)<<8) | (CH->block_fnum&0xff); if(CH->keyon != keyon) { if( (CH->keyon=keyon) ) { CH->op1_out[0] = CH->op1_out[1] = 0; OPL_KEYON(&CH->SLOT[SLOT1]); OPL_KEYON(&CH->SLOT[SLOT2]); } else { OPL_KEYOFF(&CH->SLOT[SLOT1]); OPL_KEYOFF(&CH->SLOT[SLOT2]); } } } /* update */ if(CH->block_fnum != block_fnum) { int blockRv = 7-(block_fnum>>10); int fnum = block_fnum&0x3ff; CH->block_fnum = block_fnum; CH->ksl_base = KSL_TABLE[block_fnum>>6]; CH->fc = OPL->FN_TABLE[fnum]>>blockRv; CH->kcode = CH->block_fnum>>9; if( (OPL->mode&0x40) && CH->block_fnum&0x100) CH->kcode |=1; CALC_FCSLOT(CH,&CH->SLOT[SLOT1]); CALC_FCSLOT(CH,&CH->SLOT[SLOT2]); } return; case 0xc0: /* FB,C */ if( (r&0x0f) > 8) return; CH = &OPL->P_CH[r&0x0f]; { int feedback = (v>>1)&7; CH->FB = feedback ? 
(8+1) - feedback : 0; CH->CON = v&1; set_algorythm(CH); } return; case 0xe0: /* wave type */ slot = slot_array[r&0x1f]; if(slot == -1) return; CH = &OPL->P_CH[slot/2]; if(OPL->wavesel) { /* LOG(LOG_INF,(\"OPL SLOT %d wave select %d\\n\",slot,v&3)); */ CH->SLOT[slot&1].wavetable = &SIN_TABLE[(v&0x03)*SIN_ENT]; } return; } }"} {"target": 0, "idx": 12765, "func": "static int qemu_rbd_set_conf(rados_t cluster, const char *conf, bool only_read_conf_file, Error **errp) { char *p, *buf; char *name; char *value; Error *local_err = NULL; int ret = 0; buf = g_strdup(conf); p = buf; while (p) { name = qemu_rbd_next_tok(RBD_MAX_CONF_NAME_SIZE, p, '=', \"conf option name\", &p, &local_err); if (local_err) { break; } qemu_rbd_unescape(name); if (!p) { error_setg(errp, \"conf option %s has no value\", name); ret = -EINVAL; break; } value = qemu_rbd_next_tok(RBD_MAX_CONF_VAL_SIZE, p, ':', \"conf option value\", &p, &local_err); if (local_err) { break; } qemu_rbd_unescape(value); if (strcmp(name, \"conf\") == 0) { /* read the conf file alone, so it doesn't override more specific settings for a particular device */ if (only_read_conf_file) { ret = rados_conf_read_file(cluster, value); if (ret < 0) { error_setg_errno(errp, -ret, \"error reading conf file %s\", value); break; } } } else if (strcmp(name, \"id\") == 0) { /* ignore, this is parsed by qemu_rbd_parse_clientname() */ } else if (!only_read_conf_file) { ret = rados_conf_set(cluster, name, value); if (ret < 0) { error_setg_errno(errp, -ret, \"invalid conf option %s\", name); ret = -EINVAL; break; } } } if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; } g_free(buf); return ret; }"} {"target": 0, "idx": 12775, "func": "static void unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) { int i, j, k; int scheme; int current_macroblock; int current_fragment; int coding_mode; debug_vp3(\" vp3: unpacking encoding modes\\n\"); if (s->keyframe) { debug_vp3(\" keyframe-- all blocks are coded as INTRA\\n\"); for (i = 0; i < s->fragment_count; i++) s->all_fragments[i].coding_method = MODE_INTRA; } else { /* fetch the mode coding scheme for this frame */ scheme = get_bits(gb, 3); debug_modes(\" using mode alphabet %d\\n\", scheme); /* is it a custom coding scheme? 
*/ if (scheme == 0) { debug_modes(\" custom mode alphabet ahead:\\n\"); for (i = 0; i < 8; i++) ModeAlphabet[0][i] = get_bits(gb, 3); } for (i = 0; i < 8; i++) debug_modes(\" mode[%d][%d] = %d\\n\", scheme, i, ModeAlphabet[scheme][i]); /* iterate through all of the macroblocks that contain 1 or more * coded fragments */ for (i = 0; i < s->u_superblock_start; i++) { for (j = 0; j < 4; j++) { current_macroblock = s->superblock_macroblocks[i * 4 + j]; if ((current_macroblock == -1) || (!s->macroblock_coded[current_macroblock])) continue; /* mode 7 means get 3 bits for each coding mode */ if (scheme == 7) coding_mode = get_bits(gb, 3); else coding_mode = ModeAlphabet[scheme][get_mode_code(gb)]; for (k = 0; k < 6; k++) { current_fragment = s->macroblock_fragments[current_macroblock * 6 + k]; if (s->all_fragments[current_fragment].coding_method != MODE_COPY) s->all_fragments[current_fragment].coding_method = coding_mode; } debug_modes(\" coding method for macroblock starting @ fragment %d = %d\\n\", s->macroblock_fragments[current_macroblock * 6], coding_mode); } } } }"} {"target": 0, "idx": 12778, "func": "static int arm946_prbs_read(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value) { if (ri->crm >= 8) { return EXCP_UDEF; } *value = env->cp15.c6_region[ri->crm]; return 0; }"} {"target": 1, "idx": 12796, "func": "static void lsi_do_dma(LSIState *s, int out) { uint32_t count, id; target_phys_addr_t addr; SCSIDevice *dev; assert(s->current); if (!s->current->dma_len) { /* Wait until data is available. */ DPRINTF(\"DMA no data available\\n\"); return; } id = (s->current->tag >> 8) & 0xf; dev = s->bus.devs[id]; if (!dev) { lsi_bad_selection(s, id); return; } count = s->dbc; if (count > s->current->dma_len) count = s->current->dma_len; addr = s->dnad; /* both 40 and Table Indirect 64-bit DMAs store upper bits in dnad64 */ if (lsi_dma_40bit(s) || lsi_dma_ti64bit(s)) addr |= ((uint64_t)s->dnad64 << 32); else if (s->dbms) addr |= ((uint64_t)s->dbms << 32); else if (s->sbms) addr |= ((uint64_t)s->sbms << 32); DPRINTF(\"DMA addr=0x\" TARGET_FMT_plx \" len=%d\\n\", addr, count); s->csbc += count; s->dnad += count; s->dbc -= count; if (s->current->dma_buf == NULL) { s->current->dma_buf = dev->info->get_buf(dev, s->current->tag); } /* ??? Set SFBR to first data byte. */ if (out) { cpu_physical_memory_read(addr, s->current->dma_buf, count); } else { cpu_physical_memory_write(addr, s->current->dma_buf, count); } s->current->dma_len -= count; if (s->current->dma_len == 0) { s->current->dma_buf = NULL; if (out) { /* Write the data. */ dev->info->write_data(dev, s->current->tag); } else { /* Request any remaining data. */ dev->info->read_data(dev, s->current->tag); } } else { s->current->dma_buf += count; lsi_resume_script(s); } }"} {"target": 1, "idx": 12801, "func": "static void iothread_instance_finalize(Object *obj) { IOThread *iothread = IOTHREAD(obj); iothread_stop(obj, NULL); qemu_cond_destroy(&iothread->init_done_cond); qemu_mutex_destroy(&iothread->init_done_lock); aio_context_unref(iothread->ctx);"} {"target": 1, "idx": 12817, "func": "static void gen_isync(DisasContext *ctx) { /* * We need to check for a pending TLB flush. 
This can only happen in * kernel mode however so check MSR_PR */ if (!ctx->pr) { gen_check_tlb_flush(ctx); } gen_stop_exception(ctx); }"} {"target": 1, "idx": 12823, "func": "static void realview_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, enum realview_board_type board_type) { CPUState *env = NULL; ram_addr_t ram_offset; DeviceState *dev, *sysctl, *gpio2; SysBusDevice *busdev; qemu_irq *irqp; qemu_irq pic[64]; qemu_irq mmc_irq[2]; PCIBus *pci_bus; NICInfo *nd; i2c_bus *i2c; int n; int done_nic = 0; qemu_irq cpu_irq[4]; int is_mpcore = 0; int is_pb = 0; uint32_t proc_id = 0; uint32_t sys_id; ram_addr_t low_ram_size; switch (board_type) { case BOARD_EB: break; case BOARD_EB_MPCORE: is_mpcore = 1; break; case BOARD_PB_A8: is_pb = 1; break; case BOARD_PBX_A9: is_mpcore = 1; is_pb = 1; break; } for (n = 0; n < smp_cpus; n++) { env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to find CPU definition\\n\"); exit(1); } irqp = arm_pic_init_cpu(env); cpu_irq[n] = irqp[ARM_PIC_CPU_IRQ]; } if (arm_feature(env, ARM_FEATURE_V7)) { if (is_mpcore) { proc_id = 0x0c000000; } else { proc_id = 0x0e000000; } } else if (arm_feature(env, ARM_FEATURE_V6K)) { proc_id = 0x06000000; } else if (arm_feature(env, ARM_FEATURE_V6)) { proc_id = 0x04000000; } else { proc_id = 0x02000000; } if (is_pb && ram_size > 0x20000000) { /* Core tile RAM. */ low_ram_size = ram_size - 0x20000000; ram_size = 0x20000000; ram_offset = qemu_ram_alloc(NULL, \"realview.lowmem\", low_ram_size); cpu_register_physical_memory(0x20000000, low_ram_size, ram_offset | IO_MEM_RAM); } ram_offset = qemu_ram_alloc(NULL, \"realview.highmem\", ram_size); low_ram_size = ram_size; if (low_ram_size > 0x10000000) low_ram_size = 0x10000000; /* SDRAM at address zero. */ cpu_register_physical_memory(0, low_ram_size, ram_offset | IO_MEM_RAM); if (is_pb) { /* And again at a high address. */ cpu_register_physical_memory(0x70000000, ram_size, ram_offset | IO_MEM_RAM); } else { ram_size = low_ram_size; } sys_id = is_pb ? 0x01780500 : 0xc1400400; sysctl = qdev_create(NULL, \"realview_sysctl\"); qdev_prop_set_uint32(sysctl, \"sys_id\", sys_id); qdev_init_nofail(sysctl); qdev_prop_set_uint32(sysctl, \"proc_id\", proc_id); sysbus_mmio_map(sysbus_from_qdev(sysctl), 0, 0x10000000); if (is_mpcore) { dev = qdev_create(NULL, is_pb ? \"a9mpcore_priv\": \"realview_mpcore\"); qdev_prop_set_uint32(dev, \"num-cpu\", smp_cpus); qdev_init_nofail(dev); busdev = sysbus_from_qdev(dev); if (is_pb) { realview_binfo.smp_priv_base = 0x1f000000; } else { realview_binfo.smp_priv_base = 0x10100000; } sysbus_mmio_map(busdev, 0, realview_binfo.smp_priv_base); for (n = 0; n < smp_cpus; n++) { sysbus_connect_irq(busdev, n, cpu_irq[n]); } } else { uint32_t gic_addr = is_pb ? 0x1e000000 : 0x10040000; /* For now just create the nIRQ GIC, and ignore the others. */ dev = sysbus_create_simple(\"realview_gic\", gic_addr, cpu_irq[0]); } for (n = 0; n < 64; n++) { pic[n] = qdev_get_gpio_in(dev, n); } sysbus_create_simple(\"pl050_keyboard\", 0x10006000, pic[20]); sysbus_create_simple(\"pl050_mouse\", 0x10007000, pic[21]); sysbus_create_simple(\"pl011\", 0x10009000, pic[12]); sysbus_create_simple(\"pl011\", 0x1000a000, pic[13]); sysbus_create_simple(\"pl011\", 0x1000b000, pic[14]); sysbus_create_simple(\"pl011\", 0x1000c000, pic[15]); /* DMA controller is optional, apparently. 
*/ sysbus_create_simple(\"pl081\", 0x10030000, pic[24]); sysbus_create_simple(\"sp804\", 0x10011000, pic[4]); sysbus_create_simple(\"sp804\", 0x10012000, pic[5]); sysbus_create_simple(\"pl061\", 0x10013000, pic[6]); sysbus_create_simple(\"pl061\", 0x10014000, pic[7]); gpio2 = sysbus_create_simple(\"pl061\", 0x10015000, pic[8]); sysbus_create_simple(\"pl110_versatile\", 0x10020000, pic[23]); dev = sysbus_create_varargs(\"pl181\", 0x10005000, pic[17], pic[18], NULL); /* Wire up MMC card detect and read-only signals. These have * to go to both the PL061 GPIO and the sysctl register. * Note that the PL181 orders these lines (readonly,inserted) * and the PL061 has them the other way about. Also the card * detect line is inverted. */ mmc_irq[0] = qemu_irq_split( qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_WPROT), qdev_get_gpio_in(gpio2, 1)); mmc_irq[1] = qemu_irq_split( qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_CARDIN), qemu_irq_invert(qdev_get_gpio_in(gpio2, 0))); qdev_connect_gpio_out(dev, 0, mmc_irq[0]); qdev_connect_gpio_out(dev, 1, mmc_irq[1]); sysbus_create_simple(\"pl031\", 0x10017000, pic[10]); if (!is_pb) { dev = sysbus_create_varargs(\"realview_pci\", 0x60000000, pic[48], pic[49], pic[50], pic[51], NULL); pci_bus = (PCIBus *)qdev_get_child_bus(dev, \"pci\"); if (usb_enabled) { usb_ohci_init_pci(pci_bus, -1); } n = drive_get_max_bus(IF_SCSI); while (n >= 0) { pci_create_simple(pci_bus, -1, \"lsi53c895a\"); n--; } } for(n = 0; n < nb_nics; n++) { nd = &nd_table[n]; if ((!nd->model && !done_nic) || strcmp(nd->model, is_pb ? \"lan9118\" : \"smc91c111\") == 0) { if (is_pb) { lan9118_init(nd, 0x4e000000, pic[28]); } else { smc91c111_init(nd, 0x4e000000, pic[28]); } done_nic = 1; } else { pci_nic_init_nofail(nd, \"rtl8139\", NULL); } } dev = sysbus_create_simple(\"realview_i2c\", 0x10002000, NULL); i2c = (i2c_bus *)qdev_get_child_bus(dev, \"i2c\"); i2c_create_slave(i2c, \"ds1338\", 0x68); /* Memory map for RealView Emulation Baseboard: */ /* 0x10000000 System registers. */ /* 0x10001000 System controller. */ /* 0x10002000 Two-Wire Serial Bus. */ /* 0x10003000 Reserved. */ /* 0x10004000 AACI. */ /* 0x10005000 MCI. */ /* 0x10006000 KMI0. */ /* 0x10007000 KMI1. */ /* 0x10008000 Character LCD. (EB) */ /* 0x10009000 UART0. */ /* 0x1000a000 UART1. */ /* 0x1000b000 UART2. */ /* 0x1000c000 UART3. */ /* 0x1000d000 SSPI. */ /* 0x1000e000 SCI. */ /* 0x1000f000 Reserved. */ /* 0x10010000 Watchdog. */ /* 0x10011000 Timer 0+1. */ /* 0x10012000 Timer 2+3. */ /* 0x10013000 GPIO 0. */ /* 0x10014000 GPIO 1. */ /* 0x10015000 GPIO 2. */ /* 0x10002000 Two-Wire Serial Bus - DVI. (PB) */ /* 0x10017000 RTC. */ /* 0x10018000 DMC. */ /* 0x10019000 PCI controller config. */ /* 0x10020000 CLCD. */ /* 0x10030000 DMA Controller. */ /* 0x10040000 GIC1. (EB) */ /* 0x10050000 GIC2. (EB) */ /* 0x10060000 GIC3. (EB) */ /* 0x10070000 GIC4. (EB) */ /* 0x10080000 SMC. */ /* 0x1e000000 GIC1. (PB) */ /* 0x1e001000 GIC2. (PB) */ /* 0x1e002000 GIC3. (PB) */ /* 0x1e003000 GIC4. (PB) */ /* 0x40000000 NOR flash. */ /* 0x44000000 DoC flash. */ /* 0x48000000 SRAM. */ /* 0x4c000000 Configuration flash. */ /* 0x4e000000 Ethernet. */ /* 0x4f000000 USB. */ /* 0x50000000 PISMO. */ /* 0x54000000 PISMO. */ /* 0x58000000 PISMO. */ /* 0x5c000000 PISMO. */ /* 0x60000000 PCI. */ /* 0x61000000 PCI Self Config. */ /* 0x62000000 PCI Config. */ /* 0x63000000 PCI IO. */ /* 0x64000000 PCI mem 0. */ /* 0x68000000 PCI mem 1. */ /* 0x6c000000 PCI mem 2. */ /* ??? Hack to map an additional page of ram for the secondary CPU startup code. 
I guess this works on real hardware because the BootROM happens to be in ROM/flash or in memory that isn't clobbered until after Linux boots the secondary CPUs. */ ram_offset = qemu_ram_alloc(NULL, \"realview.hack\", 0x1000); cpu_register_physical_memory(SMP_BOOT_ADDR, 0x1000, ram_offset | IO_MEM_RAM); realview_binfo.ram_size = ram_size; realview_binfo.kernel_filename = kernel_filename; realview_binfo.kernel_cmdline = kernel_cmdline; realview_binfo.initrd_filename = initrd_filename; realview_binfo.nb_cpus = smp_cpus; realview_binfo.board_id = realview_board_id[board_type]; realview_binfo.loader_start = (board_type == BOARD_PB_A8 ? 0x70000000 : 0); arm_load_kernel(first_cpu, &realview_binfo); }"} {"target": 0, "idx": 12829, "func": "static uint32_t taihu_cpld_readb (void *opaque, hwaddr addr) { taihu_cpld_t *cpld; uint32_t ret; cpld = opaque; switch (addr) { case 0x0: ret = cpld->reg0; break; case 0x1: ret = cpld->reg1; break; default: ret = 0; break; } return ret; }"} {"target": 0, "idx": 12830, "func": "static void sd_lock_command(SDState *sd) { int erase, lock, clr_pwd, set_pwd, pwd_len; erase = !!(sd->data[0] & 0x08); lock = sd->data[0] & 0x04; clr_pwd = sd->data[0] & 0x02; set_pwd = sd->data[0] & 0x01; if (sd->blk_len > 1) pwd_len = sd->data[1]; else pwd_len = 0; if (erase) { if (!(sd->card_status & CARD_IS_LOCKED) || sd->blk_len > 1 || set_pwd || clr_pwd || lock || sd->wp_switch || (sd->csd[14] & 0x20)) { sd->card_status |= LOCK_UNLOCK_FAILED; return; } memset(sd->wp_groups, 0, sizeof(int) * (sd->size >> (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT))); sd->csd[14] &= ~0x10; sd->card_status &= ~CARD_IS_LOCKED; sd->pwd_len = 0; /* Erasing the entire card here! */ fprintf(stderr, \"SD: Card force-erased by CMD42\\n\"); return; } if (sd->blk_len < 2 + pwd_len || pwd_len <= sd->pwd_len || pwd_len > sd->pwd_len + 16) { sd->card_status |= LOCK_UNLOCK_FAILED; return; } if (sd->pwd_len && memcmp(sd->pwd, sd->data + 2, sd->pwd_len)) { sd->card_status |= LOCK_UNLOCK_FAILED; return; } pwd_len -= sd->pwd_len; if ((pwd_len && !set_pwd) || (clr_pwd && (set_pwd || lock)) || (lock && !sd->pwd_len && !set_pwd) || (!set_pwd && !clr_pwd && (((sd->card_status & CARD_IS_LOCKED) && lock) || (!(sd->card_status & CARD_IS_LOCKED) && !lock)))) { sd->card_status |= LOCK_UNLOCK_FAILED; return; } if (set_pwd) { memcpy(sd->pwd, sd->data + 2 + sd->pwd_len, pwd_len); sd->pwd_len = pwd_len; } if (clr_pwd) { sd->pwd_len = 0; } if (lock) sd->card_status |= CARD_IS_LOCKED; else sd->card_status &= ~CARD_IS_LOCKED; }"} {"target": 0, "idx": 12854, "func": "static void spin_reset(void *opaque) { SpinState *s = opaque; int i; for (i = 0; i < MAX_CPUS; i++) { SpinInfo *info = &s->spin[i]; info->pir = i; info->r3 = i; info->addr = 1; } }"} {"target": 0, "idx": 12862, "func": "static int dds_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { DDSContext *ctx = avctx->priv_data; GetByteContext *gbc = &ctx->gbc; AVFrame *frame = data; int mipmap; int ret; ff_texturedsp_init(&ctx->texdsp); bytestream2_init(gbc, avpkt->data, avpkt->size); if (bytestream2_get_bytes_left(gbc) < 128) { av_log(avctx, AV_LOG_ERROR, \"Frame is too small (%d).\\n\", bytestream2_get_bytes_left(gbc)); return AVERROR_INVALIDDATA; } if (bytestream2_get_le32(gbc) != MKTAG('D', 'D', 'S', ' ') || bytestream2_get_le32(gbc) != 124) { // header size av_log(avctx, AV_LOG_ERROR, \"Invalid DDS header.\\n\"); return AVERROR_INVALIDDATA; } bytestream2_skip(gbc, 4); // flags avctx->height = bytestream2_get_le32(gbc); avctx->width = 
bytestream2_get_le32(gbc); ret = av_image_check_size(avctx->width, avctx->height, 0, avctx); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Invalid image size %dx%d.\\n\", avctx->width, avctx->height); return ret; } /* Since codec is based on 4x4 blocks, size is aligned to 4. */ avctx->coded_width = FFALIGN(avctx->width, TEXTURE_BLOCK_W); avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H); bytestream2_skip(gbc, 4); // pitch bytestream2_skip(gbc, 4); // depth mipmap = bytestream2_get_le32(gbc); if (mipmap != 0) av_log(avctx, AV_LOG_VERBOSE, \"Found %d mipmaps (ignored).\\n\", mipmap); /* Extract pixel format information, considering additional elements * in reserved1 and reserved2. */ ret = parse_pixel_format(avctx); if (ret < 0) return ret; ret = ff_get_buffer(avctx, frame, 0); if (ret < 0) return ret; if (ctx->compressed) { int size = (avctx->coded_height / TEXTURE_BLOCK_H) * (avctx->coded_width / TEXTURE_BLOCK_W) * ctx->tex_ratio; ctx->slice_count = av_clip(avctx->thread_count, 1, avctx->coded_height / TEXTURE_BLOCK_H); if (bytestream2_get_bytes_left(gbc) < size) { av_log(avctx, AV_LOG_ERROR, \"Compressed Buffer is too small (%d < %d).\\n\", bytestream2_get_bytes_left(gbc), size); return AVERROR_INVALIDDATA; } /* Use the decompress function on the texture, one block per thread. */ ctx->tex_data = gbc->buffer; avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count); } else { int linesize = av_image_get_linesize(avctx->pix_fmt, frame->width, 0); if (ctx->paletted) { int i; uint32_t *p = (uint32_t*) frame->data[1]; /* Use the first 1024 bytes as palette, then copy the rest. */ for (i = 0; i < 256; i++) { uint32_t rgba = 0; rgba |= bytestream2_get_byte(gbc) << 16; rgba |= bytestream2_get_byte(gbc) << 8; rgba |= bytestream2_get_byte(gbc) << 0; rgba |= bytestream2_get_byte(gbc) << 24; p[i] = rgba; } frame->palette_has_changed = 1; } if (bytestream2_get_bytes_left(gbc) < frame->height * linesize) { av_log(avctx, AV_LOG_ERROR, \"Buffer is too small (%d < %d).\\n\", bytestream2_get_bytes_left(gbc), frame->height * linesize); return AVERROR_INVALIDDATA; } av_image_copy_plane(frame->data[0], frame->linesize[0], gbc->buffer, linesize, linesize, frame->height); } /* Run any post processing here if needed. */ if (avctx->pix_fmt == AV_PIX_FMT_BGRA || avctx->pix_fmt == AV_PIX_FMT_RGBA || avctx->pix_fmt == AV_PIX_FMT_YA8) run_postproc(avctx, frame); /* Frame is ready to be output. */ frame->pict_type = AV_PICTURE_TYPE_I; frame->key_frame = 1; *got_frame = 1; return avpkt->size; }"} {"target": 0, "idx": 12869, "func": "static QObject *parser_context_pop_token(JSONParserContext *ctxt) { qobject_decref(ctxt->current); assert(!g_queue_is_empty(ctxt->buf)); ctxt->current = g_queue_pop_head(ctxt->buf); return ctxt->current; }"} {"target": 0, "idx": 12871, "func": "static void pc_fw_cfg_guest_info(PcGuestInfo *guest_info) { PcRomPciInfo *info; if (!guest_info->has_pci_info) { return; } info = g_malloc(sizeof *info); info->w32_min = cpu_to_le64(guest_info->pci_info.w32.begin); info->w32_max = cpu_to_le64(guest_info->pci_info.w32.end); info->w64_min = cpu_to_le64(guest_info->pci_info.w64.begin); info->w64_max = cpu_to_le64(guest_info->pci_info.w64.end); /* Pass PCI hole info to guest via a side channel. * Required so guest PCI enumeration does the right thing. 
*/ fw_cfg_add_file(guest_info->fw_cfg, \"etc/pci-info\", info, sizeof *info); }"} {"target": 0, "idx": 12921, "func": "static double get_video_clock(VideoState *is) { double delta; if (is->paused) { //FIXME timing gets messed after pause delta = 0; } else { delta = (av_gettime() - is->video_current_pts_time) / 1000000.0; } return is->video_current_pts + delta; }"} {"target": 0, "idx": 12936, "func": "static void set_downmix_coeffs(AC3DecodeContext *s) { int i; float cmix = gain_levels[s->center_mix_level]; float smix = gain_levels[s->surround_mix_level]; for(i=0; ifbw_channels; i++) { s->downmix_coeffs[i][0] = gain_levels[ac3_default_coeffs[s->channel_mode][i][0]]; s->downmix_coeffs[i][1] = gain_levels[ac3_default_coeffs[s->channel_mode][i][1]]; } if(s->channel_mode > 1 && s->channel_mode & 1) { s->downmix_coeffs[1][0] = s->downmix_coeffs[1][1] = cmix; } if(s->channel_mode == AC3_CHMODE_2F1R || s->channel_mode == AC3_CHMODE_3F1R) { int nf = s->channel_mode - 2; s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf][1] = smix * LEVEL_MINUS_3DB; } if(s->channel_mode == AC3_CHMODE_2F2R || s->channel_mode == AC3_CHMODE_3F2R) { int nf = s->channel_mode - 4; s->downmix_coeffs[nf][0] = s->downmix_coeffs[nf+1][1] = smix; } s->downmix_coeff_sum[0] = s->downmix_coeff_sum[1] = 0.0f; for(i=0; ifbw_channels; i++) { s->downmix_coeff_sum[0] += s->downmix_coeffs[i][0]; s->downmix_coeff_sum[1] += s->downmix_coeffs[i][1]; } }"} {"target": 1, "idx": 12938, "func": "static void calc_diffs(const DecimateContext *dm, struct qitem *q, const AVFrame *f1, const AVFrame *f2) { int64_t maxdiff = -1; int64_t *bdiffs = dm->bdiffs; int plane, i, j; memset(bdiffs, 0, dm->bdiffsize * sizeof(*bdiffs)); for (plane = 0; plane < (dm->chroma ? 3 : 1); plane++) { int x, y, xl; const int linesize1 = f1->linesize[plane]; const int linesize2 = f2->linesize[plane]; const uint8_t *f1p = f1->data[plane]; const uint8_t *f2p = f2->data[plane]; int width = plane ? FF_CEIL_RSHIFT(f1->width, dm->hsub) : f1->width; int height = plane ? 
FF_CEIL_RSHIFT(f1->height, dm->vsub) : f1->height; int hblockx = dm->blockx / 2; int hblocky = dm->blocky / 2; if (plane) { hblockx >>= dm->hsub; hblocky >>= dm->vsub; } for (y = 0; y < height; y++) { int ydest = y / hblocky; int xdest = 0; #define CALC_DIFF(nbits) do { \\ for (x = 0; x < width; x += hblockx) { \\ int64_t acc = 0; \\ int m = FFMIN(width, x + hblockx); \\ for (xl = x; xl < m; xl++) \\ acc += abs(((const uint##nbits##_t *)f1p)[xl] - \\ ((const uint##nbits##_t *)f2p)[xl]); \\ bdiffs[ydest * dm->nxblocks + xdest] += acc; \\ xdest++; \\ } \\ } while (0) if (dm->depth == 8) CALC_DIFF(8); else CALC_DIFF(16); f1p += linesize1; f2p += linesize2; } } for (i = 0; i < dm->nyblocks - 1; i++) { for (j = 0; j < dm->nxblocks - 1; j++) { int64_t tmp = bdiffs[ i * dm->nxblocks + j ] + bdiffs[ i * dm->nxblocks + j + 1] + bdiffs[(i + 1) * dm->nxblocks + j ] + bdiffs[(i + 1) * dm->nxblocks + j + 1]; if (tmp > maxdiff) maxdiff = tmp; } } q->totdiff = 0; for (i = 0; i < dm->bdiffsize; i++) q->totdiff += bdiffs[i]; q->maxbdiff = maxdiff; }"} {"target": 1, "idx": 12963, "func": "int spapr_rtas_register(const char *name, spapr_rtas_fn fn) { int i; for (i = 0; i < (rtas_next - rtas_table); i++) { if (strcmp(name, rtas_table[i].name) == 0) { fprintf(stderr, \"RTAS call \\\"%s\\\" registered twice\\n\", name); exit(1); } } assert(rtas_next < (rtas_table + TOKEN_MAX)); rtas_next->name = name; rtas_next->fn = fn; return (rtas_next++ - rtas_table) + TOKEN_BASE; }"} {"target": 1, "idx": 12966, "func": "static void test_qemu_strtol_whitespace(void) { const char *str = \" \\t \"; char f = 'X'; const char *endptr = &f; long res = 999; int err; err = qemu_strtol(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 0); g_assert(endptr == str); }"} {"target": 0, "idx": 12988, "func": "static void compute_exp_strategy(AC3EncodeContext *s) { int ch, blk, blk1; for (ch = !s->cpl_on; ch <= s->fbw_channels; ch++) { uint8_t *exp_strategy = s->exp_strategy[ch]; uint8_t *exp = s->blocks[0].exp[ch]; int exp_diff; /* estimate if the exponent variation & decide if they should be reused in the next frame */ exp_strategy[0] = EXP_NEW; exp += AC3_MAX_COEFS; for (blk = 1; blk < AC3_MAX_BLOCKS; blk++, exp += AC3_MAX_COEFS) { if ((ch == CPL_CH && (!s->blocks[blk].cpl_in_use || !s->blocks[blk-1].cpl_in_use)) || (ch > CPL_CH && (s->blocks[blk].channel_in_cpl[ch] != s->blocks[blk-1].channel_in_cpl[ch]))) { exp_strategy[blk] = EXP_NEW; continue; } exp_diff = s->dsp.sad[0](NULL, exp, exp - AC3_MAX_COEFS, 16, 16); exp_strategy[blk] = EXP_REUSE; if (ch == CPL_CH && exp_diff > (EXP_DIFF_THRESHOLD * (s->blocks[blk].end_freq[ch] - s->start_freq[ch]) / AC3_MAX_COEFS)) exp_strategy[blk] = EXP_NEW; else if (ch > CPL_CH && exp_diff > EXP_DIFF_THRESHOLD) exp_strategy[blk] = EXP_NEW; } /* now select the encoding strategy type : if exponents are often recoded, we use a coarse encoding */ blk = 0; while (blk < AC3_MAX_BLOCKS) { blk1 = blk + 1; while (blk1 < AC3_MAX_BLOCKS && exp_strategy[blk1] == EXP_REUSE) blk1++; switch (blk1 - blk) { case 1: exp_strategy[blk] = EXP_D45; break; case 2: case 3: exp_strategy[blk] = EXP_D25; break; default: exp_strategy[blk] = EXP_D15; break; } blk = blk1; } } if (s->lfe_on) { ch = s->lfe_channel; s->exp_strategy[ch][0] = EXP_D15; for (blk = 1; blk < AC3_MAX_BLOCKS; blk++) s->exp_strategy[ch][blk] = EXP_REUSE; } }"} {"target": 0, "idx": 12999, "func": "static uint32_t nvram_readw (void *opaque, target_phys_addr_t addr) { M48t59State *NVRAM = opaque; uint32_t retval; retval = 
m48t59_read(NVRAM, addr) << 8; retval |= m48t59_read(NVRAM, addr + 1); return retval; }"} {"target": 0, "idx": 13016, "func": "static uint32_t sdhci_read(SDHCIState *s, unsigned int offset, unsigned size) { uint32_t ret = 0; switch (offset & ~0x3) { case SDHC_SYSAD: ret = s->sdmasysad; break; case SDHC_BLKSIZE: ret = s->blksize | (s->blkcnt << 16); break; case SDHC_ARGUMENT: ret = s->argument; break; case SDHC_TRNMOD: ret = s->trnmod | (s->cmdreg << 16); break; case SDHC_RSPREG0 ... SDHC_RSPREG3: ret = s->rspreg[((offset & ~0x3) - SDHC_RSPREG0) >> 2]; break; case SDHC_BDATA: if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { ret = SDHCI_GET_CLASS(s)->bdata_read(s, size); DPRINT_L2(\"read %ub: addr[0x%04x] -> %u(0x%x)\\n\", size, offset, ret, ret); return ret; } break; case SDHC_PRNSTS: ret = s->prnsts; break; case SDHC_HOSTCTL: ret = s->hostctl | (s->pwrcon << 8) | (s->blkgap << 16) | (s->wakcon << 24); break; case SDHC_CLKCON: ret = s->clkcon | (s->timeoutcon << 16); break; case SDHC_NORINTSTS: ret = s->norintsts | (s->errintsts << 16); break; case SDHC_NORINTSTSEN: ret = s->norintstsen | (s->errintstsen << 16); break; case SDHC_NORINTSIGEN: ret = s->norintsigen | (s->errintsigen << 16); break; case SDHC_ACMD12ERRSTS: ret = s->acmd12errsts; break; case SDHC_CAPAREG: ret = s->capareg; break; case SDHC_MAXCURR: ret = s->maxcurr; break; case SDHC_ADMAERR: ret = s->admaerr; break; case SDHC_ADMASYSADDR: ret = (uint32_t)s->admasysaddr; break; case SDHC_ADMASYSADDR + 4: ret = (uint32_t)(s->admasysaddr >> 32); break; case SDHC_SLOT_INT_STATUS: ret = (SD_HOST_SPECv2_VERS << 16) | sdhci_slotint(s); break; default: ERRPRINT(\"bad %ub read: addr[0x%04x]\\n\", size, offset); break; } ret >>= (offset & 0x3) * 8; ret &= (1ULL << (size * 8)) - 1; DPRINT_L2(\"read %ub: addr[0x%04x] -> %u(0x%x)\\n\", size, offset, ret, ret); return ret; }"} {"target": 0, "idx": 13017, "func": "static int raw_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue, Error **errp) { BDRVRawState *s; BDRVRawReopenState *raw_s; int ret = 0; Error *local_err = NULL; assert(state != NULL); assert(state->bs != NULL); s = state->bs->opaque; state->opaque = g_new0(BDRVRawReopenState, 1); raw_s = state->opaque; #ifdef CONFIG_LINUX_AIO raw_s->use_aio = s->use_aio; /* we can use s->aio_ctx instead of a copy, because the use_aio flag is * valid in the 'false' condition even if aio_ctx is set, and raw_set_aio() * won't override aio_ctx if aio_ctx is non-NULL */ if (raw_set_aio(&s->aio_ctx, &raw_s->use_aio, state->flags)) { error_setg(errp, \"Could not set AIO state\"); return -1; } #endif if (s->type == FTYPE_FD || s->type == FTYPE_CD) { raw_s->open_flags |= O_NONBLOCK; } raw_parse_flags(state->flags, &raw_s->open_flags); raw_s->fd = -1; int fcntl_flags = O_APPEND | O_NONBLOCK; #ifdef O_NOATIME fcntl_flags |= O_NOATIME; #endif #ifdef O_ASYNC /* Not all operating systems have O_ASYNC, and those that don't * will not let us track the state into raw_s->open_flags (typically * you achieve the same effect with an ioctl, for example I_SETSIG * on Solaris). But we do not use O_ASYNC, so that's fine. 
*/ assert((s->open_flags & O_ASYNC) == 0); #endif if ((raw_s->open_flags & ~fcntl_flags) == (s->open_flags & ~fcntl_flags)) { /* dup the original fd */ /* TODO: use qemu fcntl wrapper */ #ifdef F_DUPFD_CLOEXEC raw_s->fd = fcntl(s->fd, F_DUPFD_CLOEXEC, 0); #else raw_s->fd = dup(s->fd); if (raw_s->fd != -1) { qemu_set_cloexec(raw_s->fd); } #endif if (raw_s->fd >= 0) { ret = fcntl_setfl(raw_s->fd, raw_s->open_flags); if (ret) { qemu_close(raw_s->fd); raw_s->fd = -1; } } } /* If we cannot use fcntl, or fcntl failed, fall back to qemu_open() */ if (raw_s->fd == -1) { assert(!(raw_s->open_flags & O_CREAT)); raw_s->fd = qemu_open(state->bs->filename, raw_s->open_flags); if (raw_s->fd == -1) { error_setg_errno(errp, errno, \"Could not reopen file\"); ret = -1; } } /* Fail already reopen_prepare() if we can't get a working O_DIRECT * alignment with the new fd. */ if (raw_s->fd != -1) { raw_probe_alignment(state->bs, raw_s->fd, &local_err); if (local_err) { qemu_close(raw_s->fd); raw_s->fd = -1; error_propagate(errp, local_err); ret = -EINVAL; } } return ret; }"} {"target": 0, "idx": 13027, "func": "static void put_frame( AVFormatContext *s, ASFStream *stream, AVStream *avst, int timestamp, const uint8_t *buf, int m_obj_size, int flags ) { ASFContext *asf = s->priv_data; int m_obj_offset, payload_len, frag_len1; m_obj_offset = 0; while (m_obj_offset < m_obj_size) { payload_len = m_obj_size - m_obj_offset; if (asf->packet_timestamp_start == -1) { asf->multi_payloads_present = (payload_len < MULTI_PAYLOAD_CONSTANT); asf->packet_size_left = PACKET_SIZE; if (asf->multi_payloads_present){ frag_len1 = MULTI_PAYLOAD_CONSTANT - 1; } else { frag_len1 = SINGLE_PAYLOAD_DATA_LENGTH; } asf->packet_timestamp_start = timestamp; } else { // multi payloads frag_len1 = asf->packet_size_left - PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS - PACKET_HEADER_MIN_SIZE - 1; asf->packet_timestamp_start = timestamp; if(frag_len1 < payload_len && avst->codec->codec_type == CODEC_TYPE_AUDIO){ flush_packet(s); continue; } } if (frag_len1 > 0) { if (payload_len > frag_len1) payload_len = frag_len1; else if (payload_len == (frag_len1 - 1)) payload_len = frag_len1 - 2; //additional byte need to put padding length put_payload_header(s, stream, timestamp+PREROLL_TIME, m_obj_size, m_obj_offset, payload_len, flags); put_buffer(&asf->pb, buf, payload_len); if (asf->multi_payloads_present) asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS); else asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD); asf->packet_timestamp_end = timestamp; asf->packet_nb_payloads++; } else { payload_len = 0; } m_obj_offset += payload_len; buf += payload_len; if (!asf->multi_payloads_present) flush_packet(s); else if (asf->packet_size_left <= (PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS + PACKET_HEADER_MIN_SIZE + 1)) flush_packet(s); } stream->seq++; }"} {"target": 1, "idx": 13034, "func": "static void render_fragments(Vp3DecodeContext *s, int first_fragment, int width, int height, int plane /* 0 = Y, 1 = U, 2 = V */) { int x, y; int m, n; int i = first_fragment; int16_t *dequantizer; DCTELEM __align16 output_samples[64]; unsigned char *output_plane; unsigned char *last_plane; unsigned char *golden_plane; int stride; int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef; int upper_motion_limit, lower_motion_limit; int motion_halfpel_index; uint8_t *motion_source; debug_vp3(\" vp3: rendering final fragments for %s\\n\", (plane == 0) ? \"Y plane\" : (plane == 1) ? 
\"U plane\" : \"V plane\"); /* set up plane-specific parameters */ if (plane == 0) { dequantizer = s->intra_y_dequant; output_plane = s->current_frame.data[0]; last_plane = s->last_frame.data[0]; golden_plane = s->golden_frame.data[0]; stride = s->current_frame.linesize[0]; if (!s->flipped_image) stride = -stride; upper_motion_limit = 7 * s->current_frame.linesize[0]; lower_motion_limit = height * s->current_frame.linesize[0] + width - 8; } else if (plane == 1) { dequantizer = s->intra_c_dequant; output_plane = s->current_frame.data[1]; last_plane = s->last_frame.data[1]; golden_plane = s->golden_frame.data[1]; stride = s->current_frame.linesize[1]; if (!s->flipped_image) stride = -stride; upper_motion_limit = 7 * s->current_frame.linesize[1]; lower_motion_limit = height * s->current_frame.linesize[1] + width - 8; } else { dequantizer = s->intra_c_dequant; output_plane = s->current_frame.data[2]; last_plane = s->last_frame.data[2]; golden_plane = s->golden_frame.data[2]; stride = s->current_frame.linesize[2]; if (!s->flipped_image) stride = -stride; upper_motion_limit = 7 * s->current_frame.linesize[2]; lower_motion_limit = height * s->current_frame.linesize[2] + width - 8; } /* for each fragment row... */ for (y = 0; y < height; y += 8) { /* for each fragment in a row... */ for (x = 0; x < width; x += 8, i++) { if ((i < 0) || (i >= s->fragment_count)) { av_log(s->avctx, AV_LOG_ERROR, \" vp3:render_fragments(): bad fragment number (%d)\\n\", i); return; } /* transform if this block was coded */ if ((s->all_fragments[i].coding_method != MODE_COPY) && !((s->avctx->flags & CODEC_FLAG_GRAY) && plane)) { if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) || (s->all_fragments[i].coding_method == MODE_GOLDEN_MV)) motion_source= golden_plane; else motion_source= last_plane; motion_source += s->all_fragments[i].first_pixel; motion_halfpel_index = 0; /* sort out the motion vector if this fragment is coded * using a motion vector method */ if ((s->all_fragments[i].coding_method > MODE_INTRA) && (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) { int src_x, src_y; motion_x = s->all_fragments[i].motion_x; motion_y = s->all_fragments[i].motion_y; if(plane){ motion_x= (motion_x>>1) | (motion_x&1); motion_y= (motion_y>>1) | (motion_y&1); } src_x= (motion_x>>1) + x; src_y= (motion_y>>1) + y; if ((motion_x == 0xbeef) || (motion_y == 0xbeef)) av_log(s->avctx, AV_LOG_ERROR, \" help! got beefy vector! 
(%X, %X)\\n\", motion_x, motion_y); motion_halfpel_index = motion_x & 0x01; motion_source += (motion_x >> 1); // motion_y = -motion_y; motion_halfpel_index |= (motion_y & 0x01) << 1; motion_source += ((motion_y >> 1) * stride); if(src_x<0 || src_y<0 || src_x + 9 >= width || src_y + 9 >= height){ uint8_t *temp= s->edge_emu_buffer; if(stride<0) temp -= 9*stride; else temp += 9*stride; ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, width, height); motion_source= temp; } } /* first, take care of copying a block from either the * previous or the golden frame */ if (s->all_fragments[i].coding_method != MODE_INTRA) { //Note, it is possible to implement all MC cases with put_no_rnd_pixels_l2 which would look more like the VP3 source but this would be slower as put_no_rnd_pixels_tab is better optimzed if(motion_halfpel_index != 3){ s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index]( output_plane + s->all_fragments[i].first_pixel, motion_source, stride, 8); }else{ int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1 s->dsp.put_no_rnd_pixels_l2[1]( output_plane + s->all_fragments[i].first_pixel, motion_source - d, motion_source + stride + 1 + d, stride, 8); } } /* dequantize the DCT coefficients */ debug_idct(\"fragment %d, coding mode %d, DC = %d, dequant = %d:\\n\", i, s->all_fragments[i].coding_method, s->all_fragments[i].coeffs[0], dequantizer[0]); /* invert DCT and place (or add) in final output */ s->dsp.vp3_idct(s->all_fragments[i].coeffs, dequantizer, s->all_fragments[i].coeff_count, output_samples); if (s->all_fragments[i].coding_method == MODE_INTRA) { s->dsp.put_signed_pixels_clamped(output_samples, output_plane + s->all_fragments[i].first_pixel, stride); } else { s->dsp.add_pixels_clamped(output_samples, output_plane + s->all_fragments[i].first_pixel, stride); } debug_idct(\"block after idct_%s():\\n\", (s->all_fragments[i].coding_method == MODE_INTRA)? 
\"put\" : \"add\"); for (m = 0; m < 8; m++) { for (n = 0; n < 8; n++) { debug_idct(\" %3d\", *(output_plane + s->all_fragments[i].first_pixel + (m * stride + n))); } debug_idct(\"\\n\"); } debug_idct(\"\\n\"); } else { /* copy directly from the previous frame */ s->dsp.put_pixels_tab[1][0]( output_plane + s->all_fragments[i].first_pixel, last_plane + s->all_fragments[i].first_pixel, stride, 8); } } } emms_c(); }"} {"target": 1, "idx": 13036, "func": "static int enable_write_target(BDRVVVFATState *s, Error **errp) { BlockDriver *bdrv_qcow; QEMUOptionParameter *options; int ret; int size = sector2cluster(s, s->sector_count); s->used_clusters = calloc(size, 1); array_init(&(s->commits), sizeof(commit_t)); s->qcow_filename = g_malloc(1024); ret = get_tmp_filename(s->qcow_filename, 1024); if (ret < 0) { error_setg_errno(errp, -ret, \"can't create temporary file\"); goto err; } bdrv_qcow = bdrv_find_format(\"qcow\"); options = parse_option_parameters(\"\", bdrv_qcow->create_options, NULL); set_option_parameter_int(options, BLOCK_OPT_SIZE, s->sector_count * 512); set_option_parameter(options, BLOCK_OPT_BACKING_FILE, \"fat:\"); ret = bdrv_create(bdrv_qcow, s->qcow_filename, options, errp); if (ret < 0) { goto err; } s->qcow = NULL; ret = bdrv_open(&s->qcow, s->qcow_filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, bdrv_qcow, errp); if (ret < 0) { goto err; } #ifndef _WIN32 unlink(s->qcow_filename); #endif bdrv_set_backing_hd(s->bs, bdrv_new(\"\", &error_abort)); s->bs->backing_hd->drv = &vvfat_write_target; s->bs->backing_hd->opaque = g_malloc(sizeof(void*)); *(void**)s->bs->backing_hd->opaque = s; return 0; err: g_free(s->qcow_filename); s->qcow_filename = NULL; return ret; }"} {"target": 1, "idx": 13039, "func": "static av_cold int cinepak_encode_end(AVCodecContext *avctx) { CinepakEncContext *s = avctx->priv_data; int x; av_free(s->codebook_input); av_free(s->codebook_closest); av_free(s->strip_buf); av_free(s->frame_buf); av_free(s->mb); #ifdef CINEPAKENC_DEBUG av_free(s->best_mb); #endif for(x = 0; x < 3; x++) av_free(s->pict_bufs[x]); av_log(avctx, AV_LOG_INFO, \"strip coding stats: %i V1 mode, %i V4 mode, %i MC mode (%i V1 encs, %i V4 encs, %i skips)\\n\", s->num_v1_mode, s->num_v4_mode, s->num_mc_mode, s->num_v1_encs, s->num_v4_encs, s->num_skips); return 0; }"} {"target": 0, "idx": 13046, "func": "static unsigned virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc, hwaddr desc_pa, unsigned int max) { unsigned int next; /* If this descriptor says it doesn't chain, we're done. */ if (!(desc->flags & VRING_DESC_F_NEXT)) { return max; } /* Check they're not leading us off end of descriptors. */ next = desc->next; /* Make sure compiler knows to grab that: we don't want it changing! */ smp_wmb(); if (next >= max) { error_report(\"Desc next is %u\", next); exit(1); } vring_desc_read(vdev, desc, desc_pa, next); return next; }"} {"target": 0, "idx": 13060, "func": "static void setup_rt_frame(int usig, struct emulated_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUState *env) { struct rt_sigframe *frame; abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); struct target_sigaltstack stack; int i, err = 0; if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) return /* 1 */; __put_user_error(&frame->info, (abi_ulong *)&frame->pinfo, err); __put_user_error(&frame->uc, (abi_ulong *)&frame->puc, err); err |= copy_siginfo_to_user(&frame->info, info); /* Clear all the bits of the ucontext we don't use. 
*/ memset(&frame->uc, 0, offsetof(struct target_ucontext, tuc_mcontext)); memset(&stack, 0, sizeof(stack)); __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp); __put_user(target_sigaltstack_used.ss_size, &stack.ss_size); __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags); memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); err |= setup_sigcontext(&frame->uc.tuc_mcontext, /*&frame->fpstate,*/ env, set->sig[0]); for(i = 0; i < TARGET_NSIG_WORDS; i++) { if (__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i])) goto end; } if (err == 0) err = setup_return(env, ka, &frame->retcode, frame, usig); if (err == 0) { /* * For realtime signals we must also set the second and third * arguments for the signal handler. * -- Peter Maydell 2000-12-06 */ env->regs[1] = (abi_ulong)frame->pinfo; env->regs[2] = (abi_ulong)frame->puc; } end: unlock_user_struct(frame, frame_addr, 1); // return err; }"} {"target": 0, "idx": 13083, "func": "static void RENAME(lumRangeFromJpeg)(int16_t *dst, int width) { int i; for (i = 0; i < width; i++) dst[i] = (dst[i]*14071 + 33561947)>>14; }"} {"target": 1, "idx": 13091, "func": "void do_POWER_abso (void) { if (T0 == INT32_MIN) { T0 = INT32_MAX; xer_ov = 1; xer_so = 1; } else { T0 = -T0; xer_ov = 0; } }"} {"target": 0, "idx": 13110, "func": "static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst, const uint8_t *src, int width, int esc_count) { int i = 0; int count; uint8_t zero_run = 0; const uint8_t *start = src; uint8_t mask1 = -(esc_count < 2); uint8_t mask2 = -(esc_count < 3); uint8_t *end = dst + (width - 2); output_zeros: if (l->zeros_rem) { count = FFMIN(l->zeros_rem, width - i); memset(dst, 0, count); l->zeros_rem -= count; dst += count; } while (dst < end) { i = 0; while (!zero_run && dst + i < end) { i++; zero_run = !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2)); } if (zero_run) { zero_run = 0; i += esc_count; memcpy(dst, src, i); dst += i; l->zeros_rem = lag_calc_zero_run(src[i]); src += i + 1; goto output_zeros; } else { memcpy(dst, src, i); src += i; } } return src - start; }"} {"target": 1, "idx": 13121, "func": "static int stream_component_open(VideoState *is, int stream_index) { AVFormatContext *ic = is->ic; AVCodecContext *enc; AVCodec *codec; SDL_AudioSpec wanted_spec, spec; if (stream_index < 0 || stream_index >= ic->nb_streams) return -1; enc = ic->streams[stream_index]->codec; /* prepare audio output */ if (enc->codec_type == CODEC_TYPE_AUDIO) { if (enc->channels > 0) { enc->request_channels = FFMIN(2, enc->channels); } else { enc->request_channels = 2; } } codec = avcodec_find_decoder(enc->codec_id); enc->debug_mv = debug_mv; enc->debug = debug; enc->workaround_bugs = workaround_bugs; enc->lowres = lowres; if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE; enc->idct_algo= idct; if(fast) enc->flags2 |= CODEC_FLAG2_FAST; enc->skip_frame= skip_frame; enc->skip_idct= skip_idct; enc->skip_loop_filter= skip_loop_filter; enc->error_recognition= error_recognition; enc->error_concealment= error_concealment; avcodec_thread_init(enc, thread_count); set_context_opts(enc, avcodec_opts[enc->codec_type], 0); if (!codec || avcodec_open(enc, codec) < 0) return -1; /* prepare audio output */ if (enc->codec_type == CODEC_TYPE_AUDIO) { wanted_spec.freq = enc->sample_rate; wanted_spec.format = AUDIO_S16SYS; wanted_spec.channels = enc->channels; wanted_spec.silence = 0; wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE; wanted_spec.callback = sdl_audio_callback; wanted_spec.userdata = is; if (SDL_OpenAudio(&wanted_spec, &spec) < 0) { 
fprintf(stderr, \"SDL_OpenAudio: %s\\n\", SDL_GetError()); return -1; } is->audio_hw_buf_size = spec.size; is->audio_src_fmt= SAMPLE_FMT_S16; } ic->streams[stream_index]->discard = AVDISCARD_DEFAULT; switch(enc->codec_type) { case CODEC_TYPE_AUDIO: is->audio_stream = stream_index; is->audio_st = ic->streams[stream_index]; is->audio_buf_size = 0; is->audio_buf_index = 0; /* init averaging filter */ is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB); is->audio_diff_avg_count = 0; /* since we do not have a precise anough audio fifo fullness, we correct audio sync only if larger than this threshold */ is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate; memset(&is->audio_pkt, 0, sizeof(is->audio_pkt)); packet_queue_init(&is->audioq); SDL_PauseAudio(0); break; case CODEC_TYPE_VIDEO: is->video_stream = stream_index; is->video_st = ic->streams[stream_index]; is->frame_last_delay = 40e-3; is->frame_timer = (double)av_gettime() / 1000000.0; is->video_current_pts_time = av_gettime(); packet_queue_init(&is->videoq); is->video_tid = SDL_CreateThread(video_thread, is); break; case CODEC_TYPE_SUBTITLE: is->subtitle_stream = stream_index; is->subtitle_st = ic->streams[stream_index]; packet_queue_init(&is->subtitleq); is->subtitle_tid = SDL_CreateThread(subtitle_thread, is); break; default: break; } return 0; }"} {"target": 1, "idx": 13135, "func": "static int cirrus_bitblt_common_patterncopy(CirrusVGAState * s, const uint8_t * src) { uint8_t *dst; dst = s->vga.vram_ptr + (s->cirrus_blt_dstaddr & s->cirrus_addr_mask); if (BLTUNSAFE(s)) return 0; (*s->cirrus_rop) (s, dst, src, s->cirrus_blt_dstpitch, 0, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); return 1; }"} {"target": 0, "idx": 13147, "func": "static int sls_flags_filename_process(struct AVFormatContext *s, HLSContext *hls, VariantStream *vs, HLSSegment *en, double duration, int64_t pos, int64_t size) { if ((hls->flags & (HLS_SECOND_LEVEL_SEGMENT_SIZE | HLS_SECOND_LEVEL_SEGMENT_DURATION)) && strlen(vs->current_segment_final_filename_fmt)) { av_strlcpy(vs->avf->filename, vs->current_segment_final_filename_fmt, sizeof(vs->avf->filename)); if (hls->flags & HLS_SECOND_LEVEL_SEGMENT_SIZE) { char * filename = av_strdup(vs->avf->filename); // %%s will be %s after strftime if (!filename) { av_free(en); return AVERROR(ENOMEM); } if (replace_int_data_in_filename(vs->avf->filename, sizeof(vs->avf->filename), filename, 's', pos + size) < 1) { av_log(hls, AV_LOG_ERROR, \"Invalid second level segment filename template '%s', \" \"you can try to remove second_level_segment_size flag\\n\", filename); av_free(filename); av_free(en); return AVERROR(EINVAL); } av_free(filename); } if (hls->flags & HLS_SECOND_LEVEL_SEGMENT_DURATION) { char * filename = av_strdup(vs->avf->filename); // %%t will be %t after strftime if (!filename) { av_free(en); return AVERROR(ENOMEM); } if (replace_int_data_in_filename(vs->avf->filename, sizeof(vs->avf->filename), filename, 't', (int64_t)round(duration * HLS_MICROSECOND_UNIT)) < 1) { av_log(hls, AV_LOG_ERROR, \"Invalid second level segment filename template '%s', \" \"you can try to remove second_level_segment_time flag\\n\", filename); av_free(filename); av_free(en); return AVERROR(EINVAL); } av_free(filename); } } return 0; }"} {"target": 0, "idx": 13167, "func": "static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { MachineState *machine = 
MACHINE(OBJECT(hotplug_dev)); MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev); Error *local_err = NULL; CPUCore *cc = CPU_CORE(dev); sPAPRCPUCore *sc = SPAPR_CPU_CORE(dev); char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model); const char *type = object_get_typename(OBJECT(dev)); CPUArchId *core_slot; int node_id; int index; if (dev->hotplugged && !mc->has_hotpluggable_cpus) { error_setg(&local_err, \"CPU hotplug not supported for this machine\"); goto out; } if (strcmp(base_core_type, type)) { error_setg(&local_err, \"CPU core type should be %s\", base_core_type); goto out; } if (cc->core_id % smp_threads) { error_setg(&local_err, \"invalid core id %d\", cc->core_id); goto out; } if (cc->nr_threads != smp_threads) { error_setg(errp, \"invalid nr-threads %d, must be %d\", cc->nr_threads, smp_threads); return; } core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index); if (!core_slot) { error_setg(&local_err, \"core id %d out of range\", cc->core_id); goto out; } if (core_slot->cpu) { error_setg(&local_err, \"core %d already populated\", cc->core_id); goto out; } node_id = numa_get_node_for_cpu(cc->core_id); if (node_id == nb_numa_nodes) { /* by default CPUState::numa_node was 0 if it's not set via CLI * keep it this way for now but in future we probably should * refuse to start up with incomplete numa mapping */ node_id = 0; } if (sc->node_id == CPU_UNSET_NUMA_NODE_ID) { sc->node_id = node_id; } else if (sc->node_id != node_id) { error_setg(&local_err, \"node-id %d must match numa node specified\" \"with -numa option for cpu-index %d\", sc->node_id, cc->core_id); goto out; } out: g_free(base_core_type); error_propagate(errp, local_err); }"} {"target": 0, "idx": 13176, "func": "void sparc_iommu_memory_rw(void *opaque, target_phys_addr_t addr, uint8_t *buf, int len, int is_write) { int l; uint32_t flags; target_phys_addr_t page, phys_addr; while (len > 0) { page = addr & TARGET_PAGE_MASK; l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; flags = iommu_page_get_flags(opaque, page); if (!(flags & IOPTE_VALID)) return; phys_addr = iommu_translate_pa(opaque, addr, flags); if (is_write) { if (!(flags & IOPTE_WRITE)) return; cpu_physical_memory_write(phys_addr, buf, len); } else { cpu_physical_memory_read(phys_addr, buf, len); } len -= l; buf += l; addr += l; } }"} {"target": 0, "idx": 13180, "func": "void *qemu_try_blockalign0(BlockDriverState *bs, size_t size) { void *mem = qemu_try_blockalign(bs, size); if (mem) { memset(mem, 0, size); } return mem; }"} {"target": 0, "idx": 13182, "func": "static void test_qemu_strtoul_underflow(void) { const char *str = \"-99999999999999999999999999999999999999999999\"; char f = 'X'; const char *endptr = &f; unsigned long res = 999; int err; err = qemu_strtoul(str, &endptr, 0, &res); g_assert_cmpint(err, ==, -ERANGE); g_assert_cmpint(res, ==, -1ul); g_assert(endptr == str + strlen(str)); }"} {"target": 0, "idx": 13186, "func": "static void do_downmix(AC3DecodeContext *ctx) { int from = ctx->bsi.acmod; int to = ctx->output; switch (from) { case AC3_INPUT_DUALMONO: switch (to) { case AC3_OUTPUT_MONO: mix_dualmono_to_mono(ctx); break; case AC3_OUTPUT_STEREO: /* We Assume that sum of both mono channels is requested */ mix_dualmono_to_stereo(ctx); break; } break; case AC3_INPUT_MONO: switch (to) { case AC3_OUTPUT_STEREO: upmix_mono_to_stereo(ctx); break; } break; case AC3_INPUT_STEREO: switch (to) { case AC3_OUTPUT_MONO: mix_stereo_to_mono(ctx); break; } break; case AC3_INPUT_3F: switch (to) { case AC3_OUTPUT_MONO: 
mix_3f_to_mono(ctx); break; case AC3_OUTPUT_STEREO: mix_3f_to_stereo(ctx); break; } break; case AC3_INPUT_2F_1R: switch (to) { case AC3_OUTPUT_MONO: mix_2f_1r_to_mono(ctx); break; case AC3_OUTPUT_STEREO: mix_2f_1r_to_stereo(ctx); break; case AC3_OUTPUT_DOLBY: mix_2f_1r_to_dolby(ctx); break; } break; case AC3_INPUT_3F_1R: switch (to) { case AC3_OUTPUT_MONO: mix_3f_1r_to_mono(ctx); break; case AC3_OUTPUT_STEREO: mix_3f_1r_to_stereo(ctx); break; case AC3_OUTPUT_DOLBY: mix_3f_1r_to_dolby(ctx); break; } break; case AC3_INPUT_2F_2R: switch (to) { case AC3_OUTPUT_MONO: mix_2f_2r_to_mono(ctx); break; case AC3_OUTPUT_STEREO: mix_2f_2r_to_stereo(ctx); break; case AC3_OUTPUT_DOLBY: mix_2f_2r_to_dolby(ctx); break; } break; case AC3_INPUT_3F_2R: switch (to) { case AC3_OUTPUT_MONO: mix_3f_2r_to_mono(ctx); break; case AC3_OUTPUT_STEREO: mix_3f_2r_to_stereo(ctx); break; case AC3_OUTPUT_DOLBY: mix_3f_2r_to_dolby(ctx); break; } break; } }"} {"target": 1, "idx": 13213, "func": "static int get_avc_nalsize(H264Context *h, const uint8_t *buf, int buf_size, int *buf_index) { int i, nalsize = 0; if (*buf_index >= buf_size - h->nal_length_size) return -1; for (i = 0; i < h->nal_length_size; i++) nalsize = (nalsize << 8) | buf[(*buf_index)++]; if (nalsize <= 0 || nalsize > buf_size - *buf_index) { av_log(h->avctx, AV_LOG_ERROR, \"AVC: nal size %d\\n\", nalsize); return -1; } return nalsize; }"} {"target": 1, "idx": 13223, "func": "static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, int stride, uint8_t *src, uint8_t *ref) { uint8_t *code, *data, *mv, *msk, *tmp, *tmp2; int i, j, k; int type, x, y, d, d2; uint32_t mask; code = src + 12; data = code + ((avctx->width * avctx->height) >> 4); mv = data + AV_RB32(src + 0); msk = mv + AV_RB32(src + 4); for(j = 0; j < avctx->height; j += 4){ for(i = 0; i < avctx->width; i += 4){ tmp = dst + i; tmp2 = ref + i; type = *code++; switch(type){ case 4: // motion compensation x = (*mv) >> 4; if(x & 8) x = 8 - x; y = (*mv++) & 0xF; if(y & 8) y = 8 - y; if (i < -x || avctx->width - i - 4 < x || j < -y || avctx->height - j - 4 < y) { av_log(avctx, AV_LOG_ERROR, \"MV %d %d out of bounds\\n\", x,y); return AVERROR_INVALIDDATA; } tmp2 += x + y*stride; case 0: // skip case 5: // skip in method 12 for(y = 0; y < 4; y++){ memcpy(tmp, tmp2, 4); tmp += stride; tmp2 += stride; } break; case 1: // masked change case 10: // masked change with only half of pixels changed case 11: // cases 10-15 are for method 12 only case 12: case 13: case 14: case 15: if(type == 1){ mask = AV_RB16(msk); msk += 2; }else{ type -= 10; mask = ((msk[0] & 0xF0) << shift1[type]) | ((msk[0] & 0xF) << shift2[type]); msk++; } for(y = 0; y < 4; y++){ for(x = 0; x < 4; x++){ tmp[x] = (mask & 0x8000) ? 
*data++ : tmp2[x]; mask <<= 1; } tmp += stride; tmp2 += stride; } break; case 2: // fill block for(y = 0; y < 4; y++){ memset(tmp, data[0], 4); tmp += stride; } data++; break; case 3: // raw block for(y = 0; y < 4; y++){ memcpy(tmp, data, 4); data += 4; tmp += stride; } break; case 8: // subblocks - method 13 only mask = *msk++; for(k = 0; k < 4; k++){ d = ((k & 1) << 1) + ((k & 2) * stride); d2 = ((k & 1) << 1) + ((k & 2) * stride); tmp2 = ref + i + d2; switch(mask & 0xC0){ case 0x80: // motion compensation x = (*mv) >> 4; if(x & 8) x = 8 - x; y = (*mv++) & 0xF; if(y & 8) y = 8 - y; if (i + 2*(k & 1) < -x || avctx->width - i - 2*(k & 1) - 2 < x || j + (k & 2) < -y || avctx->height - j - (k & 2) - 2 < y) { av_log(avctx, AV_LOG_ERROR, \"MV %d %d out of bounds\\n\", x,y); return AVERROR_INVALIDDATA; } tmp2 += x + y*stride; case 0x00: // skip tmp[d + 0 ] = tmp2[0]; tmp[d + 1 ] = tmp2[1]; tmp[d + 0 + stride] = tmp2[0 + stride]; tmp[d + 1 + stride] = tmp2[1 + stride]; break; case 0x40: // fill tmp[d + 0 ] = data[0]; tmp[d + 1 ] = data[0]; tmp[d + 0 + stride] = data[0]; tmp[d + 1 + stride] = data[0]; data++; break; case 0xC0: // raw tmp[d + 0 ] = *data++; tmp[d + 1 ] = *data++; tmp[d + 0 + stride] = *data++; tmp[d + 1 + stride] = *data++; break; } mask <<= 2; } break; case 32: // vector quantization - 2 colors mask = AV_RB16(msk); msk += 2; for(y = 0; y < 4; y++){ for(x = 0; x < 4; x++){ tmp[x] = data[mask & 1]; mask >>= 1; } tmp += stride; tmp2 += stride; } data += 2; break; case 33: // vector quantization - 3 or 4 colors case 34: mask = AV_RB32(msk); msk += 4; for(y = 0; y < 4; y++){ for(x = 0; x < 4; x++){ tmp[x] = data[mask & 3]; mask >>= 2; } tmp += stride; tmp2 += stride; } data += type - 30; break; default: av_log(avctx, AV_LOG_ERROR, \"Unknown opcode %d\\n\", type); return AVERROR_INVALIDDATA; } } dst += stride * 4; ref += stride * 4; } return 0; }"} {"target": 1, "idx": 13225, "func": "void palette8tobgr32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette) { long i; for(i=0; iirqs = irqs; }"} {"target": 0, "idx": 13250, "func": "static int flic_decode_frame_15_16BPP(AVCodecContext *avctx, void *data, int *got_frame, const uint8_t *buf, int buf_size) { /* Note, the only difference between the 15Bpp and 16Bpp */ /* Format is the pixel format, the packets are processed the same. 
*/ FlicDecodeContext *s = avctx->priv_data; GetByteContext g2; int pixel_ptr; unsigned char palette_idx1; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j, ret; int lines; int compressed_lines; signed short line_packets; int y_ptr; int byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; int pixel; unsigned int pixel_limit; bytestream2_init(&g2, buf, buf_size); if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) return ret; pixels = s->frame.data[0]; pixel_limit = s->avctx->height * s->frame.linesize[0]; frame_size = bytestream2_get_le32(&g2); bytestream2_skip(&g2, 2); /* skip the magic number */ num_chunks = bytestream2_get_le16(&g2); bytestream2_skip(&g2, 8); /* skip padding */ if (frame_size > buf_size) frame_size = buf_size; frame_size -= 16; /* iterate through the chunks */ while ((frame_size > 0) && (num_chunks > 0)) { int stream_ptr_after_chunk; chunk_size = bytestream2_get_le32(&g2); if (chunk_size > frame_size) { av_log(avctx, AV_LOG_WARNING, \"Invalid chunk_size = %u > frame_size = %u\\n\", chunk_size, frame_size); chunk_size = frame_size; } stream_ptr_after_chunk = bytestream2_tell(&g2) - 4 + chunk_size; chunk_type = bytestream2_get_le16(&g2); switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: /* For some reason, it seems that non-palettized flics do * include one of these chunks in their first frame. * Why I do not know, it seems rather extraneous. */ av_dlog(avctx, \"Unexpected Palette chunk %d in non-palettized FLC\\n\", chunk_type); bytestream2_skip(&g2, chunk_size - 6); break; case FLI_DELTA: case FLI_DTA_LC: y_ptr = 0; compressed_lines = bytestream2_get_le16(&g2); while (compressed_lines > 0) { if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk) break; line_packets = bytestream2_get_le16(&g2); if (line_packets < 0) { line_packets = -line_packets; y_ptr += line_packets * s->frame.linesize[0]; } else { compressed_lines--; pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk) break; pixel_skip = bytestream2_get_byte(&g2); pixel_ptr += (pixel_skip*2); /* Pixel is 2 bytes wide */ pixel_countdown -= pixel_skip; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run < 0) { byte_run = -byte_run; pixel = bytestream2_get_le16(&g2); CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown -= 2) { *((signed short*)(&pixels[pixel_ptr])) = pixel; pixel_ptr += 2; } } else { if (bytestream2_tell(&g2) + 2*byte_run > stream_ptr_after_chunk) break; CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { *((signed short*)(&pixels[pixel_ptr])) = bytestream2_get_le16(&g2); pixel_ptr += 2; } } } y_ptr += s->frame.linesize[0]; } } break; case FLI_LC: av_log(avctx, AV_LOG_ERROR, \"Unexpected FLI_LC chunk in non-palettized FLC\\n\"); bytestream2_skip(&g2, chunk_size - 6); break; case FLI_BLACK: /* set the whole frame to 0x0000 which is black in both 15Bpp and 16Bpp modes. 
*/ memset(pixels, 0x0000, s->frame.linesize[0] * s->avctx->height); break; case FLI_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ bytestream2_skip(&g2, 1); pixel_countdown = (s->avctx->width * 2); while (pixel_countdown > 0) { if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk) break; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run > 0) { palette_idx1 = bytestream2_get_byte(&g2); CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d) (linea%d)\\n\", pixel_countdown, lines); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; if (bytestream2_tell(&g2) + byte_run > stream_ptr_after_chunk) break; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { palette_idx1 = bytestream2_get_byte(&g2); pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d) at line %d\\n\", pixel_countdown, lines); } } } /* Now FLX is strange, in that it is \"byte\" as opposed to \"pixel\" run length compressed. * This does not give us any good opportunity to perform word endian conversion * during decompression. So if it is required (i.e., this is not a LE target, we do * a second pass over the line here, swapping the bytes. */ #if HAVE_BIGENDIAN pixel_ptr = y_ptr; pixel_countdown = s->avctx->width; while (pixel_countdown > 0) { *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&buf[pixel_ptr]); pixel_ptr += 2; } #endif y_ptr += s->frame.linesize[0]; } break; case FLI_DTA_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ bytestream2_skip(&g2, 1); pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */ while (pixel_countdown > 0) { if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk) break; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run > 0) { pixel = bytestream2_get_le16(&g2); CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++) { *((signed short*)(&pixels[pixel_ptr])) = pixel; pixel_ptr += 2; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d)\\n\", pixel_countdown); } } else { /* copy pixels if byte_run < 0 */ byte_run = -byte_run; if (bytestream2_tell(&g2) + 2 * byte_run > stream_ptr_after_chunk) break; CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++) { *((signed short*)(&pixels[pixel_ptr])) = bytestream2_get_le16(&g2); pixel_ptr += 2; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d)\\n\", pixel_countdown); } } } y_ptr += s->frame.linesize[0]; } break; case FLI_COPY: case FLI_DTA_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 > (unsigned int)(s->avctx->width * s->avctx->height)*2) { av_log(avctx, AV_LOG_ERROR, \"In chunk FLI_COPY : source data (%d bytes) \" \\ \"bigger than image, skipping chunk\\n\", chunk_size - 6); bytestream2_skip(&g2, chunk_size - 6); } else { for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height; y_ptr += s->frame.linesize[0]) { pixel_countdown = s->avctx->width; pixel_ptr = 0; while (pixel_countdown > 0) { *((signed short*)(&pixels[y_ptr + pixel_ptr])) = bytestream2_get_le16(&g2); pixel_ptr += 2; pixel_countdown--; 
} } } break; case FLI_MINI: /* some sort of a thumbnail? disregard this chunk... */ bytestream2_skip(&g2, chunk_size - 6); break; default: av_log(avctx, AV_LOG_ERROR, \"Unrecognized chunk type: %d\\n\", chunk_type); break; } frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1, possibly); if it doesn't, issue a warning */ if ((bytestream2_get_bytes_left(&g2) != 0) && (bytestream2_get_bytes_left(&g2) != 1)) av_log(avctx, AV_LOG_ERROR, \"Processed FLI chunk where chunk size = %d \" \\ \"and final chunk ptr = %d\\n\", buf_size, bytestream2_tell(&g2)); if ((ret = av_frame_ref(data, &s->frame)) < 0) return ret; *got_frame = 1; return buf_size; }"} {"target": 1, "idx": 13270, "func": "void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size) { int l; if (f->last_error) { return; } while (size > 0) { l = IO_BUF_SIZE - f->buf_index; if (l > size) { l = size; } memcpy(f->buf + f->buf_index, buf, l); f->bytes_xfer += l; if (f->ops->writev_buffer) { add_to_iovec(f, f->buf + f->buf_index, l); } f->buf_index += l; if (f->buf_index == IO_BUF_SIZE) { qemu_fflush(f); } if (qemu_file_get_error(f)) { break; } buf += l; size -= l; } }"} {"target": 1, "idx": 13278, "func": "int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf) { switch (mode) { case VERT_PRED: if (!mb_x && mb_y) { *copy_buf = 1; return mode; } /* fall-through */ case DIAG_DOWN_LEFT_PRED: case VERT_LEFT_PRED: return !mb_y ? DC_127_PRED : mode; case HOR_PRED: if (!mb_y) { *copy_buf = 1; return mode; } /* fall-through */ case HOR_UP_PRED: return !mb_x ? DC_129_PRED : mode; case TM_VP8_PRED: return check_tm_pred4x4_mode(mode, mb_x, mb_y); case DC_PRED: /* 4x4 DC doesn't use the same \"H.264-style\" exceptions * as 16x16/8x8 DC */ case DIAG_DOWN_RIGHT_PRED: case VERT_RIGHT_PRED: case HOR_DOWN_PRED: if (!mb_y || !mb_x) *copy_buf = 1; return mode; } return mode; }"} {"target": 0, "idx": 13282, "func": "static int open_input(struct variant *var) { struct segment *seg = var->segments[var->cur_seq_no - var->start_seq_no]; if (seg->key_type == KEY_NONE) { return ffurl_open(&var->input, seg->url, AVIO_FLAG_READ, &var->parent->interrupt_callback, NULL); } else if (seg->key_type == KEY_AES_128) { char iv[33], key[33], url[MAX_URL_SIZE]; int ret; if (strcmp(seg->key, var->key_url)) { URLContext *uc; if (ffurl_open(&uc, seg->key, AVIO_FLAG_READ, &var->parent->interrupt_callback, NULL) == 0) { if (ffurl_read_complete(uc, var->key, sizeof(var->key)) != sizeof(var->key)) { av_log(NULL, AV_LOG_ERROR, \"Unable to read key file %s\\n\", seg->key); } ffurl_close(uc); } else { av_log(NULL, AV_LOG_ERROR, \"Unable to open key file %s\\n\", seg->key); } av_strlcpy(var->key_url, seg->key, sizeof(var->key_url)); } ff_data_to_hex(iv, seg->iv, sizeof(seg->iv), 0); ff_data_to_hex(key, var->key, sizeof(var->key), 0); iv[32] = key[32] = '\\0'; if (strstr(seg->url, \"://\")) snprintf(url, sizeof(url), \"crypto+%s\", seg->url); else snprintf(url, sizeof(url), \"crypto:%s\", seg->url); if ((ret = ffurl_alloc(&var->input, url, AVIO_FLAG_READ, &var->parent->interrupt_callback)) < 0) return ret; av_opt_set(var->input->priv_data, \"key\", key, 0); av_opt_set(var->input->priv_data, \"iv\", iv, 0); if ((ret = ffurl_connect(var->input, NULL)) < 0) { ffurl_close(var->input); var->input = NULL; return ret; } return 0; } return AVERROR(ENOSYS); }"} {"target": 1, "idx": 13291, "func": "static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis, int slot) { AHCIDevice *ad = 
&s->dev[port]; IDEState *ide_state = &ad->port.ifs[0]; NCQFrame *ncq_fis = (NCQFrame*)cmd_fis; uint8_t tag = ncq_fis->tag >> 3; NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag]; if (ncq_tfs->used) { /* error - already in use */ fprintf(stderr, \"%s: tag %d already used\\n\", __FUNCTION__, tag); return; } ncq_tfs->used = 1; ncq_tfs->drive = ad; ncq_tfs->slot = slot; ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) | ((uint64_t)ncq_fis->lba4 << 32) | ((uint64_t)ncq_fis->lba3 << 24) | ((uint64_t)ncq_fis->lba2 << 16) | ((uint64_t)ncq_fis->lba1 << 8) | (uint64_t)ncq_fis->lba0; /* Note: We calculate the sector count, but don't currently rely on it. * The total size of the DMA buffer tells us the transfer size instead. */ ncq_tfs->sector_count = ((uint16_t)ncq_fis->sector_count_high << 8) | ncq_fis->sector_count_low; DPRINTF(port, \"NCQ transfer LBA from %\"PRId64\" to %\"PRId64\", \" \"drive max %\"PRId64\"\\n\", ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 2, ide_state->nb_sectors - 1); ahci_populate_sglist(ad, &ncq_tfs->sglist, 0); ncq_tfs->tag = tag; switch(ncq_fis->command) { case READ_FPDMA_QUEUED: DPRINTF(port, \"NCQ reading %d sectors from LBA %\"PRId64\", \" \"tag %d\\n\", ncq_tfs->sector_count-1, ncq_tfs->lba, ncq_tfs->tag); DPRINTF(port, \"tag %d aio read %\"PRId64\"\\n\", ncq_tfs->tag, ncq_tfs->lba); dma_acct_start(ide_state->blk, &ncq_tfs->acct, &ncq_tfs->sglist, BLOCK_ACCT_READ); ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist, ncq_tfs->lba, ncq_cb, ncq_tfs); break; case WRITE_FPDMA_QUEUED: DPRINTF(port, \"NCQ writing %d sectors to LBA %\"PRId64\", tag %d\\n\", ncq_tfs->sector_count-1, ncq_tfs->lba, ncq_tfs->tag); DPRINTF(port, \"tag %d aio write %\"PRId64\"\\n\", ncq_tfs->tag, ncq_tfs->lba); dma_acct_start(ide_state->blk, &ncq_tfs->acct, &ncq_tfs->sglist, BLOCK_ACCT_WRITE); ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist, ncq_tfs->lba, ncq_cb, ncq_tfs); break; default: if (is_ncq(cmd_fis[2])) { DPRINTF(port, \"error: unsupported NCQ command (0x%02x) received\\n\", cmd_fis[2]); } else { DPRINTF(port, \"error: tried to process non-NCQ command as NCQ\\n\"); } qemu_sglist_destroy(&ncq_tfs->sglist); } }"} {"target": 1, "idx": 13293, "func": "static void pl190_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); k->init = pl190_init; dc->no_user = 1; dc->reset = pl190_reset; dc->vmsd = &vmstate_pl190; }"} {"target": 1, "idx": 13302, "func": "static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *frame = data; const uint8_t *buf = avpkt->data; const uint8_t *buf_end = buf + avpkt->size; KgvContext * const c = avctx->priv_data; int offsets[8]; uint8_t *out, *prev; int outcnt = 0, maxcnt; int w, h, i, res; if (avpkt->size < 2) w = (buf[0] + 1) * 8; h = (buf[1] + 1) * 8; buf += 2; if (w != avctx->width || h != avctx->height) { av_freep(&c->frame_buffer); av_freep(&c->last_frame_buffer); if ((res = ff_set_dimensions(avctx, w, h)) < 0) return res; } if (!c->frame_buffer) { c->frame_buffer = av_mallocz(avctx->width * avctx->height * 2); c->last_frame_buffer = av_mallocz(avctx->width * avctx->height * 2); if (!c->frame_buffer || !c->last_frame_buffer) { decode_flush(avctx); return AVERROR(ENOMEM); } } maxcnt = w * h; if ((res = ff_get_buffer(avctx, frame, 0)) < 0) return res; out = (uint8_t*)c->frame_buffer; prev = (uint8_t*)c->last_frame_buffer; for (i = 0; i < 8; i++) offsets[i] = -1; while (outcnt < maxcnt && buf_end - 2 >= buf) { 
int code = AV_RL16(buf); buf += 2; if (!(code & 0x8000)) { AV_WN16A(&out[2 * outcnt], code); // rgb555 pixel coded directly outcnt++; } else { int count; if ((code & 0x6000) == 0x6000) { // copy from previous frame int oidx = (code >> 10) & 7; int start; count = (code & 0x3FF) + 3; if (offsets[oidx] < 0) { if (buf_end - 3 < buf) break; offsets[oidx] = AV_RL24(buf); buf += 3; } start = (outcnt + offsets[oidx]) % maxcnt; if (maxcnt - start < count || maxcnt - outcnt < count) break; if (!prev) { av_log(avctx, AV_LOG_ERROR, \"Frame reference does not exist\\n\"); break; } memcpy(out + 2 * outcnt, prev + 2 * start, 2 * count); } else { // copy from earlier in this frame int offset = (code & 0x1FFF) + 1; if (!(code & 0x6000)) { count = 2; } else if ((code & 0x6000) == 0x2000) { count = 3; } else { if (buf_end - 1 < buf) break; count = 4 + *buf++; } if (outcnt < offset || maxcnt - outcnt < count) break; av_memcpy_backptr(out + 2 * outcnt, 2 * offset, 2 * count); } outcnt += count; } } if (outcnt - maxcnt) av_log(avctx, AV_LOG_DEBUG, \"frame finished with %d diff\\n\", outcnt - maxcnt); av_image_copy_plane(frame->data[0], frame->linesize[0], (const uint8_t*)c->frame_buffer, avctx->width * 2, avctx->width * 2, avctx->height); FFSWAP(uint16_t *, c->frame_buffer, c->last_frame_buffer); *got_frame = 1; return avpkt->size; }"} {"target": 1, "idx": 13303, "func": "static void spapr_finalize_fdt(sPAPREnvironment *spapr, hwaddr fdt_addr, hwaddr rtas_addr, hwaddr rtas_size) { int ret, i; size_t cb = 0; char *bootlist; void *fdt; sPAPRPHBState *phb; fdt = g_malloc(FDT_MAX_SIZE); /* open out the base tree into a temp buffer for the final tweaks */ _FDT((fdt_open_into(spapr->fdt_skel, fdt, FDT_MAX_SIZE))); ret = spapr_populate_memory(spapr, fdt); if (ret < 0) { fprintf(stderr, \"couldn't setup memory nodes in fdt\\n\"); exit(1); } ret = spapr_populate_vdevice(spapr->vio_bus, fdt); if (ret < 0) { fprintf(stderr, \"couldn't setup vio devices in fdt\\n\"); exit(1); } QLIST_FOREACH(phb, &spapr->phbs, list) { ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt); } if (ret < 0) { fprintf(stderr, \"couldn't setup PCI devices in fdt\\n\"); exit(1); } /* RTAS */ ret = spapr_rtas_device_tree_setup(fdt, rtas_addr, rtas_size); if (ret < 0) { fprintf(stderr, \"Couldn't set up RTAS device tree properties\\n\"); } /* Advertise NUMA via ibm,associativity */ ret = spapr_fixup_cpu_dt(fdt, spapr); if (ret < 0) { fprintf(stderr, \"Couldn't finalize CPU device tree properties\\n\"); } bootlist = get_boot_devices_list(&cb, true); if (cb && bootlist) { int offset = fdt_path_offset(fdt, \"/chosen\"); if (offset < 0) { exit(1); } for (i = 0; i < cb; i++) { if (bootlist[i] == '\\n') { bootlist[i] = ' '; } } ret = fdt_setprop_string(fdt, offset, \"qemu,boot-list\", bootlist); } if (!spapr->has_graphics) { spapr_populate_chosen_stdout(fdt, spapr->vio_bus); } _FDT((fdt_pack(fdt))); if (fdt_totalsize(fdt) > FDT_MAX_SIZE) { hw_error(\"FDT too big ! 
0x%x bytes (max is 0x%x)\\n\", fdt_totalsize(fdt), FDT_MAX_SIZE); exit(1); } cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt)); g_free(fdt); }"} {"target": 1, "idx": 13307, "func": "void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src) { dst->x = av_clip(src->x, s->mv_min.x, s->mv_max.x); dst->y = av_clip(src->y, s->mv_min.y, s->mv_max.y); }"} {"target": 1, "idx": 13324, "func": "static void pxa2xx_gpio_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { PXA2xxGPIOInfo *s = (PXA2xxGPIOInfo *) opaque; int bank; if (offset >= 0x200) return; bank = pxa2xx_gpio_regs[offset].bank; switch (pxa2xx_gpio_regs[offset].reg) { case GPDR: /* GPIO Pin-Direction registers */ s->dir[bank] = value; pxa2xx_gpio_handler_update(s); break; case GPSR: /* GPIO Pin-Output Set registers */ s->olevel[bank] |= value; pxa2xx_gpio_handler_update(s); s->gpsr[bank] = value; break; case GPCR: /* GPIO Pin-Output Clear registers */ s->olevel[bank] &= ~value; pxa2xx_gpio_handler_update(s); break; case GRER: /* GPIO Rising-Edge Detect Enable registers */ s->rising[bank] = value; break; case GFER: /* GPIO Falling-Edge Detect Enable registers */ s->falling[bank] = value; break; case GAFR_L: /* GPIO Alternate Function registers */ s->gafr[bank * 2] = value; break; case GAFR_U: /* GPIO Alternate Function registers */ s->gafr[bank * 2 + 1] = value; break; case GEDR: /* GPIO Edge Detect Status registers */ s->status[bank] &= ~value; pxa2xx_gpio_irq_update(s); break; default: hw_error(\"%s: Bad offset \" REG_FMT \"\\n\", __FUNCTION__, offset); } }"} {"target": 1, "idx": 13334, "func": "AVFilterBufferRef *avfilter_default_get_audio_buffer(AVFilterLink *link, int perms, int nb_samples) { AVFilterBufferRef *samplesref = NULL; int linesize[8]; uint8_t *data[8]; int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout); /* Calculate total buffer size, round to multiple of 16 to be SIMD friendly */ if (av_samples_alloc(data, linesize, nb_channels, nb_samples, link->format, 16) < 0) return NULL; samplesref = avfilter_get_audio_buffer_ref_from_arrays(data, linesize, perms, nb_samples, link->format, link->channel_layout, link->planar); if (!samplesref) { av_free(data[0]); return NULL; } return samplesref; }"} {"target": 1, "idx": 13337, "func": "opts_check_struct(Visitor *v, Error **errp) { OptsVisitor *ov = to_ov(v); GHashTableIter iter; GQueue *any; if (ov->depth > 0) { return; } /* we should have processed all (distinct) QemuOpt instances */ g_hash_table_iter_init(&iter, ov->unprocessed_opts); if (g_hash_table_iter_next(&iter, NULL, (void **)&any)) { const QemuOpt *first; first = g_queue_peek_head(any); error_setg(errp, QERR_INVALID_PARAMETER, first->name); } }"} {"target": 1, "idx": 13346, "func": "static int parse_chr(DeviceState *dev, Property *prop, const char *str) { CharDriverState **ptr = qdev_get_prop_ptr(dev, prop); *ptr = qemu_chr_find(str); if (*ptr == NULL) { return -ENOENT; } if ((*ptr)->assigned) { return -EEXIST; } (*ptr)->assigned = 1; return 0; }"} {"target": 0, "idx": 13360, "func": "int load_uimage(const char *filename, target_phys_addr_t *ep, target_phys_addr_t *loadaddr, int *is_linux) { int fd; int size; uboot_image_header_t h; uboot_image_header_t *hdr = &h; uint8_t *data = NULL; int ret = -1; fd = open(filename, O_RDONLY | O_BINARY); if (fd < 0) return -1; size = read(fd, hdr, sizeof(uboot_image_header_t)); if (size < 0) goto out; bswap_uboot_header(hdr); if (hdr->ih_magic != IH_MAGIC) goto out; /* TODO: Implement other image types. 
*/ if (hdr->ih_type != IH_TYPE_KERNEL) { fprintf(stderr, \"Can only load u-boot image type \\\"kernel\\\"\\n\"); goto out; } switch (hdr->ih_comp) { case IH_COMP_NONE: case IH_COMP_GZIP: break; default: fprintf(stderr, \"Unable to load u-boot images with compression type %d\\n\", hdr->ih_comp); goto out; } /* TODO: Check CPU type. */ if (is_linux) { if (hdr->ih_os == IH_OS_LINUX) *is_linux = 1; else *is_linux = 0; } *ep = hdr->ih_ep; data = qemu_malloc(hdr->ih_size); if (read(fd, data, hdr->ih_size) != hdr->ih_size) { fprintf(stderr, \"Error reading file\\n\"); goto out; } if (hdr->ih_comp == IH_COMP_GZIP) { uint8_t *compressed_data; size_t max_bytes; ssize_t bytes; compressed_data = data; max_bytes = UBOOT_MAX_GUNZIP_BYTES; data = qemu_malloc(max_bytes); bytes = gunzip(data, max_bytes, compressed_data, hdr->ih_size); qemu_free(compressed_data); if (bytes < 0) { fprintf(stderr, \"Unable to decompress gzipped image!\\n\"); goto out; } hdr->ih_size = bytes; } cpu_physical_memory_write_rom(hdr->ih_load, data, hdr->ih_size); if (loadaddr) *loadaddr = hdr->ih_load; ret = hdr->ih_size; out: if (data) qemu_free(data); close(fd); return ret; }"} {"target": 0, "idx": 13361, "func": "static int aic_decode_coeffs(GetBitContext *gb, int16_t *dst, int band, int slice_width) { int has_skips, coeff_type, coeff_bits, skip_type, skip_bits; const int num_coeffs = aic_num_band_coeffs[band]; const uint8_t *scan = aic_scan[band]; int mb, idx, val; has_skips = get_bits1(gb); coeff_type = get_bits1(gb); coeff_bits = get_bits(gb, 3); if (has_skips) { skip_type = get_bits1(gb); skip_bits = get_bits(gb, 3); for (mb = 0; mb < slice_width; mb++) { idx = -1; do { GET_CODE(val, skip_type, skip_bits); idx += val + 1; if (idx >= num_coeffs) break; GET_CODE(val, coeff_type, coeff_bits); val++; if (val >= 0x10000) return AVERROR_INVALIDDATA; dst[scan[idx]] = val; } while (idx < num_coeffs - 1); dst += num_coeffs; } } else { for (mb = 0; mb < slice_width; mb++) { for (idx = 0; idx < num_coeffs; idx++) { GET_CODE(val, coeff_type, coeff_bits); if (val >= 0x10000) return AVERROR_INVALIDDATA; dst[scan[idx]] = val; } dst += num_coeffs; } } return 0; }"} {"target": 0, "idx": 13367, "func": "void fpu_dump_state(CPUState *env, FILE *f, int (*fpu_fprintf)(FILE *f, const char *fmt, ...), int flags) { int i; int is_fpu64 = !!(env->CP0_Status & (1 << CP0St_FR)); #define printfpr(fp) \\ do { \\ if (is_fpu64) \\ fpu_fprintf(f, \"w:%08x d:%016lx fd:%13g fs:%13g psu: %13g\\n\", \\ (fp)->w[FP_ENDIAN_IDX], (fp)->d, (fp)->fd, \\ (fp)->fs[FP_ENDIAN_IDX], (fp)->fs[!FP_ENDIAN_IDX]); \\ else { \\ fpr_t tmp; \\ tmp.w[FP_ENDIAN_IDX] = (fp)->w[FP_ENDIAN_IDX]; \\ tmp.w[!FP_ENDIAN_IDX] = ((fp) + 1)->w[FP_ENDIAN_IDX]; \\ fpu_fprintf(f, \"w:%08x d:%016lx fd:%13g fs:%13g psu:%13g\\n\", \\ tmp.w[FP_ENDIAN_IDX], tmp.d, tmp.fd, \\ tmp.fs[FP_ENDIAN_IDX], tmp.fs[!FP_ENDIAN_IDX]); \\ } \\ } while(0) fpu_fprintf(f, \"CP1 FCR0 0x%08x FCR31 0x%08x SR.FR %d fp_status 0x%08x(0x%02x)\\n\", env->fcr0, env->fcr31, is_fpu64, env->fp_status, get_float_exception_flags(&env->fp_status)); fpu_fprintf(f, \"FT0: \"); printfpr(&env->ft0); fpu_fprintf(f, \"FT1: \"); printfpr(&env->ft1); fpu_fprintf(f, \"FT2: \"); printfpr(&env->ft2); for (i = 0; i < 32; (is_fpu64) ? 
i++ : (i += 2)) { fpu_fprintf(f, \"%3s: \", fregnames[i]); printfpr(&env->fpr[i]); } #undef printfpr }"} {"target": 0, "idx": 13372, "func": "static int sdp_probe(AVProbeData *p1) { const char *p = p1->buf, *p_end = p1->buf + p1->buf_size; /* we look for a line beginning \"c=IN IP4\" */ while (p < p_end && *p != '\\0') { if (p + sizeof(\"c=IN IP4\") - 1 < p_end && av_strstart(p, \"c=IN IP4\", NULL)) return AVPROBE_SCORE_MAX / 2; while(p < p_end - 1 && *p != '\\n') p++; if (++p >= p_end) break; if (*p == '\\r') p++; } return 0; }"} {"target": 0, "idx": 13373, "func": "uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src, uint64_t len) { const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY; const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC; const uint64_t r0 = env->regs[0]; const uintptr_t ra = GETPC(); CPUState *cs = CPU(s390_env_get_cpu(env)); uint8_t dest_key, dest_as, dest_k, dest_a; uint8_t src_key, src_as, src_k, src_a; uint64_t val; int cc = 0; HELPER_LOG(\"%s dest %\" PRIx64 \", src %\" PRIx64 \", len %\" PRIx64 \"\\n\", __func__, dest, src, len); if (!(env->psw.mask & PSW_MASK_DAT)) { cpu_restore_state(cs, ra); program_interrupt(env, PGM_SPECIAL_OP, 6); } /* OAC (operand access control) for the first operand -> dest */ val = (r0 & 0xffff0000ULL) >> 16; dest_key = (val >> 12) & 0xf; dest_as = (val >> 6) & 0x3; dest_k = (val >> 1) & 0x1; dest_a = val & 0x1; /* OAC (operand access control) for the second operand -> src */ val = (r0 & 0x0000ffffULL); src_key = (val >> 12) & 0xf; src_as = (val >> 6) & 0x3; src_k = (val >> 1) & 0x1; src_a = val & 0x1; if (!dest_k) { dest_key = psw_key; } if (!src_k) { src_key = psw_key; } if (!dest_a) { dest_as = psw_as; } if (!src_a) { src_as = psw_as; } if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) { cpu_restore_state(cs, ra); program_interrupt(env, PGM_SPECIAL_OP, 6); } if (!(env->cregs[0] & CR0_SECONDARY) && (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) { cpu_restore_state(cs, ra); program_interrupt(env, PGM_SPECIAL_OP, 6); } if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) { cpu_restore_state(cs, ra); program_interrupt(env, PGM_PRIVILEGED, 6); } len = wrap_length(env, len); if (len > 4096) { cc = 3; len = 4096; } /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */ if (src_as == AS_ACCREG || dest_as == AS_ACCREG || (env->psw.mask & PSW_MASK_PSTATE)) { qemu_log_mask(LOG_UNIMP, \"%s: AR-mode and PSTATE support missing\\n\", __func__); cpu_restore_state(cs, ra); program_interrupt(env, PGM_ADDRESSING, 6); } /* FIXME: a) LAP * b) Access using correct keys * c) AR-mode */ #ifdef CONFIG_USER_ONLY /* psw keys are never valid in user mode, we will never reach this */ g_assert_not_reached(); #else fast_memmove_as(env, dest, src, len, dest_as, src_as, ra); #endif return cc; }"} {"target": 0, "idx": 13375, "func": "static void unrealize(DeviceState *d, Error **errp) { sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d); Object *root_container; char name[256]; Error *err = NULL; trace_spapr_drc_unrealize(spapr_drc_index(drc)); root_container = container_get(object_get_root(), DRC_CONTAINER_PATH); snprintf(name, sizeof(name), \"%x\", spapr_drc_index(drc)); object_property_del(root_container, name, &err); if (err) { error_report_err(err); object_unref(OBJECT(drc)); } }"} {"target": 0, "idx": 13377, "func": "static void nbd_co_receive_reply(NBDClientSession *s, NBDRequest *request, NBDReply *reply, QEMUIOVector *qiov) { int ret; /* Wait until 
we're woken up by nbd_read_reply_entry. */ qemu_coroutine_yield(); *reply = s->reply; if (reply->handle != request->handle || !s->ioc) { reply->error = EIO; } else { if (qiov && reply->error == 0) { ret = nbd_wr_syncv(s->ioc, qiov->iov, qiov->niov, request->len, true, NULL); if (ret != request->len) { reply->error = EIO; } } /* Tell the read handler to read another header. */ s->reply.handle = 0; } }"} {"target": 0, "idx": 13385, "func": "void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y) { VP8Macroblock *mb_edge[3] = { mb + 2 /* top */, mb - 1 /* left */, mb + 1 /* top-left */ }; enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV }; enum { EDGE_TOP, EDGE_LEFT, EDGE_TOPLEFT }; int idx = CNT_ZERO; int cur_sign_bias = s->sign_bias[mb->ref_frame]; int *sign_bias = s->sign_bias; VP56mv near_mv[4]; uint8_t cnt[4] = { 0 }; VP56RangeCoder *c = &s->c; AV_ZERO32(&near_mv[0]); AV_ZERO32(&near_mv[1]); AV_ZERO32(&near_mv[2]); /* Process MB on top, left and top-left */ #define MV_EDGE_CHECK(n)\\ {\\ VP8Macroblock *edge = mb_edge[n];\\ int edge_ref = edge->ref_frame;\\ if (edge_ref != VP56_FRAME_CURRENT) {\\ uint32_t mv = AV_RN32A(&edge->mv);\\ if (mv) {\\ if (cur_sign_bias != sign_bias[edge_ref]) {\\ /* SWAR negate of the values in mv. */\\ mv = ~mv;\\ mv = ((mv&0x7fff7fff) + 0x00010001) ^ (mv&0x80008000);\\ }\\ if (!n || mv != AV_RN32A(&near_mv[idx]))\\ AV_WN32A(&near_mv[++idx], mv);\\ cnt[idx] += 1 + (n != 2);\\ } else\\ cnt[CNT_ZERO] += 1 + (n != 2);\\ }\\ } MV_EDGE_CHECK(0) MV_EDGE_CHECK(1) MV_EDGE_CHECK(2) mb->partitioning = VP8_SPLITMVMODE_NONE; if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) { mb->mode = VP8_MVMODE_MV; /* If we have three distinct MVs, merge first and last if they're the same */ if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1+EDGE_TOP]) == AV_RN32A(&near_mv[1+EDGE_TOPLEFT])) cnt[CNT_NEAREST] += 1; /* Swap near and nearest if necessary */ if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) { FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]); FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]); } if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) { if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) { /* Choose the best mv out of 0,0 and the nearest mv */ clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]); cnt[CNT_SPLITMV] = ((mb_edge[EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) + (mb_edge[EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 + (mb_edge[EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT); if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) { mb->mode = VP8_MVMODE_SPLIT; mb->mv = mb->bmv[decode_splitmvs(s, c, mb) - 1]; } else { mb->mv.y += read_mv_component(c, s->prob->mvc[0]); mb->mv.x += read_mv_component(c, s->prob->mvc[1]); mb->bmv[0] = mb->mv; } } else { clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]); mb->bmv[0] = mb->mv; } } else { clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]); mb->bmv[0] = mb->mv; } } else { mb->mode = VP8_MVMODE_ZERO; AV_ZERO32(&mb->mv); mb->bmv[0] = mb->mv; } }"} {"target": 0, "idx": 13389, "func": "int ff_h264_alloc_tables(H264Context *h){ MpegEncContext * const s = &h->s; const int big_mb_num= s->mb_stride * (s->mb_height+1); int x,y; FF_ALLOCZ_OR_GOTO(h->s.avctx, h->intra4x4_pred_mode, big_mb_num * 8 * sizeof(uint8_t), fail) FF_ALLOCZ_OR_GOTO(h->s.avctx, h->non_zero_count , big_mb_num * 16 * sizeof(uint8_t), fail) FF_ALLOCZ_OR_GOTO(h->s.avctx, h->slice_table_base , (big_mb_num+s->mb_stride) * sizeof(*h->slice_table_base), fail) 
FF_ALLOCZ_OR_GOTO(h->s.avctx, h->cbp_table, big_mb_num * sizeof(uint16_t), fail) FF_ALLOCZ_OR_GOTO(h->s.avctx, h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t), fail) FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t), fail); FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t), fail); FF_ALLOCZ_OR_GOTO(h->s.avctx, h->direct_table, 32*big_mb_num * sizeof(uint8_t) , fail); memset(h->slice_table_base, -1, (big_mb_num+s->mb_stride) * sizeof(*h->slice_table_base)); h->slice_table= h->slice_table_base + s->mb_stride*2 + 1; FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mb2b_xy , big_mb_num * sizeof(uint32_t), fail); FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mb2b8_xy , big_mb_num * sizeof(uint32_t), fail); for(y=0; ymb_height; y++){ for(x=0; xmb_width; x++){ const int mb_xy= x + y*s->mb_stride; const int b_xy = 4*x + 4*y*h->b_stride; const int b8_xy= 2*x + 2*y*h->b8_stride; h->mb2b_xy [mb_xy]= b_xy; h->mb2b8_xy[mb_xy]= b8_xy; } } s->obmc_scratchpad = NULL; if(!h->dequant4_coeff[0]) init_dequant_tables(h); return 0; fail: free_tables(h); return -1; }"} {"target": 1, "idx": 13396, "func": "void pcie_cap_slot_hotplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { uint8_t *exp_cap; PCIDevice *pci_dev = PCI_DEVICE(dev); pcie_cap_slot_hotplug_common(PCI_DEVICE(hotplug_dev), dev, &exp_cap, errp); /* Don't send event when device is enabled during qemu machine creation: * it is present on boot, no hotplug event is necessary. We do send an * event when the device is disabled later. */ if (!dev->hotplugged) { pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDS); return; } /* TODO: multifunction hot-plug. * Right now, only a device of function = 0 is allowed to be * hot plugged/unplugged. 
*/ assert(PCI_FUNC(pci_dev->devfn) == 0); pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDS); pcie_cap_slot_event(PCI_DEVICE(hotplug_dev), PCI_EXP_HP_EV_PDC); }"} {"target": 0, "idx": 13424, "func": "void do_info_snapshots(Monitor *mon) { DriveInfo *dinfo; BlockDriverState *bs, *bs1; QEMUSnapshotInfo *sn_tab, *sn; int nb_sns, i; char buf[256]; bs = get_bs_snapshots(); if (!bs) { monitor_printf(mon, \"No available block device supports snapshots\\n\"); return; } monitor_printf(mon, \"Snapshot devices:\"); TAILQ_FOREACH(dinfo, &drives, next) { bs1 = dinfo->bdrv; if (bdrv_has_snapshot(bs1)) { if (bs == bs1) monitor_printf(mon, \" %s\", bdrv_get_device_name(bs1)); } } monitor_printf(mon, \"\\n\"); nb_sns = bdrv_snapshot_list(bs, &sn_tab); if (nb_sns < 0) { monitor_printf(mon, \"bdrv_snapshot_list: error %d\\n\", nb_sns); return; } monitor_printf(mon, \"Snapshot list (from %s):\\n\", bdrv_get_device_name(bs)); monitor_printf(mon, \"%s\\n\", bdrv_snapshot_dump(buf, sizeof(buf), NULL)); for(i = 0; i < nb_sns; i++) { sn = &sn_tab[i]; monitor_printf(mon, \"%s\\n\", bdrv_snapshot_dump(buf, sizeof(buf), sn)); } qemu_free(sn_tab); }"} {"target": 0, "idx": 13427, "func": "static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, uint8_t *buf, void *hba_private) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); switch (buf[0]) { case READ_6: case READ_10: case READ_12: case READ_16: case VERIFY_10: case VERIFY_12: case VERIFY_16: case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_16: case WRITE_VERIFY_10: case WRITE_VERIFY_12: case WRITE_VERIFY_16: /* If we are not using O_DIRECT, we might read stale data from the * host cache if writes were made using other commands than these * ones (such as WRITE SAME or EXTENDED COPY, etc.). So, without * O_DIRECT everything must go through SG_IO. */ if (bdrv_get_flags(s->qdev.conf.bs) & BDRV_O_NOCACHE) { break; } /* MMC writing cannot be done via pread/pwrite, because it sometimes * involves writing beyond the maximum LBA or to negative LBA (lead-in). * And once you do these writes, reading from the block device is * unreliable, too. It is even possible that reads deliver random data * from the host page cache (this is probably a Linux bug). * * We might use scsi_disk_reqops as long as no writing commands are * seen, but performance usually isn't paramount on optical media. So, * just make scsi-block operate the same as scsi-generic for them. 
*/ if (s->qdev.type == TYPE_ROM) { break; } return scsi_req_alloc(&scsi_disk_reqops, &s->qdev, tag, lun, hba_private); } return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, hba_private); }"} {"target": 1, "idx": 13449, "func": "static void init_excp_4xx_softmmu (CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000; env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010; env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020; env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00001100; env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00001200; env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000; env->excp_prefix = 0x00000000; env->ivor_mask = 0x0000FFF0; env->ivpr_mask = 0xFFFF0000; /* Hardware reset vector */ env->hreset_vector = 0xFFFFFFFCUL; #endif }"} {"target": 1, "idx": 13457, "func": "static int mxf_read_packet(AVFormatContext *s, AVPacket *pkt) { KLVPacket klv; while (!s->pb->eof_reached) { if (klv_read_packet(&klv, s->pb) < 0) return -1; PRINT_KEY(s, \"read packet\", klv.key); av_dlog(s, \"size %\"PRIu64\" offset %#\"PRIx64\"\\n\", klv.length, klv.offset); if (IS_KLV_KEY(klv.key, mxf_encrypted_triplet_key)) { int res = mxf_decrypt_triplet(s, pkt, &klv); if (res < 0) { av_log(s, AV_LOG_ERROR, \"invalid encoded triplet\\n\"); return -1; } return 0; } if (IS_KLV_KEY(klv.key, mxf_essence_element_key)) { int index = mxf_get_stream_index(s, &klv); if (index < 0) { av_log(s, AV_LOG_ERROR, \"error getting stream index %d\\n\", AV_RB32(klv.key+12)); goto skip; } if (s->streams[index]->discard == AVDISCARD_ALL) goto skip; /* check for 8 channels AES3 element */ if (klv.key[12] == 0x06 && klv.key[13] == 0x01 && klv.key[14] == 0x10) { if (mxf_get_d10_aes3_packet(s->pb, s->streams[index], pkt, klv.length) < 0) { av_log(s, AV_LOG_ERROR, \"error reading D-10 aes3 frame\\n\"); return -1; } } else av_get_packet(s->pb, pkt, klv.length); pkt->stream_index = index; pkt->pos = klv.offset; return 0; } else skip: avio_skip(s->pb, klv.length); } return AVERROR_EOF; }"} {"target": 1, "idx": 13459, "func": "int cpu_get_dump_info(ArchDumpInfo *info) { return -1; }"} {"target": 1, "idx": 13460, "func": "void ff_ivi_recompose_haar(const IVIPlaneDesc *plane, uint8_t *dst, const int dst_pitch, const int num_bands) { int x, y, indx, b0, b1, b2, b3, p0, p1, p2, p3; const IDWTELEM *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr; int32_t pitch; /* all bands should have the same pitch */ pitch = plane->bands[0].pitch; /* get pointers to the wavelet bands */ b0_ptr = plane->bands[0].buf; b1_ptr = plane->bands[1].buf; b2_ptr = plane->bands[2].buf; b3_ptr = plane->bands[3].buf; for (y = 0; y < plane->height; y += 2) { for (x = 0, indx = 0; x < plane->width; x += 2, indx++) { /* load coefficients */ b0 = b0_ptr[indx]; //should be: b0 = (num_bands > 0) ? b0_ptr[indx] : 0; b1 = b1_ptr[indx]; //should be: b1 = (num_bands > 1) ? b1_ptr[indx] : 0; b2 = b2_ptr[indx]; //should be: b2 = (num_bands > 2) ? b2_ptr[indx] : 0; b3 = b3_ptr[indx]; //should be: b3 = (num_bands > 3) ? 
b3_ptr[indx] : 0; /* haar wavelet recomposition */ p0 = (b0 + b1 + b2 + b3 + 2) >> 2; p1 = (b0 + b1 - b2 - b3 + 2) >> 2; p2 = (b0 - b1 + b2 - b3 + 2) >> 2; p3 = (b0 - b1 - b2 + b3 + 2) >> 2; /* bias, convert and output four pixels */ dst[x] = av_clip_uint8(p0 + 128); dst[x + 1] = av_clip_uint8(p1 + 128); dst[dst_pitch + x] = av_clip_uint8(p2 + 128); dst[dst_pitch + x + 1] = av_clip_uint8(p3 + 128); }// for x dst += dst_pitch << 1; b0_ptr += pitch; b1_ptr += pitch; b2_ptr += pitch; b3_ptr += pitch; }// for y }"} {"target": 1, "idx": 13467, "func": "void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, int asi, int size) { #ifdef DEBUG_ASI dump_asi(\"write\", addr, asi, size, val); #endif if (asi < 0x80) { helper_raise_exception(env, TT_PRIV_ACT); } helper_check_align(env, addr, size - 1); addr = asi_address_mask(env, asi, addr); /* Convert to little endian */ switch (asi) { case 0x88: /* Primary LE */ case 0x89: /* Secondary LE */ switch (size) { case 2: val = bswap16(val); break; case 4: val = bswap32(val); break; case 8: val = bswap64(val); break; default: break; } default: break; } switch (asi) { case 0x80: /* Primary */ case 0x88: /* Primary LE */ { switch (size) { case 1: stb_raw(addr, val); break; case 2: stw_raw(addr, val); break; case 4: stl_raw(addr, val); break; case 8: default: stq_raw(addr, val); break; } } break; case 0x81: /* Secondary */ case 0x89: /* Secondary LE */ /* XXX */ return; case 0x82: /* Primary no-fault, RO */ case 0x83: /* Secondary no-fault, RO */ case 0x8a: /* Primary no-fault LE, RO */ case 0x8b: /* Secondary no-fault LE, RO */ default: helper_raise_exception(env, TT_DATA_ACCESS); return; } }"} {"target": 1, "idx": 13483, "func": "static ssize_t sdp_svc_search_attr_get(struct bt_l2cap_sdp_state_s *sdp, uint8_t *rsp, const uint8_t *req, ssize_t len) { ssize_t seqlen; int i, j, start, end, max; struct sdp_service_record_s *record; uint8_t *lst; /* Perform the search */ for (i = 0; i < sdp->services; i ++) { sdp->service_list[i].match = 0; for (j = 0; j < sdp->service_list[i].attributes; j ++) sdp->service_list[i].attribute_list[j].match = 0; } if (len < 1) return -SDP_INVALID_SYNTAX; if ((*req & ~SDP_DSIZE_MASK) == SDP_DTYPE_SEQ) { seqlen = sdp_datalen(&req, &len); if (seqlen < 3 || len < seqlen) return -SDP_INVALID_SYNTAX; len -= seqlen; while (seqlen) if (sdp_svc_match(sdp, &req, &seqlen)) return -SDP_INVALID_SYNTAX; } else if (sdp_svc_match(sdp, &req, &seqlen)) return -SDP_INVALID_SYNTAX; if (len < 3) return -SDP_INVALID_SYNTAX; max = (req[0] << 8) | req[1]; req += 2; len -= 2; if (max < 0x0007) return -SDP_INVALID_SYNTAX; if ((*req & ~SDP_DSIZE_MASK) == SDP_DTYPE_SEQ) { seqlen = sdp_datalen(&req, &len); if (seqlen < 3 || len < seqlen) return -SDP_INVALID_SYNTAX; len -= seqlen; while (seqlen) if (sdp_svc_attr_match(sdp, &req, &seqlen)) return -SDP_INVALID_SYNTAX; } else if (sdp_svc_attr_match(sdp, &req, &seqlen)) return -SDP_INVALID_SYNTAX; if (len < 1) return -SDP_INVALID_SYNTAX; if (*req) { if (len <= sizeof(int)) return -SDP_INVALID_SYNTAX; len -= sizeof(int); memcpy(&start, req + 1, sizeof(int)); } else start = 0; if (len > 1) return -SDP_INVALID_SYNTAX; /* Output the results */ /* This assumes empty attribute lists are never to be returned even * for matching Service Records. In practice this shouldn't happen * as the requestor will usually include the always present * ServiceRecordHandle AttributeID in AttributeIDList. 
*/ lst = rsp + 2; max = MIN(max, MAX_RSP_PARAM_SIZE); len = 3 - start; end = 0; for (i = 0; i < sdp->services; i ++) if ((record = &sdp->service_list[i])->match) { len += 3; seqlen = len; for (j = 0; j < record->attributes; j ++) if (record->attribute_list[j].match) { if (len >= 0) if (len + record->attribute_list[j].len < max) { memcpy(lst + len, record->attribute_list[j].pair, record->attribute_list[j].len); end = len + record->attribute_list[j].len; } len += record->attribute_list[j].len; } if (seqlen == len) len -= 3; else if (seqlen >= 3 && seqlen < max) { lst[seqlen - 3] = SDP_DTYPE_SEQ | SDP_DSIZE_NEXT2; lst[seqlen - 2] = (len - seqlen) >> 8; lst[seqlen - 1] = (len - seqlen) & 0xff; } } if (len == 3 - start) len -= 3; else if (0 >= start) { lst[0] = SDP_DTYPE_SEQ | SDP_DSIZE_NEXT2; lst[1] = (len + start - 3) >> 8; lst[2] = (len + start - 3) & 0xff; } rsp[0] = end >> 8; rsp[1] = end & 0xff; if (end < len) { len = end + start; lst[end ++] = sizeof(int); memcpy(lst + end, &len, sizeof(int)); end += sizeof(int); } else lst[end ++] = 0; return end + 2; }"} {"target": 0, "idx": 13499, "func": "static void mcf_intc_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { int offset; mcf_intc_state *s = (mcf_intc_state *)opaque; offset = addr & 0xff; if (offset >= 0x40 && offset < 0x80) { int n = offset - 0x40; s->icr[n] = val; if (val == 0) s->enabled &= ~(1ull << n); else s->enabled |= (1ull << n); mcf_intc_update(s); return; } switch (offset) { case 0x00: case 0x04: /* Ignore IPR writes. */ return; case 0x08: s->imr = (s->imr & 0xffffffff) | ((uint64_t)val << 32); break; case 0x0c: s->imr = (s->imr & 0xffffffff00000000ull) | (uint32_t)val; break; default: hw_error(\"mcf_intc_write: Bad write offset %d\\n\", offset); break; } mcf_intc_update(s); }"} {"target": 0, "idx": 13515, "func": "static int cow_write(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { BDRVCowState *s = bs->opaque; int ret; ret = bdrv_pwrite(bs->file, s->cow_sectors_offset + sector_num * 512, buf, nb_sectors * 512); if (ret < 0) { return ret; } return cow_update_bitmap(bs, sector_num, nb_sectors); }"} {"target": 0, "idx": 13521, "func": "static int find_smallest_bounding_rectangle(AVSubtitle *s) { uint8_t transp_color[256] = { 0 }; int y1, y2, x1, x2, y, w, h, i; uint8_t *bitmap; if (s->num_rects == 0 || s->rects == NULL || s->rects[0]->w <= 0 || s->rects[0]->h <= 0) return 0; for(i = 0; i < s->rects[0]->nb_colors; i++) { if ((((uint32_t*)s->rects[0]->pict.data[1])[i] >> 24) == 0) transp_color[i] = 1; } y1 = 0; while (y1 < s->rects[0]->h && is_transp(s->rects[0]->pict.data[0] + y1 * s->rects[0]->pict.linesize[0], 1, s->rects[0]->w, transp_color)) y1++; if (y1 == s->rects[0]->h) { av_freep(&s->rects[0]->pict.data[0]); s->rects[0]->w = s->rects[0]->h = 0; return 0; } y2 = s->rects[0]->h - 1; while (y2 > 0 && is_transp(s->rects[0]->pict.data[0] + y2 * s->rects[0]->pict.linesize[0], 1, s->rects[0]->w, transp_color)) y2--; x1 = 0; while (x1 < (s->rects[0]->w - 1) && is_transp(s->rects[0]->pict.data[0] + x1, s->rects[0]->pict.linesize[0], s->rects[0]->h, transp_color)) x1++; x2 = s->rects[0]->w - 1; while (x2 > 0 && is_transp(s->rects[0]->pict.data[0] + x2, s->rects[0]->pict.linesize[0], s->rects[0]->h, transp_color)) x2--; w = x2 - x1 + 1; h = y2 - y1 + 1; bitmap = av_malloc(w * h); if (!bitmap) return 1; for(y = 0; y < h; y++) { memcpy(bitmap + w * y, s->rects[0]->pict.data[0] + x1 + (y1 + y) * s->rects[0]->pict.linesize[0], w); } av_freep(&s->rects[0]->pict.data[0]); 
s->rects[0]->pict.data[0] = bitmap; s->rects[0]->pict.linesize[0] = w; s->rects[0]->w = w; s->rects[0]->h = h; s->rects[0]->x += x1; s->rects[0]->y += y1; return 1; }"} {"target": 1, "idx": 13529, "func": "QemuConsole *qemu_console_lookup_by_device(DeviceState *dev, uint32_t head) { Error *local_err = NULL; Object *obj; uint32_t h; int i; for (i = 0; i < nb_consoles; i++) { if (!consoles[i]) { continue; } obj = object_property_get_link(OBJECT(consoles[i]), \"device\", &local_err); if (DEVICE(obj) != dev) { continue; } h = object_property_get_int(OBJECT(consoles[i]), \"head\", &local_err); if (h != head) { continue; } return consoles[i]; } return NULL; }"} {"target": 1, "idx": 13546, "func": "int qemu_file_get_error(QEMUFile *f) { return f->last_error; }"} {"target": 1, "idx": 13552, "func": "static av_cold int allocate_buffers(AVCodecContext *avctx) { int blk, ch; AC3EncodeContext *s = avctx->priv_data; int channels = s->channels + 1; /* includes coupling channel */ FF_ALLOC_OR_GOTO(avctx, s->planar_samples, s->channels * sizeof(*s->planar_samples), alloc_fail); for (ch = 0; ch < s->channels; ch++) { FF_ALLOCZ_OR_GOTO(avctx, s->planar_samples[ch], (AC3_FRAME_SIZE+AC3_BLOCK_SIZE) * sizeof(**s->planar_samples), alloc_fail); } FF_ALLOC_OR_GOTO(avctx, s->bap_buffer, AC3_MAX_BLOCKS * channels * AC3_MAX_COEFS * sizeof(*s->bap_buffer), alloc_fail); FF_ALLOC_OR_GOTO(avctx, s->bap1_buffer, AC3_MAX_BLOCKS * channels * AC3_MAX_COEFS * sizeof(*s->bap1_buffer), alloc_fail); FF_ALLOC_OR_GOTO(avctx, s->mdct_coef_buffer, AC3_MAX_BLOCKS * channels * AC3_MAX_COEFS * sizeof(*s->mdct_coef_buffer), alloc_fail); FF_ALLOC_OR_GOTO(avctx, s->exp_buffer, AC3_MAX_BLOCKS * channels * AC3_MAX_COEFS * sizeof(*s->exp_buffer), alloc_fail); FF_ALLOC_OR_GOTO(avctx, s->grouped_exp_buffer, AC3_MAX_BLOCKS * channels * 128 * sizeof(*s->grouped_exp_buffer), alloc_fail); FF_ALLOC_OR_GOTO(avctx, s->psd_buffer, AC3_MAX_BLOCKS * channels * AC3_MAX_COEFS * sizeof(*s->psd_buffer), alloc_fail); FF_ALLOC_OR_GOTO(avctx, s->band_psd_buffer, AC3_MAX_BLOCKS * channels * 64 * sizeof(*s->band_psd_buffer), alloc_fail); FF_ALLOC_OR_GOTO(avctx, s->mask_buffer, AC3_MAX_BLOCKS * channels * 64 * sizeof(*s->mask_buffer), alloc_fail); FF_ALLOC_OR_GOTO(avctx, s->qmant_buffer, AC3_MAX_BLOCKS * channels * AC3_MAX_COEFS * sizeof(*s->qmant_buffer), alloc_fail); if (s->cpl_enabled) { FF_ALLOC_OR_GOTO(avctx, s->cpl_coord_exp_buffer, AC3_MAX_BLOCKS * channels * 16 * sizeof(*s->cpl_coord_exp_buffer), alloc_fail); FF_ALLOC_OR_GOTO(avctx, s->cpl_coord_mant_buffer, AC3_MAX_BLOCKS * channels * 16 * sizeof(*s->cpl_coord_mant_buffer), alloc_fail); } for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { AC3Block *block = &s->blocks[blk]; FF_ALLOC_OR_GOTO(avctx, block->bap, channels * sizeof(*block->bap), alloc_fail); FF_ALLOCZ_OR_GOTO(avctx, block->mdct_coef, channels * sizeof(*block->mdct_coef), alloc_fail); FF_ALLOCZ_OR_GOTO(avctx, block->exp, channels * sizeof(*block->exp), alloc_fail); FF_ALLOCZ_OR_GOTO(avctx, block->grouped_exp, channels * sizeof(*block->grouped_exp), alloc_fail); FF_ALLOCZ_OR_GOTO(avctx, block->psd, channels * sizeof(*block->psd), alloc_fail); FF_ALLOCZ_OR_GOTO(avctx, block->band_psd, channels * sizeof(*block->band_psd), alloc_fail); FF_ALLOCZ_OR_GOTO(avctx, block->mask, channels * sizeof(*block->mask), alloc_fail); FF_ALLOCZ_OR_GOTO(avctx, block->qmant, channels * sizeof(*block->qmant), alloc_fail); if (s->cpl_enabled) { FF_ALLOCZ_OR_GOTO(avctx, block->cpl_coord_exp, channels * sizeof(*block->cpl_coord_exp), alloc_fail); FF_ALLOCZ_OR_GOTO(avctx, 
block->cpl_coord_mant, channels * sizeof(*block->cpl_coord_mant), alloc_fail); } for (ch = 0; ch < channels; ch++) { /* arrangement: block, channel, coeff */ block->bap[ch] = &s->bap_buffer [AC3_MAX_COEFS * (blk * channels + ch)]; block->grouped_exp[ch] = &s->grouped_exp_buffer[128 * (blk * channels + ch)]; block->psd[ch] = &s->psd_buffer [AC3_MAX_COEFS * (blk * channels + ch)]; block->band_psd[ch] = &s->band_psd_buffer [64 * (blk * channels + ch)]; block->mask[ch] = &s->mask_buffer [64 * (blk * channels + ch)]; block->qmant[ch] = &s->qmant_buffer [AC3_MAX_COEFS * (blk * channels + ch)]; if (s->cpl_enabled) { block->cpl_coord_exp[ch] = &s->cpl_coord_exp_buffer [16 * (blk * channels + ch)]; block->cpl_coord_mant[ch] = &s->cpl_coord_mant_buffer[16 * (blk * channels + ch)]; } /* arrangement: channel, block, coeff */ block->exp[ch] = &s->exp_buffer [AC3_MAX_COEFS * (AC3_MAX_BLOCKS * ch + blk)]; block->mdct_coef[ch] = &s->mdct_coef_buffer [AC3_MAX_COEFS * (AC3_MAX_BLOCKS * ch + blk)]; } } if (CONFIG_AC3ENC_FLOAT) { FF_ALLOC_OR_GOTO(avctx, s->fixed_coef_buffer, AC3_MAX_BLOCKS * channels * AC3_MAX_COEFS * sizeof(*s->fixed_coef_buffer), alloc_fail); for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { AC3Block *block = &s->blocks[blk]; FF_ALLOCZ_OR_GOTO(avctx, block->fixed_coef, channels * sizeof(*block->fixed_coef), alloc_fail); for (ch = 0; ch < channels; ch++) block->fixed_coef[ch] = &s->fixed_coef_buffer[AC3_MAX_COEFS * (AC3_MAX_BLOCKS * ch + blk)]; } } else { for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { AC3Block *block = &s->blocks[blk]; FF_ALLOCZ_OR_GOTO(avctx, block->fixed_coef, channels * sizeof(*block->fixed_coef), alloc_fail); for (ch = 0; ch < channels; ch++) block->fixed_coef[ch] = (int32_t *)block->mdct_coef[ch]; } } return 0; alloc_fail: return AVERROR(ENOMEM); }"} {"target": 0, "idx": 13565, "func": "static void platform_fixed_ioport_init(PCIXenPlatformState* s) { register_ioport_write(XEN_PLATFORM_IOPORT, 16, 4, platform_fixed_ioport_writel, s); register_ioport_write(XEN_PLATFORM_IOPORT, 16, 2, platform_fixed_ioport_writew, s); register_ioport_write(XEN_PLATFORM_IOPORT, 16, 1, platform_fixed_ioport_writeb, s); register_ioport_read(XEN_PLATFORM_IOPORT, 16, 2, platform_fixed_ioport_readw, s); register_ioport_read(XEN_PLATFORM_IOPORT, 16, 1, platform_fixed_ioport_readb, s); }"} {"target": 0, "idx": 13569, "func": "uint64_t helper_fctidz(CPUPPCState *env, uint64_t arg) { CPU_DoubleU farg; farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN conversion */ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI); } else if (unlikely(float64_is_quiet_nan(farg.d) || float64_is_infinity(farg.d))) { /* qNan / infinity conversion */ farg.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI); } else { farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status); } return farg.ll; }"} {"target": 0, "idx": 13592, "func": "void g_free(void *ptr) { __coverity_free__(ptr); __coverity_mark_as_afm_freed__(ptr, AFM_free); }"} {"target": 0, "idx": 13595, "func": "static int load_multiboot(void *fw_cfg, FILE *f, const char *kernel_filename, const char *initrd_filename, const char *kernel_cmdline, uint8_t *header) { int i, t, is_multiboot = 0; uint32_t flags = 0; uint32_t mh_entry_addr; uint32_t mh_load_addr; uint32_t mb_kernel_size; uint32_t mmap_addr = MULTIBOOT_STRUCT_ADDR; uint32_t mb_bootinfo = MULTIBOOT_STRUCT_ADDR + 0x500; uint32_t mb_cmdline = mb_bootinfo + 0x200; uint32_t mb_mod_end; /* Ok, let's see if it is a multiboot image. 
The header is 12x32bit long, so the latest entry may be 8192 - 48. */ for (i = 0; i < (8192 - 48); i += 4) { if (ldl_p(header+i) == 0x1BADB002) { uint32_t checksum = ldl_p(header+i+8); flags = ldl_p(header+i+4); checksum += flags; checksum += (uint32_t)0x1BADB002; if (!checksum) { is_multiboot = 1; break; } } } if (!is_multiboot) return 0; /* no multiboot */ #ifdef DEBUG_MULTIBOOT fprintf(stderr, \"qemu: I believe we found a multiboot image!\\n\"); #endif if (flags & 0x00000004) { /* MULTIBOOT_HEADER_HAS_VBE */ fprintf(stderr, \"qemu: multiboot knows VBE. we don't.\\n\"); } if (!(flags & 0x00010000)) { /* MULTIBOOT_HEADER_HAS_ADDR */ uint64_t elf_entry; int kernel_size; fclose(f); kernel_size = load_elf(kernel_filename, 0, &elf_entry, NULL, NULL, 0, ELF_MACHINE, 0); if (kernel_size < 0) { fprintf(stderr, \"Error while loading elf kernel\\n\"); exit(1); } mh_load_addr = mh_entry_addr = elf_entry; mb_kernel_size = kernel_size; #ifdef DEBUG_MULTIBOOT fprintf(stderr, \"qemu: loading multiboot-elf kernel (%#x bytes) with entry %#zx\\n\", mb_kernel_size, (size_t)mh_entry_addr); #endif } else { /* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_ADDR. */ uint32_t mh_header_addr = ldl_p(header+i+12); mh_load_addr = ldl_p(header+i+16); #ifdef DEBUG_MULTIBOOT uint32_t mh_load_end_addr = ldl_p(header+i+20); uint32_t mh_bss_end_addr = ldl_p(header+i+24); #endif uint32_t mb_kernel_text_offset = i - (mh_header_addr - mh_load_addr); mh_entry_addr = ldl_p(header+i+28); mb_kernel_size = get_file_size(f) - mb_kernel_text_offset; /* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_VBE. uint32_t mh_mode_type = ldl_p(header+i+32); uint32_t mh_width = ldl_p(header+i+36); uint32_t mh_height = ldl_p(header+i+40); uint32_t mh_depth = ldl_p(header+i+44); */ #ifdef DEBUG_MULTIBOOT fprintf(stderr, \"multiboot: mh_header_addr = %#x\\n\", mh_header_addr); fprintf(stderr, \"multiboot: mh_load_addr = %#x\\n\", mh_load_addr); fprintf(stderr, \"multiboot: mh_load_end_addr = %#x\\n\", mh_load_end_addr); fprintf(stderr, \"multiboot: mh_bss_end_addr = %#x\\n\", mh_bss_end_addr); #endif fseek(f, mb_kernel_text_offset, SEEK_SET); #ifdef DEBUG_MULTIBOOT fprintf(stderr, \"qemu: loading multiboot kernel (%#x bytes) at %#x\\n\", mb_kernel_size, mh_load_addr); #endif if (!fread_targphys_ok(mh_load_addr, mb_kernel_size, f)) { fprintf(stderr, \"qemu: read error on multiboot kernel '%s' (%#x)\\n\", kernel_filename, mb_kernel_size); exit(1); } fclose(f); } /* blob size is only the kernel for now */ mb_mod_end = mh_load_addr + mb_kernel_size; /* load modules */ stl_phys(mb_bootinfo + 20, 0x0); /* mods_count */ if (initrd_filename) { uint32_t mb_mod_info = mb_bootinfo + 0x100; uint32_t mb_mod_cmdline = mb_bootinfo + 0x300; uint32_t mb_mod_start = mh_load_addr; uint32_t mb_mod_length = mb_kernel_size; char *next_initrd; char *next_space; int mb_mod_count = 0; do { next_initrd = strchr(initrd_filename, ','); if (next_initrd) *next_initrd = '\\0'; /* if a space comes after the module filename, treat everything after that as parameters */ cpu_physical_memory_write(mb_mod_cmdline, (uint8_t*)initrd_filename, strlen(initrd_filename) + 1); stl_phys(mb_mod_info + 8, mb_mod_cmdline); /* string */ mb_mod_cmdline += strlen(initrd_filename) + 1; if ((next_space = strchr(initrd_filename, ' '))) *next_space = '\\0'; #ifdef DEBUG_MULTIBOOT printf(\"multiboot loading module: %s\\n\", initrd_filename); #endif f = fopen(initrd_filename, \"rb\"); if (f) { mb_mod_start = (mb_mod_start + mb_mod_length + (TARGET_PAGE_SIZE - 1)) & (TARGET_PAGE_MASK); mb_mod_length = 
get_file_size(f); mb_mod_end = mb_mod_start + mb_mod_length; if (!fread_targphys_ok(mb_mod_start, mb_mod_length, f)) { fprintf(stderr, \"qemu: read error on multiboot module '%s' (%#x)\\n\", initrd_filename, mb_mod_length); exit(1); } mb_mod_count++; stl_phys(mb_mod_info + 0, mb_mod_start); stl_phys(mb_mod_info + 4, mb_mod_start + mb_mod_length); #ifdef DEBUG_MULTIBOOT printf(\"mod_start: %#x\\nmod_end: %#x\\n\", mb_mod_start, mb_mod_start + mb_mod_length); #endif stl_phys(mb_mod_info + 12, 0x0); /* reserved */ } initrd_filename = next_initrd+1; mb_mod_info += 16; } while (next_initrd); stl_phys(mb_bootinfo + 20, mb_mod_count); /* mods_count */ stl_phys(mb_bootinfo + 24, mb_bootinfo + 0x100); /* mods_addr */ } /* Make sure we're getting kernel + modules back after reset */ option_rom_setup_reset(mh_load_addr, mb_mod_end - mh_load_addr); /* Commandline support */ stl_phys(mb_bootinfo + 16, mb_cmdline); t = strlen(kernel_filename); cpu_physical_memory_write(mb_cmdline, (uint8_t*)kernel_filename, t); mb_cmdline += t; stb_phys(mb_cmdline++, ' '); t = strlen(kernel_cmdline) + 1; cpu_physical_memory_write(mb_cmdline, (uint8_t*)kernel_cmdline, t); /* the kernel is where we want it to be now */ #define MULTIBOOT_FLAGS_MEMORY (1 << 0) #define MULTIBOOT_FLAGS_BOOT_DEVICE (1 << 1) #define MULTIBOOT_FLAGS_CMDLINE (1 << 2) #define MULTIBOOT_FLAGS_MODULES (1 << 3) #define MULTIBOOT_FLAGS_MMAP (1 << 6) stl_phys(mb_bootinfo, MULTIBOOT_FLAGS_MEMORY | MULTIBOOT_FLAGS_BOOT_DEVICE | MULTIBOOT_FLAGS_CMDLINE | MULTIBOOT_FLAGS_MODULES | MULTIBOOT_FLAGS_MMAP); stl_phys(mb_bootinfo + 4, 640); /* mem_lower */ stl_phys(mb_bootinfo + 8, ram_size / 1024); /* mem_upper */ stl_phys(mb_bootinfo + 12, 0x8001ffff); /* XXX: use the -boot switch? */ stl_phys(mb_bootinfo + 48, mmap_addr); /* mmap_addr */ #ifdef DEBUG_MULTIBOOT fprintf(stderr, \"multiboot: mh_entry_addr = %#x\\n\", mh_entry_addr); #endif /* Pass variables to option rom */ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, mh_entry_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, mb_bootinfo); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, mmap_addr); /* Make sure we're getting the config space back after reset */ option_rom_setup_reset(mb_bootinfo, 0x500); option_rom[nb_option_roms] = \"multiboot.bin\"; nb_option_roms++; return 1; /* yes, we are multiboot */ }"} {"target": 1, "idx": 13605, "func": "iscsi_aio_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { IscsiLun *iscsilun = bs->opaque; struct iscsi_context *iscsi = iscsilun->iscsi; IscsiAIOCB *acb; size_t size; uint32_t num_sectors; uint64_t lba; struct iscsi_data data; acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque); trace_iscsi_aio_writev(iscsi, sector_num, nb_sectors, opaque, acb); acb->iscsilun = iscsilun; acb->qiov = qiov; acb->canceled = 0; acb->bh = NULL; acb->status = -EINPROGRESS; /* XXX we should pass the iovec to write16 to avoid the extra copy */ /* this will allow us to get rid of 'buf' completely */ size = nb_sectors * BDRV_SECTOR_SIZE; data.size = MIN(size, acb->qiov->size); /* if the iovec only contains one buffer we can pass it directly */ if (acb->qiov->niov == 1) { acb->buf = NULL; data.data = acb->qiov->iov[0].iov_base; } else { acb->buf = g_malloc(data.size); qemu_iovec_to_buf(acb->qiov, 0, acb->buf, data.size); data.data = acb->buf; } acb->task = malloc(sizeof(struct scsi_task)); if (acb->task == NULL) { error_report(\"iSCSI: Failed to allocate task for scsi WRITE16 \" \"command. 
%s\", iscsi_get_error(iscsi)); qemu_aio_release(acb); return NULL; } memset(acb->task, 0, sizeof(struct scsi_task)); acb->task->xfer_dir = SCSI_XFER_WRITE; acb->task->cdb_size = 16; acb->task->cdb[0] = 0x8a; lba = sector_qemu2lun(sector_num, iscsilun); *(uint32_t *)&acb->task->cdb[2] = htonl(lba >> 32); *(uint32_t *)&acb->task->cdb[6] = htonl(lba & 0xffffffff); num_sectors = size / iscsilun->block_size; *(uint32_t *)&acb->task->cdb[10] = htonl(num_sectors); acb->task->expxferlen = size; if (iscsi_scsi_command_async(iscsi, iscsilun->lun, acb->task, iscsi_aio_write16_cb, &data, acb) != 0) { scsi_free_scsi_task(acb->task); g_free(acb->buf); qemu_aio_release(acb); return NULL; } iscsi_set_events(iscsilun); return &acb->common; }"} {"target": 0, "idx": 13612, "func": "static void build_file_streams(void) { FFServerStream *stream, *stream_next; int i, ret; /* gather all streams */ for(stream = config.first_stream; stream; stream = stream_next) { AVFormatContext *infile = NULL; stream_next = stream->next; if (stream->stream_type == STREAM_TYPE_LIVE && !stream->feed) { /* the stream comes from a file */ /* try to open the file */ /* open stream */ if (stream->fmt && !strcmp(stream->fmt->name, \"rtp\")) { /* specific case : if transport stream output to RTP, we use a raw transport stream reader */ av_dict_set(&stream->in_opts, \"mpeg2ts_compute_pcr\", \"1\", 0); } if (!stream->feed_filename[0]) { http_log(\"Unspecified feed file for stream '%s'\\n\", stream->filename); goto fail; } http_log(\"Opening feed file '%s' for stream '%s'\\n\", stream->feed_filename, stream->filename); if ((ret = avformat_open_input(&infile, stream->feed_filename, stream->ifmt, &stream->in_opts)) < 0) { http_log(\"Could not open '%s': %s\\n\", stream->feed_filename, av_err2str(ret)); /* remove stream (no need to spend more time on it) */ fail: remove_stream(stream); } else { /* find all the AVStreams inside and reference them in 'stream' */ if (avformat_find_stream_info(infile, NULL) < 0) { http_log(\"Could not find codec parameters from '%s'\\n\", stream->feed_filename); avformat_close_input(&infile); goto fail; } extract_mpeg4_header(infile); for(i=0;inb_streams;i++) add_av_stream1(stream, infile->streams[i]->codec, 1); avformat_close_input(&infile); } } } }"} {"target": 1, "idx": 13623, "func": "int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt) { int ret, flush = 0; if (pkt) { AVStream *st = s->streams[pkt->stream_index]; //FIXME/XXX/HACK drop zero sized packets if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size == 0) return 0; av_dlog(s, \"av_interleaved_write_frame size:%d dts:%\" PRId64 \" pts:%\" PRId64 \"\\n\", pkt->size, pkt->dts, pkt->pts); if ((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) if (pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) return AVERROR(EINVAL); } else { av_dlog(s, \"av_interleaved_write_frame FLUSH\\n\"); flush = 1; } for (;; ) { AVPacket opkt; int ret = interleave_packet(s, &opkt, pkt, flush); if (ret <= 0) //FIXME cleanup needed for ret<0 ? 
ret = write_packet(s, &opkt); if (ret >= 0) s->streams[opkt.stream_index]->nb_frames++; av_free_packet(&opkt); pkt = NULL; } }"} {"target": 0, "idx": 13648, "func": "static void unref_buffer(InputStream *ist, FrameBuffer *buf) { av_assert0(buf->refcount); buf->refcount--; if (!buf->refcount) { buf->next = ist->buffer_pool; ist->buffer_pool = buf; } }"} {"target": 0, "idx": 13649, "func": "static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x) { uint8_t *dest_y, *dest_cb, *dest_cr; int linesize, uvlinesize, mb_x, mb_y; const int end_mb_y = sl->mb_y + FRAME_MBAFF(h); const int old_slice_type = sl->slice_type; const int pixel_shift = h->pixel_shift; const int block_h = 16 >> h->chroma_y_shift; if (h->postpone_filter) return; if (sl->deblocking_filter) { for (mb_x = start_x; mb_x < end_x; mb_x++) for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) { int mb_xy, mb_type; mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride; sl->slice_num = h->slice_table[mb_xy]; mb_type = h->cur_pic.mb_type[mb_xy]; sl->list_count = h->list_counts[mb_xy]; if (FRAME_MBAFF(h)) sl->mb_mbaff = sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type); sl->mb_x = mb_x; sl->mb_y = mb_y; dest_y = h->cur_pic.f->data[0] + ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16; dest_cb = h->cur_pic.f->data[1] + (mb_x << pixel_shift) * (8 << CHROMA444(h)) + mb_y * sl->uvlinesize * block_h; dest_cr = h->cur_pic.f->data[2] + (mb_x << pixel_shift) * (8 << CHROMA444(h)) + mb_y * sl->uvlinesize * block_h; // FIXME simplify above if (MB_FIELD(sl)) { linesize = sl->mb_linesize = sl->linesize * 2; uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2; if (mb_y & 1) { // FIXME move out of this function? dest_y -= sl->linesize * 15; dest_cb -= sl->uvlinesize * (block_h - 1); dest_cr -= sl->uvlinesize * (block_h - 1); } } else { linesize = sl->mb_linesize = sl->linesize; uvlinesize = sl->mb_uvlinesize = sl->uvlinesize; } backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0); if (fill_filter_caches(h, sl, mb_type)) continue; sl->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.qscale_table[mb_xy]); sl->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.qscale_table[mb_xy]); if (FRAME_MBAFF(h)) { ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } else { ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } } } sl->slice_type = old_slice_type; sl->mb_x = end_x; sl->mb_y = end_mb_y - FRAME_MBAFF(h); sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale); sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale); }"} {"target": 1, "idx": 13660, "func": "static int get_buffer(QEMUFile *f, void *pv, size_t size) { uint8_t *v = pv; qemu_get_buffer(f, v, size); return 0; }"} {"target": 0, "idx": 13673, "func": "static av_cold int flashsv_decode_init(AVCodecContext *avctx) { FlashSVContext *s = avctx->priv_data; int zret; // Zlib return code s->avctx = avctx; s->zstream.zalloc = Z_NULL; s->zstream.zfree = Z_NULL; s->zstream.opaque = Z_NULL; zret = inflateInit(&s->zstream); if (zret != Z_OK) { av_log(avctx, AV_LOG_ERROR, \"Inflate init error: %d\\n\", zret); return 1; } avctx->pix_fmt = AV_PIX_FMT_BGR24; s->frame.data[0] = NULL; return 0; }"} {"target": 1, "idx": 13678, "func": "static int mov_write_trak_tag(AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track, AVStream *st) { int64_t pos = avio_tell(pb); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, \"trak\"); mov_write_tkhd_tag(pb, mov, track, st); if (track->mode == MODE_PSP || 
track->flags & MOV_TRACK_CTTS || (track->entry && track->cluster[0].dts) || is_clcp_track(track)) { if (mov->use_editlist) mov_write_edts_tag(pb, mov, track); // PSP Movies require edts box else if ((track->entry && track->cluster[0].dts) || track->mode == MODE_PSP || is_clcp_track(track)) av_log(mov->fc, AV_LOG_WARNING, \"Not writing any edit list even though one would have been required\\n\"); } if (track->tref_tag) mov_write_tref_tag(pb, track); mov_write_mdia_tag(pb, mov, track); if (track->mode == MODE_PSP) mov_write_uuid_tag_psp(pb, track); // PSP Movies require this uuid box if (track->tag == MKTAG('r','t','p',' ')) mov_write_udta_sdp(pb, track); if (track->mode == MODE_MOV) { if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO) { double sample_aspect_ratio = av_q2d(st->sample_aspect_ratio); if ((0.0 != sample_aspect_ratio && 1.0 != sample_aspect_ratio)) { mov_write_tapt_tag(pb, track); } } if (is_clcp_track(track)) { mov_write_tapt_tag(pb, track); } } mov_write_track_udta_tag(pb, mov, st); return update_size(pb, pos); }"} {"target": 0, "idx": 13683, "func": "const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length){ int i, si, di; uint8_t *dst; int bufidx; // src[0]&0x80; //forbidden bit h->nal_ref_idc= src[0]>>5; h->nal_unit_type= src[0]&0x1F; src++; length--; #if HAVE_FAST_UNALIGNED # if HAVE_FAST_64BIT # define RS 7 for(i=0; i+10 && !src[i]) i--; while(src[i]) i++; #else # define RS 0 for(i=0; i+10 && src[i-1]==0) i--; #endif if(i+2=length-1){ //no escaped 0 *dst_length= length; *consumed= length+1; //+1 for the header return src; } bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0; // use second escape buffer for inter data av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+FF_INPUT_BUFFER_PADDING_SIZE); dst= h->rbsp_buffer[bufidx]; if (dst == NULL){ return NULL; } //printf(\"decoding esc\\n\"); memcpy(dst, src, i); si=di=i; while(si+23){ dst[di++]= src[si++]; dst[di++]= src[si++]; }else if(src[si]==0 && src[si+1]==0){ if(src[si+2]==3){ //escape dst[di++]= 0; dst[di++]= 0; si+=3; continue; }else //next start code goto nsc; } dst[di++]= src[si++]; } while(simain_loop)) { g_main_loop_quit(ga_state->main_loop);"} {"target": 1, "idx": 13714, "func": "static int tcp_close(MigrationState *s) { DPRINTF(\"tcp_close\\n\"); if (s->fd != -1) { close(s->fd); s->fd = -1; } return 0; }"} {"target": 1, "idx": 13725, "func": "static void derive_spatial_merge_candidates(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int singleMCLFlag, int part_idx, int merge_idx, struct MvField mergecandlist[]) { HEVCLocalContext *lc = &s->HEVClc; RefPicList *refPicList = s->ref->refPicList; MvField *tab_mvf = s->ref->tab_mvf; const int min_pu_width = s->sps->min_pu_width; const int cand_bottom_left = lc->na.cand_bottom_left; const int cand_left = lc->na.cand_left; const int cand_up_left = lc->na.cand_up_left; const int cand_up = lc->na.cand_up; const int cand_up_right = lc->na.cand_up_right_sap; const int xA1 = x0 - 1; const int yA1 = y0 + nPbH - 1; const int xA1_pu = xA1 >> s->sps->log2_min_pu_size; const int yA1_pu = yA1 >> s->sps->log2_min_pu_size; const int xB1 = x0 + nPbW - 1; const int yB1 = y0 - 1; const int xB1_pu = xB1 >> s->sps->log2_min_pu_size; const int yB1_pu = yB1 >> s->sps->log2_min_pu_size; const int xB0 = x0 + nPbW; const int yB0 = y0 - 1; const int xB0_pu = xB0 >> s->sps->log2_min_pu_size; const int yB0_pu = yB0 >> s->sps->log2_min_pu_size; const int xA0 = x0 - 1; const int yA0 = y0 + nPbH; const 
int xA0_pu = xA0 >> s->sps->log2_min_pu_size; const int yA0_pu = yA0 >> s->sps->log2_min_pu_size; const int xB2 = x0 - 1; const int yB2 = y0 - 1; const int xB2_pu = xB2 >> s->sps->log2_min_pu_size; const int yB2_pu = yB2 >> s->sps->log2_min_pu_size; const int nb_refs = (s->sh.slice_type == P_SLICE) ? s->sh.nb_refs[0] : FFMIN(s->sh.nb_refs[0], s->sh.nb_refs[1]); int check_MER = 1; int check_MER_1 = 1; int zero_idx = 0; int nb_merge_cand = 0; int nb_orig_merge_cand = 0; int is_available_a0; int is_available_a1; int is_available_b0; int is_available_b1; int is_available_b2; int check_B0; int check_A0; //first left spatial merge candidate is_available_a1 = AVAILABLE(cand_left, A1); if (!singleMCLFlag && part_idx == 1 && (lc->cu.part_mode == PART_Nx2N || lc->cu.part_mode == PART_nLx2N || lc->cu.part_mode == PART_nRx2N) || isDiffMER(s, xA1, yA1, x0, y0)) { is_available_a1 = 0; } if (is_available_a1) { mergecandlist[0] = TAB_MVF_PU(A1); if (merge_idx == 0) return; nb_merge_cand++; } // above spatial merge candidate is_available_b1 = AVAILABLE(cand_up, B1); if (!singleMCLFlag && part_idx == 1 && (lc->cu.part_mode == PART_2NxN || lc->cu.part_mode == PART_2NxnU || lc->cu.part_mode == PART_2NxnD) || isDiffMER(s, xB1, yB1, x0, y0)) { is_available_b1 = 0; } if (is_available_a1 && is_available_b1) check_MER = !COMPARE_MV_REFIDX(B1, A1); if (is_available_b1 && check_MER) mergecandlist[nb_merge_cand++] = TAB_MVF_PU(B1); // above right spatial merge candidate check_MER = 1; check_B0 = PRED_BLOCK_AVAILABLE(B0); is_available_b0 = check_B0 && AVAILABLE(cand_up_right, B0); if (isDiffMER(s, xB0, yB0, x0, y0)) is_available_b0 = 0; if (is_available_b1 && is_available_b0) check_MER = !COMPARE_MV_REFIDX(B0, B1); if (is_available_b0 && check_MER) { mergecandlist[nb_merge_cand] = TAB_MVF_PU(B0); if (merge_idx == nb_merge_cand) return; nb_merge_cand++; } // left bottom spatial merge candidate check_MER = 1; check_A0 = PRED_BLOCK_AVAILABLE(A0); is_available_a0 = check_A0 && AVAILABLE(cand_bottom_left, A0); if (isDiffMER(s, xA0, yA0, x0, y0)) is_available_a0 = 0; if (is_available_a1 && is_available_a0) check_MER = !COMPARE_MV_REFIDX(A0, A1); if (is_available_a0 && check_MER) { mergecandlist[nb_merge_cand] = TAB_MVF_PU(A0); if (merge_idx == nb_merge_cand) return; nb_merge_cand++; } // above left spatial merge candidate check_MER = 1; is_available_b2 = AVAILABLE(cand_up_left, B2); if (isDiffMER(s, xB2, yB2, x0, y0)) is_available_b2 = 0; if (is_available_a1 && is_available_b2) check_MER = !COMPARE_MV_REFIDX(B2, A1); if (is_available_b1 && is_available_b2) check_MER_1 = !COMPARE_MV_REFIDX(B2, B1); if (is_available_b2 && check_MER && check_MER_1 && nb_merge_cand != 4) { mergecandlist[nb_merge_cand] = TAB_MVF_PU(B2); if (merge_idx == nb_merge_cand) return; nb_merge_cand++; } // temporal motion vector candidate if (s->sh.slice_temporal_mvp_enabled_flag && nb_merge_cand < s->sh.max_num_merge_cand) { Mv mv_l0_col, mv_l1_col; int available_l0 = temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH, 0, &mv_l0_col, 0); int available_l1 = (s->sh.slice_type == B_SLICE) ? 
temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH, 0, &mv_l1_col, 1) : 0; if (available_l0 || available_l1) { mergecandlist[nb_merge_cand].is_intra = 0; mergecandlist[nb_merge_cand].pred_flag[0] = available_l0; mergecandlist[nb_merge_cand].pred_flag[1] = available_l1; if (available_l0) { mergecandlist[nb_merge_cand].mv[0] = mv_l0_col; mergecandlist[nb_merge_cand].ref_idx[0] = 0; } if (available_l1) { mergecandlist[nb_merge_cand].mv[1] = mv_l1_col; mergecandlist[nb_merge_cand].ref_idx[1] = 0; } if (merge_idx == nb_merge_cand) return; nb_merge_cand++; } } nb_orig_merge_cand = nb_merge_cand; // combined bi-predictive merge candidates (applies for B slices) if (s->sh.slice_type == B_SLICE && nb_orig_merge_cand > 1 && nb_orig_merge_cand < s->sh.max_num_merge_cand) { int comb_idx; for (comb_idx = 0; nb_merge_cand < s->sh.max_num_merge_cand && comb_idx < nb_orig_merge_cand * (nb_orig_merge_cand - 1); comb_idx++) { int l0_cand_idx = l0_l1_cand_idx[comb_idx][0]; int l1_cand_idx = l0_l1_cand_idx[comb_idx][1]; MvField l0_cand = mergecandlist[l0_cand_idx]; MvField l1_cand = mergecandlist[l1_cand_idx]; if (l0_cand.pred_flag[0] && l1_cand.pred_flag[1] && (refPicList[0].list[l0_cand.ref_idx[0]] != refPicList[1].list[l1_cand.ref_idx[1]] || AV_RN32A(&l0_cand.mv[0]) != AV_RN32A(&l1_cand.mv[1]))) { mergecandlist[nb_merge_cand].ref_idx[0] = l0_cand.ref_idx[0]; mergecandlist[nb_merge_cand].ref_idx[1] = l1_cand.ref_idx[1]; mergecandlist[nb_merge_cand].pred_flag[0] = 1; mergecandlist[nb_merge_cand].pred_flag[1] = 1; AV_COPY32(&mergecandlist[nb_merge_cand].mv[0], &l0_cand.mv[0]); AV_COPY32(&mergecandlist[nb_merge_cand].mv[1], &l1_cand.mv[1]); mergecandlist[nb_merge_cand].is_intra = 0; if (merge_idx == nb_merge_cand) return; nb_merge_cand++; } } } // append Zero motion vector candidates while (nb_merge_cand < s->sh.max_num_merge_cand) { mergecandlist[nb_merge_cand].pred_flag[0] = 1; mergecandlist[nb_merge_cand].pred_flag[1] = s->sh.slice_type == B_SLICE; AV_ZERO32(mergecandlist[nb_merge_cand].mv + 0); AV_ZERO32(mergecandlist[nb_merge_cand].mv + 1); mergecandlist[nb_merge_cand].is_intra = 0; mergecandlist[nb_merge_cand].ref_idx[0] = zero_idx < nb_refs ? zero_idx : 0; mergecandlist[nb_merge_cand].ref_idx[1] = zero_idx < nb_refs ? 
zero_idx : 0; if (merge_idx == nb_merge_cand) return; nb_merge_cand++; zero_idx++; } }"} {"target": 1, "idx": 13738, "func": "int spapr_populate_pci_devices(sPAPRPHBState *phb, uint32_t xics_phandle, void *fdt) { PCIBus *bus = phb->host_state.bus; int bus_off, node_off = 0, devid, fn, i, n, devices; DeviceState *qdev; char nodename[256]; struct { uint32_t hi; uint64_t addr; uint64_t size; } __attribute__((packed)) reg[PCI_NUM_REGIONS + 1], assigned_addresses[PCI_NUM_REGIONS]; uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) }; struct { uint32_t hi; uint64_t child; uint64_t parent; uint64_t size; } __attribute__((packed)) ranges[] = { { cpu_to_be32(b_ss(1)), cpu_to_be64(0), cpu_to_be64(phb->io_win_addr), cpu_to_be64(memory_region_size(&phb->iospace)), }, { cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET), cpu_to_be64(phb->mem_win_addr), cpu_to_be64(memory_region_size(&phb->memwindow)), }, }; uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 }; uint32_t interrupt_map_mask[] = { cpu_to_be32(b_ddddd(-1)|b_fff(-1)), 0x0, 0x0, 0x0}; uint32_t interrupt_map[bus->nirq][7]; /* Start populating the FDT */ sprintf(nodename, \"pci@%\" PRIx64, phb->buid); bus_off = fdt_add_subnode(fdt, 0, nodename); if (bus_off < 0) { return bus_off; } #define _FDT(exp) \\ do { \\ int ret = (exp); \\ if (ret < 0) { \\ return ret; \\ } \\ } while (0) /* Write PHB properties */ _FDT(fdt_setprop_string(fdt, bus_off, \"device_type\", \"pci\")); _FDT(fdt_setprop_string(fdt, bus_off, \"compatible\", \"IBM,Logical_PHB\")); _FDT(fdt_setprop_cell(fdt, bus_off, \"#address-cells\", 0x3)); _FDT(fdt_setprop_cell(fdt, bus_off, \"#size-cells\", 0x2)); _FDT(fdt_setprop_cell(fdt, bus_off, \"#interrupt-cells\", 0x1)); _FDT(fdt_setprop(fdt, bus_off, \"used-by-rtas\", NULL, 0)); _FDT(fdt_setprop(fdt, bus_off, \"bus-range\", &bus_range, sizeof(bus_range))); _FDT(fdt_setprop(fdt, bus_off, \"ranges\", &ranges, sizeof(ranges))); _FDT(fdt_setprop(fdt, bus_off, \"reg\", &bus_reg, sizeof(bus_reg))); _FDT(fdt_setprop(fdt, bus_off, \"interrupt-map-mask\", &interrupt_map_mask, sizeof(interrupt_map_mask))); /* Populate PCI devices and allocate IRQs */ devices = 0; QTAILQ_FOREACH(qdev, &bus->qbus.children, sibling) { PCIDevice *dev = DO_UPCAST(PCIDevice, qdev, qdev); int irq_index = pci_spapr_map_irq(dev, 0); uint32_t *irqmap = interrupt_map[devices]; uint8_t *config = dev->config; devid = dev->devfn >> 3; fn = dev->devfn & 7; sprintf(nodename, \"pci@%u,%u\", devid, fn); /* Allocate interrupt from the map */ if (devid > bus->nirq) { printf(\"Unexpected behaviour in spapr_populate_pci_devices,\" \"wrong devid %u\\n\", devid); exit(-1); } irqmap[0] = cpu_to_be32(b_ddddd(devid)|b_fff(fn)); irqmap[1] = 0; irqmap[2] = 0; irqmap[3] = 0; irqmap[4] = cpu_to_be32(xics_phandle); irqmap[5] = cpu_to_be32(phb->lsi_table[irq_index].dt_irq); irqmap[6] = cpu_to_be32(0x8); /* Add node to FDT */ node_off = fdt_add_subnode(fdt, bus_off, nodename); if (node_off < 0) { return node_off; } _FDT(fdt_setprop_cell(fdt, node_off, \"vendor-id\", pci_get_word(&config[PCI_VENDOR_ID]))); _FDT(fdt_setprop_cell(fdt, node_off, \"device-id\", pci_get_word(&config[PCI_DEVICE_ID]))); _FDT(fdt_setprop_cell(fdt, node_off, \"revision-id\", pci_get_byte(&config[PCI_REVISION_ID]))); _FDT(fdt_setprop_cell(fdt, node_off, \"class-code\", pci_get_long(&config[PCI_CLASS_REVISION]) >> 8)); _FDT(fdt_setprop_cell(fdt, node_off, \"subsystem-id\", pci_get_word(&config[PCI_SUBSYSTEM_ID]))); _FDT(fdt_setprop_cell(fdt, node_off, \"subsystem-vendor-id\", 
pci_get_word(&config[PCI_SUBSYSTEM_VENDOR_ID]))); /* Config space region comes first */ reg[0].hi = cpu_to_be32( b_n(0) | b_p(0) | b_t(0) | b_ss(0/*config*/) | b_bbbbbbbb(0) | b_ddddd(devid) | b_fff(fn)); reg[0].addr = 0; reg[0].size = 0; n = 0; for (i = 0; i < PCI_NUM_REGIONS; ++i) { if (0 == dev->io_regions[i].size) { continue; } reg[n+1].hi = cpu_to_be32( b_n(0) | b_p(0) | b_t(0) | b_ss(regtype_to_ss(dev->io_regions[i].type)) | b_bbbbbbbb(0) | b_ddddd(devid) | b_fff(fn) | b_rrrrrrrr(bars[i])); reg[n+1].addr = 0; reg[n+1].size = cpu_to_be64(dev->io_regions[i].size); assigned_addresses[n].hi = cpu_to_be32( b_n(1) | b_p(0) | b_t(0) | b_ss(regtype_to_ss(dev->io_regions[i].type)) | b_bbbbbbbb(0) | b_ddddd(devid) | b_fff(fn) | b_rrrrrrrr(bars[i])); /* * Writing zeroes to assigned_addresses causes the guest kernel to * reassign BARs */ assigned_addresses[n].addr = cpu_to_be64(dev->io_regions[i].addr); assigned_addresses[n].size = reg[n+1].size; ++n; } _FDT(fdt_setprop(fdt, node_off, \"reg\", reg, sizeof(reg[0])*(n+1))); _FDT(fdt_setprop(fdt, node_off, \"assigned-addresses\", assigned_addresses, sizeof(assigned_addresses[0])*(n))); _FDT(fdt_setprop_cell(fdt, node_off, \"interrupts\", pci_get_byte(&config[PCI_INTERRUPT_PIN]))); ++devices; } /* Write interrupt map */ _FDT(fdt_setprop(fdt, bus_off, \"interrupt-map\", &interrupt_map, devices * sizeof(interrupt_map[0]))); return 0; }"} {"target": 1, "idx": 13743, "func": "static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, int cmd, abi_long arg) { const argtype *arg_type = ie->arg_type; const StructEntry *se; const argtype *field_types; const int *dst_offsets, *src_offsets; int target_size; void *argptr; abi_ulong *target_rt_dev_ptr; unsigned long *host_rt_dev_ptr; abi_long ret; int i; assert(ie->access == IOC_W); assert(*arg_type == TYPE_PTR); arg_type++; assert(*arg_type == TYPE_STRUCT); target_size = thunk_type_size(arg_type, 0); argptr = lock_user(VERIFY_READ, arg, target_size, 1); if (!argptr) { return -TARGET_EFAULT; } arg_type++; assert(*arg_type == (int)STRUCT_rtentry); se = struct_entries + *arg_type++; assert(se->convert[0] == NULL); /* convert struct here to be able to catch rt_dev string */ field_types = se->field_types; dst_offsets = se->field_offsets[THUNK_HOST]; src_offsets = se->field_offsets[THUNK_TARGET]; for (i = 0; i < se->nb_fields; i++) { if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) { assert(*field_types == TYPE_PTRVOID); target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]); host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]); if (*target_rt_dev_ptr != 0) { *host_rt_dev_ptr = (unsigned long)lock_user_string( tswapal(*target_rt_dev_ptr)); if (!*host_rt_dev_ptr) { unlock_user(argptr, arg, 0); return -TARGET_EFAULT; } } else { *host_rt_dev_ptr = 0; } field_types++; continue; } field_types = thunk_convert(buf_temp + dst_offsets[i], argptr + src_offsets[i], field_types, THUNK_HOST); } unlock_user(argptr, arg, 0); assert(host_rt_dev_ptr); ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); if (*host_rt_dev_ptr != 0) { unlock_user((void *)*host_rt_dev_ptr, *target_rt_dev_ptr, 0); } return ret; }"} {"target": 0, "idx": 13753, "func": "void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint) { #if defined(TARGET_HAS_ICE) TAILQ_REMOVE(&env->breakpoints, breakpoint, entry); breakpoint_invalidate(env, breakpoint->pc); qemu_free(breakpoint); #endif }"} {"target": 0, "idx": 13758, "func": "static void gen_conditional_store(DisasContext *ctx, TCGv EA, int reg, int 
size) { int l1; tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); l1 = gen_new_label(); tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1); tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ); #if defined(TARGET_PPC64) if (size == 8) { gen_qemu_st64(ctx, cpu_gpr[reg], EA); } else #endif if (size == 4) { gen_qemu_st32(ctx, cpu_gpr[reg], EA); } else if (size == 2) { gen_qemu_st16(ctx, cpu_gpr[reg], EA); #if defined(TARGET_PPC64) } else if (size == 16) { TCGv gpr1, gpr2 , EA8; if (unlikely(ctx->le_mode)) { gpr1 = cpu_gpr[reg+1]; gpr2 = cpu_gpr[reg]; } else { gpr1 = cpu_gpr[reg]; gpr2 = cpu_gpr[reg+1]; } gen_qemu_st64(ctx, gpr1, EA); EA8 = tcg_temp_local_new(); gen_addr_add(ctx, EA8, EA, 8); gen_qemu_st64(ctx, gpr2, EA8); tcg_temp_free(EA8); #endif } else { gen_qemu_st8(ctx, cpu_gpr[reg], EA); } gen_set_label(l1); tcg_gen_movi_tl(cpu_reserve, -1); }"} {"target": 0, "idx": 13773, "func": "static uint64_t cirrus_linear_bitblt_read(void *opaque, target_phys_addr_t addr, unsigned size) { CirrusVGAState *s = opaque; uint32_t ret; /* XXX handle bitblt */ (void)s; ret = 0xff; return ret; }"} {"target": 0, "idx": 13802, "func": "bochs_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) { BDRVBochsState *s = bs->opaque; uint64_t sector_num = offset >> BDRV_SECTOR_BITS; int nb_sectors = bytes >> BDRV_SECTOR_BITS; uint64_t bytes_done = 0; QEMUIOVector local_qiov; int ret; assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); qemu_iovec_init(&local_qiov, qiov->niov); qemu_co_mutex_lock(&s->lock); while (nb_sectors > 0) { int64_t block_offset = seek_to_sector(bs, sector_num); if (block_offset < 0) { ret = block_offset; goto fail; } qemu_iovec_reset(&local_qiov); qemu_iovec_concat(&local_qiov, qiov, bytes_done, 512); if (block_offset > 0) { ret = bdrv_co_preadv(bs->file->bs, block_offset, 512, &local_qiov, 0); if (ret < 0) { goto fail; } } else { qemu_iovec_memset(&local_qiov, 0, 0, 512); } nb_sectors--; sector_num++; bytes_done += 512; } ret = 0; fail: qemu_co_mutex_unlock(&s->lock); qemu_iovec_destroy(&local_qiov); return ret; }"} {"target": 0, "idx": 13812, "func": "static void parse_numa_node(NumaNodeOptions *node, QemuOpts *opts, Error **errp) { uint16_t nodenr; uint16List *cpus = NULL; if (node->has_nodeid) { nodenr = node->nodeid; } else { nodenr = nb_numa_nodes; } if (nodenr >= MAX_NODES) { error_setg(errp, \"Max number of NUMA nodes reached: %\" PRIu16 \"\", nodenr); return; } if (numa_info[nodenr].present) { error_setg(errp, \"Duplicate NUMA nodeid: %\" PRIu16, nodenr); return; } for (cpus = node->cpus; cpus; cpus = cpus->next) { if (cpus->value >= max_cpus) { error_setg(errp, \"CPU index (%\" PRIu16 \")\" \" should be smaller than maxcpus (%d)\", cpus->value, max_cpus); return; } bitmap_set(numa_info[nodenr].node_cpu, cpus->value, 1); } if (node->has_mem && node->has_memdev) { error_setg(errp, \"qemu: cannot specify both mem= and memdev=\"); return; } if (have_memdevs == -1) { have_memdevs = node->has_memdev; } if (node->has_memdev != have_memdevs) { error_setg(errp, \"qemu: memdev option must be specified for either \" \"all or no nodes\"); return; } if (node->has_mem) { uint64_t mem_size = node->mem; const char *mem_str = qemu_opt_get(opts, \"mem\"); /* Fix up legacy suffix-less format */ if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) { mem_size <<= 20; } numa_info[nodenr].node_mem = mem_size; } if (node->has_memdev) { Object *o; o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL); if (!o) { 
error_setg(errp, \"memdev=%s is ambiguous\", node->memdev); return; } object_ref(o); numa_info[nodenr].node_mem = object_property_get_int(o, \"size\", NULL); numa_info[nodenr].node_memdev = MEMORY_BACKEND(o); } numa_info[nodenr].present = true; max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1); }"} {"target": 0, "idx": 13820, "func": "static void nested_struct_cleanup(UserDefNested *udnp) { qapi_free_UserDefNested(udnp); }"} {"target": 0, "idx": 13833, "func": "static void hl_decode_mb(H264Context *h){ MpegEncContext * const s = &h->s; const int mb_xy= h->mb_xy; const int mb_type= s->current_picture.mb_type[mb_xy]; int is_complex = h->is_complex || IS_INTRA_PCM(mb_type) || s->qscale == 0; if(ENABLE_H264_ENCODER && !s->decode) return; if (is_complex) hl_decode_mb_complex(h); else hl_decode_mb_simple(h); }"} {"target": 1, "idx": 13836, "func": "static int query_format(struct vf_instance *vf, unsigned int fmt) { /* FIXME - really any YUV 4:2:0 input format should work */ switch (fmt) { case IMGFMT_YV12: case IMGFMT_IYUV: case IMGFMT_I420: return ff_vf_next_query_format(vf, IMGFMT_YV12); } return 0; }"} {"target": 1, "idx": 13857, "func": "void ff_jref_idct_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block) { ff_j_rev_dct(block); ff_add_pixels_clamped(block, dest, line_size); }"} {"target": 0, "idx": 13869, "func": "static void get_id3_tag(AVFormatContext *s, int len) { ID3v2ExtraMeta *id3v2_extra_meta = NULL; ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta); if (id3v2_extra_meta) ff_id3v2_parse_apic(s, &id3v2_extra_meta); ff_id3v2_free_extra_meta(&id3v2_extra_meta); }"} {"target": 1, "idx": 13886, "func": "static void test_qga_fsfreeze_and_thaw(gconstpointer fix) { const TestFixture *fixture = fix; QDict *ret; const gchar *status; ret = qmp_fd(fixture->fd, \"{'execute': 'guest-fsfreeze-freeze'}\"); g_assert_nonnull(ret); qmp_assert_no_error(ret); QDECREF(ret); ret = qmp_fd(fixture->fd, \"{'execute': 'guest-fsfreeze-status'}\"); g_assert_nonnull(ret); qmp_assert_no_error(ret); status = qdict_get_try_str(ret, \"return\"); g_assert_cmpstr(status, ==, \"frozen\"); QDECREF(ret); ret = qmp_fd(fixture->fd, \"{'execute': 'guest-fsfreeze-thaw'}\"); g_assert_nonnull(ret); qmp_assert_no_error(ret); QDECREF(ret); }"} {"target": 0, "idx": 13893, "func": "static void flush_buffer(ByteIOContext *s) { if (s->buf_ptr > s->buffer) { if (s->write_packet) s->write_packet(s->opaque, s->buffer, s->buf_ptr - s->buffer); if(s->checksum_ptr){ s->checksum= s->update_checksum(s->checksum, s->checksum_ptr, s->buf_ptr - s->checksum_ptr); s->checksum_ptr= s->buffer; } s->pos += s->buf_ptr - s->buffer; } s->buf_ptr = s->buffer; }"} {"target": 1, "idx": 13896, "func": "int net_init_slirp(QemuOpts *opts, const char *name, VLANState *vlan) { struct slirp_config_str *config; const char *vhost; const char *vhostname; const char *vdhcp_start; const char *vnamesrv; const char *tftp_export; const char *bootfile; const char *smb_export; const char *vsmbsrv; const char *restrict_opt; char *vnet = NULL; int restricted = 0; int ret; vhost = qemu_opt_get(opts, \"host\"); vhostname = qemu_opt_get(opts, \"hostname\"); vdhcp_start = qemu_opt_get(opts, \"dhcpstart\"); vnamesrv = qemu_opt_get(opts, \"dns\"); tftp_export = qemu_opt_get(opts, \"tftp\"); bootfile = qemu_opt_get(opts, \"bootfile\"); smb_export = qemu_opt_get(opts, \"smb\"); vsmbsrv = qemu_opt_get(opts, \"smbserver\"); restrict_opt = qemu_opt_get(opts, \"restrict\"); if (restrict_opt) { if (!strcmp(restrict_opt, \"on\") || !strcmp(restrict_opt, \"yes\") || 
!strcmp(restrict_opt, \"y\")) { restricted = 1; } else if (strcmp(restrict_opt, \"off\") && strcmp(restrict_opt, \"no\") && strcmp(restrict_opt, \"n\")) { error_report(\"invalid option: 'restrict=%s'\", restrict_opt); return -1; } } if (qemu_opt_get(opts, \"ip\")) { const char *ip = qemu_opt_get(opts, \"ip\"); int l = strlen(ip) + strlen(\"/24\") + 1; vnet = g_malloc(l); /* emulate legacy ip= parameter */ pstrcpy(vnet, l, ip); pstrcat(vnet, l, \"/24\"); } if (qemu_opt_get(opts, \"net\")) { if (vnet) { g_free(vnet); } vnet = g_strdup(qemu_opt_get(opts, \"net\")); } qemu_opt_foreach(opts, net_init_slirp_configs, NULL, 0); ret = net_slirp_init(vlan, \"user\", name, restricted, vnet, vhost, vhostname, tftp_export, bootfile, vdhcp_start, vnamesrv, smb_export, vsmbsrv); while (slirp_configs) { config = slirp_configs; slirp_configs = config->next; g_free(config); } g_free(vnet); return ret; }"} {"target": 1, "idx": 13914, "func": "static int daala_packet(AVFormatContext *s, int idx) { int seg, duration = 1; struct ogg *ogg = s->priv_data; struct ogg_stream *os = ogg->streams + idx; /* * first packet handling: here we parse the duration of each packet in the * first page and compare the total duration to the page granule to find the * encoder delay and set the first timestamp */ if ((!os->lastpts || os->lastpts == AV_NOPTS_VALUE) && !(os->flags & OGG_FLAG_EOS)) { for (seg = os->segp; seg < os->nsegs; seg++) if (os->segments[seg] < 255) duration++; os->lastpts = os->lastdts = daala_gptopts(s, idx, os->granule, NULL) - duration; if(s->streams[idx]->start_time == AV_NOPTS_VALUE) { s->streams[idx]->start_time = os->lastpts; if (s->streams[idx]->duration) s->streams[idx]->duration -= s->streams[idx]->start_time; } } /* parse packet duration */ if (os->psize > 0) os->pduration = 1; return 0; }"} {"target": 1, "idx": 13938, "func": "static int clv_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; CLVContext *c = avctx->priv_data; GetByteContext gb; uint32_t frame_type; int i, j; int ret; int mb_ret = 0; bytestream2_init(&gb, buf, buf_size); if (avctx->codec_tag == MKTAG('C','L','V','1')) { int skip = bytestream2_get_byte(&gb); bytestream2_skip(&gb, (skip + 1) * 8); } frame_type = bytestream2_get_byte(&gb); if ((ret = ff_reget_buffer(avctx, c->pic)) < 0) return ret; c->pic->key_frame = frame_type & 0x20 ? 1 : 0; c->pic->pict_type = frame_type & 0x20 ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; if (frame_type & 0x2) { if (buf_size < c->mb_width * c->mb_height) { av_log(avctx, AV_LOG_ERROR, \"Packet too small\\n\"); return AVERROR_INVALIDDATA; } bytestream2_get_be32(&gb); // frame size; c->ac_quant = bytestream2_get_byte(&gb); c->luma_dc_quant = 32; c->chroma_dc_quant = 32; if ((ret = init_get_bits8(&c->gb, buf + bytestream2_tell(&gb), (buf_size - bytestream2_tell(&gb)))) < 0) return ret; for (i = 0; i < 3; i++) c->top_dc[i] = 32; for (i = 0; i < 4; i++) c->left_dc[i] = 32; for (j = 0; j < c->mb_height; j++) { for (i = 0; i < c->mb_width; i++) { ret = decode_mb(c, i, j); if (ret < 0) mb_ret = ret; } } } else { } if ((ret = av_frame_ref(data, c->pic)) < 0) return ret; *got_frame = 1; return mb_ret < 0 ? 
mb_ret : buf_size; }"} {"target": 1, "idx": 14001, "func": "static int read_frame(BVID_DemuxContext *vid, AVIOContext *pb, AVPacket *pkt, uint8_t block_type, AVFormatContext *s) { uint8_t * vidbuf_start = NULL; int vidbuf_nbytes = 0; int code; int bytes_copied = 0; int position, duration, npixels; unsigned int vidbuf_capacity; int ret = 0; AVStream *st; if (vid->video_index < 0) { st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); vid->video_index = st->index; if (vid->audio_index < 0) { av_log_ask_for_sample(s, \"No audio packet before first video \" \"packet. Using default video time base.\\n\"); } avpriv_set_pts_info(st, 64, 185, vid->sample_rate); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = AV_CODEC_ID_BETHSOFTVID; st->codec->width = vid->width; st->codec->height = vid->height; } st = s->streams[vid->video_index]; npixels = st->codec->width * st->codec->height; vidbuf_start = av_malloc(vidbuf_capacity = BUFFER_PADDING_SIZE); if(!vidbuf_start) return AVERROR(ENOMEM); // save the file position for the packet, include block type position = avio_tell(pb) - 1; vidbuf_start[vidbuf_nbytes++] = block_type; // get the current packet duration duration = vid->bethsoft_global_delay + avio_rl16(pb); // set the y offset if it exists (decoder header data should be in data section) if(block_type == VIDEO_YOFF_P_FRAME){ if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], 2) != 2) { ret = AVERROR(EIO); goto fail; } vidbuf_nbytes += 2; } do{ vidbuf_start = av_fast_realloc(vidbuf_start, &vidbuf_capacity, vidbuf_nbytes + BUFFER_PADDING_SIZE); if(!vidbuf_start) return AVERROR(ENOMEM); code = avio_r8(pb); vidbuf_start[vidbuf_nbytes++] = code; if(code >= 0x80){ // rle sequence if(block_type == VIDEO_I_FRAME) vidbuf_start[vidbuf_nbytes++] = avio_r8(pb); } else if(code){ // plain sequence if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], code) != code) { ret = AVERROR(EIO); goto fail; } vidbuf_nbytes += code; } bytes_copied += code & 0x7F; if(bytes_copied == npixels){ // sometimes no stop character is given, need to keep track of bytes copied // may contain a 0 byte even if read all pixels if(avio_r8(pb)) avio_seek(pb, -1, SEEK_CUR); break; } if (bytes_copied > npixels) { ret = AVERROR_INVALIDDATA; goto fail; } } while(code); // copy data into packet if ((ret = av_new_packet(pkt, vidbuf_nbytes)) < 0) goto fail; memcpy(pkt->data, vidbuf_start, vidbuf_nbytes); av_free(vidbuf_start); pkt->pos = position; pkt->stream_index = vid->video_index; pkt->duration = duration; if (block_type == VIDEO_I_FRAME) pkt->flags |= AV_PKT_FLAG_KEY; /* if there is a new palette available, add it to packet side data */ if (vid->palette) { uint8_t *pdata = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, BVID_PALETTE_SIZE); memcpy(pdata, vid->palette, BVID_PALETTE_SIZE); av_freep(&vid->palette); } vid->nframes--; // used to check if all the frames were read return 0; fail: av_free(vidbuf_start); return ret; }"} {"target": 1, "idx": 14027, "func": "static int stdio_pclose(void *opaque) { QEMUFileStdio *s = opaque; int ret; ret = pclose(s->stdio_file); if (ret == -1) { ret = -errno; } else if (!WIFEXITED(ret) || WEXITSTATUS(ret) != 0) { /* close succeeded, but non-zero exit code: */ ret = -EIO; /* fake errno value */ } g_free(s); return ret; }"} {"target": 1, "idx": 14030, "func": "static int nvdec_vp8_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) { VP8Context *h = avctx->priv_data; NVDECContext *ctx = avctx->internal->hwaccel_priv_data; CUVIDPICPARAMS *pp = &ctx->pic_params; 
FrameDecodeData *fdd; NVDECFrame *cf; AVFrame *cur_frame = h->framep[VP56_FRAME_CURRENT]->tf.f; int ret; ret = ff_nvdec_start_frame(avctx, cur_frame); if (ret < 0) return ret; fdd = (FrameDecodeData*)cur_frame->private_ref->data; cf = (NVDECFrame*)fdd->hwaccel_priv; *pp = (CUVIDPICPARAMS) { .PicWidthInMbs = (cur_frame->width + 15) / 16, .FrameHeightInMbs = (cur_frame->height + 15) / 16, .CurrPicIdx = cf->idx, .CodecSpecific.vp8 = { .width = cur_frame->width, .height = cur_frame->height, .first_partition_size = h->header_partition_size, .LastRefIdx = safe_get_ref_idx(h->framep[VP56_FRAME_PREVIOUS]), .GoldenRefIdx = safe_get_ref_idx(h->framep[VP56_FRAME_GOLDEN]), .AltRefIdx = safe_get_ref_idx(h->framep[VP56_FRAME_GOLDEN2]), .frame_type = !h->keyframe, .version = h->profile, .show_frame = !h->invisible, .update_mb_segmentation_data = h->segmentation.enabled ? h->segmentation.update_feature_data : 0, } }; return 0; }"} {"target": 1, "idx": 14034, "func": "static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev) { AHCIPCIState *d = container_of(s, AHCIPCIState, ahci); DPRINTF(0, \"lower irq\\n\"); if (!msi_enabled(PCI_DEVICE(d))) { qemu_irq_lower(s->irq); } }"} {"target": 1, "idx": 14044, "func": "static void adb_mouse_realizefn(DeviceState *dev, Error **errp) { MouseState *s = ADB_MOUSE(dev); ADBMouseClass *amc = ADB_MOUSE_GET_CLASS(dev); amc->parent_realize(dev, errp); qemu_add_mouse_event_handler(adb_mouse_event, s, 0, \"QEMU ADB Mouse\"); }"} {"target": 1, "idx": 14046, "func": "int msmpeg4_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) { int cbp, code, i; int pred, val; UINT8 *coded_val; /* special slice handling */ if (s->mb_x == 0) { if ((s->mb_y % s->slice_height) == 0) { int wrap; /* reset DC pred (set previous line to 1024) */ wrap = 2 * s->mb_width + 2; memsetw(&s->dc_val[0][(1) + (2 * s->mb_y) * wrap], 1024, 2 * s->mb_width); wrap = s->mb_width + 2; memsetw(&s->dc_val[1][(1) + (s->mb_y) * wrap], 1024, s->mb_width); memsetw(&s->dc_val[2][(1) + (s->mb_y) * wrap], 1024, s->mb_width); s->first_slice_line = 1; } else { s->first_slice_line = 0; } } if (s->pict_type == P_TYPE) { set_stat(ST_INTER_MB); if (s->use_skip_mb_code) { if (get_bits1(&s->gb)) { /* skip mb */ s->mb_intra = 0; for(i=0;i<6;i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mb_skiped = 1; return 0; } } code = get_vlc(&s->gb, &mb_non_intra_vlc); if (code < 0) return -1; if (code & 0x40) s->mb_intra = 0; else s->mb_intra = 1; cbp = code & 0x3f; } else { set_stat(ST_INTRA_MB); s->mb_intra = 1; code = get_vlc(&s->gb, &mb_intra_vlc); if (code < 0) return -1; /* predict coded block pattern */ cbp = 0; for(i=0;i<6;i++) { val = ((code >> (5 - i)) & 1); if (i < 4) { pred = coded_block_pred(s, i, &coded_val); val = val ^ pred; *coded_val = val; } cbp |= val << (5 - i); } } if (!s->mb_intra) { int mx, my; set_stat(ST_MV); h263_pred_motion(s, 0, &mx, &my); if (msmpeg4_decode_motion(s, &mx, &my) < 0) return -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = mx; s->mv[0][0][1] = my; } else { set_stat(ST_INTRA_MB); s->ac_pred = get_bits1(&s->gb); } for (i = 0; i < 6; i++) { if (msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1) < 0) return -1; } return 0; }"} {"target": 1, "idx": 14063, "func": "static int matroska_parse_block(MatroskaDemuxContext *matroska, uint8_t *data, int size, int64_t pos, uint64_t cluster_time, uint64_t duration, int is_keyframe, int64_t cluster_pos) { uint64_t timecode = AV_NOPTS_VALUE; 
MatroskaTrack *track; int res = 0; AVStream *st; AVPacket *pkt; int16_t block_time; uint32_t *lace_size = NULL; int n, flags, laces = 0; uint64_t num; if ((n = matroska_ebmlnum_uint(matroska, data, size, &num)) < 0) { av_log(matroska->ctx, AV_LOG_ERROR, \"EBML block data error\\n\"); return res; } data += n; size -= n; track = matroska_find_track_by_num(matroska, num); if (size <= 3 || !track || !track->stream) { av_log(matroska->ctx, AV_LOG_INFO, \"Invalid stream %\"PRIu64\" or size %u\\n\", num, size); return res; } st = track->stream; if (st->discard >= AVDISCARD_ALL) return res; if (duration == AV_NOPTS_VALUE) duration = track->default_duration / matroska->time_scale; block_time = AV_RB16(data); data += 2; flags = *data++; size -= 3; if (is_keyframe == -1) is_keyframe = flags & 0x80 ? PKT_FLAG_KEY : 0; if (cluster_time != (uint64_t)-1 && (block_time >= 0 || cluster_time >= -block_time)) { timecode = cluster_time + block_time; if (track->type == MATROSKA_TRACK_TYPE_SUBTITLE && timecode < track->end_timecode) is_keyframe = 0; /* overlapping subtitles are not key frame */ if (is_keyframe) av_add_index_entry(st, cluster_pos, timecode, 0,0,AVINDEX_KEYFRAME); track->end_timecode = FFMAX(track->end_timecode, timecode+duration); } if (matroska->skip_to_keyframe && track->type != MATROSKA_TRACK_TYPE_SUBTITLE) { if (!is_keyframe || timecode < matroska->skip_to_timecode) return res; matroska->skip_to_keyframe = 0; } switch ((flags & 0x06) >> 1) { case 0x0: /* no lacing */ laces = 1; lace_size = av_mallocz(sizeof(int)); lace_size[0] = size; break; case 0x1: /* Xiph lacing */ case 0x2: /* fixed-size lacing */ case 0x3: /* EBML lacing */ assert(size>0); // size <=3 is checked before size-=3 above laces = (*data) + 1; data += 1; size -= 1; lace_size = av_mallocz(laces * sizeof(int)); switch ((flags & 0x06) >> 1) { case 0x1: /* Xiph lacing */ { uint8_t temp; uint32_t total = 0; for (n = 0; res == 0 && n < laces - 1; n++) { while (1) { if (size == 0) { res = -1; break; } temp = *data; lace_size[n] += temp; data += 1; size -= 1; if (temp != 0xff) break; } total += lace_size[n]; } lace_size[n] = size - total; break; } case 0x2: /* fixed-size lacing */ for (n = 0; n < laces; n++) lace_size[n] = size / laces; break; case 0x3: /* EBML lacing */ { uint32_t total; n = matroska_ebmlnum_uint(matroska, data, size, &num); if (n < 0) { av_log(matroska->ctx, AV_LOG_INFO, \"EBML block data error\\n\"); break; } data += n; size -= n; total = lace_size[0] = num; for (n = 1; res == 0 && n < laces - 1; n++) { int64_t snum; int r; r = matroska_ebmlnum_sint(matroska, data, size, &snum); if (r < 0) { av_log(matroska->ctx, AV_LOG_INFO, \"EBML block data error\\n\"); break; } data += r; size -= r; lace_size[n] = lace_size[n - 1] + snum; total += lace_size[n]; } lace_size[n] = size - total; break; } } break; } if (res == 0) { for (n = 0; n < laces; n++) { if (st->codec->codec_id == CODEC_ID_RA_288 || st->codec->codec_id == CODEC_ID_COOK || st->codec->codec_id == CODEC_ID_ATRAC3) { int a = st->codec->block_align; int sps = track->audio.sub_packet_size; int cfs = track->audio.coded_framesize; int h = track->audio.sub_packet_h; int y = track->audio.sub_packet_cnt; int w = track->audio.frame_size; int x; if (!track->audio.pkt_cnt) { if (st->codec->codec_id == CODEC_ID_RA_288) for (x=0; x<h/2; x++) memcpy(track->audio.buf+x*2*w+y*cfs, data+x*cfs, cfs); else for (x=0; x<w/sps; x++) memcpy(track->audio.buf+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), data+x*sps, sps); if (++track->audio.sub_packet_cnt >= h) { track->audio.sub_packet_cnt = 0; track->audio.pkt_cnt = h*w / a; } } while 
(track->audio.pkt_cnt) { pkt = av_mallocz(sizeof(AVPacket)); av_new_packet(pkt, a); memcpy(pkt->data, track->audio.buf + a * (h*w / a - track->audio.pkt_cnt--), a); pkt->pos = pos; pkt->stream_index = st->index; dynarray_add(&matroska->packets,&matroska->num_packets,pkt); } } else { MatroskaTrackEncoding *encodings = track->encodings.elem; int offset = 0, pkt_size = lace_size[n]; uint8_t *pkt_data = data; if (encodings && encodings->scope & 1) { offset = matroska_decode_buffer(&pkt_data,&pkt_size, track); if (offset < 0) continue; } pkt = av_mallocz(sizeof(AVPacket)); /* XXX: prevent data copy... */ if (av_new_packet(pkt, pkt_size+offset) < 0) { av_free(pkt); res = AVERROR(ENOMEM); break; } if (offset) memcpy (pkt->data, encodings->compression.settings.data, offset); memcpy (pkt->data+offset, pkt_data, pkt_size); if (pkt_data != data) av_free(pkt_data); if (n == 0) pkt->flags = is_keyframe; pkt->stream_index = st->index; pkt->pts = timecode; pkt->pos = pos; if (st->codec->codec_id == CODEC_ID_TEXT) pkt->convergence_duration = duration; else if (track->type != MATROSKA_TRACK_TYPE_SUBTITLE) pkt->duration = duration; if (st->codec->codec_id == CODEC_ID_SSA) matroska_fix_ass_packet(matroska, pkt, duration); if (matroska->prev_pkt && timecode != AV_NOPTS_VALUE && matroska->prev_pkt->pts == timecode && matroska->prev_pkt->stream_index == st->index) matroska_merge_packets(matroska->prev_pkt, pkt); else { dynarray_add(&matroska->packets,&matroska->num_packets,pkt); matroska->prev_pkt = pkt; } } if (timecode != AV_NOPTS_VALUE) timecode = duration ? timecode + duration : AV_NOPTS_VALUE; data += lace_size[n]; } } av_free(lace_size); return res; }"} {"target": 0, "idx": 14088, "func": "static int coroutine_fn backup_do_cow(BlockDriverState *bs, int64_t sector_num, int nb_sectors, bool *error_is_read, bool is_write_notifier) { BackupBlockJob *job = (BackupBlockJob *)bs->job; CowRequest cow_request; struct iovec iov; QEMUIOVector bounce_qiov; void *bounce_buffer = NULL; int ret = 0; int64_t sectors_per_cluster = cluster_size_sectors(job); int64_t start, end; int n; qemu_co_rwlock_rdlock(&job->flush_rwlock); start = sector_num / sectors_per_cluster; end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster); trace_backup_do_cow_enter(job, start, sector_num, nb_sectors); wait_for_overlapping_requests(job, start, end); cow_request_begin(&cow_request, job, start, end); for (; start < end; start++) { if (hbitmap_get(job->bitmap, start)) { trace_backup_do_cow_skip(job, start); continue; /* already copied */ } trace_backup_do_cow_process(job, start); n = MIN(sectors_per_cluster, job->common.len / BDRV_SECTOR_SIZE - start * sectors_per_cluster); if (!bounce_buffer) { bounce_buffer = qemu_blockalign(bs, job->cluster_size); } iov.iov_base = bounce_buffer; iov.iov_len = n * BDRV_SECTOR_SIZE; qemu_iovec_init_external(&bounce_qiov, &iov, 1); if (is_write_notifier) { ret = bdrv_co_readv_no_serialising(bs, start * sectors_per_cluster, n, &bounce_qiov); } else { ret = bdrv_co_readv(bs, start * sectors_per_cluster, n, &bounce_qiov); } if (ret < 0) { trace_backup_do_cow_read_fail(job, start, ret); if (error_is_read) { *error_is_read = true; } goto out; } if (buffer_is_zero(iov.iov_base, iov.iov_len)) { ret = bdrv_co_write_zeroes(job->target, start * sectors_per_cluster, n, BDRV_REQ_MAY_UNMAP); } else { ret = bdrv_co_writev(job->target, start * sectors_per_cluster, n, &bounce_qiov); } if (ret < 0) { trace_backup_do_cow_write_fail(job, start, ret); if (error_is_read) { *error_is_read = false; } goto out; } 
hbitmap_set(job->bitmap, start, 1); /* Publish progress, guest I/O counts as progress too. Note that the * offset field is an opaque progress value, it is not a disk offset. */ job->sectors_read += n; job->common.offset += n * BDRV_SECTOR_SIZE; } out: if (bounce_buffer) { qemu_vfree(bounce_buffer); } cow_request_end(&cow_request); trace_backup_do_cow_return(job, sector_num, nb_sectors, ret); qemu_co_rwlock_unlock(&job->flush_rwlock); return ret; }"} {"target": 1, "idx": 14099, "func": "static void gen_dcbz(DisasContext *ctx) { TCGv tcgv_addr; TCGv_i32 tcgv_is_dcbzl; int is_dcbzl = ctx->opcode & 0x00200000 ? 1 : 0; gen_set_access_type(ctx, ACCESS_CACHE); tcgv_addr = tcg_temp_new(); tcgv_is_dcbzl = tcg_const_i32(is_dcbzl); gen_addr_reg_index(ctx, tcgv_addr); gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_is_dcbzl); tcg_temp_free(tcgv_addr); tcg_temp_free_i32(tcgv_is_dcbzl); }"} {"target": 1, "idx": 14102, "func": "static void rtsp_cmd_pause(HTTPContext *c, const char *url, RTSPHeader *h) { HTTPContext *rtp_c; rtp_c = find_rtp_session_with_url(url, h->session_id); if (!rtp_c) { rtsp_reply_error(c, RTSP_STATUS_SESSION); return; } if (rtp_c->state != HTTPSTATE_SEND_DATA && rtp_c->state != HTTPSTATE_WAIT_FEED) { rtsp_reply_error(c, RTSP_STATUS_STATE); return; } rtp_c->state = HTTPSTATE_READY; /* now everything is OK, so we can send the connection parameters */ rtsp_reply_header(c, RTSP_STATUS_OK); /* session ID */ url_fprintf(c->pb, \"Session: %s\\r\\n\", rtp_c->session_id); url_fprintf(c->pb, \"\\r\\n\"); }"} {"target": 0, "idx": 14118, "func": "static void spapr_vio_quiesce_one(VIOsPAPRDevice *dev) { if (dev->tcet) { spapr_tce_reset(dev->tcet); } free_crq(dev); }"} {"target": 0, "idx": 14121, "func": "PXA2xxMMCIState *pxa2xx_mmci_init(MemoryRegion *sysmem, hwaddr base, BlockDriverState *bd, qemu_irq irq, qemu_irq rx_dma, qemu_irq tx_dma) { PXA2xxMMCIState *s; s = (PXA2xxMMCIState *) g_malloc0(sizeof(PXA2xxMMCIState)); s->irq = irq; s->rx_dma = rx_dma; s->tx_dma = tx_dma; memory_region_init_io(&s->iomem, NULL, &pxa2xx_mmci_ops, s, \"pxa2xx-mmci\", 0x00100000); memory_region_add_subregion(sysmem, base, &s->iomem); /* Instantiate the actual storage */ s->card = sd_init(bd, false); if (s->card == NULL) { exit(1); } register_savevm(NULL, \"pxa2xx_mmci\", 0, 0, pxa2xx_mmci_save, pxa2xx_mmci_load, s); return s; }"} {"target": 0, "idx": 14130, "func": "int64_t timerlist_deadline_ns(QEMUTimerList *timer_list) { int64_t delta; if (!timer_list->clock->enabled || !timer_list->active_timers) { return -1; } delta = timer_list->active_timers->expire_time - qemu_clock_get_ns(timer_list->clock->type); if (delta <= 0) { return 0; } return delta; }"} {"target": 0, "idx": 14132, "func": "void *qemu_mallocz(size_t size) { void *ptr; ptr = qemu_malloc(size); if (!ptr) return NULL; memset(ptr, 0, size); return ptr; }"} {"target": 0, "idx": 14133, "func": "sosendoob(struct socket *so) { struct sbuf *sb = &so->so_rcv; char buff[2048]; /* XXX Shouldn't be sending more oob data than this */ int n, len; DEBUG_CALL(\"sosendoob\"); DEBUG_ARG(\"so = %p\", so); DEBUG_ARG(\"sb->sb_cc = %d\", sb->sb_cc); if (so->so_urgc > 2048) so->so_urgc = 2048; /* XXXX */ if (sb->sb_rptr < sb->sb_wptr) { /* We can send it directly */ n = slirp_send(so, sb->sb_rptr, so->so_urgc, (MSG_OOB)); /* |MSG_DONTWAIT)); */ so->so_urgc -= n; DEBUG_MISC((dfd, \" --- sent %d bytes urgent data, %d urgent bytes left\\n\", n, so->so_urgc)); } else { /* * Since there's no sendv or sendtov like writev, * we must copy all data to a linear buffer then * send it all 
*/ len = (sb->sb_data + sb->sb_datalen) - sb->sb_rptr; if (len > so->so_urgc) len = so->so_urgc; memcpy(buff, sb->sb_rptr, len); so->so_urgc -= len; if (so->so_urgc) { n = sb->sb_wptr - sb->sb_data; if (n > so->so_urgc) n = so->so_urgc; memcpy((buff + len), sb->sb_data, n); so->so_urgc -= n; len += n; } n = slirp_send(so, buff, len, (MSG_OOB)); /* |MSG_DONTWAIT)); */ #ifdef DEBUG if (n != len) DEBUG_ERROR((dfd, \"Didn't send all data urgently XXXXX\\n\")); #endif DEBUG_MISC((dfd, \" ---2 sent %d bytes urgent data, %d urgent bytes left\\n\", n, so->so_urgc)); } sb->sb_cc -= n; sb->sb_rptr += n; if (sb->sb_rptr >= (sb->sb_data + sb->sb_datalen)) sb->sb_rptr -= sb->sb_datalen; return n; }"} {"target": 0, "idx": 14136, "func": "AVParserState *ff_store_parser_state(AVFormatContext *s) { int i; AVStream *st; AVParserStreamState *ss; AVParserState *state = av_malloc(sizeof(AVParserState)); if (!state) return NULL; state->stream_states = av_malloc(sizeof(AVParserStreamState) * s->nb_streams); if (!state->stream_states) { av_free(state); return NULL; } state->fpos = avio_tell(s->pb); // copy context structures state->cur_st = s->cur_st; state->packet_buffer = s->packet_buffer; state->raw_packet_buffer = s->raw_packet_buffer; state->raw_packet_buffer_remaining_size = s->raw_packet_buffer_remaining_size; s->cur_st = NULL; s->packet_buffer = NULL; s->raw_packet_buffer = NULL; s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE; // copy stream structures state->nb_streams = s->nb_streams; for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; ss = &state->stream_states[i]; ss->parser = st->parser; ss->last_IP_pts = st->last_IP_pts; ss->cur_dts = st->cur_dts; ss->reference_dts = st->reference_dts; ss->cur_ptr = st->cur_ptr; ss->cur_len = st->cur_len; ss->probe_packets = st->probe_packets; ss->cur_pkt = st->cur_pkt; st->parser = NULL; st->last_IP_pts = AV_NOPTS_VALUE; st->cur_dts = AV_NOPTS_VALUE; st->reference_dts = AV_NOPTS_VALUE; st->cur_ptr = NULL; st->cur_len = 0; st->probe_packets = MAX_PROBE_PACKETS; av_init_packet(&st->cur_pkt); } return state; }"} {"target": 1, "idx": 14146, "func": "void ff_mpegts_parse_close(MpegTSContext *ts) { int i; for(i=0;i<NB_PID_MAX;i++) av_free(ts->pids[i]); av_free(ts); }"} {"target": 1, "idx": 14147, "func": "static void gen_tlbiel(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif }"} {"target": 0, "idx": 14172, "func": "static int config_input(AVFilterLink *inlink) { AVFilterContext *ctx = inlink->dst; PadContext *s = ctx->priv; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); uint8_t rgba_color[4]; int ret, is_packed_rgba; double var_values[VARS_NB], res; char *expr; s->hsub = pix_desc->log2_chroma_w; s->vsub = pix_desc->log2_chroma_h; var_values[VAR_PI] = M_PI; var_values[VAR_PHI] = M_PHI; var_values[VAR_E] = M_E; var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w; var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h; var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN; var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN; var_values[VAR_A] = (double) inlink->w / inlink->h; var_values[VAR_HSUB] = 1<<s->hsub; var_values[VAR_VSUB] = 1<<s->vsub; /* evaluate width and height */ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto eval_fail; s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res; 
if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto eval_fail; s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res; /* evaluate the width again, as it may depend on the evaluated output height */ if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto eval_fail; s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res; /* evaluate x and y */ if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto eval_fail; s->x = var_values[VAR_X] = res; if ((ret = av_expr_parse_and_eval(&res, (expr = s->y_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto eval_fail; s->y = var_values[VAR_Y] = res; /* evaluate x again, as it may depend on the evaluated y value */ if ((ret = av_expr_parse_and_eval(&res, (expr = s->x_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto eval_fail; s->x = var_values[VAR_X] = res; /* sanity check params */ if (s->w < 0 || s->h < 0 || s->x < 0 || s->y < 0) { av_log(ctx, AV_LOG_ERROR, \"Negative values are not acceptable.\\n\"); return AVERROR(EINVAL); } if (!s->w) s->w = inlink->w; if (!s->h) s->h = inlink->h; s->w &= ~((1 << s->hsub) - 1); s->h &= ~((1 << s->vsub) - 1); s->x &= ~((1 << s->hsub) - 1); s->y &= ~((1 << s->vsub) - 1); s->in_w = inlink->w & ~((1 << s->hsub) - 1); s->in_h = inlink->h & ~((1 << s->vsub) - 1); memcpy(rgba_color, s->color, sizeof(rgba_color)); ff_fill_line_with_color(s->line, s->line_step, s->w, s->color, inlink->format, rgba_color, &is_packed_rgba, NULL); av_log(ctx, AV_LOG_VERBOSE, \"w:%d h:%d -> w:%d h:%d x:%d y:%d color:0x%02X%02X%02X%02X[%s]\\n\", inlink->w, inlink->h, s->w, s->h, s->x, s->y, s->color[0], s->color[1], s->color[2], s->color[3], is_packed_rgba ? 
\"rgba\" : \"yuva\"); if (s->x < 0 || s->y < 0 || s->w <= 0 || s->h <= 0 || (unsigned)s->x + (unsigned)inlink->w > s->w || (unsigned)s->y + (unsigned)inlink->h > s->h) { av_log(ctx, AV_LOG_ERROR, \"Input area %d:%d:%d:%d not within the padded area 0:0:%d:%d or zero-sized\\n\", s->x, s->y, s->x + inlink->w, s->y + inlink->h, s->w, s->h); return AVERROR(EINVAL); } return 0; eval_fail: av_log(NULL, AV_LOG_ERROR, \"Error when evaluating the expression '%s'\\n\", expr); return ret; }"} {"target": 1, "idx": 14176, "func": "static void local_mapped_file_attr(FsContext *ctx, const char *path, struct stat *stbuf) { FILE *fp; char buf[ATTR_MAX]; char *attr_path; attr_path = local_mapped_attr_path(ctx, path); fp = local_fopen(attr_path, \"r\"); g_free(attr_path); if (!fp) { return; } memset(buf, 0, ATTR_MAX); while (fgets(buf, ATTR_MAX, fp)) { if (!strncmp(buf, \"virtfs.uid\", 10)) { stbuf->st_uid = atoi(buf+11); } else if (!strncmp(buf, \"virtfs.gid\", 10)) { stbuf->st_gid = atoi(buf+11); } else if (!strncmp(buf, \"virtfs.mode\", 11)) { stbuf->st_mode = atoi(buf+12); } else if (!strncmp(buf, \"virtfs.rdev\", 11)) { stbuf->st_rdev = atoi(buf+12); } memset(buf, 0, ATTR_MAX); } fclose(fp); }"} {"target": 1, "idx": 14180, "func": "AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame) { AVRational fr = st->r_frame_rate; AVRational codec_fr = st->codec->framerate; AVRational avg_fr = st->avg_frame_rate; if (avg_fr.num > 0 && avg_fr.den > 0 && fr.num > 0 && fr.den > 0 && av_q2d(avg_fr) < 70 && av_q2d(fr) > 210) { fr = avg_fr; } if (st->codec->ticks_per_frame > 1) { if ( codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7 && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1) fr = codec_fr; } return fr; }"} {"target": 1, "idx": 14192, "func": "static void apply_mid_side_stereo(ChannelElement *cpe) { int w, w2, g, i; IndividualChannelStream *ics = &cpe->ch[0].ics; if (!cpe->common_window) return; for (w = 0; w < ics->num_windows; w += ics->group_len[w]) { for (w2 = 0; w2 < ics->group_len[w]; w2++) { int start = (w+w2) * 128; for (g = 0; g < ics->num_swb; g++) { if (!cpe->ms_mask[w*16 + g]) { start += ics->swb_sizes[g]; continue; } for (i = 0; i < ics->swb_sizes[g]; i++) { float L = (cpe->ch[0].coeffs[start+i] + cpe->ch[1].coeffs[start+i]) * 0.5f; float R = L - cpe->ch[1].coeffs[start+i]; cpe->ch[0].coeffs[start+i] = L; cpe->ch[1].coeffs[start+i] = R; } start += ics->swb_sizes[g]; } } } }"} {"target": 0, "idx": 14194, "func": "static int ea_read_header(AVFormatContext *s, AVFormatParameters *ap) { EaDemuxContext *ea = s->priv_data; AVStream *st; if (!process_ea_header(s)) return AVERROR(EIO); if (ea->time_base.num && ea->time_base.den) { /* initialize the video decoder stream */ st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); ea->video_stream_index = st->index; st->codec->codec_type = CODEC_TYPE_VIDEO; st->codec->codec_id = ea->video_codec; st->codec->codec_tag = 0; /* no fourcc */ st->codec->time_base = ea->time_base; } if (ea->audio_codec) { /* initialize the audio decoder stream */ st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); av_set_pts_info(st, 33, 1, ea->sample_rate); st->codec->codec_type = CODEC_TYPE_AUDIO; st->codec->codec_id = ea->audio_codec; st->codec->codec_tag = 0; /* no tag */ st->codec->channels = ea->num_channels; st->codec->sample_rate = ea->sample_rate; st->codec->bits_per_sample = ea->bytes * 8; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_sample / 4; st->codec->block_align 
= st->codec->channels*st->codec->bits_per_sample; ea->audio_stream_index = st->index; ea->audio_frame_counter = 0; } return 1; }"} {"target": 0, "idx": 14214, "func": "static int ppc_hash32_get_bat(CPUPPCState *env, struct mmu_ctx_hash32 *ctx, target_ulong virtual, int rwx) { target_ulong *BATlt, *BATut; target_ulong BEPIl, BEPIu, bl; int i, valid, prot; int ret = -1; LOG_BATS(\"%s: %cBAT v \" TARGET_FMT_lx \"\\n\", __func__, rwx == 2 ? 'I' : 'D', virtual); if (rwx == 2) { BATlt = env->IBAT[1]; BATut = env->IBAT[0]; } else { BATlt = env->DBAT[1]; BATut = env->DBAT[0]; } for (i = 0; i < env->nb_BATs; i++) { target_ulong batu = BATut[i]; target_ulong batl = BATlt[i]; BEPIu = batu & BATU32_BEPIU; BEPIl = batu & BATU32_BEPIL; if (unlikely(env->mmu_model == POWERPC_MMU_601)) { hash32_bat_601_size(env, &bl, &valid, batu, batl); prot = hash32_bat_601_prot(env, batu, batl); } else { hash32_bat_size(env, &bl, &valid, batu, batl); prot = hash32_bat_prot(env, batu, batl); } LOG_BATS(\"%s: %cBAT%d v \" TARGET_FMT_lx \" BATu \" TARGET_FMT_lx \" BATl \" TARGET_FMT_lx \"\\n\", __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual, batu, batl); if ((virtual & BATU32_BEPIU) == BEPIu && ((virtual & BATU32_BEPIL) & ~bl) == BEPIl) { /* BAT matches */ if (valid != 0) { /* Get physical address */ ctx->raddr = (batl & BATL32_BRPNU) | ((virtual & BATU32_BEPIL & bl) | (batl & BATL32_BRPNL)) | (virtual & 0x0001F000); /* Compute access rights */ ctx->prot = prot; ret = ppc_hash32_check_prot(ctx->prot, rwx); if (ret == 0) { LOG_BATS(\"BAT %d match: r \" TARGET_FMT_plx \" prot=%c%c\\n\", i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', ctx->prot & PAGE_WRITE ? 'W' : '-'); } break; } } } if (ret < 0) { #if defined(DEBUG_BATS) if (qemu_log_enabled()) { LOG_BATS(\"no BAT match for \" TARGET_FMT_lx \":\\n\", virtual); for (i = 0; i < 4; i++) { BATu = &BATut[i]; BATl = &BATlt[i]; BEPIu = *BATu & BATU32_BEPIU; BEPIl = *BATu & BATU32_BEPIL; bl = (*BATu & 0x00001FFC) << 15; LOG_BATS(\"%s: %cBAT%d v \" TARGET_FMT_lx \" BATu \" TARGET_FMT_lx \" BATl \" TARGET_FMT_lx \"\\n\\t\" TARGET_FMT_lx \" \" TARGET_FMT_lx \" \" TARGET_FMT_lx \"\\n\", __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl, BEPIu, BEPIl, bl); } } #endif } /* No hit */ return ret; }"} {"target": 0, "idx": 14231, "func": "static QOSState *qvirtio_scsi_start(const char *extra_opts) { const char *cmd = \"-drive id=drv0,if=none,file=/dev/null,format=raw \" \"-device virtio-scsi-pci,id=vs0 \" \"-device scsi-hd,bus=vs0.0,drive=drv0 %s\"; return qtest_pc_boot(cmd, extra_opts ? 
: \"\"); }"} {"target": 0, "idx": 14236, "func": "int fread_targphys_ok(target_phys_addr_t dst_addr, size_t nbytes, FILE *f) { return fread_targphys(dst_addr, nbytes, f) == nbytes; }"} {"target": 0, "idx": 14240, "func": "static int set_format(void *obj, const char *name, int fmt, int search_flags, enum AVOptionType type, const char *desc, int max) { void *target_obj; const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj); if (!o || !target_obj) return AVERROR_OPTION_NOT_FOUND; if (o->type != type) { av_log(obj, AV_LOG_ERROR, \"The value set by option '%s' is not a %s format\", name, desc); return AVERROR(EINVAL); } if (fmt < -1 || fmt > max) { av_log(obj, AV_LOG_ERROR, \"Value %d for parameter '%s' out of %s format range [-1 - %d]\\n\", fmt, name, desc, max); return AVERROR(ERANGE); } *(int *)(((uint8_t *)target_obj) + o->offset) = fmt; return 0; }"} {"target": 1, "idx": 14244, "func": "static CharDriverState *qemu_chr_open_stdio(const char *id, ChardevBackend *backend, ChardevReturn *ret, Error **errp) { ChardevStdio *opts = backend->u.stdio.data; CharDriverState *chr; struct sigaction act; ChardevCommon *common = qapi_ChardevStdio_base(opts); if (is_daemonized()) { error_setg(errp, \"cannot use stdio with -daemonize\"); if (stdio_in_use) { error_setg(errp, \"cannot use stdio by multiple character devices\"); stdio_in_use = true; old_fd0_flags = fcntl(0, F_GETFL); tcgetattr(0, &oldtty); qemu_set_nonblock(0); atexit(term_exit); memset(&act, 0, sizeof(act)); act.sa_handler = term_stdio_handler; sigaction(SIGCONT, &act, NULL); chr = qemu_chr_open_fd(0, 1, common, errp); chr->chr_close = qemu_chr_close_stdio; chr->chr_set_echo = qemu_chr_set_echo_stdio; if (opts->has_signal) { stdio_allow_signal = opts->signal; qemu_chr_fe_set_echo(chr, false); return chr;"} {"target": 1, "idx": 14254, "func": "static int ape_read_packet(AVFormatContext * s, AVPacket * pkt) { int ret; int nblocks; APEContext *ape = s->priv_data; uint32_t extra_size = 8; if (url_feof(s->pb)) return AVERROR_EOF; if (ape->currentframe >= ape->totalframes) return AVERROR_EOF; if (avio_seek(s->pb, ape->frames[ape->currentframe].pos, SEEK_SET) < 0) return AVERROR(EIO); /* Calculate how many blocks there are in this frame */ if (ape->currentframe == (ape->totalframes - 1)) nblocks = ape->finalframeblocks; else nblocks = ape->blocksperframe; if (ape->frames[ape->currentframe].size <= 0 || ape->frames[ape->currentframe].size > INT_MAX - extra_size) { av_log(s, AV_LOG_ERROR, \"invalid packet size: %d\\n\", ape->frames[ape->currentframe].size); ape->currentframe++; return AVERROR(EIO); } if (av_new_packet(pkt, ape->frames[ape->currentframe].size + extra_size) < 0) return AVERROR(ENOMEM); AV_WL32(pkt->data , nblocks); AV_WL32(pkt->data + 4, ape->frames[ape->currentframe].skip); ret = avio_read(s->pb, pkt->data + extra_size, ape->frames[ape->currentframe].size); pkt->pts = ape->frames[ape->currentframe].pts; pkt->stream_index = 0; /* note: we need to modify the packet size here to handle the last packet */ pkt->size = ret + extra_size; ape->currentframe++; return 0; }"} {"target": 0, "idx": 14267, "func": "static GSList *nvdimm_get_plugged_device_list(void) { GSList *list = NULL; object_child_foreach(qdev_get_machine(), nvdimm_plugged_device_list, &list); return list; }"} {"target": 0, "idx": 14271, "func": "void ide_dma_cancel(BMDMAState *bm) { if (bm->status & BM_STATUS_DMAING) { bm->status &= ~BM_STATUS_DMAING; /* cancel DMA request */ bm->unit = -1; bm->dma_cb = NULL; if (bm->aiocb) { #ifdef DEBUG_AIO 
printf(\"aio_cancel\\n\"); #endif bdrv_aio_cancel(bm->aiocb); bm->aiocb = NULL; } } }"} {"target": 0, "idx": 14272, "func": "uint64_t helper_fre(CPUPPCState *env, uint64_t arg) { CPU_DoubleU farg; farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN reciprocal */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); } farg.d = float64_div(float64_one, farg.d, &env->fp_status); return farg.d; }"} {"target": 1, "idx": 14304, "func": "PCIBus *i440fx_init(PCII440FXState **pi440fx_state, int *piix3_devfn, ISABus **isa_bus, qemu_irq *pic, MemoryRegion *address_space_mem, MemoryRegion *address_space_io, ram_addr_t ram_size, hwaddr pci_hole_start, hwaddr pci_hole_size, hwaddr pci_hole64_start, hwaddr pci_hole64_size, MemoryRegion *pci_address_space, MemoryRegion *ram_memory) { DeviceState *dev; PCIBus *b; PCIDevice *d; PCIHostState *s; PIIX3State *piix3; PCII440FXState *f; unsigned i; dev = qdev_create(NULL, TYPE_I440FX_PCI_HOST_BRIDGE); s = PCI_HOST_BRIDGE(dev); b = pci_bus_new(dev, NULL, pci_address_space, address_space_io, 0, TYPE_PCI_BUS); s->bus = b; object_property_add_child(qdev_get_machine(), \"i440fx\", OBJECT(dev), NULL); qdev_init_nofail(dev); d = pci_create_simple(b, 0, TYPE_I440FX_PCI_DEVICE); *pi440fx_state = I440FX_PCI_DEVICE(d); f = *pi440fx_state; f->system_memory = address_space_mem; f->pci_address_space = pci_address_space; f->ram_memory = ram_memory; memory_region_init_alias(&f->pci_hole, OBJECT(d), \"pci-hole\", f->pci_address_space, pci_hole_start, pci_hole_size); memory_region_add_subregion(f->system_memory, pci_hole_start, &f->pci_hole); memory_region_init_alias(&f->pci_hole_64bit, OBJECT(d), \"pci-hole64\", f->pci_address_space, pci_hole64_start, pci_hole64_size); if (pci_hole64_size) { memory_region_add_subregion(f->system_memory, pci_hole64_start, &f->pci_hole_64bit); } memory_region_init_alias(&f->smram_region, OBJECT(d), \"smram-region\", f->pci_address_space, 0xa0000, 0x20000); memory_region_add_subregion_overlap(f->system_memory, 0xa0000, &f->smram_region, 1); memory_region_set_enabled(&f->smram_region, false); init_pam(dev, f->ram_memory, f->system_memory, f->pci_address_space, &f->pam_regions[0], PAM_BIOS_BASE, PAM_BIOS_SIZE); for (i = 0; i < 12; ++i) { init_pam(dev, f->ram_memory, f->system_memory, f->pci_address_space, &f->pam_regions[i+1], PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE); } /* Xen supports additional interrupt routes from the PCI devices to * the IOAPIC: the four pins of each PCI device on the bus are also * connected to the IOAPIC directly. * These additional routes can be discovered through ACPI. 
*/ if (xen_enabled()) { piix3 = DO_UPCAST(PIIX3State, dev, pci_create_simple_multifunction(b, -1, true, \"PIIX3-xen\")); pci_bus_irqs(b, xen_piix3_set_irq, xen_pci_slot_get_pirq, piix3, XEN_PIIX_NUM_PIRQS); } else { piix3 = DO_UPCAST(PIIX3State, dev, pci_create_simple_multifunction(b, -1, true, \"PIIX3\")); pci_bus_irqs(b, piix3_set_irq, pci_slot_get_pirq, piix3, PIIX_NUM_PIRQS); pci_bus_set_route_irq_fn(b, piix3_route_intx_pin_to_irq); } piix3->pic = pic; *isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix3), \"isa.0\")); *piix3_devfn = piix3->dev.devfn; ram_size = ram_size / 8 / 1024 / 1024; if (ram_size > 255) { ram_size = 255; } d->config[0x57] = ram_size; i440fx_update_memory_mappings(f); return b; }"} {"target": 1, "idx": 14312, "func": "static int virtio_rng_device_exit(DeviceState *qdev) { VirtIORNG *vrng = VIRTIO_RNG(qdev); VirtIODevice *vdev = VIRTIO_DEVICE(qdev); timer_del(vrng->rate_limit_timer); timer_free(vrng->rate_limit_timer); unregister_savevm(qdev, \"virtio-rng\", vrng); virtio_cleanup(vdev); return 0; }"} {"target": 0, "idx": 14333, "func": "static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame) { SVQ1Context *s = avctx->priv_data; GetBitContext *bitbuf = &s->gb; int frame_size_code; int width = s->width; int height = s->height; skip_bits(bitbuf, 8); /* temporal_reference */ /* frame type */ s->nonref = 0; switch (get_bits(bitbuf, 2)) { case 0: frame->pict_type = AV_PICTURE_TYPE_I; break; case 2: s->nonref = 1; case 1: frame->pict_type = AV_PICTURE_TYPE_P; break; default: av_log(avctx, AV_LOG_ERROR, \"Invalid frame type.\\n\"); return AVERROR_INVALIDDATA; } if (frame->pict_type == AV_PICTURE_TYPE_I) { /* unknown fields */ if (s->frame_code == 0x50 || s->frame_code == 0x60) { int csum = get_bits(bitbuf, 16); csum = ff_svq1_packet_checksum(bitbuf->buffer, bitbuf->size_in_bits >> 3, csum); av_dlog(avctx, \"%s checksum (%02x) for packet data\\n\", (csum == 0) ? 
\"correct\" : \"incorrect\", csum); } if ((s->frame_code ^ 0x10) >= 0x50) { uint8_t msg[256]; svq1_parse_string(bitbuf, msg); av_log(avctx, AV_LOG_INFO, \"embedded message:\\n%s\\n\", (char *)msg); } skip_bits(bitbuf, 2); skip_bits(bitbuf, 2); skip_bits1(bitbuf); /* load frame size */ frame_size_code = get_bits(bitbuf, 3); if (frame_size_code == 7) { /* load width, height (12 bits each) */ width = get_bits(bitbuf, 12); height = get_bits(bitbuf, 12); if (!width || !height) return AVERROR_INVALIDDATA; } else { /* get width, height from table */ width = ff_svq1_frame_size_table[frame_size_code][0]; height = ff_svq1_frame_size_table[frame_size_code][1]; } } /* unknown fields */ if (get_bits1(bitbuf)) { skip_bits1(bitbuf); /* use packet checksum if (1) */ skip_bits1(bitbuf); /* component checksums after image data if (1) */ if (get_bits(bitbuf, 2) != 0) return AVERROR_INVALIDDATA; } if (get_bits1(bitbuf)) { skip_bits1(bitbuf); skip_bits(bitbuf, 4); skip_bits1(bitbuf); skip_bits(bitbuf, 2); if (skip_1stop_8data_bits(bitbuf) < 0) return AVERROR_INVALIDDATA; } s->width = width; s->height = height; return 0; }"} {"target": 1, "idx": 14354, "func": "static struct pathelem *add_entry(struct pathelem *root, const char *name, unsigned type) { struct pathelem **e; root->num_entries++; root = realloc(root, sizeof(*root) + sizeof(root->entries[0])*root->num_entries); e = &root->entries[root->num_entries-1]; *e = new_entry(root->pathname, root, name); if (is_dir_maybe(type)) { *e = add_dir_maybe(*e); } return root; }"} {"target": 1, "idx": 14360, "func": "static void virtio_balloon_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); dc->exit = virtio_balloon_device_exit; dc->props = virtio_balloon_properties; set_bit(DEVICE_CATEGORY_MISC, dc->categories); vdc->init = virtio_balloon_device_init; vdc->get_config = virtio_balloon_get_config; vdc->set_config = virtio_balloon_set_config; vdc->get_features = virtio_balloon_get_features; }"} {"target": 1, "idx": 14367, "func": "static KeyValue *copy_key_value(KeyValue *src) { KeyValue *dst = g_new(KeyValue, 1); memcpy(dst, src, sizeof(*src)); return dst;"} {"target": 1, "idx": 14373, "func": "target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1, target_ulong r2) { int64_t t1 = extract64(r1, 0, 32); int64_t t2 = extract64(r2, 0, 32); int64_t result = t1 - t2; return suov32(env, result); }"} {"target": 0, "idx": 14386, "func": "static uint64_t icp_pit_read(void *opaque, target_phys_addr_t offset, unsigned size) { icp_pit_state *s = (icp_pit_state *)opaque; int n; /* ??? Don't know the PrimeCell ID for this device. */ n = offset >> 8; if (n > 2) { hw_error(\"%s: Bad timer %d\\n\", __func__, n); } return arm_timer_read(s->timer[n], offset & 0xff); }"} {"target": 0, "idx": 14390, "func": "static void v9fs_synth_direntry(V9fsSynthNode *node, struct dirent *entry, off_t off) { strcpy(entry->d_name, node->name); entry->d_ino = node->attr->inode; entry->d_off = off + 1; }"} {"target": 0, "idx": 14396, "func": "static void gen_nop_hint(DisasContext *s, int val) { switch (val) { case 1: /* yield */ if (!parallel_cpus) { gen_set_pc_im(s, s->pc); s->base.is_jmp = DISAS_YIELD; } break; case 3: /* wfi */ gen_set_pc_im(s, s->pc); s->base.is_jmp = DISAS_WFI; break; case 2: /* wfe */ if (!parallel_cpus) { gen_set_pc_im(s, s->pc); s->base.is_jmp = DISAS_WFE; } break; case 4: /* sev */ case 5: /* sevl */ /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. 
*/ default: /* nop */ break; } }"} {"target": 0, "idx": 14402, "func": "static void vnc_dpy_setdata(DisplayState *ds) { /* We don't have to do anything */ }"} {"target": 0, "idx": 14405, "func": "static int64_t ftp_seek(URLContext *h, int64_t pos, int whence) { FTPContext *s = h->priv_data; int err; int64_t new_pos, fake_pos; av_dlog(h, \"ftp protocol seek %\"PRId64\" %d\\n\", pos, whence); switch(whence) { case AVSEEK_SIZE: return s->filesize; case SEEK_SET: new_pos = pos; break; case SEEK_CUR: new_pos = s->position + pos; break; case SEEK_END: if (s->filesize < 0) return AVERROR(EIO); new_pos = s->filesize + pos; break; default: return AVERROR(EINVAL); } if (h->is_streamed) return AVERROR(EIO); if (new_pos < 0) { av_log(h, AV_LOG_ERROR, \"Seeking to nagative position.\\n\"); return AVERROR(EINVAL); } fake_pos = s->filesize != -1 ? FFMIN(new_pos, s->filesize) : new_pos; if (fake_pos != s->position) { if ((err = ftp_abort(h)) < 0) return err; s->position = fake_pos; } return new_pos; }"} {"target": 0, "idx": 14410, "func": "struct vhost_net *vhost_net_init(NetClientState *backend, int devfd, bool force) { int r; struct vhost_net *net = g_malloc(sizeof *net); if (!backend) { fprintf(stderr, \"vhost-net requires backend to be setup\\n\"); goto fail; } r = vhost_net_get_fd(backend); if (r < 0) { goto fail; } net->nc = backend; net->dev.backend_features = tap_has_vnet_hdr(backend) ? 0 : (1 << VHOST_NET_F_VIRTIO_NET_HDR); net->backend = r; net->dev.nvqs = 2; net->dev.vqs = net->vqs; r = vhost_dev_init(&net->dev, devfd, \"/dev/vhost-net\", force); if (r < 0) { goto fail; } if (!tap_has_vnet_hdr_len(backend, sizeof(struct virtio_net_hdr_mrg_rxbuf))) { net->dev.features &= ~(1 << VIRTIO_NET_F_MRG_RXBUF); } if (~net->dev.features & net->dev.backend_features) { fprintf(stderr, \"vhost lacks feature mask %\" PRIu64 \" for backend\\n\", (uint64_t)(~net->dev.features & net->dev.backend_features)); vhost_dev_cleanup(&net->dev); goto fail; } /* Set sane init value. Override when guest acks. 
*/ vhost_net_ack_features(net, 0); return net; fail: g_free(net); return NULL; }"} {"target": 0, "idx": 14427, "func": "static int encode_superframe(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { WMACodecContext *s = avctx->priv_data; int i, total_gain, ret, error; s->block_len_bits= s->frame_len_bits; //required by non variable block len s->block_len = 1 << s->block_len_bits; apply_window_and_mdct(avctx, frame); if (s->ms_stereo) { float a, b; int i; for(i = 0; i < s->block_len; i++) { a = s->coefs[0][i]*0.5; b = s->coefs[1][i]*0.5; s->coefs[0][i] = a + b; s->coefs[1][i] = a - b; } } if ((ret = ff_alloc_packet2(avctx, avpkt, 2 * MAX_CODED_SUPERFRAME_SIZE))) return ret; total_gain= 128; for(i=64; i; i>>=1){ error = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain - i); if(error<=0) total_gain-= i; } while(total_gain <= 128 && error > 0) error = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain++); av_assert0((put_bits_count(&s->pb) & 7) == 0); i= avctx->block_align - (put_bits_count(&s->pb)+7)/8; av_assert0(i>=0); while(i--) put_bits(&s->pb, 8, 'N'); flush_put_bits(&s->pb); av_assert0(put_bits_ptr(&s->pb) - s->pb.buf == avctx->block_align); if (frame->pts != AV_NOPTS_VALUE) avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay); avpkt->size = avctx->block_align; *got_packet_ptr = 1; return 0; }"} {"target": 0, "idx": 14429, "func": "static int qcow_create(const char *filename, int64_t total_size, const char *backing_file, int flags) { int fd, header_size, backing_filename_len, l1_size, i, shift, l2_bits; QCowHeader header; uint64_t tmp, offset; QCowCreateState s1, *s = &s1; memset(s, 0, sizeof(*s)); fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644); if (fd < 0) return -1; memset(&header, 0, sizeof(header)); header.magic = cpu_to_be32(QCOW_MAGIC); header.version = cpu_to_be32(QCOW_VERSION); header.size = cpu_to_be64(total_size * 512); header_size = sizeof(header); backing_filename_len = 0; if (backing_file) { header.backing_file_offset = cpu_to_be64(header_size); backing_filename_len = strlen(backing_file); header.backing_file_size = cpu_to_be32(backing_filename_len); header_size += backing_filename_len; } s->cluster_bits = 12; /* 4 KB clusters */ s->cluster_size = 1 << s->cluster_bits; header.cluster_bits = cpu_to_be32(s->cluster_bits); header_size = (header_size + 7) & ~7; if (flags & BLOCK_FLAG_ENCRYPT) { header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES); } else { header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); } l2_bits = s->cluster_bits - 3; shift = s->cluster_bits + l2_bits; l1_size = (((total_size * 512) + (1LL << shift) - 1) >> shift); offset = align_offset(header_size, s->cluster_size); s->l1_table_offset = offset; header.l1_table_offset = cpu_to_be64(s->l1_table_offset); header.l1_size = cpu_to_be32(l1_size); offset += align_offset(l1_size * sizeof(uint64_t), s->cluster_size); s->refcount_table = qemu_mallocz(s->cluster_size); s->refcount_block = qemu_mallocz(s->cluster_size); s->refcount_table_offset = offset; header.refcount_table_offset = cpu_to_be64(offset); header.refcount_table_clusters = cpu_to_be32(1); offset += s->cluster_size; s->refcount_table[0] = cpu_to_be64(offset); s->refcount_block_offset = offset; offset += s->cluster_size; /* update refcounts */ create_refcount_update(s, 0, header_size); create_refcount_update(s, s->l1_table_offset, l1_size * sizeof(uint64_t)); create_refcount_update(s, s->refcount_table_offset, s->cluster_size); create_refcount_update(s, 
s->refcount_block_offset, s->cluster_size); /* write all the data */ write(fd, &header, sizeof(header)); if (backing_file) { write(fd, backing_file, backing_filename_len); } lseek(fd, s->l1_table_offset, SEEK_SET); tmp = 0; for(i = 0;i < l1_size; i++) { write(fd, &tmp, sizeof(tmp)); } lseek(fd, s->refcount_table_offset, SEEK_SET); write(fd, s->refcount_table, s->cluster_size); lseek(fd, s->refcount_block_offset, SEEK_SET); write(fd, s->refcount_block, s->cluster_size); qemu_free(s->refcount_table); qemu_free(s->refcount_block); close(fd); return 0; }"} {"target": 1, "idx": 14445, "func": "static void scsi_read_data(SCSIRequest *req) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; if (r->sector_count == (uint32_t)-1) { DPRINTF(\"Read buf_len=%zd\\n\", r->iov.iov_len); r->sector_count = 0; scsi_req_data(&r->req, r->iov.iov_len); return; } DPRINTF(\"Read sector_count=%d\\n\", r->sector_count); if (r->sector_count == 0) { scsi_command_complete(r, GOOD, NO_SENSE); return; } /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); n = r->sector_count; if (n > SCSI_DMA_BUF_SIZE / 512) n = SCSI_DMA_BUF_SIZE / 512; r->iov.iov_len = n * 512; qemu_iovec_init_external(&r->qiov, &r->iov, 1); r->req.aiocb = bdrv_aio_readv(s->bs, r->sector, &r->qiov, n, scsi_read_complete, r); if (r->req.aiocb == NULL) { scsi_read_complete(r, -EIO); } }"} {"target": 1, "idx": 14464, "func": "static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target, const char *replaces, int64_t speed, uint32_t granularity, int64_t buf_size, BlockdevOnError on_source_error, BlockdevOnError on_target_error, bool unmap, BlockCompletionFunc *cb, void *opaque, Error **errp, const BlockJobDriver *driver, bool is_none_mode, BlockDriverState *base) { MirrorBlockJob *s; BlockDriverState *replaced_bs; if (granularity == 0) { granularity = bdrv_get_default_bitmap_granularity(target); assert ((granularity & (granularity - 1)) == 0); if ((on_source_error == BLOCKDEV_ON_ERROR_STOP || on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) && (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) { error_setg(errp, QERR_INVALID_PARAMETER, \"on-source-error\"); if (buf_size < 0) { error_setg(errp, \"Invalid parameter 'buf-size'\"); if (buf_size == 0) { buf_size = DEFAULT_MIRROR_BUF_SIZE; s = block_job_create(driver, bs, speed, cb, opaque, errp); if (!s) { s->replaces = g_strdup(replaces); s->on_source_error = on_source_error; s->on_target_error = on_target_error; s->target = target; s->is_none_mode = is_none_mode; s->base = base; s->granularity = granularity; s->buf_size = ROUND_UP(buf_size, granularity); s->unmap = unmap; s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); if (!s->dirty_bitmap) { g_free(s->replaces); block_job_unref(&s->common); bdrv_op_block_all(s->target, s->common.blocker); bdrv_set_enable_write_cache(s->target, true); if (s->target->blk) { blk_set_on_error(s->target->blk, on_target_error, on_target_error); blk_iostatus_enable(s->target->blk); s->common.co = qemu_coroutine_create(mirror_run); trace_mirror_start(bs, s, s->common.co, opaque); qemu_coroutine_enter(s->common.co, s);"} {"target": 0, "idx": 14472, "func": "static void qemu_kvm_start_vcpu(CPUState *env) { env->thread = g_malloc0(sizeof(QemuThread)); env->halt_cond = g_malloc0(sizeof(QemuCond)); qemu_cond_init(env->halt_cond); qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env, QEMU_THREAD_DETACHED); while (env->created == 0) { 
qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); } }"} {"target": 0, "idx": 14496, "func": "static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, const uint8_t *buf, int len, hwaddr addr1, hwaddr l, MemoryRegion *mr) { uint8_t *ptr; uint64_t val; MemTxResult result = MEMTX_OK; bool release_lock = false; for (;;) { if (!memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); /* XXX: could force current_cpu to NULL to avoid potential bugs */ switch (l) { case 8: /* 64 bit write access */ val = ldq_p(buf); result |= memory_region_dispatch_write(mr, addr1, val, 8, attrs); break; case 4: /* 32 bit write access */ val = (uint32_t)ldl_p(buf); result |= memory_region_dispatch_write(mr, addr1, val, 4, attrs); break; case 2: /* 16 bit write access */ val = lduw_p(buf); result |= memory_region_dispatch_write(mr, addr1, val, 2, attrs); break; case 1: /* 8 bit write access */ val = ldub_p(buf); result |= memory_region_dispatch_write(mr, addr1, val, 1, attrs); break; default: abort(); } } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); memcpy(ptr, buf, l); invalidate_and_set_dirty(mr, addr1, l); } if (release_lock) { qemu_mutex_unlock_iothread(); release_lock = false; } len -= l; buf += l; addr += l; if (!len) { break; } l = len; mr = address_space_translate(as, addr, &addr1, &l, true); } return result; }"} {"target": 0, "idx": 14509, "func": "static int g729_parse(AVCodecParserContext *s1, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { G729ParseContext *s = s1->priv_data; ParseContext *pc = &s->pc; int next; if (!s->block_size) { switch (avctx->codec_id) { case AV_CODEC_ID_G729: /* FIXME: replace this heuristic block_size with more precise estimate */ s->block_size = (avctx->bit_rate < 8000) ? G729D_6K4_BLOCK_SIZE : G729_8K_BLOCK_SIZE; s->duration = avctx->frame_size; break; default: *poutbuf = buf; *poutbuf_size = buf_size; av_log(avctx, AV_LOG_ERROR, \"Invalid codec_id\\n\"); return buf_size; } } if (!s->remaining) s->remaining = s->block_size; if (s->remaining <= buf_size) { next = s->remaining; s->remaining = 0; } else { next = END_NOT_FOUND; s->remaining -= buf_size; } if (ff_combine_frame(pc, next, &buf, &buf_size) < 0 || !buf_size) { *poutbuf = NULL; *poutbuf_size = 0; return buf_size; } s1->duration = s->duration; *poutbuf = buf; *poutbuf_size = buf_size; return next; }"} {"target": 0, "idx": 14532, "func": "static uint64_t timerblock_read(void *opaque, target_phys_addr_t addr, unsigned size) { timerblock *tb = (timerblock *)opaque; int64_t val; switch (addr) { case 0: /* Load */ return tb->load; case 4: /* Counter. */ if (((tb->control & 1) == 0) || (tb->count == 0)) { return 0; } /* Slow and ugly, but hopefully won't happen too often. */ val = tb->tick - qemu_get_clock_ns(vm_clock); val /= timerblock_scale(tb); if (val < 0) { val = 0; } return val; case 8: /* Control. */ return tb->control; case 12: /* Interrupt status. 
*/ return tb->status; default: return 0; } }"} {"target": 0, "idx": 14535, "func": "static int decode_audio_block(AC3DecodeContext *s, int blk) { int fbw_channels = s->fbw_channels; int channel_mode = s->channel_mode; int i, bnd, seg, ch; int different_transforms; int downmix_output; int cpl_in_use; GetBitContext *gbc = &s->gbc; uint8_t bit_alloc_stages[AC3_MAX_CHANNELS]; memset(bit_alloc_stages, 0, AC3_MAX_CHANNELS); /* block switch flags */ different_transforms = 0; if (s->block_switch_syntax) { for (ch = 1; ch <= fbw_channels; ch++) { s->block_switch[ch] = get_bits1(gbc); if(ch > 1 && s->block_switch[ch] != s->block_switch[1]) different_transforms = 1; } } /* dithering flags */ if (s->dither_flag_syntax) { for (ch = 1; ch <= fbw_channels; ch++) { s->dither_flag[ch] = get_bits1(gbc); } } /* dynamic range */ i = !(s->channel_mode); do { if(get_bits1(gbc)) { s->dynamic_range[i] = ((dynamic_range_tab[get_bits(gbc, 8)]-1.0) * s->avctx->drc_scale)+1.0; } else if(blk == 0) { s->dynamic_range[i] = 1.0f; } } while(i--); /* spectral extension strategy */ if (s->eac3 && (!blk || get_bits1(gbc))) { if (get_bits1(gbc)) { av_log_missing_feature(s->avctx, \"Spectral extension\", 1); return -1; } /* TODO: parse spectral extension strategy info */ } /* TODO: spectral extension coordinates */ /* coupling strategy */ if (s->eac3 ? s->cpl_strategy_exists[blk] : get_bits1(gbc)) { memset(bit_alloc_stages, 3, AC3_MAX_CHANNELS); if (!s->eac3) s->cpl_in_use[blk] = get_bits1(gbc); if (s->cpl_in_use[blk]) { /* coupling in use */ int cpl_start_subband, cpl_end_subband; if (channel_mode < AC3_CHMODE_STEREO) { av_log(s->avctx, AV_LOG_ERROR, \"coupling not allowed in mono or dual-mono\\n\"); return -1; } /* check for enhanced coupling */ if (s->eac3 && get_bits1(gbc)) { /* TODO: parse enhanced coupling strategy info */ av_log_missing_feature(s->avctx, \"Enhanced coupling\", 1); return -1; } /* determine which channels are coupled */ if (s->eac3 && s->channel_mode == AC3_CHMODE_STEREO) { s->channel_in_cpl[1] = 1; s->channel_in_cpl[2] = 1; } else { for (ch = 1; ch <= fbw_channels; ch++) s->channel_in_cpl[ch] = get_bits1(gbc); } /* phase flags in use */ if (channel_mode == AC3_CHMODE_STEREO) s->phase_flags_in_use = get_bits1(gbc); /* coupling frequency range */ /* TODO: modify coupling end freq if spectral extension is used */ cpl_start_subband = get_bits(gbc, 4); cpl_end_subband = get_bits(gbc, 4) + 3; s->num_cpl_subbands = cpl_end_subband - cpl_start_subband; if (s->num_cpl_subbands < 0) { av_log(s->avctx, AV_LOG_ERROR, \"invalid coupling range (%d > %d)\\n\", cpl_start_subband, cpl_end_subband); return -1; } s->start_freq[CPL_CH] = cpl_start_subband * 12 + 37; s->end_freq[CPL_CH] = cpl_end_subband * 12 + 37; decode_band_structure(gbc, blk, s->eac3, 0, cpl_start_subband, cpl_end_subband, ff_eac3_default_cpl_band_struct, s->cpl_band_struct, &s->num_cpl_subbands, &s->num_cpl_bands, NULL); } else { /* coupling not in use */ for (ch = 1; ch <= fbw_channels; ch++) { s->channel_in_cpl[ch] = 0; s->first_cpl_coords[ch] = 1; } s->first_cpl_leak = s->eac3; s->phase_flags_in_use = 0; } } else if (!s->eac3) { if(!blk) { av_log(s->avctx, AV_LOG_ERROR, \"new coupling strategy must be present in block 0\\n\"); return -1; } else { s->cpl_in_use[blk] = s->cpl_in_use[blk-1]; } } cpl_in_use = s->cpl_in_use[blk]; /* coupling coordinates */ if (cpl_in_use) { int cpl_coords_exist = 0; for (ch = 1; ch <= fbw_channels; ch++) { if (s->channel_in_cpl[ch]) { if ((s->eac3 && s->first_cpl_coords[ch]) || get_bits1(gbc)) { int master_cpl_coord, 
cpl_coord_exp, cpl_coord_mant; s->first_cpl_coords[ch] = 0; cpl_coords_exist = 1; master_cpl_coord = 3 * get_bits(gbc, 2); for (bnd = 0; bnd < s->num_cpl_bands; bnd++) { cpl_coord_exp = get_bits(gbc, 4); cpl_coord_mant = get_bits(gbc, 4); if (cpl_coord_exp == 15) s->cpl_coords[ch][bnd] = cpl_coord_mant << 22; else s->cpl_coords[ch][bnd] = (cpl_coord_mant + 16) << 21; s->cpl_coords[ch][bnd] >>= (cpl_coord_exp + master_cpl_coord); } } else if (!blk) { av_log(s->avctx, AV_LOG_ERROR, \"new coupling coordinates must be present in block 0\\n\"); return -1; } } else { /* channel not in coupling */ s->first_cpl_coords[ch] = 1; } } /* phase flags */ if (channel_mode == AC3_CHMODE_STEREO && cpl_coords_exist) { for (bnd = 0; bnd < s->num_cpl_bands; bnd++) { s->phase_flags[bnd] = s->phase_flags_in_use? get_bits1(gbc) : 0; } } } /* stereo rematrixing strategy and band structure */ if (channel_mode == AC3_CHMODE_STEREO) { if ((s->eac3 && !blk) || get_bits1(gbc)) { s->num_rematrixing_bands = 4; if(cpl_in_use && s->start_freq[CPL_CH] <= 61) s->num_rematrixing_bands -= 1 + (s->start_freq[CPL_CH] == 37); for(bnd=0; bnd<s->num_rematrixing_bands; bnd++) s->rematrixing_flags[bnd] = get_bits1(gbc); } else if (!blk) { av_log(s->avctx, AV_LOG_ERROR, \"new rematrixing strategy must be present in block 0\\n\"); return -1; } } /* exponent strategies for each channel */ for (ch = !cpl_in_use; ch <= s->channels; ch++) { if (!s->eac3) s->exp_strategy[blk][ch] = get_bits(gbc, 2 - (ch == s->lfe_ch)); if(s->exp_strategy[blk][ch] != EXP_REUSE) bit_alloc_stages[ch] = 3; } /* channel bandwidth */ for (ch = 1; ch <= fbw_channels; ch++) { s->start_freq[ch] = 0; if (s->exp_strategy[blk][ch] != EXP_REUSE) { int group_size; int prev = s->end_freq[ch]; if (s->channel_in_cpl[ch]) s->end_freq[ch] = s->start_freq[CPL_CH]; else { int bandwidth_code = get_bits(gbc, 6); if (bandwidth_code > 60) { av_log(s->avctx, AV_LOG_ERROR, \"bandwidth code = %d > 60\\n\", bandwidth_code); return -1; } s->end_freq[ch] = bandwidth_code * 3 + 73; } group_size = 3 << (s->exp_strategy[blk][ch] - 1); s->num_exp_groups[ch] = (s->end_freq[ch]+group_size-4) / group_size; if(blk > 0 && s->end_freq[ch] != prev) memset(bit_alloc_stages, 3, AC3_MAX_CHANNELS); } } if (cpl_in_use && s->exp_strategy[blk][CPL_CH] != EXP_REUSE) { s->num_exp_groups[CPL_CH] = (s->end_freq[CPL_CH] - s->start_freq[CPL_CH]) / (3 << (s->exp_strategy[blk][CPL_CH] - 1)); } /* decode exponents for each channel */ for (ch = !cpl_in_use; ch <= s->channels; ch++) { if (s->exp_strategy[blk][ch] != EXP_REUSE) { s->dexps[ch][0] = get_bits(gbc, 4) << !ch; if (decode_exponents(gbc, s->exp_strategy[blk][ch], s->num_exp_groups[ch], s->dexps[ch][0], &s->dexps[ch][s->start_freq[ch]+!!ch])) { av_log(s->avctx, AV_LOG_ERROR, \"exponent out-of-range\\n\"); return -1; } if(ch != CPL_CH && ch != s->lfe_ch) skip_bits(gbc, 2); /* skip gainrng */ } } /* bit allocation information */ if (s->bit_allocation_syntax) { if (get_bits1(gbc)) { s->bit_alloc_params.slow_decay = ff_ac3_slow_decay_tab[get_bits(gbc, 2)] >> s->bit_alloc_params.sr_shift; s->bit_alloc_params.fast_decay = ff_ac3_fast_decay_tab[get_bits(gbc, 2)] >> s->bit_alloc_params.sr_shift; s->bit_alloc_params.slow_gain = ff_ac3_slow_gain_tab[get_bits(gbc, 2)]; s->bit_alloc_params.db_per_bit = ff_ac3_db_per_bit_tab[get_bits(gbc, 2)]; s->bit_alloc_params.floor = ff_ac3_floor_tab[get_bits(gbc, 3)]; for(ch=!cpl_in_use; ch<=s->channels; ch++) bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } else if (!blk) { av_log(s->avctx, AV_LOG_ERROR, \"new bit allocation
info must be present in block 0\\n\"); return -1; } } /* signal-to-noise ratio offsets and fast gains (signal-to-mask ratios) */ if(!s->eac3 || !blk){ if(s->snr_offset_strategy && get_bits1(gbc)) { int snr = 0; int csnr; csnr = (get_bits(gbc, 6) - 15) << 4; for (i = ch = !cpl_in_use; ch <= s->channels; ch++) { /* snr offset */ if (ch == i || s->snr_offset_strategy == 2) snr = (csnr + get_bits(gbc, 4)) << 2; /* run at least last bit allocation stage if snr offset changes */ if(blk && s->snr_offset[ch] != snr) { bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 1); } s->snr_offset[ch] = snr; /* fast gain (normal AC-3 only) */ if (!s->eac3) { int prev = s->fast_gain[ch]; s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)]; /* run last 2 bit allocation stages if fast gain changes */ if(blk && prev != s->fast_gain[ch]) bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } } } else if (!s->eac3 && !blk) { av_log(s->avctx, AV_LOG_ERROR, \"new snr offsets must be present in block 0\\n\"); return -1; } } /* fast gain (E-AC-3 only) */ if (s->fast_gain_syntax && get_bits1(gbc)) { for (ch = !cpl_in_use; ch <= s->channels; ch++) { int prev = s->fast_gain[ch]; s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)]; /* run last 2 bit allocation stages if fast gain changes */ if(blk && prev != s->fast_gain[ch]) bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } } else if (s->eac3 && !blk) { for (ch = !cpl_in_use; ch <= s->channels; ch++) s->fast_gain[ch] = ff_ac3_fast_gain_tab[4]; } /* E-AC-3 to AC-3 converter SNR offset */ if (s->frame_type == EAC3_FRAME_TYPE_INDEPENDENT && get_bits1(gbc)) { skip_bits(gbc, 10); // skip converter snr offset } /* coupling leak information */ if (cpl_in_use) { if (s->first_cpl_leak || get_bits1(gbc)) { int fl = get_bits(gbc, 3); int sl = get_bits(gbc, 3); /* run last 2 bit allocation stages for coupling channel if coupling leak changes */ if(blk && (fl != s->bit_alloc_params.cpl_fast_leak || sl != s->bit_alloc_params.cpl_slow_leak)) { bit_alloc_stages[CPL_CH] = FFMAX(bit_alloc_stages[CPL_CH], 2); } s->bit_alloc_params.cpl_fast_leak = fl; s->bit_alloc_params.cpl_slow_leak = sl; } else if (!s->eac3 && !blk) { av_log(s->avctx, AV_LOG_ERROR, \"new coupling leak info must be present in block 0\\n\"); return -1; } s->first_cpl_leak = 0; } /* delta bit allocation information */ if (s->dba_syntax && get_bits1(gbc)) { /* delta bit allocation exists (strategy) */ for (ch = !cpl_in_use; ch <= fbw_channels; ch++) { s->dba_mode[ch] = get_bits(gbc, 2); if (s->dba_mode[ch] == DBA_RESERVED) { av_log(s->avctx, AV_LOG_ERROR, \"delta bit allocation strategy reserved\\n\"); return -1; } bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } /* channel delta offset, len and bit allocation */ for (ch = !cpl_in_use; ch <= fbw_channels; ch++) { if (s->dba_mode[ch] == DBA_NEW) { s->dba_nsegs[ch] = get_bits(gbc, 3); for (seg = 0; seg <= s->dba_nsegs[ch]; seg++) { s->dba_offsets[ch][seg] = get_bits(gbc, 5); s->dba_lengths[ch][seg] = get_bits(gbc, 4); s->dba_values[ch][seg] = get_bits(gbc, 3); } /* run last 2 bit allocation stages if new dba values */ bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } } } else if(blk == 0) { for(ch=0; ch<=s->channels; ch++) { s->dba_mode[ch] = DBA_NONE; } } /* Bit allocation */ for(ch=!cpl_in_use; ch<=s->channels; ch++) { if(bit_alloc_stages[ch] > 2) { /* Exponent mapping into PSD and PSD integration */ ff_ac3_bit_alloc_calc_psd(s->dexps[ch], s->start_freq[ch], s->end_freq[ch], s->psd[ch], s->band_psd[ch]); } if(bit_alloc_stages[ch] > 1) { 
/* Compute excitation function, Compute masking curve, and Apply delta bit allocation */ ff_ac3_bit_alloc_calc_mask(&s->bit_alloc_params, s->band_psd[ch], s->start_freq[ch], s->end_freq[ch], s->fast_gain[ch], (ch == s->lfe_ch), s->dba_mode[ch], s->dba_nsegs[ch], s->dba_offsets[ch], s->dba_lengths[ch], s->dba_values[ch], s->mask[ch]); } if(bit_alloc_stages[ch] > 0) { /* Compute bit allocation */ const uint8_t *bap_tab = s->channel_uses_aht[ch] ? ff_eac3_hebap_tab : ff_ac3_bap_tab; ff_ac3_bit_alloc_calc_bap(s->mask[ch], s->psd[ch], s->start_freq[ch], s->end_freq[ch], s->snr_offset[ch], s->bit_alloc_params.floor, bap_tab, s->bap[ch]); } } /* unused dummy data */ if (s->skip_syntax && get_bits1(gbc)) { int skipl = get_bits(gbc, 9); while(skipl--) skip_bits(gbc, 8); } /* unpack the transform coefficients this also uncouples channels if coupling is in use. */ decode_transform_coeffs(s, blk); /* TODO: generate enhanced coupling coordinates and uncouple */ /* TODO: apply spectral extension */ /* recover coefficients if rematrixing is in use */ if(s->channel_mode == AC3_CHMODE_STEREO) do_rematrixing(s); /* apply scaling to coefficients (headroom, dynrng) */ for(ch=1; ch<=s->channels; ch++) { float gain = s->mul_bias / 4194304.0f; if(s->channel_mode == AC3_CHMODE_DUALMONO) { gain *= s->dynamic_range[ch-1]; } else { gain *= s->dynamic_range[0]; } s->dsp.int32_to_float_fmul_scalar(s->transform_coeffs[ch], s->fixed_coeffs[ch], gain, 256); } /* downmix and MDCT. order depends on whether block switching is used for any channel in this block. this is because coefficients for the long and short transforms cannot be mixed. */ downmix_output = s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) && s->fbw_channels == s->out_channels); if(different_transforms) { /* the delay samples have already been downmixed, so we upmix the delay samples in order to reconstruct all channels before downmixing. */ if(s->downmixed) { s->downmixed = 0; ac3_upmix_delay(s); } do_imdct(s, s->channels); if(downmix_output) { s->dsp.ac3_downmix(s->output, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256); } } else { if(downmix_output) { s->dsp.ac3_downmix(s->transform_coeffs+1, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256); } if(downmix_output && !s->downmixed) { s->downmixed = 1; s->dsp.ac3_downmix(s->delay, s->downmix_coeffs, s->out_channels, s->fbw_channels, 128); } do_imdct(s, s->out_channels); } return 0; }"} {"target": 0, "idx": 14543, "func": "setup_return(CPUARMState *env, struct target_sigaction *ka, abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr) { abi_ulong handler = ka->_sa_handler; abi_ulong retcode; int thumb = handler & 1; uint32_t cpsr = cpsr_read(env); cpsr &= ~CPSR_IT; if (thumb) { cpsr |= CPSR_T; } else { cpsr &= ~CPSR_T; } if (ka->sa_flags & TARGET_SA_RESTORER) { retcode = ka->sa_restorer; } else { unsigned int idx = thumb; if (ka->sa_flags & TARGET_SA_SIGINFO) idx += 2; __put_user(retcodes[idx], rc); retcode = rc_addr + thumb; } env->regs[0] = usig; env->regs[13] = frame_addr; env->regs[14] = retcode; env->regs[15] = handler & (thumb ? ~1 : ~3); cpsr_write(env, cpsr, 0xffffffff, CPSRWriteByInstr); }"} {"target": 0, "idx": 14556, "func": "static void tcg_exec_all(void) { int r; /* Account partial waits to QEMU_CLOCK_VIRTUAL. 
*/ qemu_clock_warp(QEMU_CLOCK_VIRTUAL); if (next_cpu == NULL) { next_cpu = first_cpu; } for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) { CPUState *cpu = next_cpu; qemu_clock_enable(QEMU_CLOCK_VIRTUAL, (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0); if (cpu_can_run(cpu)) { r = tcg_cpu_exec(cpu); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); break; } } else if (cpu->stop || cpu->stopped) { break; } } /* Pairs with smp_wmb in qemu_cpu_kick. */ atomic_mb_set(&exit_request, 0); }"} {"target": 0, "idx": 14563, "func": "static int get_keycode(const char *key) { const KeyDef *p; char *endp; int ret; for(p = key_defs; p->name != NULL; p++) { if (!strcmp(key, p->name)) return p->keycode; } if (strstart(key, \"0x\", NULL)) { ret = strtoul(key, &endp, 0); if (*endp == '\\0' && ret >= 0x01 && ret <= 0xff) return ret; } return -1; }"} {"target": 0, "idx": 14570, "func": "static void tricore_testboard_init(MachineState *machine, int board_id) { TriCoreCPU *cpu; CPUTriCoreState *env; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ext_cram = g_new(MemoryRegion, 1); MemoryRegion *ext_dram = g_new(MemoryRegion, 1); MemoryRegion *int_cram = g_new(MemoryRegion, 1); MemoryRegion *int_dram = g_new(MemoryRegion, 1); MemoryRegion *pcp_data = g_new(MemoryRegion, 1); MemoryRegion *pcp_text = g_new(MemoryRegion, 1); if (!machine->cpu_model) { machine->cpu_model = \"tc1796\"; } cpu = cpu_tricore_init(machine->cpu_model); env = &cpu->env; if (!cpu) { error_report(\"Unable to find CPU definition\"); exit(1); } memory_region_init_ram(ext_cram, NULL, \"powerlink_ext_c.ram\", 2*1024*1024, &error_abort); vmstate_register_ram_global(ext_cram); memory_region_init_ram(ext_dram, NULL, \"powerlink_ext_d.ram\", 4*1024*1024, &error_abort); vmstate_register_ram_global(ext_dram); memory_region_init_ram(int_cram, NULL, \"powerlink_int_c.ram\", 48*1024, &error_abort); vmstate_register_ram_global(int_cram); memory_region_init_ram(int_dram, NULL, \"powerlink_int_d.ram\", 48*1024, &error_abort); vmstate_register_ram_global(int_dram); memory_region_init_ram(pcp_data, NULL, \"powerlink_pcp_data.ram\", 16*1024, &error_abort); vmstate_register_ram_global(pcp_data); memory_region_init_ram(pcp_text, NULL, \"powerlink_pcp_text.ram\", 32*1024, &error_abort); vmstate_register_ram_global(pcp_text); memory_region_add_subregion(sysmem, 0x80000000, ext_cram); memory_region_add_subregion(sysmem, 0xa1000000, ext_dram); memory_region_add_subregion(sysmem, 0xd4000000, int_cram); memory_region_add_subregion(sysmem, 0xd0000000, int_dram); memory_region_add_subregion(sysmem, 0xf0050000, pcp_data); memory_region_add_subregion(sysmem, 0xf0060000, pcp_text); tricoretb_binfo.ram_size = machine->ram_size; tricoretb_binfo.kernel_filename = machine->kernel_filename; if (machine->kernel_filename) { tricore_load_kernel(env); } }"} {"target": 0, "idx": 14587, "func": "static uint64_t pxa2xx_cm_read(void *opaque, hwaddr addr, unsigned size) { PXA2xxState *s = (PXA2xxState *) opaque; switch (addr) { case CCCR: case CKEN: case OSCC: return s->cm_regs[addr >> 2]; case CCSR: return s->cm_regs[CCCR >> 2] | (3 << 28); default: printf(\"%s: Bad register \" REG_FMT \"\\n\", __FUNCTION__, addr); break; } return 0; }"} {"target": 0, "idx": 14611, "func": "static int mxf_write_header_metadata_sets(AVFormatContext *s) { AVStream *st; MXFStreamContext *sc = NULL; int i; mxf_write_preface(s); mxf_write_identification(s); mxf_write_content_storage(s); for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; sc = 
av_mallocz(sizeof(MXFStreamContext)); if (!sc) return AVERROR(ENOMEM); st->priv_data = sc; // set pts information if (st->codec->codec_type == CODEC_TYPE_VIDEO) av_set_pts_info(st, 64, 1, st->codec->time_base.den); else if (st->codec->codec_type == CODEC_TYPE_AUDIO) av_set_pts_info(st, 64, 1, st->codec->sample_rate); } mxf_build_structural_metadata(s, MaterialPackage); mxf_build_structural_metadata(s, SourcePackage); return 0; }"} {"target": 0, "idx": 14622, "func": "int av_opencl_buffer_write_image(cl_mem dst_cl_buf, size_t cl_buffer_size, int dst_cl_offset, uint8_t **src_data, int *plane_size, int plane_num) { int i, buffer_size = 0; uint8_t *temp; cl_int status; void *mapped; if ((unsigned int)plane_num > 8) { return AVERROR(EINVAL); } for (i = 0;i < plane_num;i++) { buffer_size += plane_size[i]; } if (buffer_size > cl_buffer_size) { av_log(&openclutils, AV_LOG_ERROR, \"Cannot write image to OpenCL buffer: buffer too small\\n\"); return AVERROR(EINVAL); } mapped = clEnqueueMapBuffer(gpu_env.command_queue, dst_cl_buf, CL_TRUE,CL_MAP_WRITE, 0, buffer_size + dst_cl_offset, 0, NULL, NULL, &status); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, \"Could not map OpenCL buffer: %s\\n\", opencl_errstr(status)); return AVERROR_EXTERNAL; } temp = mapped; temp += dst_cl_offset; for (i = 0; i < plane_num; i++) { memcpy(temp, src_data[i], plane_size[i]); temp += plane_size[i]; } status = clEnqueueUnmapMemObject(gpu_env.command_queue, dst_cl_buf, mapped, 0, NULL, NULL); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, \"Could not unmap OpenCL buffer: %s\\n\", opencl_errstr(status)); return AVERROR_EXTERNAL; } return 0; }"} {"target": 1, "idx": 14624, "func": "int ff_get_wav_header(AVIOContext *pb, AVCodecContext *codec, int size) { int id; id = avio_rl16(pb); codec->codec_type = AVMEDIA_TYPE_AUDIO; codec->channels = avio_rl16(pb); codec->sample_rate = avio_rl32(pb); codec->bit_rate = avio_rl32(pb) * 8; codec->block_align = avio_rl16(pb); if (size == 14) { /* We're dealing with plain vanilla WAVEFORMAT */ codec->bits_per_coded_sample = 8; }else codec->bits_per_coded_sample = avio_rl16(pb); if (id == 0xFFFE) { codec->codec_tag = 0; } else { codec->codec_tag = id; codec->codec_id = ff_wav_codec_get_id(id, codec->bits_per_coded_sample); } if (size >= 18) { /* We're obviously dealing with WAVEFORMATEX */ int cbSize = avio_rl16(pb); /* cbSize */ size -= 18; cbSize = FFMIN(size, cbSize); if (cbSize >= 22 && id == 0xfffe) { /* WAVEFORMATEXTENSIBLE */ ff_asf_guid subformat; codec->bits_per_coded_sample = avio_rl16(pb); codec->channel_layout = avio_rl32(pb); /* dwChannelMask */ ff_get_guid(pb, &subformat); if (!memcmp(subformat + 4, (const uint8_t[]){FF_MEDIASUBTYPE_BASE_GUID}, 12)) { codec->codec_tag = AV_RL32(subformat); codec->codec_id = ff_wav_codec_get_id(codec->codec_tag, codec->bits_per_coded_sample); } else { codec->codec_id = ff_codec_guid_get_id(ff_codec_wav_guids, subformat); if (!codec->codec_id) av_log(codec, AV_LOG_WARNING, \"unknown subformat:\"FF_PRI_GUID\"\\n\", FF_ARG_GUID(subformat)); } cbSize -= 22; size -= 22; } codec->extradata_size = cbSize; if (cbSize > 0) { av_free(codec->extradata); codec->extradata = av_mallocz(codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!codec->extradata) return AVERROR(ENOMEM); avio_read(pb, codec->extradata, codec->extradata_size); size -= cbSize; } /* It is possible for the chunk to contain garbage at the end */ if (size > 0) avio_skip(pb, size); } if (codec->codec_id == CODEC_ID_AAC_LATM) { /* channels and sample_rate 
values are those prior to applying SBR and/or PS */ codec->channels = 0; codec->sample_rate = 0; } /* override bits_per_coded_sample for G.726 */ if (codec->codec_id == CODEC_ID_ADPCM_G726) codec->bits_per_coded_sample = codec->bit_rate / codec->sample_rate; return 0; }"} {"target": 1, "idx": 14630, "func": "static int r3d_read_packet(AVFormatContext *s, AVPacket *pkt) { R3DContext *r3d = s->priv_data; Atom atom; int err = 0; while (!err) { if (read_atom(s, &atom) < 0) { err = -1; break; } switch (atom.tag) { case MKTAG('R','E','D','V'): if (s->streams[0]->discard == AVDISCARD_ALL) goto skip; if (!(err = r3d_read_redv(s, pkt, &atom))) return 0; break; case MKTAG('R','E','D','A'): if (!r3d->audio_channels) return -1; if (s->streams[1]->discard == AVDISCARD_ALL) goto skip; if (!(err = r3d_read_reda(s, pkt, &atom))) return 0; break; default: skip: avio_skip(s->pb, atom.size-8); } } return err; }"} {"target": 1, "idx": 14634, "func": "PPC_OP(test_ctrz_true) { T0 = (regs->ctr == 0 && (T0 & PARAM(1)) != 0); RETURN(); }"} {"target": 1, "idx": 14643, "func": "static void monitor_control_event(void *opaque, int event) { if (event == CHR_EVENT_OPENED) { QObject *data; Monitor *mon = opaque; mon->mc->command_mode = 0; json_message_parser_init(&mon->mc->parser, handle_qmp_command); data = get_qmp_greeting(); monitor_json_emitter(mon, data); qobject_decref(data); } }"} {"target": 1, "idx": 14652, "func": "void cpu_ppc_reset (void *opaque) { CPUPPCState *env; target_ulong msr; env = opaque; msr = (target_ulong)0; #if defined(TARGET_PPC64) msr |= (target_ulong)0 << MSR_HV; /* Should be 1... */ #endif msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */ msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */ msr |= (target_ulong)1 << MSR_EP; #if defined (DO_SINGLE_STEP) && 0 /* Single step trace mode */ msr |= (target_ulong)1 << MSR_SE; msr |= (target_ulong)1 << MSR_BE; #endif #if defined(CONFIG_USER_ONLY) msr |= (target_ulong)1 << MSR_FP; /* Allow floating point usage */ msr |= (target_ulong)1 << MSR_PR; #else env->nip = env->hreset_vector | env->excp_prefix; if (env->mmu_model != POWERPC_MMU_REAL_4xx) ppc_tlb_invalidate_all(env); #endif env->msr = msr; hreg_compute_hflags(env); env->reserve = -1; /* Be sure no exception or interrupt is pending */ env->pending_interrupts = 0; env->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; /* Flush all TLBs */ tlb_flush(env, 1); }"} {"target": 1, "idx": 14653, "func": "static int mov_create_timecode_track(AVFormatContext *s, int index, int src_index, const char *tcstr) { int ret; MOVMuxContext *mov = s->priv_data; MOVTrack *track = &mov->tracks[index]; AVStream *src_st = s->streams[src_index]; AVTimecode tc; AVPacket pkt = {.stream_index = index, .flags = AV_PKT_FLAG_KEY, .size = 4}; AVRational rate = find_fps(s, src_st); /* compute the frame number */ ret = av_timecode_init_from_string(&tc, rate, tcstr, s); if (ret < 0) return ret; /* tmcd track based on video stream */ track->mode = mov->mode; track->tag = MKTAG('t','m','c','d'); track->src_track = src_index; track->timescale = mov->tracks[src_index].timescale; if (tc.flags & AV_TIMECODE_FLAG_DROPFRAME) track->timecode_flags |= MOV_TIMECODE_FLAG_DROPFRAME; /* set st to src_st for metadata access*/ track->st = src_st; /* encode context: tmcd data stream */ track->enc = avcodec_alloc_context3(NULL); track->enc->codec_type = AVMEDIA_TYPE_DATA; track->enc->codec_tag = track->tag; track->enc->time_base = av_inv_q(rate); /* the tmcd track just contains one packet with the frame number */ pkt.data = 
av_malloc(pkt.size); AV_WB32(pkt.data, tc.start); ret = ff_mov_write_packet(s, &pkt); av_free(pkt.data); return ret; }"} {"target": 1, "idx": 14657, "func": "static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst) { MpegEncContext *s = &r->s; int i, j, k, v; int A, B, C; int pattern; int8_t *ptr; for(i = 0; i < 4; i++, dst += r->intra_types_stride){ if(!i && s->first_slice_line){ pattern = get_vlc2(gb, aic_top_vlc.table, AIC_TOP_BITS, 1); dst[0] = (pattern >> 2) & 2; dst[1] = (pattern >> 1) & 2; dst[2] = pattern & 2; dst[3] = (pattern << 1) & 2; continue; } ptr = dst; for(j = 0; j < 4; j++){ /* Coefficients are read using VLC chosen by the prediction pattern * The first one (used for retrieving a pair of coefficients) is * constructed from the top, top right and left coefficients * The second one (used for retrieving only one coefficient) is * top + 10 * left. */ A = ptr[-r->intra_types_stride + 1]; // it won't be used for the last coefficient in a row B = ptr[-r->intra_types_stride]; C = ptr[-1]; pattern = A + (B << 4) + (C << 8); for(k = 0; k < MODE2_PATTERNS_NUM; k++) if(pattern == rv40_aic_table_index[k]) break; if(j < 3 && k < MODE2_PATTERNS_NUM){ //pattern is found, decoding 2 coefficients v = get_vlc2(gb, aic_mode2_vlc[k].table, AIC_MODE2_BITS, 2); *ptr++ = v/9; *ptr++ = v%9; j++; }else{ if(B != -1 && C != -1) v = get_vlc2(gb, aic_mode1_vlc[B + C*10].table, AIC_MODE1_BITS, 1); else{ // tricky decoding v = 0; switch(C){ case -1: // code 0 -> 1, 1 -> 0 if(B < 2) v = get_bits1(gb) ^ 1; break; case 0: case 2: // code 0 -> 2, 1 -> 0 v = (get_bits1(gb) ^ 1) << 1; break; } } *ptr++ = v; } } } return 0; }"} {"target": 1, "idx": 14661, "func": "void virtio_queue_set_align(VirtIODevice *vdev, int n, int align) { BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); /* virtio-1 compliant devices cannot change the alignment */ if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { error_report(\"tried to modify queue alignment for virtio-1 device\"); return; } /* Check that the transport told us it was going to do this * (so a buggy transport will immediately assert rather than * silently failing to migrate this state) */ assert(k->has_variable_vring_alignment); vdev->vq[n].vring.align = align; virtio_queue_update_rings(vdev, n); }"} {"target": 1, "idx": 14671, "func": "static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index, AVPacket *pkt) { int64_t ref = pkt->dts; if (ref == AV_NOPTS_VALUE) ref = pkt->pts; if (ref == AV_NOPTS_VALUE) return 0; ref &= (1LL<<st->pts_wrap_bits)-1; if (s->correct_ts_overflow && st->pts_wrap_bits < 63 && st->pts_wrap_reference == AV_NOPTS_VALUE) { int i; // reference time stamp should be 60 s before first time stamp int64_t pts_wrap_reference = ref - av_rescale(60, st->time_base.den, st->time_base.num); // if first time stamp is not more than 1/8 and 60s before the wrap point, subtract rather than add wrap offset int pts_wrap_behavior = (ref < (1LL<<st->pts_wrap_bits) - (1LL<<st->pts_wrap_bits-3)) || (ref < (1LL<<st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET; AVProgram *first_program = av_find_program_from_stream(s, NULL, stream_index); if (!first_program) { int default_stream_index = av_find_default_stream_index(s); if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) { for (i=0; inb_streams; i++) { s->streams[i]->pts_wrap_reference = pts_wrap_reference; s->streams[i]->pts_wrap_behavior = pts_wrap_behavior; } } else { st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference; st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior; } } else { AVProgram *program = first_program; while (program) { if (program->pts_wrap_reference != AV_NOPTS_VALUE) { pts_wrap_reference = program->pts_wrap_reference; pts_wrap_behavior = program->pts_wrap_behavior; break; } program = av_find_program_from_stream(s, program, stream_index); } // update every program with differing pts_wrap_reference program = first_program; while(program) { if (program->pts_wrap_reference != pts_wrap_reference) { for (i=0; inb_stream_indexes; i++) { s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference; s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior; } program->pts_wrap_reference = pts_wrap_reference; program->pts_wrap_behavior = pts_wrap_behavior; } program = av_find_program_from_stream(s, program, stream_index); } } return 1; } return 0; }"} {"target": 1, "idx": 14675, "func": "static void action_command(EEPRO100State *s) { for (;;) { bool bit_el; bool bit_s; bool bit_i; bool bit_nc; uint16_t ok_status = STATUS_OK; s->cb_address = s->cu_base + s->cu_offset; read_cb(s); bit_el = ((s->tx.command & COMMAND_EL) != 0); bit_s = ((s->tx.command & COMMAND_S) != 0); bit_i = ((s->tx.command & COMMAND_I) != 0); bit_nc = ((s->tx.command & COMMAND_NC) != 0); #if 0 bool bit_sf = ((s->tx.command & COMMAND_SF) != 0); #endif s->cu_offset = s->tx.link; TRACE(OTHER, logout(\"val=(cu start), status=0x%04x, command=0x%04x, link=0x%08x\\n\", s->tx.status, s->tx.command, s->tx.link)); switch (s->tx.command & COMMAND_CMD) { case CmdNOp: /* Do nothing. */ break; case CmdIASetup: cpu_physical_memory_read(s->cb_address + 8, &s->conf.macaddr.a[0], 6); TRACE(OTHER, logout(\"macaddr: %s\\n\", nic_dump(&s->conf.macaddr.a[0], 6))); break; case CmdConfigure: cpu_physical_memory_read(s->cb_address + 8, &s->configuration[0], sizeof(s->configuration)); TRACE(OTHER, logout(\"configuration: %s\\n\", nic_dump(&s->configuration[0], 16))); break; case CmdMulticastList: set_multicast_list(s); break; case CmdTx: if (bit_nc) { missing(\"CmdTx: NC = 0\"); ok_status = 0; break; } tx_command(s); break; case CmdTDR: TRACE(OTHER, logout(\"load microcode\\n\")); /* Starting with offset 8, the command contains * 64 dwords microcode which we just ignore here. */ break; case CmdDiagnose: TRACE(OTHER, logout(\"diagnose\\n\")); /* Make sure error flag is not set. */ s->tx.status = 0; break; default: missing(\"undefined command\"); ok_status = 0; break; } /* Write new status. */ stw_phys(s->cb_address, s->tx.status | ok_status | STATUS_C); if (bit_i) { /* CU completed action. */ eepro100_cx_interrupt(s); } if (bit_el) { /* CU becomes idle. Terminate command loop. */ set_cu_state(s, cu_idle); eepro100_cna_interrupt(s); break; } else if (bit_s) { /* CU becomes suspended. Terminate command loop. */ set_cu_state(s, cu_suspended); eepro100_cna_interrupt(s); break; } else { /* More entries in list. 
*/ TRACE(OTHER, logout(\"CU list with at least one more entry\\n\")); } } TRACE(OTHER, logout(\"CU list empty\\n\")); /* List is empty. Now CU is idle or suspended. */ }"} {"target": 1, "idx": 14688, "func": "void SwScale_YV12slice(unsigned char* srcptr[],int stride[], int srcSliceY , int srcSliceH, uint8_t* dstptr[], int dststride, int dstbpp, int srcW, int srcH, int dstW, int dstH){ #ifdef RUNTIME_CPUDETECT #ifdef CAN_COMPILE_X86_ASM // ordered per speed fasterst first if(gCpuCaps.hasMMX2) SwScale_YV12slice_MMX2(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH); else if(gCpuCaps.has3DNow) SwScale_YV12slice_3DNow(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH); else if(gCpuCaps.hasMMX) SwScale_YV12slice_MMX(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH); else SwScale_YV12slice_C(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH); #else SwScale_YV12slice_C(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH); #endif #else //RUNTIME_CPUDETECT #ifdef HAVE_MMX2 SwScale_YV12slice_MMX2(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH); #elif defined (HAVE_3DNOW) SwScale_YV12slice_3DNow(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH); #elif defined (HAVE_MMX) SwScale_YV12slice_MMX(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH); #else SwScale_YV12slice_C(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH); #endif #endif //!RUNTIME_CPUDETECT }"} {"target": 0, "idx": 14706, "func": "int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw, int mmu_idx) { struct mmu_ctx_hash64 ctx; int access_type; int ret = 0; if (rw == 2) { /* code access */ rw = 0; access_type = ACCESS_CODE; } else { /* data access */ access_type = env->access_type; } ret = ppc_hash64_get_physical_address(env, &ctx, address, rw, access_type); if (ret == 0) { tlb_set_page(env, address & TARGET_PAGE_MASK, ctx.raddr & TARGET_PAGE_MASK, ctx.prot, mmu_idx, TARGET_PAGE_SIZE); ret = 0; } else if (ret < 0) { LOG_MMU_STATE(env); if (access_type == ACCESS_CODE) { switch (ret) { case -1: env->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x40000000; break; case -2: /* Access rights violation */ env->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x08000000; break; case -3: /* No execute protection violation */ env->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; break; case -5: /* No match in segment table */ env->exception_index = POWERPC_EXCP_ISEG; env->error_code = 0; break; } } else { switch (ret) { case -1: /* No matches in page tables or TLB */ env->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x42000000; } else { env->spr[SPR_DSISR] = 0x40000000; } break; case -2: /* Access rights violation */ env->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x0A000000; } else { env->spr[SPR_DSISR] = 0x08000000; } break; case -5: /* No match in segment table */ env->exception_index = POWERPC_EXCP_DSEG; env->error_code = 0; env->spr[SPR_DAR] = address; break; } } #if 0 printf(\"%s: set exception to %d %02x\\n\", __func__, env->exception, env->error_code); #endif ret = 1; } return ret; }"} 
{"target": 0, "idx": 14713, "func": "static void isa_mmio_writew(void *opaque, target_phys_addr_t addr, uint32_t val) { cpu_outw(addr & IOPORTS_MASK, val); }"} {"target": 1, "idx": 14740, "func": "static int flic_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { FlicDecodeContext *s = (FlicDecodeContext *)avctx->priv_data; int stream_ptr = 0; int stream_ptr_after_color_chunk; int pixel_ptr; int palette_ptr; unsigned char palette_idx1; unsigned char palette_idx2; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j; int color_packets; int color_changes; int color_shift; unsigned char r, g, b; int lines; int compressed_lines; int starting_line; signed short line_packets; int y_ptr; signed char byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; int pixel_limit; s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame) < 0) { av_log(avctx, AV_LOG_ERROR, \"reget_buffer() failed\\n\"); return -1; } pixels = s->frame.data[0]; pixel_limit = s->avctx->height * s->frame.linesize[0]; frame_size = LE_32(&buf[stream_ptr]); stream_ptr += 6; /* skip the magic number */ num_chunks = LE_16(&buf[stream_ptr]); stream_ptr += 10; /* skip padding */ frame_size -= 16; /* iterate through the chunks */ while ((frame_size > 0) && (num_chunks > 0)) { chunk_size = LE_32(&buf[stream_ptr]); stream_ptr += 4; chunk_type = LE_16(&buf[stream_ptr]); stream_ptr += 2; switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: stream_ptr_after_color_chunk = stream_ptr + chunk_size - 6; s->new_palette = 1; /* check special case: If this file is from the Magic Carpet * game and uses 6-bit colors even though it reports 256-color * chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during * initialization) */ if ((chunk_type == FLI_256_COLOR) && (s->fli_type != 0xAF13)) color_shift = 0; else color_shift = 2; /* set up the palette */ color_packets = LE_16(&buf[stream_ptr]); stream_ptr += 2; palette_ptr = 0; for (i = 0; i < color_packets; i++) { /* first byte is how many colors to skip */ palette_ptr += buf[stream_ptr++]; /* next byte indicates how many entries to change */ color_changes = buf[stream_ptr++]; /* if there are 0 color changes, there are actually 256 */ if (color_changes == 0) color_changes = 256; for (j = 0; j < color_changes; j++) { /* wrap around, for good measure */ if ((unsigned)palette_ptr >= 256) palette_ptr = 0; r = buf[stream_ptr++] << color_shift; g = buf[stream_ptr++] << color_shift; b = buf[stream_ptr++] << color_shift; s->palette[palette_ptr++] = (r << 16) | (g << 8) | b; } } /* color chunks sometimes have weird 16-bit alignment issues; * therefore, take the hardline approach and set the stream_ptr * to the value calculated w.r.t. 
the size specified by the color * chunk header */ stream_ptr = stream_ptr_after_color_chunk; break; case FLI_DELTA: y_ptr = 0; compressed_lines = LE_16(&buf[stream_ptr]); stream_ptr += 2; while (compressed_lines > 0) { line_packets = LE_16(&buf[stream_ptr]); stream_ptr += 2; if (line_packets < 0) { line_packets = -line_packets; y_ptr += line_packets * s->frame.linesize[0]; } else { compressed_lines--; pixel_ptr = y_ptr; pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = buf[stream_ptr++]; pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = buf[stream_ptr++]; if (byte_run < 0) { byte_run = -byte_run; palette_idx1 = buf[stream_ptr++]; palette_idx2 = buf[stream_ptr++]; for (j = 0; j < byte_run; j++, pixel_countdown -= 2) { pixels[pixel_ptr++] = palette_idx1; pixels[pixel_ptr++] = palette_idx2; } } else { for (j = 0; j < byte_run * 2; j++, pixel_countdown--) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; } } } y_ptr += s->frame.linesize[0]; } } break; case FLI_LC: /* line compressed */ starting_line = LE_16(&buf[stream_ptr]); stream_ptr += 2; y_ptr = 0; y_ptr += starting_line * s->frame.linesize[0]; compressed_lines = LE_16(&buf[stream_ptr]); stream_ptr += 2; while (compressed_lines > 0) { pixel_ptr = y_ptr; pixel_countdown = s->avctx->width; line_packets = buf[stream_ptr++]; if (line_packets > 0) { for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = buf[stream_ptr++]; pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = buf[stream_ptr++]; if (byte_run > 0) { for (j = 0; j < byte_run; j++, pixel_countdown--) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; } } else { byte_run = -byte_run; palette_idx1 = buf[stream_ptr++]; for (j = 0; j < byte_run; j++, pixel_countdown--) { pixels[pixel_ptr++] = palette_idx1; } } } } y_ptr += s->frame.linesize[0]; compressed_lines--; } break; case FLI_BLACK: /* set the whole frame to color 0 (which is usually black) */ memset(pixels, 0, s->frame.linesize[0] * s->avctx->height); break; case FLI_BRUN: /* Byte run compression: This chunk type only occurs in the first * FLI frame and it will update the entire frame. 
*/ y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ stream_ptr++; pixel_countdown = s->avctx->width; while (pixel_countdown > 0) { byte_run = buf[stream_ptr++]; if (byte_run > 0) { palette_idx1 = buf[stream_ptr++]; for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d)\\n\", pixel_countdown); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; for (j = 0; j < byte_run; j++) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d)\\n\", pixel_countdown); } } } y_ptr += s->frame.linesize[0]; } break; case FLI_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 > s->avctx->width * s->avctx->height) { av_log(avctx, AV_LOG_ERROR, \"In chunk FLI_COPY : source data (%d bytes) \" \\ \"bigger than image, skipping chunk\\n\", chunk_size - 6); stream_ptr += chunk_size - 6; } else { for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height; y_ptr += s->frame.linesize[0]) { memcpy(&pixels[y_ptr], &buf[stream_ptr], s->avctx->width); stream_ptr += s->avctx->width; } } break; case FLI_MINI: /* some sort of a thumbnail? disregard this chunk... */ stream_ptr += chunk_size - 6; break; default: av_log(avctx, AV_LOG_ERROR, \"Unrecognized chunk type: %d\\n\", chunk_type); break; } frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1, possibly); if it doesn't, issue a warning */ if ((stream_ptr != buf_size) && (stream_ptr != buf_size - 1)) av_log(avctx, AV_LOG_ERROR, \"Processed FLI chunk where chunk size = %d \" \\ \"and final chunk ptr = %d\\n\", buf_size, stream_ptr); /* make the palette available on the way out */ // if (s->new_palette) { if (1) { memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); s->frame.palette_has_changed = 1; s->new_palette = 0; } *data_size=sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; }"} {"target": 1, "idx": 14758, "func": "static int encode_superframe(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ WMACodecContext *s = avctx->priv_data; const short *samples = data; int i, total_gain; s->block_len_bits= s->frame_len_bits; //required by non variable block len s->block_len = 1 << s->block_len_bits; apply_window_and_mdct(avctx, samples, avctx->frame_size); if (s->ms_stereo) { float a, b; int i; for(i = 0; i < s->block_len; i++) { a = s->coefs[0][i]*0.5; b = s->coefs[1][i]*0.5; s->coefs[0][i] = a + b; s->coefs[1][i] = a - b; #if 1 total_gain= 128; for(i=64; i; i>>=1){ int error= encode_frame(s, s->coefs, buf, buf_size, total_gain-i); if(error<0) total_gain-= i; #else total_gain= 90; best= encode_frame(s, s->coefs, buf, buf_size, total_gain); for(i=32; i; i>>=1){ int scoreL= encode_frame(s, s->coefs, buf, buf_size, total_gain-i); int scoreR= encode_frame(s, s->coefs, buf, buf_size, total_gain+i); av_log(NULL, AV_LOG_ERROR, \"%d %d %d (%d)\\n\", scoreL, best, scoreR, total_gain); if(scoreL < FFMIN(best, scoreR)){ best = scoreL; total_gain -= i; }else if(scoreR < best){ best = scoreR; total_gain += i; #endif encode_frame(s, s->coefs, buf, buf_size, total_gain); assert((put_bits_count(&s->pb) & 7) == 0); i= s->block_align - (put_bits_count(&s->pb)+7)/8; assert(i>=0); while(i--) put_bits(&s->pb, 8, 'N'); 
flush_put_bits(&s->pb); return put_bits_ptr(&s->pb) - s->pb.buf;"} {"target": 1, "idx": 14770, "func": "int kvm_on_sigbus(int code, void *addr) { #if defined(KVM_CAP_MCE) if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) { uint64_t status; void *vaddr; ram_addr_t ram_addr; target_phys_addr_t paddr; /* Hope we are lucky for AO MCE */ vaddr = addr; if (qemu_ram_addr_from_host(vaddr, &ram_addr) || !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr, &paddr)) { fprintf(stderr, \"Hardware memory error for memory used by \" \"QEMU itself instead of guest system!: %p\\n\", addr); return 0; } status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S | 0xc0; kvm_inject_x86_mce(first_cpu, 9, status, MCG_STATUS_MCIP | MCG_STATUS_RIPV, paddr, (MCM_ADDR_PHYS << 6) | 0xc, 1); kvm_mce_broadcast_rest(first_cpu); } else #endif { if (code == BUS_MCEERR_AO) { return 0; } else if (code == BUS_MCEERR_AR) { hardware_memory_error(); } else { return 1; } } return 0; }"} {"target": 1, "idx": 14772, "func": "static int ogg_save(AVFormatContext *s) { struct ogg *ogg = s->priv_data; struct ogg_state *ost = av_malloc(sizeof (*ost) + (ogg->nstreams-1) * sizeof (*ogg->streams)); int i; ost->pos = avio_tell (s->pb); ost->curidx = ogg->curidx; ost->next = ogg->state; ost->nstreams = ogg->nstreams; memcpy(ost->streams, ogg->streams, ogg->nstreams * sizeof(*ogg->streams)); for (i = 0; i < ogg->nstreams; i++){ struct ogg_stream *os = ogg->streams + i; os->buf = av_malloc (os->bufsize); memset (os->buf, 0, os->bufsize); memcpy (os->buf, ost->streams[i].buf, os->bufpos); } ogg->state = ost; return 0; }"} {"target": 1, "idx": 14776, "func": "static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic, const uint8_t *src, int src_size) { int h, w; uint8_t *Y, *U, *V; int ret; if (src_size < avctx->width * avctx->height * 3) { av_log(avctx, AV_LOG_ERROR, \"packet too small\\n\"); return AVERROR_INVALIDDATA; } avctx->pix_fmt = AV_PIX_FMT_YUV444P; if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) return ret; Y = pic->data[0]; U = pic->data[1]; V = pic->data[2]; for (h = 0; h < avctx->height; h++) { for (w = 0; w < avctx->width; w++) { Y[w] = *src++; U[w] = *src++ ^ 0x80; V[w] = *src++ ^ 0x80; } Y += pic->linesize[0]; U += pic->linesize[1]; V += pic->linesize[2]; } return 0; }"} {"target": 0, "idx": 14781, "func": "static size_t buffered_set_rate_limit(void *opaque, size_t new_rate) { QEMUFileBuffered *s = opaque; if (s->has_error) goto out; s->xfer_limit = new_rate / 10; out: return s->xfer_limit; }"} {"target": 0, "idx": 14788, "func": "static void ccw_init(MachineState *machine) { int ret; VirtualCssBus *css_bus; DeviceState *dev; s390_sclp_init(); s390_memory_init(machine->ram_size); /* get a BUS */ css_bus = virtual_css_bus_init(); s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline, machine->initrd_filename, \"s390-ccw.img\", true); s390_flic_init(); dev = qdev_create(NULL, TYPE_S390_PCI_HOST_BRIDGE); object_property_add_child(qdev_get_machine(), TYPE_S390_PCI_HOST_BRIDGE, OBJECT(dev), NULL); qdev_init_nofail(dev); /* register hypercalls */ virtio_ccw_register_hcalls(); /* init CPUs */ s390_init_cpus(machine->cpu_model); if (kvm_enabled()) { kvm_s390_enable_css_support(s390_cpu_addr2state(0)); } /* * Create virtual css and set it as default so that non mcss-e * enabled guests only see virtio devices. 
*/ ret = css_create_css_image(VIRTUAL_CSSID, true); assert(ret == 0); /* Create VirtIO network adapters */ s390_create_virtio_net(BUS(css_bus), \"virtio-net-ccw\"); /* Register savevm handler for guest TOD clock */ register_savevm(NULL, \"todclock\", 0, 1, gtod_save, gtod_load, kvm_state); }"} {"target": 0, "idx": 14789, "func": "int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf, int64_t pos, int size) { BlockDriver *drv = bs->drv; if (!drv) return -ENOMEDIUM; if (drv->bdrv_load_vmstate) return drv->bdrv_load_vmstate(bs, buf, pos, size); if (bs->file) return bdrv_load_vmstate(bs->file, buf, pos, size); return -ENOTSUP; }"} {"target": 0, "idx": 14791, "func": "static av_cold int vaapi_encode_h265_init(AVCodecContext *avctx) { return ff_vaapi_encode_init(avctx, &vaapi_encode_type_h265); }"} {"target": 0, "idx": 14801, "func": "static void versatile_init(ram_addr_t ram_size, int vga_ram_size, const char *boot_device, DisplayState *ds, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, int board_id) { CPUState *env; qemu_irq *pic; qemu_irq *sic; void *scsi_hba; PCIBus *pci_bus; NICInfo *nd; int n; int done_smc = 0; int index; if (!cpu_model) cpu_model = \"arm926\"; env = cpu_init(cpu_model); if (!env) { fprintf(stderr, \"Unable to find CPU definition\\n\"); exit(1); } /* ??? RAM should repeat to fill physical memory space. */ /* SDRAM at address zero. */ cpu_register_physical_memory(0, ram_size, IO_MEM_RAM); arm_sysctl_init(0x10000000, 0x41007004); pic = arm_pic_init_cpu(env); pic = pl190_init(0x10140000, pic[0], pic[1]); sic = vpb_sic_init(0x10003000, pic, 31); pl050_init(0x10006000, sic[3], 0); pl050_init(0x10007000, sic[4], 1); pci_bus = pci_vpb_init(sic, 27, 0); /* The Versatile PCI bridge does not provide access to PCI IO space, so many of the qemu PCI devices are not useable. */ for(n = 0; n < nb_nics; n++) { nd = &nd_table[n]; if (!nd->model) nd->model = done_smc ? \"rtl8139\" : \"smc91c111\"; if (strcmp(nd->model, \"smc91c111\") == 0) { smc91c111_init(nd, 0x10010000, sic[25]); } else { pci_nic_init(pci_bus, nd, -1); } } if (usb_enabled) { usb_ohci_init_pci(pci_bus, 3, -1); } if (drive_get_max_bus(IF_SCSI) > 0) { fprintf(stderr, \"qemu: too many SCSI bus\\n\"); exit(1); } scsi_hba = lsi_scsi_init(pci_bus, -1); for (n = 0; n < LSI_MAX_DEVS; n++) { index = drive_get_index(IF_SCSI, 0, n); if (index == -1) continue; lsi_scsi_attach(scsi_hba, drives_table[index].bdrv, n); } pl011_init(0x101f1000, pic[12], serial_hds[0], PL011_ARM); pl011_init(0x101f2000, pic[13], serial_hds[1], PL011_ARM); pl011_init(0x101f3000, pic[14], serial_hds[2], PL011_ARM); pl011_init(0x10009000, sic[6], serial_hds[3], PL011_ARM); pl080_init(0x10130000, pic[17], 8); sp804_init(0x101e2000, pic[4]); sp804_init(0x101e3000, pic[5]); /* The versatile/PB actually has a modified Color LCD controller that includes hardware cursor support from the PL111. */ pl110_init(ds, 0x10120000, pic[16], 1); index = drive_get_index(IF_SD, 0, 0); if (index == -1) { fprintf(stderr, \"qemu: missing SecureDigital card\\n\"); exit(1); } pl181_init(0x10005000, drives_table[index].bdrv, sic[22], sic[1]); #if 0 /* Disabled because there's no way of specifying a block device. */ pl181_init(0x1000b000, NULL, sic, 23, 2); #endif /* Add PL031 Real Time Clock. */ pl031_init(0x101e8000,pic[10]); /* Memory map for Versatile/PB: */ /* 0x10000000 System registers. */ /* 0x10001000 PCI controller config registers. */ /* 0x10002000 Serial bus interface. 
*/ /* 0x10003000 Secondary interrupt controller. */ /* 0x10004000 AACI (audio). */ /* 0x10005000 MMCI0. */ /* 0x10006000 KMI0 (keyboard). */ /* 0x10007000 KMI1 (mouse). */ /* 0x10008000 Character LCD Interface. */ /* 0x10009000 UART3. */ /* 0x1000a000 Smart card 1. */ /* 0x1000b000 MMCI1. */ /* 0x10010000 Ethernet. */ /* 0x10020000 USB. */ /* 0x10100000 SSMC. */ /* 0x10110000 MPMC. */ /* 0x10120000 CLCD Controller. */ /* 0x10130000 DMA Controller. */ /* 0x10140000 Vectored interrupt controller. */ /* 0x101d0000 AHB Monitor Interface. */ /* 0x101e0000 System Controller. */ /* 0x101e1000 Watchdog Interface. */ /* 0x101e2000 Timer 0/1. */ /* 0x101e3000 Timer 2/3. */ /* 0x101e4000 GPIO port 0. */ /* 0x101e5000 GPIO port 1. */ /* 0x101e6000 GPIO port 2. */ /* 0x101e7000 GPIO port 3. */ /* 0x101e8000 RTC. */ /* 0x101f0000 Smart card 0. */ /* 0x101f1000 UART0. */ /* 0x101f2000 UART1. */ /* 0x101f3000 UART2. */ /* 0x101f4000 SSPI. */ versatile_binfo.ram_size = ram_size; versatile_binfo.kernel_filename = kernel_filename; versatile_binfo.kernel_cmdline = kernel_cmdline; versatile_binfo.initrd_filename = initrd_filename; versatile_binfo.board_id = board_id; arm_load_kernel(env, &versatile_binfo); }"} {"target": 0, "idx": 14808, "func": "static void detach(sPAPRDRConnector *drc, DeviceState *d, spapr_drc_detach_cb *detach_cb, void *detach_cb_opaque, Error **errp) { trace_spapr_drc_detach(get_index(drc)); drc->detach_cb = detach_cb; drc->detach_cb_opaque = detach_cb_opaque; /* if we've signalled device presence to the guest, or if the guest * has gone ahead and configured the device (via manually-executed * device add via drmgr in guest, namely), we need to wait * for the guest to quiesce the device before completing detach. * Otherwise, we can assume the guest hasn't seen it and complete the * detach immediately. Note that there is a small race window * just before, or during, configuration, which is this context * refers mainly to fetching the device tree via RTAS. * During this window the device access will be arbitrated by * associated DRC, which will simply fail the RTAS calls as invalid. * This is recoverable within guest and current implementations of * drmgr should be able to cope. 
*/ if (!drc->signalled && !drc->configured) { /* if the guest hasn't seen the device we can't rely on it to * set it back to an isolated state via RTAS, so do it here manually */ drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED; } if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) { trace_spapr_drc_awaiting_isolated(get_index(drc)); drc->awaiting_release = true; return; } if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI && drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) { trace_spapr_drc_awaiting_unusable(get_index(drc)); drc->awaiting_release = true; return; } if (drc->awaiting_allocation) { if (!drc->awaiting_allocation_skippable) { drc->awaiting_release = true; trace_spapr_drc_awaiting_allocation(get_index(drc)); return; } } drc->indicator_state = SPAPR_DR_INDICATOR_STATE_INACTIVE; if (drc->detach_cb) { drc->detach_cb(drc->dev, drc->detach_cb_opaque); } drc->awaiting_release = false; drc->awaiting_allocation_skippable = false; g_free(drc->fdt); drc->fdt = NULL; drc->fdt_start_offset = 0; object_property_del(OBJECT(drc), \"device\", NULL); drc->dev = NULL; drc->detach_cb = NULL; drc->detach_cb_opaque = NULL; }"} {"target": 1, "idx": 14818, "func": "char **breakline(char *input, int *count) { int c = 0; char *p; char **rval = calloc(sizeof(char *), 1); while (rval && (p = qemu_strsep(&input, \" \")) != NULL) { if (!*p) { continue; } c++; rval = realloc(rval, sizeof(*rval) * (c + 1)); if (!rval) { c = 0; break; } rval[c - 1] = p; rval[c] = NULL; } *count = c; return rval; }"} {"target": 0, "idx": 14819, "func": "static av_noinline void FUNC(hl_decode_mb)(H264Context *h) { const int mb_x = h->mb_x; const int mb_y = h->mb_y; const int mb_xy = h->mb_xy; const int mb_type = h->cur_pic.f.mb_type[mb_xy]; uint8_t *dest_y, *dest_cb, *dest_cr; int linesize, uvlinesize /*dct_offset*/; int i, j; int *block_offset = &h->block_offset[0]; const int transform_bypass = !SIMPLE && (h->qscale == 0 && h->sps.transform_bypass); /* is_h264 should always be true if SVQ3 is disabled. */ const int is_h264 = !CONFIG_SVQ3_DECODER || SIMPLE || h->avctx->codec_id == AV_CODEC_ID_H264; void (*idct_add)(uint8_t *dst, int16_t *block, int stride); const int block_h = 16 >> h->chroma_y_shift; const int chroma422 = CHROMA422; dest_y = h->cur_pic.f.data[0] + ((mb_x << PIXEL_SHIFT) + mb_y * h->linesize) * 16; dest_cb = h->cur_pic.f.data[1] + (mb_x << PIXEL_SHIFT) * 8 + mb_y * h->uvlinesize * block_h; dest_cr = h->cur_pic.f.data[2] + (mb_x << PIXEL_SHIFT) * 8 + mb_y * h->uvlinesize * block_h; h->vdsp.prefetch(dest_y + (h->mb_x & 3) * 4 * h->linesize + (64 << PIXEL_SHIFT), h->linesize, 4); h->vdsp.prefetch(dest_cb + (h->mb_x & 7) * h->uvlinesize + (64 << PIXEL_SHIFT), dest_cr - dest_cb, 2); h->list_counts[mb_xy] = h->list_count; if (!SIMPLE && MB_FIELD) { linesize = h->mb_linesize = h->linesize * 2; uvlinesize = h->mb_uvlinesize = h->uvlinesize * 2; block_offset = &h->block_offset[48]; if (mb_y & 1) { // FIXME move out of this function? 
dest_y -= h->linesize * 15; dest_cb -= h->uvlinesize * (block_h - 1); dest_cr -= h->uvlinesize * (block_h - 1); } if (FRAME_MBAFF) { int list; for (list = 0; list < h->list_count; list++) { if (!USES_LIST(mb_type, list)) continue; if (IS_16X16(mb_type)) { int8_t *ref = &h->ref_cache[list][scan8[0]]; fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (h->mb_y & 1), 1); } else { for (i = 0; i < 16; i += 4) { int ref = h->ref_cache[list][scan8[i]]; if (ref >= 0) fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2, 8, (16 + ref) ^ (h->mb_y & 1), 1); } } } } } else { linesize = h->mb_linesize = h->linesize; uvlinesize = h->mb_uvlinesize = h->uvlinesize; // dct_offset = s->linesize * 16; } if (!SIMPLE && IS_INTRA_PCM(mb_type)) { const int bit_depth = h->sps.bit_depth_luma; if (PIXEL_SHIFT) { int j; GetBitContext gb; init_get_bits(&gb, (uint8_t *)h->intra_pcm_ptr, ff_h264_mb_sizes[h->sps.chroma_format_idc] * bit_depth); for (i = 0; i < 16; i++) { uint16_t *tmp_y = (uint16_t *)(dest_y + i * linesize); for (j = 0; j < 16; j++) tmp_y[j] = get_bits(&gb, bit_depth); } if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { if (!h->sps.chroma_format_idc) { for (i = 0; i < block_h; i++) { uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize); uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize); for (j = 0; j < 8; j++) { tmp_cb[j] = tmp_cr[j] = 1 << (bit_depth - 1); } } } else { for (i = 0; i < block_h; i++) { uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize); for (j = 0; j < 8; j++) tmp_cb[j] = get_bits(&gb, bit_depth); } for (i = 0; i < block_h; i++) { uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize); for (j = 0; j < 8; j++) tmp_cr[j] = get_bits(&gb, bit_depth); } } } } else { for (i = 0; i < 16; i++) memcpy(dest_y + i * linesize, (uint8_t *)h->intra_pcm_ptr + i * 16, 16); if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { if (!h->sps.chroma_format_idc) { for (i = 0; i < 8; i++) { memset(dest_cb + i*uvlinesize, 1 << (bit_depth - 1), 8); memset(dest_cr + i*uvlinesize, 1 << (bit_depth - 1), 8); } } else { uint8_t *src_cb = (uint8_t *)h->intra_pcm_ptr + 256; uint8_t *src_cr = (uint8_t *)h->intra_pcm_ptr + 256 + block_h * 8; for (i = 0; i < block_h; i++) { memcpy(dest_cb + i * uvlinesize, src_cb + i * 8, 8); memcpy(dest_cr + i * uvlinesize, src_cr + i * 8, 8); } } } } } else { if (IS_INTRA(mb_type)) { if (h->deblocking_filter) xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1, 0, SIMPLE, PIXEL_SHIFT); if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) { h->hpc.pred8x8[h->chroma_pred_mode](dest_cb, uvlinesize); h->hpc.pred8x8[h->chroma_pred_mode](dest_cr, uvlinesize); } hl_decode_mb_predict_luma(h, mb_type, is_h264, SIMPLE, transform_bypass, PIXEL_SHIFT, block_offset, linesize, dest_y, 0); if (h->deblocking_filter) xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0, 0, SIMPLE, PIXEL_SHIFT); } else if (is_h264) { if (chroma422) { FUNC(hl_motion_422)(h, dest_y, dest_cb, dest_cr, h->me.qpel_put, h->h264chroma.put_h264_chroma_pixels_tab, h->me.qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab, h->h264dsp.weight_h264_pixels_tab, h->h264dsp.biweight_h264_pixels_tab); } else { FUNC(hl_motion_420)(h, dest_y, dest_cb, dest_cr, h->me.qpel_put, h->h264chroma.put_h264_chroma_pixels_tab, h->me.qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab, h->h264dsp.weight_h264_pixels_tab, h->h264dsp.biweight_h264_pixels_tab); } } hl_decode_mb_idct_luma(h, mb_type, is_h264, SIMPLE, transform_bypass, PIXEL_SHIFT, block_offset, linesize, dest_y, 0); 
if ((SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) && (h->cbp & 0x30)) { uint8_t *dest[2] = { dest_cb, dest_cr }; if (transform_bypass) { if (IS_INTRA(mb_type) && h->sps.profile_idc == 244 && (h->chroma_pred_mode == VERT_PRED8x8 || h->chroma_pred_mode == HOR_PRED8x8)) { h->hpc.pred8x8_add[h->chroma_pred_mode](dest[0], block_offset + 16, h->mb + (16 * 16 * 1 << PIXEL_SHIFT), uvlinesize); h->hpc.pred8x8_add[h->chroma_pred_mode](dest[1], block_offset + 32, h->mb + (16 * 16 * 2 << PIXEL_SHIFT), uvlinesize); } else { idct_add = h->h264dsp.h264_add_pixels4; for (j = 1; j < 3; j++) { for (i = j * 16; i < j * 16 + 4; i++) if (h->non_zero_count_cache[scan8[i]] || dctcoef_get(h->mb, PIXEL_SHIFT, i * 16)) idct_add(dest[j - 1] + block_offset[i], h->mb + (i * 16 << PIXEL_SHIFT), uvlinesize); if (chroma422) { for (i = j * 16 + 4; i < j * 16 + 8; i++) if (h->non_zero_count_cache[scan8[i + 4]] || dctcoef_get(h->mb, PIXEL_SHIFT, i * 16)) idct_add(dest[j - 1] + block_offset[i + 4], h->mb + (i * 16 << PIXEL_SHIFT), uvlinesize); } } } } else { if (is_h264) { int qp[2]; if (chroma422) { qp[0] = h->chroma_qp[0] + 3; qp[1] = h->chroma_qp[1] + 3; } else { qp[0] = h->chroma_qp[0]; qp[1] = h->chroma_qp[1]; } if (h->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 0]]) h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16 * 16 * 1 << PIXEL_SHIFT), h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]); if (h->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 1]]) h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + (16 * 16 * 2 << PIXEL_SHIFT), h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]); h->h264dsp.h264_idct_add8(dest, block_offset, h->mb, uvlinesize, h->non_zero_count_cache); } else if (CONFIG_SVQ3_DECODER) { h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16 * 16 * 1, h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][h->chroma_qp[0]][0]); h->h264dsp.h264_chroma_dc_dequant_idct(h->mb + 16 * 16 * 2, h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][h->chroma_qp[1]][0]); for (j = 1; j < 3; j++) { for (i = j * 16; i < j * 16 + 4; i++) if (h->non_zero_count_cache[scan8[i]] || h->mb[i * 16]) { uint8_t *const ptr = dest[j - 1] + block_offset[i]; ff_svq3_add_idct_c(ptr, h->mb + i * 16, uvlinesize, ff_h264_chroma_qp[0][h->qscale + 12] - 12, 2); } } } } } if (h->cbp || IS_INTRA(mb_type)) { h->dsp.clear_blocks(h->mb); h->dsp.clear_blocks(h->mb + (24 * 16 << PIXEL_SHIFT)); } } }"} {"target": 1, "idx": 14829, "func": "static gsize calc_float_string_storage(double value) { int whole_value = value; gsize i = 0; do { i++; } while (whole_value /= 10); return i + 2 + FLOAT_STRING_PRECISION; }"} {"target": 0, "idx": 14850, "func": "static void virtio_net_vmstate_change(void *opaque, int running, int reason) { VirtIONet *n = opaque; if (!running) { return; } /* This is called when vm is started, it will start vhost backend if * appropriate e.g. after migration. 
*/ virtio_net_set_status(&n->vdev, n->vdev.status); }"} {"target": 0, "idx": 14854, "func": "static int coroutine_fn raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int count) { BDRVRawState *s = bs->opaque; if (offset > UINT64_MAX - s->offset) { return -EINVAL; } offset += s->offset; return bdrv_co_pdiscard(bs->file->bs, offset, count); }"} {"target": 0, "idx": 14870, "func": "static av_cold int mp3lame_encode_init(AVCodecContext *avctx) { LAMEContext *s = avctx->priv_data; int ret; s->avctx = avctx; /* initialize LAME and get defaults */ if (!(s->gfp = lame_init())) return AVERROR(ENOMEM); lame_set_num_channels(s->gfp, avctx->channels); lame_set_mode(s->gfp, avctx->channels > 1 ? s->joint_stereo ? JOINT_STEREO : STEREO : MONO); /* sample rate */ lame_set_in_samplerate (s->gfp, avctx->sample_rate); lame_set_out_samplerate(s->gfp, avctx->sample_rate); /* algorithmic quality */ if (avctx->compression_level == FF_COMPRESSION_DEFAULT) lame_set_quality(s->gfp, 5); else lame_set_quality(s->gfp, avctx->compression_level); /* rate control */ if (avctx->flags & CODEC_FLAG_QSCALE) { // VBR lame_set_VBR(s->gfp, vbr_default); lame_set_VBR_quality(s->gfp, avctx->global_quality / (float)FF_QP2LAMBDA); } else { if (avctx->bit_rate) { if (s->abr) { // ABR lame_set_VBR(s->gfp, vbr_abr); lame_set_VBR_mean_bitrate_kbps(s->gfp, avctx->bit_rate / 1000); } else // CBR lame_set_brate(s->gfp, avctx->bit_rate / 1000); } } /* do not get a Xing VBR header frame from LAME */ lame_set_bWriteVbrTag(s->gfp,0); /* bit reservoir usage */ lame_set_disable_reservoir(s->gfp, !s->reservoir); /* set specified parameters */ if (lame_init_params(s->gfp) < 0) { ret = -1; goto error; } /* get encoder delay */ avctx->delay = lame_get_encoder_delay(s->gfp) + 528 + 1; ff_af_queue_init(avctx, &s->afq); avctx->frame_size = lame_get_framesize(s->gfp); /* allocate float sample buffers */ if (avctx->sample_fmt == AV_SAMPLE_FMT_FLTP) { int ch; for (ch = 0; ch < avctx->channels; ch++) { s->samples_flt[ch] = av_malloc(avctx->frame_size * sizeof(*s->samples_flt[ch])); if (!s->samples_flt[ch]) { ret = AVERROR(ENOMEM); goto error; } } } ret = realloc_buffer(s); if (ret < 0) goto error; avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT); return 0; error: mp3lame_encode_close(avctx); return ret; }"} {"target": 0, "idx": 14887, "func": "static int scsi_write_data(SCSIDevice *d, uint32_t tag) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); SCSIDiskReq *r; DPRINTF(\"Write data tag=0x%x\\n\", tag); r = scsi_find_request(s, tag); if (!r) { BADF(\"Bad write tag 0x%x\\n\", tag); scsi_command_complete(r, CHECK_CONDITION, HARDWARE_ERROR); return 1; } /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); scsi_write_request(r); return 0; }"} {"target": 0, "idx": 14890, "func": "void replay_account_executed_instructions(void) { if (replay_mode == REPLAY_MODE_PLAY) { replay_mutex_lock(); if (replay_state.instructions_count > 0) { int count = (int)(replay_get_current_step() - replay_state.current_step); replay_state.instructions_count -= count; replay_state.current_step += count; if (replay_state.instructions_count == 0) { assert(replay_data_kind == EVENT_INSTRUCTION); replay_finish_event(); /* Wake up iothread. This is required because timers will not expire until clock counters will be read from the log. 
*/ qemu_notify_event(); } } replay_mutex_unlock(); } }"} {"target": 0, "idx": 14902, "func": "void qemu_co_rwlock_unlock(CoRwlock *lock) { assert(qemu_in_coroutine()); if (lock->writer) { lock->writer = false; while (!qemu_co_queue_empty(&lock->queue)) { /* * Wakeup every body. This will include some * writers too. */ qemu_co_queue_next(&lock->queue); } } else { lock->reader--; assert(lock->reader >= 0); /* Wakeup only one waiting writer */ if (!lock->reader) { qemu_co_queue_next(&lock->queue); } } }"} {"target": 0, "idx": 14918, "func": "float32 HELPER(ucf64_sf2si)(float32 x, CPUUniCore32State *env) { return ucf64_itos(float32_to_int32(x, &env->ucf64.fp_status)); }"} {"target": 0, "idx": 14933, "func": "static int pl061_init(SysBusDevice *dev, const unsigned char *id) { int iomemtype; pl061_state *s = FROM_SYSBUS(pl061_state, dev); s->id = id; iomemtype = cpu_register_io_memory(pl061_readfn, pl061_writefn, s, DEVICE_NATIVE_ENDIAN); sysbus_init_mmio(dev, 0x1000, iomemtype); sysbus_init_irq(dev, &s->irq); qdev_init_gpio_in(&dev->qdev, pl061_set_irq, 8); qdev_init_gpio_out(&dev->qdev, s->out, 8); pl061_reset(s); register_savevm(&dev->qdev, \"pl061_gpio\", -1, 1, pl061_save, pl061_load, s); return 0; }"} {"target": 0, "idx": 14939, "func": "void qemu_chr_reset(CharDriverState *s) { if (s->bh == NULL && initial_reset_issued) { s->bh = qemu_bh_new(qemu_chr_reset_bh, s); qemu_bh_schedule(s->bh); } }"} {"target": 1, "idx": 14943, "func": "static void omap_i2c_set_slave_addr(OMAPI2C *s, uint8_t addr) { uint16_t data = addr; memwrite(s->addr + OMAP_I2C_SA, &data, 2); memread(s->addr + OMAP_I2C_SA, &data, 2); g_assert_cmphex(data, ==, addr); }"} {"target": 1, "idx": 14947, "func": "static int read_packet(AVFormatContext *s, AVPacket *pkt) { MmDemuxContext *mm = s->priv_data; AVIOContext *pb = s->pb; unsigned char preamble[MM_PREAMBLE_SIZE]; unsigned int type, length; while(1) { if (avio_read(pb, preamble, MM_PREAMBLE_SIZE) != MM_PREAMBLE_SIZE) { return AVERROR(EIO); type = AV_RL16(&preamble[0]); length = AV_RL16(&preamble[2]); switch(type) { case MM_TYPE_PALETTE : case MM_TYPE_INTER : case MM_TYPE_INTRA : case MM_TYPE_INTRA_HH : case MM_TYPE_INTER_HH : case MM_TYPE_INTRA_HHV : case MM_TYPE_INTER_HHV : /* output preamble + data */ if (av_new_packet(pkt, length + MM_PREAMBLE_SIZE)) return AVERROR(ENOMEM); memcpy(pkt->data, preamble, MM_PREAMBLE_SIZE); if (avio_read(pb, pkt->data + MM_PREAMBLE_SIZE, length) != length) return AVERROR(EIO); pkt->size = length + MM_PREAMBLE_SIZE; pkt->stream_index = 0; pkt->pts = mm->video_pts; if (type!=MM_TYPE_PALETTE) mm->video_pts++; return 0; case MM_TYPE_AUDIO : if (av_get_packet(s->pb, pkt, length)<0) return AVERROR(ENOMEM); pkt->size = length; pkt->stream_index = 1; pkt->pts = mm->audio_pts++; return 0; default : av_log(s, AV_LOG_INFO, \"unknown chunk type 0x%x\\n\", type);"} {"target": 0, "idx": 14958, "func": "static int coroutine_fn qed_aio_write_cow(QEDAIOCB *acb) { BDRVQEDState *s = acb_to_s(acb); uint64_t start, len, offset; int ret; /* Populate front untouched region of new data cluster */ start = qed_start_of_cluster(s, acb->cur_pos); len = qed_offset_into_cluster(s, acb->cur_pos); trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster); ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster); if (ret < 0) { return ret; } /* Populate back untouched region of new data cluster */ start = acb->cur_pos + acb->cur_qiov.size; len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start; offset = acb->cur_cluster + 
qed_offset_into_cluster(s, acb->cur_pos) + acb->cur_qiov.size; trace_qed_aio_write_postfill(s, acb, start, len, offset); ret = qed_copy_from_backing_file(s, start, len, offset); if (ret < 0) { return ret; } ret = qed_aio_write_main(acb); if (ret < 0) { return ret; } if (s->bs->backing) { /* * Flush new data clusters before updating the L2 table * * This flush is necessary when a backing file is in use. A crash * during an allocating write could result in empty clusters in the * image. If the write only touched a subregion of the cluster, * then backing image sectors have been lost in the untouched * region. The solution is to flush after writing a new data * cluster and before updating the L2 table. */ ret = bdrv_co_flush(s->bs->file->bs); if (ret < 0) { return ret; } } return 0; }"} {"target": 0, "idx": 14962, "func": "static void end_list(Visitor *v) { StringInputVisitor *siv = to_siv(v); siv->head = true; }"} {"target": 0, "idx": 14963, "func": "static void * attribute_align_arg worker(void *v){ AVCodecContext *avctx = v; ThreadContext *c = avctx->internal->frame_thread_encoder; AVPacket *pkt = NULL; while(!c->exit){ int got_packet, ret; AVFrame *frame; Task task; if(!pkt) pkt= av_mallocz(sizeof(*pkt)); if(!pkt) continue; av_init_packet(pkt); pthread_mutex_lock(&c->task_fifo_mutex); while (av_fifo_size(c->task_fifo) <= 0 || c->exit) { if(c->exit){ pthread_mutex_unlock(&c->task_fifo_mutex); goto end; } pthread_cond_wait(&c->task_fifo_cond, &c->task_fifo_mutex); } av_fifo_generic_read(c->task_fifo, &task, sizeof(task), NULL); pthread_mutex_unlock(&c->task_fifo_mutex); frame = task.indata; ret = avcodec_encode_video2(avctx, pkt, frame, &got_packet); pthread_mutex_lock(&c->buffer_mutex); av_frame_unref(frame); pthread_mutex_unlock(&c->buffer_mutex); av_frame_free(&frame); if(got_packet) { av_dup_packet(pkt); } else { pkt->data = NULL; pkt->size = 0; } pthread_mutex_lock(&c->finished_task_mutex); c->finished_tasks[task.index].outdata = pkt; pkt = NULL; c->finished_tasks[task.index].return_code = ret; pthread_cond_signal(&c->finished_task_cond); pthread_mutex_unlock(&c->finished_task_mutex); } end: av_free(pkt); pthread_mutex_lock(&c->buffer_mutex); avcodec_close(avctx); pthread_mutex_unlock(&c->buffer_mutex); av_freep(&avctx); return NULL; }"} {"target": 0, "idx": 14965, "func": "static void do_cont(void) { vm_start(); }"} {"target": 0, "idx": 14968, "func": "void AUD_remove_card (QEMUSoundCard *card) { LIST_REMOVE (card, entries); qemu_free (card->name); }"} {"target": 0, "idx": 14971, "func": "static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index, target_phys_addr_t *nb, uint16_t leaf, int level) { PhysPageEntry *p; int i; target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS); if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) { lp->ptr = phys_map_node_alloc(); p = phys_map_nodes[lp->ptr]; if (level == 0) { for (i = 0; i < L2_SIZE; i++) { p[i].is_leaf = 1; p[i].ptr = phys_section_unassigned; } } } else { p = phys_map_nodes[lp->ptr]; } lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)]; while (*nb && lp < &p[L2_SIZE]) { if ((*index & (step - 1)) == 0 && *nb >= step) { lp->is_leaf = true; lp->ptr = leaf; *index += step; *nb -= step; } else { phys_page_set_level(lp, index, nb, leaf, level - 1); } ++lp; } }"} {"target": 0, "idx": 14976, "func": "static void tcx24_screen_dump(void *opaque, const char *filename, bool cswitch, Error **errp) { TCXState *s = opaque; FILE *f; uint8_t *d, *d1, v; uint32_t *s24, *cptr, dval; int ret, y, x; f = fopen(filename, 
\"wb\"); if (!f) { error_setg(errp, \"failed to open file '%s': %s\", filename, strerror(errno)); return; } ret = fprintf(f, \"P6\\n%d %d\\n%d\\n\", s->width, s->height, 255); if (ret < 0) { goto write_err; } d1 = s->vram; s24 = s->vram24; cptr = s->cplane; for(y = 0; y < s->height; y++) { d = d1; for(x = 0; x < s->width; x++, d++, s24++) { if ((*cptr++ & 0xff000000) == 0x03000000) { // 24-bit direct dval = *s24 & 0x00ffffff; ret = fputc((dval >> 16) & 0xff, f); if (ret == EOF) { goto write_err; } ret = fputc((dval >> 8) & 0xff, f); if (ret == EOF) { goto write_err; } ret = fputc(dval & 0xff, f); if (ret == EOF) { goto write_err; } } else { v = *d; ret = fputc(s->r[v], f); if (ret == EOF) { goto write_err; } ret = fputc(s->g[v], f); if (ret == EOF) { goto write_err; } ret = fputc(s->b[v], f); if (ret == EOF) { goto write_err; } } } d1 += MAXX; } out: fclose(f); return; write_err: error_setg(errp, \"failed to write to file '%s': %s\", filename, strerror(errno)); unlink(filename); goto out; }"} {"target": 0, "idx": 14980, "func": "static int qcow2_open(BlockDriverState *bs, int flags) { BDRVQcowState *s = bs->opaque; int len, i, ret = 0; QCowHeader header; uint64_t ext_end; ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); if (ret < 0) { goto fail; } be32_to_cpus(&header.magic); be32_to_cpus(&header.version); be64_to_cpus(&header.backing_file_offset); be32_to_cpus(&header.backing_file_size); be64_to_cpus(&header.size); be32_to_cpus(&header.cluster_bits); be32_to_cpus(&header.crypt_method); be64_to_cpus(&header.l1_table_offset); be32_to_cpus(&header.l1_size); be64_to_cpus(&header.refcount_table_offset); be32_to_cpus(&header.refcount_table_clusters); be64_to_cpus(&header.snapshots_offset); be32_to_cpus(&header.nb_snapshots); if (header.magic != QCOW_MAGIC) { ret = -EINVAL; goto fail; } if (header.version < 2 || header.version > 3) { report_unsupported(bs, \"QCOW version %d\", header.version); ret = -ENOTSUP; goto fail; } s->qcow_version = header.version; /* Initialise version 3 header fields */ if (header.version == 2) { header.incompatible_features = 0; header.compatible_features = 0; header.autoclear_features = 0; header.refcount_order = 4; header.header_length = 72; } else { be64_to_cpus(&header.incompatible_features); be64_to_cpus(&header.compatible_features); be64_to_cpus(&header.autoclear_features); be32_to_cpus(&header.refcount_order); be32_to_cpus(&header.header_length); } if (header.header_length > sizeof(header)) { s->unknown_header_fields_size = header.header_length - sizeof(header); s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, s->unknown_header_fields_size); if (ret < 0) { goto fail; } } if (header.backing_file_offset) { ext_end = header.backing_file_offset; } else { ext_end = 1 << header.cluster_bits; } /* Handle feature bits */ s->incompatible_features = header.incompatible_features; s->compatible_features = header.compatible_features; s->autoclear_features = header.autoclear_features; if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { void *feature_table = NULL; qcow2_read_extensions(bs, header.header_length, ext_end, &feature_table); report_unsupported_feature(bs, feature_table, s->incompatible_features & ~QCOW2_INCOMPAT_MASK); ret = -ENOTSUP; goto fail; } /* Check support for various header values */ if (header.refcount_order != 4) { report_unsupported(bs, \"%d bit reference counts\", 1 << header.refcount_order); ret = -ENOTSUP; goto fail; } if (header.cluster_bits < 
MIN_CLUSTER_BITS || header.cluster_bits > MAX_CLUSTER_BITS) { ret = -EINVAL; goto fail; } if (header.crypt_method > QCOW_CRYPT_AES) { ret = -EINVAL; goto fail; } s->crypt_method_header = header.crypt_method; if (s->crypt_method_header) { bs->encrypted = 1; } s->cluster_bits = header.cluster_bits; s->cluster_size = 1 << s->cluster_bits; s->cluster_sectors = 1 << (s->cluster_bits - 9); s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */ s->l2_size = 1 << s->l2_bits; bs->total_sectors = header.size / 512; s->csize_shift = (62 - (s->cluster_bits - 8)); s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; s->cluster_offset_mask = (1LL << s->csize_shift) - 1; s->refcount_table_offset = header.refcount_table_offset; s->refcount_table_size = header.refcount_table_clusters << (s->cluster_bits - 3); s->snapshots_offset = header.snapshots_offset; s->nb_snapshots = header.nb_snapshots; /* read the level 1 table */ s->l1_size = header.l1_size; s->l1_vm_state_index = size_to_l1(s, header.size); /* the L1 table must contain at least enough entries to put header.size bytes */ if (s->l1_size < s->l1_vm_state_index) { ret = -EINVAL; goto fail; } s->l1_table_offset = header.l1_table_offset; if (s->l1_size > 0) { s->l1_table = g_malloc0( align_offset(s->l1_size * sizeof(uint64_t), 512)); ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)); if (ret < 0) { goto fail; } for(i = 0;i < s->l1_size; i++) { be64_to_cpus(&s->l1_table[i]); } } /* alloc L2 table/refcount block cache */ s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE); s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE); s->cluster_cache = g_malloc(s->cluster_size); /* one more sector for decompressed data alignment */ s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512); s->cluster_cache_offset = -1; s->flags = flags; ret = qcow2_refcount_init(bs); if (ret != 0) { goto fail; } QLIST_INIT(&s->cluster_allocs); /* read qcow2 extensions */ if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL)) { ret = -EINVAL; goto fail; } /* read the backing file name */ if (header.backing_file_offset != 0) { len = header.backing_file_size; if (len > 1023) { len = 1023; } ret = bdrv_pread(bs->file, header.backing_file_offset, bs->backing_file, len); if (ret < 0) { goto fail; } bs->backing_file[len] = '\\0'; } ret = qcow2_read_snapshots(bs); if (ret < 0) { goto fail; } /* Clear unknown autoclear feature bits */ if (!bs->read_only && s->autoclear_features != 0) { s->autoclear_features = 0; ret = qcow2_update_header(bs); if (ret < 0) { goto fail; } } /* Initialise locks */ qemu_co_mutex_init(&s->lock); /* Repair image if dirty */ if ((s->incompatible_features & QCOW2_INCOMPAT_DIRTY) && !bs->read_only) { BdrvCheckResult result = {0}; ret = qcow2_check_refcounts(bs, &result, BDRV_FIX_ERRORS); if (ret < 0) { goto fail; } ret = qcow2_mark_clean(bs); if (ret < 0) { goto fail; } } #ifdef DEBUG_ALLOC { BdrvCheckResult result = {0}; qcow2_check_refcounts(bs, &result, 0); } #endif return ret; fail: g_free(s->unknown_header_fields); cleanup_unknown_header_ext(bs); qcow2_free_snapshots(bs); qcow2_refcount_close(bs); g_free(s->l1_table); if (s->l2_table_cache) { qcow2_cache_destroy(bs, s->l2_table_cache); } g_free(s->cluster_cache); qemu_vfree(s->cluster_data); return ret; }"} {"target": 0, "idx": 14985, "func": "int ff_audio_mix_set_matrix(AudioMix *am, const double *matrix, int stride) { int i, o; if ( am->in_channels <= 0 || am->in_channels > 
AVRESAMPLE_MAX_CHANNELS || am->out_channels <= 0 || am->out_channels > AVRESAMPLE_MAX_CHANNELS) { av_log(am, AV_LOG_ERROR, \"Invalid channel counts\\n\"); return AVERROR(EINVAL); } if (am->matrix) { av_free(am->matrix[0]); am->matrix = NULL; } #define CONVERT_MATRIX(type, expr) \\ am->matrix_## type[0] = av_mallocz(am->out_channels * am->in_channels * \\ sizeof(*am->matrix_## type[0])); \\ if (!am->matrix_## type[0]) \\ return AVERROR(ENOMEM); \\ for (o = 0; o < am->out_channels; o++) { \\ if (o > 0) \\ am->matrix_## type[o] = am->matrix_## type[o - 1] + \\ am->in_channels; \\ for (i = 0; i < am->in_channels; i++) { \\ double v = matrix[o * stride + i]; \\ am->matrix_## type[o][i] = expr; \\ } \\ } \\ am->matrix = (void **)am->matrix_## type; switch (am->coeff_type) { case AV_MIX_COEFF_TYPE_Q8: CONVERT_MATRIX(q8, av_clip_int16(lrint(256.0 * v))) break; case AV_MIX_COEFF_TYPE_Q15: CONVERT_MATRIX(q15, av_clipl_int32(llrint(32768.0 * v))) break; case AV_MIX_COEFF_TYPE_FLT: CONVERT_MATRIX(flt, v) break; default: av_log(am, AV_LOG_ERROR, \"Invalid mix coeff type\\n\"); return AVERROR(EINVAL); } /* TODO: detect situations where we can just swap around pointers instead of doing matrix multiplications with 0.0 and 1.0 */ return 0; }"} {"target": 0, "idx": 14988, "func": "static int queue_attached_pictures(AVFormatContext *s) { int i; for (i = 0; i < s->nb_streams; i++) if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC && s->streams[i]->discard < AVDISCARD_ALL) { AVPacket copy = s->streams[i]->attached_pic; copy.buf = av_buffer_ref(copy.buf); if (!copy.buf) return AVERROR(ENOMEM); add_to_pktbuf(&s->internal->raw_packet_buffer, ©, &s->internal->raw_packet_buffer_end); } return 0; }"} {"target": 1, "idx": 15000, "func": "AioContext *aio_context_new(Error **errp) { int ret; AioContext *ctx; ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext)); aio_context_setup(ctx); ret = event_notifier_init(&ctx->notifier, false); if (ret < 0) { error_setg_errno(errp, -ret, \"Failed to initialize event notifier\"); goto fail; } g_source_set_can_recurse(&ctx->source, true); aio_set_event_notifier(ctx, &ctx->notifier, false, (EventNotifierHandler *) event_notifier_dummy_cb, event_notifier_poll); #ifdef CONFIG_LINUX_AIO ctx->linux_aio = NULL; #endif ctx->thread_pool = NULL; qemu_mutex_init(&ctx->bh_lock); qemu_rec_mutex_init(&ctx->lock); timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx); ctx->poll_max_ns = 0; return ctx; fail: g_source_destroy(&ctx->source); return NULL; }"} {"target": 0, "idx": 15022, "func": "static void opt_frame_pad_top(const char *arg) { frame_padtop = atoi(arg); if (frame_padtop < 0) { fprintf(stderr, \"Incorrect top pad size\\n\"); av_exit(1); } }"} {"target": 0, "idx": 15023, "func": "static void chr_event(void *opaque, int event) { Terminal3270 *t = opaque; CcwDevice *ccw_dev = CCW_DEVICE(t); SubchDev *sch = ccw_dev->sch; /* Ensure the initial status correct, always reset them. */ t->in_len = 0; t->out_len = 0; t->handshake_done = false; if (t->timer_tag) { g_source_remove(t->timer_tag); t->timer_tag = 0; } switch (event) { case CHR_EVENT_OPENED: /* * 3270 does handshake firstly by the negotiate options in * char-socket.c. Once qemu receives the terminal-type of the * client, mark handshake done and trigger everything rolling again. 
*/ t->timer_tag = g_timeout_add_seconds(600, send_timing_mark_cb, t); break; case CHR_EVENT_CLOSED: sch->curr_status.scsw.dstat = SCSW_DSTAT_DEVICE_END; css_conditional_io_interrupt(sch); break; } }"} {"target": 0, "idx": 15026, "func": "static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { if (facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RRE, LLGCR, dest, src); return; } if (dest == src) { tcg_out_movi(s, type, TCG_TMP0, 0xff); src = TCG_TMP0; } else { tcg_out_movi(s, type, dest, 0xff); } if (type == TCG_TYPE_I32) { tcg_out_insn(s, RR, NR, dest, src); } else { tcg_out_insn(s, RRE, NGR, dest, src); } }"} {"target": 0, "idx": 15027, "func": "static bool esp_mem_accepts(void *opaque, target_phys_addr_t addr, unsigned size, bool is_write) { return (size == 1) || (is_write && size == 4); }"} {"target": 0, "idx": 15041, "func": "SDState *sd_init(BlockBackend *blk, bool is_spi) { SDState *sd; if (blk && blk_is_read_only(blk)) { fprintf(stderr, \"sd_init: Cannot use read-only drive\\n\"); return NULL; } sd = (SDState *) g_malloc0(sizeof(SDState)); sd->buf = blk_blockalign(blk, 512); sd->spi = is_spi; sd->enable = true; sd_reset(sd, blk); if (sd->blk) { blk_attach_dev_nofail(sd->blk, sd); blk_set_dev_ops(sd->blk, &sd_block_ops, sd); } vmstate_register(NULL, -1, &sd_vmstate, sd); return sd; }"} {"target": 0, "idx": 15059, "func": "static uint64_t *store_bitmap_data(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, uint32_t *bitmap_table_size, Error **errp) { int ret; BDRVQcow2State *s = bs->opaque; int64_t sector; uint64_t limit, sbc; uint64_t bm_size = bdrv_dirty_bitmap_size(bitmap); uint64_t bm_sectors = DIV_ROUND_UP(bm_size, BDRV_SECTOR_SIZE); const char *bm_name = bdrv_dirty_bitmap_name(bitmap); uint8_t *buf = NULL; BdrvDirtyBitmapIter *dbi; uint64_t *tb; uint64_t tb_size = size_to_clusters(s, bdrv_dirty_bitmap_serialization_size(bitmap, 0, bm_size)); if (tb_size > BME_MAX_TABLE_SIZE || tb_size * s->cluster_size > BME_MAX_PHYS_SIZE) { error_setg(errp, \"Bitmap '%s' is too big\", bm_name); return NULL; } tb = g_try_new0(uint64_t, tb_size); if (tb == NULL) { error_setg(errp, \"No memory\"); return NULL; } dbi = bdrv_dirty_iter_new(bitmap); buf = g_malloc(s->cluster_size); limit = bytes_covered_by_bitmap_cluster(s, bitmap); sbc = limit >> BDRV_SECTOR_BITS; assert(DIV_ROUND_UP(bm_size, limit) == tb_size); while ((sector = bdrv_dirty_iter_next(dbi) >> BDRV_SECTOR_BITS) >= 0) { uint64_t cluster = sector / sbc; uint64_t end, write_size; int64_t off; sector = cluster * sbc; end = MIN(bm_sectors, sector + sbc); write_size = bdrv_dirty_bitmap_serialization_size(bitmap, sector * BDRV_SECTOR_SIZE, (end - sector) * BDRV_SECTOR_SIZE); assert(write_size <= s->cluster_size); off = qcow2_alloc_clusters(bs, s->cluster_size); if (off < 0) { error_setg_errno(errp, -off, \"Failed to allocate clusters for bitmap '%s'\", bm_name); goto fail; } tb[cluster] = off; bdrv_dirty_bitmap_serialize_part(bitmap, buf, sector * BDRV_SECTOR_SIZE, (end - sector) * BDRV_SECTOR_SIZE); if (write_size < s->cluster_size) { memset(buf + write_size, 0, s->cluster_size - write_size); } ret = qcow2_pre_write_overlap_check(bs, 0, off, s->cluster_size); if (ret < 0) { error_setg_errno(errp, -ret, \"Qcow2 overlap check failed\"); goto fail; } ret = bdrv_pwrite(bs->file, off, buf, s->cluster_size); if (ret < 0) { error_setg_errno(errp, -ret, \"Failed to write bitmap '%s' to file\", bm_name); goto fail; } if (end >= bm_sectors) { break; } bdrv_set_dirty_iter(dbi, end * BDRV_SECTOR_SIZE); } *bitmap_table_size = tb_size; 
g_free(buf); bdrv_dirty_iter_free(dbi); return tb; fail: clear_bitmap_table(bs, tb, tb_size); g_free(buf); bdrv_dirty_iter_free(dbi); g_free(tb); return NULL; }"} {"target": 0, "idx": 15069, "func": "iscsi_synccache10_cb(struct iscsi_context *iscsi, int status, void *command_data, void *opaque) { IscsiAIOCB *acb = opaque; if (acb->canceled != 0) { return; } acb->status = 0; if (status != 0) { if (status == SCSI_STATUS_CHECK_CONDITION && acb->task->sense.key == SCSI_SENSE_UNIT_ATTENTION && acb->retries-- > 0) { if (acb->task != NULL) { scsi_free_scsi_task(acb->task); acb->task = NULL; } if (iscsi_aio_flush_acb(acb) == 0) { iscsi_set_events(acb->iscsilun); return; } } error_report(\"Failed to sync10 data on iSCSI lun. %s\", iscsi_get_error(iscsi)); acb->status = -EIO; } iscsi_schedule_bh(acb); }"} {"target": 0, "idx": 15086, "func": "static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config) { VirtIONet *n = VIRTIO_NET(vdev); struct virtio_net_config netcfg = {}; memcpy(&netcfg, config, n->config_size); if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) && memcmp(netcfg.mac, n->mac, ETH_ALEN)) { memcpy(n->mac, netcfg.mac, ETH_ALEN); qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); } }"} {"target": 1, "idx": 15116, "func": "static void set_frame_data(MIContext *mi_ctx, int alpha, AVFrame *avf_out) { int x, y, plane; for (plane = 0; plane < mi_ctx->nb_planes; plane++) { int width = avf_out->width; int height = avf_out->height; int chroma = plane == 1 || plane == 2; for (y = 0; y < height; y++) for (x = 0; x < width; x++) { int x_mv, y_mv; int weight_sum = 0; int i, val = 0; Pixel *pixel = &mi_ctx->pixels[x + y * avf_out->width]; for (i = 0; i < pixel->nb; i++) weight_sum += pixel->weights[i]; if (!weight_sum || !pixel->nb) { pixel->weights[0] = ALPHA_MAX - alpha; pixel->refs[0] = 1; pixel->mvs[0][0] = 0; pixel->mvs[0][1] = 0; pixel->weights[1] = alpha; pixel->refs[1] = 2; pixel->mvs[1][0] = 0; pixel->mvs[1][1] = 0; pixel->nb = 2; weight_sum = ALPHA_MAX; } for (i = 0; i < pixel->nb; i++) { Frame *frame = &mi_ctx->frames[pixel->refs[i]]; if (chroma) { x_mv = (x >> mi_ctx->chroma_h_shift) + (pixel->mvs[i][0] >> mi_ctx->chroma_h_shift); y_mv = (y >> mi_ctx->chroma_v_shift) + (pixel->mvs[i][1] >> mi_ctx->chroma_v_shift); } else { x_mv = x + pixel->mvs[i][0]; y_mv = y + pixel->mvs[i][1]; } val += pixel->weights[i] * frame->avf->data[plane][x_mv + y_mv * frame->avf->linesize[plane]]; } val = ROUNDED_DIV(val, weight_sum); if (chroma) avf_out->data[plane][(x >> mi_ctx->chroma_h_shift) + (y >> mi_ctx->chroma_v_shift) * avf_out->linesize[plane]] = val; else avf_out->data[plane][x + y * avf_out->linesize[plane]] = val; } } }"} {"target": 1, "idx": 15130, "func": "void trace_init_vcpu_events(void) { TraceEvent *ev = NULL; while ((ev = trace_event_pattern(\"*\", ev)) != NULL) { if (trace_event_is_vcpu(ev) && trace_event_get_state_static(ev) && trace_event_get_state_dynamic(ev)) { TraceEventID id = trace_event_get_id(ev); /* check preconditions */ assert(trace_events_dstate[id] == 1); /* disable early-init state ... */ trace_events_dstate[id] = 0; trace_events_enabled_count--; /* ... 
and properly re-enable */ trace_event_set_state_dynamic(ev, true); } } }"} {"target": 1, "idx": 15134, "func": "static void svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane, int width, int height, int src_stride, int stride) { int x, y; int i; int block_width, block_height; int level; int threshold[6]; const int lambda= (s->picture.quality*s->picture.quality) >> (2*FF_LAMBDA_SHIFT); /* figure out the acceptable level thresholds in advance */ threshold[5] = QUALITY_THRESHOLD; for (level = 4; level >= 0; level--) threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER; block_width = (width + 15) / 16; block_height = (height + 15) / 16; if(s->picture.pict_type == P_TYPE){ s->m.avctx= s->avctx; s->m.current_picture_ptr= &s->m.current_picture; s->m.last_picture_ptr = &s->m.last_picture; s->m.last_picture.data[0]= ref_plane; s->m.linesize= s->m.last_picture.linesize[0]= s->m.new_picture.linesize[0]= s->m.current_picture.linesize[0]= stride; s->m.width= width; s->m.height= height; s->m.mb_width= block_width; s->m.mb_height= block_height; s->m.mb_stride= s->m.mb_width+1; s->m.b8_stride= 2*s->m.mb_width+1; s->m.f_code=1; s->m.pict_type= s->picture.pict_type; s->m.qscale= s->picture.quality/FF_QP2LAMBDA; s->m.me_method= s->avctx->me_method; if(!s->motion_val8[plane]){ s->motion_val8 [plane]= av_mallocz((s->m.b8_stride*block_height*2 + 2)*2*sizeof(int16_t)); s->motion_val16[plane]= av_mallocz((s->m.mb_stride*(block_height + 2) + 1)*2*sizeof(int16_t)); } s->m.mb_type= s->mb_type; //dummies, to avoid segfaults s->m.current_picture.mb_mean= (uint8_t *)s->dummy; s->m.current_picture.mb_var= (uint16_t*)s->dummy; s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy; s->m.current_picture.mb_type= s->dummy; s->m.current_picture.motion_val[0]= s->motion_val8[plane] + 2; s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1; s->m.dsp= s->dsp; //move ff_init_me(&s->m); s->m.me.dia_size= s->avctx->dia_size; s->m.first_slice_line=1; for (y = 0; y < block_height; y++) { uint8_t src[stride*16]; s->m.new_picture.data[0]= src - y*16*stride; //ugly s->m.mb_y= y; for(i=0; i<16 && i + 16*ym.mb_x= x; ff_init_block_index(&s->m); ff_update_block_index(&s->m); ff_estimate_p_frame_motion(&s->m, x, y); } s->m.first_slice_line=0; } ff_fix_long_p_mvs(&s->m); ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code, CANDIDATE_MB_TYPE_INTER, 0); } s->m.first_slice_line=1; for (y = 0; y < block_height; y++) { uint8_t src[stride*16]; for(i=0; i<16 && i + 16*ym.mb_y= y; for (x = 0; x < block_width; x++) { uint8_t reorder_buffer[3][6][7*32]; int count[3][6]; int offset = y * 16 * stride + x * 16; uint8_t *decoded= decoded_plane + offset; uint8_t *ref= ref_plane + offset; int score[4]={0,0,0,0}, best; uint8_t temp[16*stride]; s->m.mb_x= x; ff_init_block_index(&s->m); ff_update_block_index(&s->m); if(s->picture.pict_type == I_TYPE || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){ for(i=0; i<6; i++){ init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], 7*32); } if(s->picture.pict_type == P_TYPE){ const uint8_t *vlc= svq1_block_type_vlc[SVQ1_BLOCK_INTRA]; put_bits(&s->reorder_pb[5], vlc[1], vlc[0]); score[0]= vlc[1]*lambda; } score[0]+= encode_block(s, src+16*x, NULL, temp, stride, 5, 64, lambda, 1); for(i=0; i<6; i++){ count[0][i]= put_bits_count(&s->reorder_pb[i]); flush_put_bits(&s->reorder_pb[i]); } }else score[0]= INT_MAX; best=0; if(s->picture.pict_type == P_TYPE){ const uint8_t *vlc= 
svq1_block_type_vlc[SVQ1_BLOCK_INTER]; int mx, my, pred_x, pred_y, dxy; int16_t *motion_ptr; motion_ptr= h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y); if(s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTER){ for(i=0; i<6; i++) init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7*32); put_bits(&s->reorder_pb[5], vlc[1], vlc[0]); s->m.pb= s->reorder_pb[5]; mx= motion_ptr[0]; my= motion_ptr[1]; assert(mx>=-32 && mx<=31); assert(my>=-32 && my<=31); assert(pred_x>=-32 && pred_x<=31); assert(pred_y>=-32 && pred_y<=31); ff_h263_encode_motion(&s->m, mx - pred_x, 1); ff_h263_encode_motion(&s->m, my - pred_y, 1); s->reorder_pb[5]= s->m.pb; score[1] += lambda*put_bits_count(&s->reorder_pb[5]); dxy= (mx&1) + 2*(my&1); s->dsp.put_pixels_tab[0][dxy](temp+16, ref + (mx>>1) + stride*(my>>1), stride, 16); score[1]+= encode_block(s, src+16*x, temp+16, decoded, stride, 5, 64, lambda, 0); best= score[1] <= score[0]; vlc= svq1_block_type_vlc[SVQ1_BLOCK_SKIP]; score[2]= s->dsp.sse[0](NULL, src+16*x, ref, stride, 16); score[2]+= vlc[1]*lambda; if(score[2] < score[best] && mx==0 && my==0){ best=2; s->dsp.put_pixels_tab[0][0](decoded, ref, stride, 16); for(i=0; i<6; i++){ count[2][i]=0; } put_bits(&s->pb, vlc[1], vlc[0]); } } if(best==1){ for(i=0; i<6; i++){ count[1][i]= put_bits_count(&s->reorder_pb[i]); flush_put_bits(&s->reorder_pb[i]); } }else{ motion_ptr[0 ] = motion_ptr[1 ]= motion_ptr[2 ] = motion_ptr[3 ]= motion_ptr[0+2*s->m.b8_stride] = motion_ptr[1+2*s->m.b8_stride]= motion_ptr[2+2*s->m.b8_stride] = motion_ptr[3+2*s->m.b8_stride]=0; } } s->rd_total += score[best]; for(i=5; i>=0; i--){ ff_copy_bits(&s->pb, reorder_buffer[best][i], count[best][i]); } if(best==0){ s->dsp.put_pixels_tab[0][0](decoded, temp, stride, 16); } } s->m.first_slice_line=0; } }"} {"target": 1, "idx": 15159, "func": "static int smka_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { SmackerAudioContext *s = avctx->priv_data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; GetBitContext gb; HuffContext h[4] = { { 0 } }; VLC vlc[4] = { { 0 } }; int16_t *samples; uint8_t *samples8; int val; int i, res, ret; int unp_size; int bits, stereo; int pred[2] = {0, 0}; if (buf_size <= 4) { av_log(avctx, AV_LOG_ERROR, \"packet is too small\\n\"); return AVERROR(EINVAL); unp_size = AV_RL32(buf); init_get_bits(&gb, buf + 4, (buf_size - 4) * 8); if(!get_bits1(&gb)){ av_log(avctx, AV_LOG_INFO, \"Sound: no data\\n\"); *got_frame_ptr = 0; return 1; stereo = get_bits1(&gb); bits = get_bits1(&gb); if (stereo ^ (avctx->channels != 1)) { av_log(avctx, AV_LOG_ERROR, \"channels mismatch\\n\"); return AVERROR(EINVAL); if (bits && avctx->sample_fmt == AV_SAMPLE_FMT_U8) { av_log(avctx, AV_LOG_ERROR, \"sample format mismatch\\n\"); return AVERROR(EINVAL); /* get output buffer */ s->frame.nb_samples = unp_size / (avctx->channels * (bits + 1)); if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; samples = (int16_t *)s->frame.data[0]; samples8 = s->frame.data[0]; // Initialize for(i = 0; i < (1 << (bits + stereo)); i++) { h[i].length = 256; h[i].maxlength = 0; h[i].current = 0; h[i].bits = av_mallocz(256 * 4); h[i].lengths = av_mallocz(256 * sizeof(int)); h[i].values = av_mallocz(256 * sizeof(int)); skip_bits1(&gb); smacker_decode_tree(&gb, &h[i], 0, 0); skip_bits1(&gb); if(h[i].current > 1) { res = init_vlc(&vlc[i], SMKTREE_BITS, h[i].length, h[i].lengths, sizeof(int), sizeof(int), h[i].bits, sizeof(uint32_t), sizeof(uint32_t), 
INIT_VLC_LE); if(res < 0) { av_log(avctx, AV_LOG_ERROR, \"Cannot build VLC table\\n\"); if(bits) { //decode 16-bit data for(i = stereo; i >= 0; i--) pred[i] = sign_extend(av_bswap16(get_bits(&gb, 16)), 16); for(i = 0; i <= stereo; i++) *samples++ = pred[i]; for(; i < unp_size / 2; i++) { if(get_bits_left(&gb)<0) if(i & stereo) { if(vlc[2].table) res = get_vlc2(&gb, vlc[2].table, SMKTREE_BITS, 3); else res = 0; if (res < 0) { av_log(avctx, AV_LOG_ERROR, \"invalid vlc\\n\"); val = h[2].values[res]; if(vlc[3].table) res = get_vlc2(&gb, vlc[3].table, SMKTREE_BITS, 3); else res = 0; if (res < 0) { av_log(avctx, AV_LOG_ERROR, \"invalid vlc\\n\"); val |= h[3].values[res] << 8; pred[1] += sign_extend(val, 16); *samples++ = av_clip_int16(pred[1]); } else { if(vlc[0].table) res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3); else res = 0; if (res < 0) { av_log(avctx, AV_LOG_ERROR, \"invalid vlc\\n\"); val = h[0].values[res]; if(vlc[1].table) res = get_vlc2(&gb, vlc[1].table, SMKTREE_BITS, 3); else res = 0; if (res < 0) { av_log(avctx, AV_LOG_ERROR, \"invalid vlc\\n\"); val |= h[1].values[res] << 8; pred[0] += sign_extend(val, 16); *samples++ = av_clip_int16(pred[0]); } else { //8-bit data for(i = stereo; i >= 0; i--) pred[i] = get_bits(&gb, 8); for(i = 0; i <= stereo; i++) *samples8++ = pred[i]; for(; i < unp_size; i++) { if(get_bits_left(&gb)<0) if(i & stereo){ if(vlc[1].table) res = get_vlc2(&gb, vlc[1].table, SMKTREE_BITS, 3); else res = 0; if (res < 0) { av_log(avctx, AV_LOG_ERROR, \"invalid vlc\\n\"); pred[1] += sign_extend(h[1].values[res], 8); *samples8++ = av_clip_uint8(pred[1]); } else { if(vlc[0].table) res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3); else res = 0; if (res < 0) { av_log(avctx, AV_LOG_ERROR, \"invalid vlc\\n\"); pred[0] += sign_extend(h[0].values[res], 8); *samples8++ = av_clip_uint8(pred[0]); for(i = 0; i < 4; i++) { if(vlc[i].table) ff_free_vlc(&vlc[i]); av_free(h[i].bits); av_free(h[i].lengths); av_free(h[i].values); *got_frame_ptr = 1; *(AVFrame *)data = s->frame; return buf_size;"} {"target": 0, "idx": 15163, "func": "static int decode_rle(AVCodecContext *avctx, AVSubtitle *sub, const uint8_t *buf, unsigned int buf_size) { const uint8_t *rle_bitmap_end; int pixel_count, line_count; rle_bitmap_end = buf + buf_size; sub->rects[0]->pict.data[0] = av_malloc(sub->rects[0]->w * sub->rects[0]->h); if (!sub->rects[0]->pict.data[0]) return -1; pixel_count = 0; line_count = 0; while (buf < rle_bitmap_end && line_count < sub->rects[0]->h) { uint8_t flags, color; int run; color = bytestream_get_byte(&buf); run = 1; if (color == 0x00) { flags = bytestream_get_byte(&buf); run = flags & 0x3f; if (flags & 0x40) run = (run << 8) + bytestream_get_byte(&buf); color = flags & 0x80 ? bytestream_get_byte(&buf) : 0; } if (run > 0 && pixel_count + run <= sub->rects[0]->w * sub->rects[0]->h) { memset(sub->rects[0]->pict.data[0] + pixel_count, color, run); pixel_count += run; } else if (!run) { /* * New Line. Check if correct pixels decoded, if not display warning * and adjust bitmap pointer to correct new line position. 
*/ if (pixel_count % sub->rects[0]->w > 0) av_log(avctx, AV_LOG_ERROR, \"Decoded %d pixels, when line should be %d pixels\\n\", pixel_count % sub->rects[0]->w, sub->rects[0]->w); line_count++; } } if (pixel_count < sub->rects[0]->w * sub->rects[0]->h) { av_log(avctx, AV_LOG_ERROR, \"Insufficient RLE data for subtitle\\n\"); return -1; } av_dlog(avctx, \"Pixel Count = %d, Area = %d\\n\", pixel_count, sub->rects[0]->w * sub->rects[0]->h); return 0; }"} {"target": 0, "idx": 15172, "func": "static int scsi_disk_initfn(SCSIDevice *dev) { SCSIDriveKind kind; if (!dev->conf.bs) { kind = SCSI_HD; /* will die in scsi_initfn() */ } else { kind = bdrv_get_type_hint(dev->conf.bs) == BDRV_TYPE_CDROM ? SCSI_CD : SCSI_HD; } return scsi_initfn(dev, kind); }"} {"target": 0, "idx": 15176, "func": "static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { BdrvTrackedRequest req; /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */ uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment); uint8_t *head_buf = NULL; uint8_t *tail_buf = NULL; QEMUIOVector local_qiov; bool use_local_qiov = false; int ret; if (!bs->drv) { return -ENOMEDIUM; } if (bs->read_only) { return -EACCES; } ret = bdrv_check_byte_request(bs, offset, bytes); if (ret < 0) { return ret; } /* throttling disk I/O */ if (bs->io_limits_enabled) { bdrv_io_limits_intercept(bs, bytes, true); } /* * Align write if necessary by performing a read-modify-write cycle. * Pad qiov with the read parts and be sure to have a tracked request not * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle. */ tracked_request_begin(&req, bs, offset, bytes, true); if (offset & (align - 1)) { QEMUIOVector head_qiov; struct iovec head_iov; mark_request_serialising(&req, align); wait_serialising_requests(&req); head_buf = qemu_blockalign(bs, align); head_iov = (struct iovec) { .iov_base = head_buf, .iov_len = align, }; qemu_iovec_init_external(&head_qiov, &head_iov, 1); BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD); ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align, align, &head_qiov, 0); if (ret < 0) { goto fail; } BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD); qemu_iovec_init(&local_qiov, qiov->niov + 2); qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); use_local_qiov = true; bytes += offset & (align - 1); offset = offset & ~(align - 1); } if ((offset + bytes) & (align - 1)) { QEMUIOVector tail_qiov; struct iovec tail_iov; size_t tail_bytes; bool waited; mark_request_serialising(&req, align); waited = wait_serialising_requests(&req); assert(!waited || !use_local_qiov); tail_buf = qemu_blockalign(bs, align); tail_iov = (struct iovec) { .iov_base = tail_buf, .iov_len = align, }; qemu_iovec_init_external(&tail_qiov, &tail_iov, 1); BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL); ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align, align, &tail_qiov, 0); if (ret < 0) { goto fail; } BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL); if (!use_local_qiov) { qemu_iovec_init(&local_qiov, qiov->niov + 1); qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); use_local_qiov = true; } tail_bytes = (offset + bytes) & (align - 1); qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes); bytes = ROUND_UP(bytes, align); } ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, use_local_qiov ? 
&local_qiov : qiov, flags); fail: tracked_request_end(&req); if (use_local_qiov) { qemu_iovec_destroy(&local_qiov); } qemu_vfree(head_buf); qemu_vfree(tail_buf); return ret; }"} {"target": 0, "idx": 15179, "func": "static int cloop_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors) { BDRVCloopState *s = bs->opaque; int i; for (i = 0; i < nb_sectors; i++) { uint32_t sector_offset_in_block = ((sector_num + i) % s->sectors_per_block), block_num = (sector_num + i) / s->sectors_per_block; if (cloop_read_block(bs, block_num) != 0) { return -1; } memcpy(buf + i * 512, s->uncompressed_block + sector_offset_in_block * 512, 512); } return 0; }"} {"target": 0, "idx": 15183, "func": "ram_addr_t ppc4xx_sdram_adjust(ram_addr_t ram_size, int nr_banks, target_phys_addr_t ram_bases[], target_phys_addr_t ram_sizes[], const unsigned int sdram_bank_sizes[]) { ram_addr_t size_left = ram_size; int i; int j; for (i = 0; i < nr_banks; i++) { for (j = 0; sdram_bank_sizes[j] != 0; j++) { unsigned int bank_size = sdram_bank_sizes[j]; if (bank_size <= size_left) { char name[32]; snprintf(name, sizeof(name), \"ppc4xx.sdram%d\", i); ram_bases[i] = qemu_ram_alloc(NULL, name, bank_size); ram_sizes[i] = bank_size; size_left -= bank_size; break; } } if (!size_left) { /* No need to use the remaining banks. */ break; } } ram_size -= size_left; if (size_left) printf(\"Truncating memory to %d MiB to fit SDRAM controller limits.\\n\", (int)(ram_size >> 20)); return ram_size; }"} {"target": 0, "idx": 15187, "func": "static inline int check_fit(tcg_target_long val, unsigned int bits) { return ((val << ((sizeof(tcg_target_long) * 8 - bits)) >> (sizeof(tcg_target_long) * 8 - bits)) == val); }"} {"target": 0, "idx": 15191, "func": "void *qemu_memalign(size_t alignment, size_t size) { return VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); }"} {"target": 1, "idx": 15198, "func": "static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req) { VirtIODevice *vdev = (VirtIODevice *)s; uint32_t type; int r = 0; if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0, &type, sizeof(type)) < sizeof(type)) { virtio_scsi_bad_req(); return; } virtio_tswap32s(vdev, &type); if (type == VIRTIO_SCSI_T_TMF) { if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq), sizeof(VirtIOSCSICtrlTMFResp)) < 0) { virtio_scsi_bad_req(); } else { r = virtio_scsi_do_tmf(s, req); } } else if (type == VIRTIO_SCSI_T_AN_QUERY || type == VIRTIO_SCSI_T_AN_SUBSCRIBE) { if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq), sizeof(VirtIOSCSICtrlANResp)) < 0) { virtio_scsi_bad_req(); } else { req->resp.an.event_actual = 0; req->resp.an.response = VIRTIO_SCSI_S_OK; } } if (r == 0) { virtio_scsi_complete_req(req); } else { assert(r == -EINPROGRESS); } }"} {"target": 1, "idx": 15204, "func": "static inline uint32_t mipsdsp_sub32(int32_t a, int32_t b, CPUMIPSState *env) { int32_t temp; temp = a - b; if (MIPSDSP_OVERFLOW(a, -b, temp, 0x80000000)) { set_DSPControl_overflow_flag(1, 20, env); } return temp; }"} {"target": 1, "idx": 15217, "func": "static inline abi_long host_to_target_timespec(abi_ulong target_addr, struct timespec *host_ts) { struct target_timespec *target_ts; if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) return -TARGET_EFAULT; target_ts->tv_sec = tswapal(host_ts->tv_sec); target_ts->tv_nsec = tswapal(host_ts->tv_nsec); unlock_user_struct(target_ts, target_addr, 1); return 0; }"} {"target": 0, "idx": 15219, "func": "static int mpegvideo_probe(AVProbeData *p) { uint32_t code= -1; int pic=0, 
seq=0, slice=0, pspack=0, vpes=0, apes=0, res=0; int i; for(i=0; ibuf_size; i++){ code = (code<<8) + p->buf[i]; if ((code & 0xffffff00) == 0x100) { switch(code){ case SEQ_START_CODE: seq++; break; case PICTURE_START_CODE: pic++; break; case PACK_START_CODE: pspack++; break; case 0x1b6: res++; break; } if (code >= SLICE_START_CODE && code <= 0x1af) slice++; if ((code & 0x1f0) == VIDEO_ID) vpes++; else if((code & 0x1e0) == AUDIO_ID) apes++; } } if(seq && seq*9<=pic*10 && pic*9<=slice*10 && !pspack && !apes && !res) { if(vpes) return AVPROBE_SCORE_MAX/8; else return pic>1 ? AVPROBE_SCORE_MAX/2+1 : AVPROBE_SCORE_MAX/4; // +1 for .mpg } return 0; }"} {"target": 0, "idx": 15222, "func": "av_cold void ff_yadif_init_x86(YADIFContext *yadif) { int cpu_flags = av_get_cpu_flags(); if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) yadif->filter_line = yadif_filter_line_mmx; if (HAVE_SSE && cpu_flags & AV_CPU_FLAG_SSE2) yadif->filter_line = yadif_filter_line_sse2; if (HAVE_SSSE3 && cpu_flags & AV_CPU_FLAG_SSSE3) yadif->filter_line = yadif_filter_line_ssse3; }"} {"target": 1, "idx": 15225, "func": "void av_frame_unref(AVFrame *frame) { int i; wipe_side_data(frame); for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++) av_buffer_unref(&frame->buf[i]); for (i = 0; i < frame->nb_extended_buf; i++) av_buffer_unref(&frame->extended_buf[i]); av_freep(&frame->extended_buf); av_dict_free(&frame->metadata); av_buffer_unref(&frame->qp_table_buf); get_frame_defaults(frame); }"} {"target": 1, "idx": 15226, "func": "static bool load_asl(GArray *sdts, AcpiSdtTable *sdt) { AcpiSdtTable *temp; GError *error = NULL; GString *command_line = g_string_new(iasl); gint fd; gchar *out, *out_err; gboolean ret; int i; fd = g_file_open_tmp(\"asl-XXXXXX.dsl\", &sdt->asl_file, &error); g_assert_no_error(error); close(fd); /* build command line */ g_string_append_printf(command_line, \" -p %s \", sdt->asl_file); if (compare_signature(sdt, \"DSDT\") || compare_signature(sdt, \"SSDT\")) { for (i = 0; i < sdts->len; ++i) { temp = &g_array_index(sdts, AcpiSdtTable, i); if (compare_signature(temp, \"DSDT\") || compare_signature(temp, \"SSDT\")) { g_string_append_printf(command_line, \"-e %s \", temp->aml_file); } } } g_string_append_printf(command_line, \"-d %s\", sdt->aml_file); /* pass 'out' and 'out_err' in order to be redirected */ ret = g_spawn_command_line_sync(command_line->str, &out, &out_err, NULL, &error); g_assert_no_error(error); if (ret) { ret = g_file_get_contents(sdt->asl_file, (gchar **)&sdt->asl, &sdt->asl_len, &error); g_assert(ret); g_assert_no_error(error); g_assert(sdt->asl_len); } g_free(out); g_free(out_err); g_string_free(command_line, true); return !ret; }"} {"target": 1, "idx": 15227, "func": "int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; ppc_slb_t *slb; unsigned apshift; hwaddr pte_offset; ppc_hash_pte64_t pte; int pp_prot, amr_prot, prot; uint64_t new_pte1, dsisr; const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; hwaddr raddr; assert((rwx == 0) || (rwx == 1) || (rwx == 2)); /* 1. Handle real mode accesses */ if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { /* Translation is off */ /* In real mode the top 4 effective address bits are ignored */ raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE); return 0; } /* 2. 
Translation is on, so look up the SLB */ slb = slb_lookup(cpu, eaddr); if (!slb) { if (rwx == 2) { cs->exception_index = POWERPC_EXCP_ISEG; env->error_code = 0; } else { cs->exception_index = POWERPC_EXCP_DSEG; env->error_code = 0; env->spr[SPR_DAR] = eaddr; } return 1; } /* 3. Check for segment level no-execute violation */ if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) { ppc_hash64_set_isi(cs, env, 0x10000000); return 1; } /* 4. Locate the PTE in the hash table */ pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte); if (pte_offset == -1) { dsisr = 0x40000000; if (rwx == 2) { ppc_hash64_set_isi(cs, env, dsisr); } else { if (rwx == 1) { dsisr |= 0x02000000; } ppc_hash64_set_dsi(cs, env, eaddr, dsisr); } return 1; } qemu_log_mask(CPU_LOG_MMU, \"found PTE at offset %08\" HWADDR_PRIx \"\\n\", pte_offset); /* Validate page size encoding */ apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1); if (!apshift) { error_report(\"Bad page size encoding in HPTE 0x%\"PRIx64\" - 0x%\"PRIx64 \" @ 0x%\"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset); /* Not entirely sure what the right action here, but machine * check seems reasonable */ cs->exception_index = POWERPC_EXCP_MCHECK; env->error_code = 0; return 1; } /* 5. Check access permissions */ pp_prot = ppc_hash64_pte_prot(cpu, slb, pte); amr_prot = ppc_hash64_amr_prot(cpu, pte); prot = pp_prot & amr_prot; if ((need_prot[rwx] & ~prot) != 0) { /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, \"PTE access rejected\\n\"); if (rwx == 2) { ppc_hash64_set_isi(cs, env, 0x08000000); } else { dsisr = 0; if (need_prot[rwx] & ~pp_prot) { dsisr |= 0x08000000; } if (rwx == 1) { dsisr |= 0x02000000; } if (need_prot[rwx] & ~amr_prot) { dsisr |= 0x00200000; } ppc_hash64_set_dsi(cs, env, eaddr, dsisr); } return 1; } qemu_log_mask(CPU_LOG_MMU, \"PTE access granted !\\n\"); /* 6. Update PTE referenced and changed bits if necessary */ new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */ if (rwx == 1) { new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */ } else { /* Treat the page as read-only for now, so that a later write * will pass through this function again to set the C bit */ prot &= ~PAGE_WRITE; } if (new_pte1 != pte.pte1) { ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64, pte.pte0, new_pte1); } /* 7. 
Determine the real address from the PTE */ raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr); tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, prot, mmu_idx, 1ULL << apshift); return 0; }"} {"target": 1, "idx": 15245, "func": "MemoryRegionSection memory_region_find(MemoryRegion *address_space, target_phys_addr_t addr, uint64_t size) { AddressSpace *as = memory_region_to_address_space(address_space); AddrRange range = addrrange_make(int128_make64(addr), int128_make64(size)); FlatRange *fr = address_space_lookup(as, range); MemoryRegionSection ret = { .mr = NULL, .size = 0 }; if (!fr) { return ret; } while (fr > as->current_map.ranges && addrrange_intersects(fr[-1].addr, range)) { --fr; } ret.mr = fr->mr; range = addrrange_intersection(range, fr->addr); ret.offset_within_region = fr->offset_in_region; ret.offset_within_region += int128_get64(int128_sub(range.start, fr->addr.start)); ret.size = int128_get64(range.size); ret.offset_within_address_space = int128_get64(range.start); ret.readonly = fr->readonly; return ret; }"} {"target": 1, "idx": 15258, "func": "static int decode_ics(AACContext *ac, SingleChannelElement *sce, GetBitContext *gb, int common_window, int scale_flag) { Pulse pulse; TemporalNoiseShaping *tns = &sce->tns; IndividualChannelStream *ics = &sce->ics; INTFLOAT *out = sce->coeffs; int global_gain, eld_syntax, er_syntax, pulse_present = 0; int ret; eld_syntax = ac->oc[1].m4ac.object_type == AOT_ER_AAC_ELD; er_syntax = ac->oc[1].m4ac.object_type == AOT_ER_AAC_LC || ac->oc[1].m4ac.object_type == AOT_ER_AAC_LTP || ac->oc[1].m4ac.object_type == AOT_ER_AAC_LD || ac->oc[1].m4ac.object_type == AOT_ER_AAC_ELD; /* This assignment is to silence a GCC warning about the variable being used * uninitialized when in fact it always is. 
*/ pulse.num_pulse = 0; global_gain = get_bits(gb, 8); if (!common_window && !scale_flag) { if (decode_ics_info(ac, ics, gb) < 0) return AVERROR_INVALIDDATA; } if ((ret = decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics)) < 0) return ret; if ((ret = decode_scalefactors(ac, sce->sf, gb, global_gain, ics, sce->band_type, sce->band_type_run_end)) < 0) return ret; pulse_present = 0; if (!scale_flag) { if (!eld_syntax && (pulse_present = get_bits1(gb))) { if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) { av_log(ac->avctx, AV_LOG_ERROR, \"Pulse tool not allowed in eight short sequence.\\n\"); return AVERROR_INVALIDDATA; } if (decode_pulses(&pulse, gb, ics->swb_offset, ics->num_swb)) { av_log(ac->avctx, AV_LOG_ERROR, \"Pulse data corrupt or invalid.\\n\"); return AVERROR_INVALIDDATA; } } tns->present = get_bits1(gb); if (tns->present && !er_syntax) if (decode_tns(ac, tns, gb, ics) < 0) return AVERROR_INVALIDDATA; if (!eld_syntax && get_bits1(gb)) { avpriv_request_sample(ac->avctx, \"SSR\"); return AVERROR_PATCHWELCOME; } // I see no textual basis in the spec for this occurring after SSR gain // control, but this is what both reference and real implmentations do if (tns->present && er_syntax) if (decode_tns(ac, tns, gb, ics) < 0) return AVERROR_INVALIDDATA; } if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0) return AVERROR_INVALIDDATA; if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN && !common_window) apply_prediction(ac, sce); return 0; }"} {"target": 1, "idx": 15262, "func": "static int decode_i_block(FourXContext *f, DCTELEM *block){ int code, i, j, level, val; /* DC coef */ val = get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3); if (val>>4){ av_log(f->avctx, AV_LOG_ERROR, \"error dc run != 0\\n\"); if(val) val = get_xbits(&f->gb, val); val = val * dequant_table[0] + f->last_dc; f->last_dc = block[0] = val; /* AC coefs */ i = 1; for(;;) { code = get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3); /* EOB */ if (code == 0) break; if (code == 0xf0) { i += 16; } else { level = get_xbits(&f->gb, code & 0xf); i += code >> 4; if (i >= 64) { av_log(f->avctx, AV_LOG_ERROR, \"run %d oveflow\\n\", i); return 0; j= ff_zigzag_direct[i]; block[j] = level * dequant_table[j]; i++; if (i >= 64) break; return 0;"} {"target": 1, "idx": 15264, "func": "uint64_t qpci_io_readq(QPCIDevice *dev, void *data) { uintptr_t addr = (uintptr_t)data; if (addr < QPCI_PIO_LIMIT) { return dev->bus->pio_readq(dev->bus, addr); } else { uint64_t val; dev->bus->memread(dev->bus, addr, &val, sizeof(val)); return le64_to_cpu(val); } }"} {"target": 1, "idx": 15279, "func": "static int gxf_write_header(AVFormatContext *s) { AVIOContext *pb = s->pb; GXFContext *gxf = s->priv_data; GXFStreamContext *vsc = NULL; uint8_t tracks[255] = {0}; int i, media_info = 0; AVDictionaryEntry *tcr = av_dict_get(s->metadata, \"timecode\", NULL, 0); if (!pb->seekable) { av_log(s, AV_LOG_ERROR, \"gxf muxer does not support streamed output, patch welcome\\n\"); return -1; } gxf->flags |= 0x00080000; /* material is simple clip */ for (i = 0; i < s->nb_streams; ++i) { AVStream *st = s->streams[i]; GXFStreamContext *sc = av_mallocz(sizeof(*sc)); if (!sc) return AVERROR(ENOMEM); st->priv_data = sc; sc->media_type = ff_codec_get_tag(gxf_media_types, st->codec->codec_id); if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if (st->codec->codec_id != AV_CODEC_ID_PCM_S16LE) { av_log(s, AV_LOG_ERROR, \"only 16 BIT PCM LE allowed for now\\n\"); return -1; } if 
(st->codec->sample_rate != 48000) { av_log(s, AV_LOG_ERROR, \"only 48000hz sampling rate is allowed\\n\"); return -1; } if (st->codec->channels != 1) { av_log(s, AV_LOG_ERROR, \"only mono tracks are allowed\\n\"); return -1; } sc->track_type = 2; sc->sample_rate = st->codec->sample_rate; avpriv_set_pts_info(st, 64, 1, sc->sample_rate); sc->sample_size = 16; sc->frame_rate_index = -2; sc->lines_index = -2; sc->fields = -2; gxf->audio_tracks++; gxf->flags |= 0x04000000; /* audio is 16 bit pcm */ media_info = 'A'; } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { if (i != 0) { av_log(s, AV_LOG_ERROR, \"video stream must be the first track\\n\"); return -1; } /* FIXME check from time_base ? */ if (st->codec->height == 480 || st->codec->height == 512) { /* NTSC or NTSC+VBI */ sc->frame_rate_index = 5; sc->sample_rate = 60; gxf->flags |= 0x00000080; gxf->time_base = (AVRational){ 1001, 60000 }; } else if (st->codec->height == 576 || st->codec->height == 608) { /* PAL or PAL+VBI */ sc->frame_rate_index = 6; sc->media_type++; sc->sample_rate = 50; gxf->flags |= 0x00000040; gxf->time_base = (AVRational){ 1, 50 }; } else { av_log(s, AV_LOG_ERROR, \"unsupported video resolution, \" \"gxf muxer only accepts PAL or NTSC resolutions currently\\n\"); return -1; } if (!tcr) tcr = av_dict_get(st->metadata, \"timecode\", NULL, 0); avpriv_set_pts_info(st, 64, gxf->time_base.num, gxf->time_base.den); if (gxf_find_lines_index(st) < 0) sc->lines_index = -1; sc->sample_size = st->codec->bit_rate; sc->fields = 2; /* interlaced */ vsc = sc; switch (st->codec->codec_id) { case AV_CODEC_ID_MJPEG: sc->track_type = 1; gxf->flags |= 0x00004000; media_info = 'J'; break; case AV_CODEC_ID_MPEG1VIDEO: sc->track_type = 9; gxf->mpeg_tracks++; media_info = 'L'; break; case AV_CODEC_ID_MPEG2VIDEO: sc->first_gop_closed = -1; sc->track_type = 4; gxf->mpeg_tracks++; gxf->flags |= 0x00008000; media_info = 'M'; break; case AV_CODEC_ID_DVVIDEO: if (st->codec->pix_fmt == AV_PIX_FMT_YUV422P) { sc->media_type += 2; sc->track_type = 6; gxf->flags |= 0x00002000; media_info = 'E'; } else { sc->track_type = 5; gxf->flags |= 0x00001000; media_info = 'D'; } break; default: av_log(s, AV_LOG_ERROR, \"video codec not supported\\n\"); return -1; } } /* FIXME first 10 audio tracks are 0 to 9 next 22 are A to V */ sc->media_info = media_info<<8 | ('0'+tracks[media_info]++); sc->order = s->nb_streams - st->index; } if (ff_audio_interleave_init(s, GXF_samples_per_frame, (AVRational){ 1, 48000 }) < 0) return -1; if (tcr) gxf_init_timecode(s, &gxf->tc, tcr->value, vsc->fields); gxf_init_timecode_track(&gxf->timecode_track, vsc); gxf->flags |= 0x200000; // time code track is non-drop frame gxf_write_map_packet(s, 0); gxf_write_flt_packet(s); gxf_write_umf_packet(s); gxf->packet_count = 3; avio_flush(pb); return 0; }"} {"target": 1, "idx": 15296, "func": "static int decode_nal_sei_prefix(GetBitContext *gb, HEVCSEIContext *s, const HEVCParamSets *ps, int type, int size, void *logctx) { switch (type) { case 256: // Mismatched value from HM 8.1 return decode_nal_sei_decoded_picture_hash(&s->picture_hash, gb); case HEVC_SEI_TYPE_FRAME_PACKING: return decode_nal_sei_frame_packing_arrangement(&s->frame_packing, gb); case HEVC_SEI_TYPE_DISPLAY_ORIENTATION: return decode_nal_sei_display_orientation(&s->display_orientation, gb); case HEVC_SEI_TYPE_PICTURE_TIMING: { int ret = decode_pic_timing(s, gb, ps, logctx); av_log(logctx, AV_LOG_DEBUG, \"Skipped PREFIX SEI %d\\n\", type); skip_bits(gb, 8 * size); return ret; } case 
HEVC_SEI_TYPE_MASTERING_DISPLAY_INFO: return decode_nal_sei_mastering_display_info(&s->mastering_display, gb); case HEVC_SEI_TYPE_CONTENT_LIGHT_LEVEL_INFO: return decode_nal_sei_content_light_info(&s->content_light, gb); case HEVC_SEI_TYPE_ACTIVE_PARAMETER_SETS: active_parameter_sets(s, gb, logctx); av_log(logctx, AV_LOG_DEBUG, \"Skipped PREFIX SEI %d\\n\", type); return 0; case HEVC_SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35: return decode_nal_sei_user_data_registered_itu_t_t35(s, gb, size); default: av_log(logctx, AV_LOG_DEBUG, \"Skipped PREFIX SEI %d\\n\", type); skip_bits_long(gb, 8 * size); return 0; } }"} {"target": 1, "idx": 15311, "func": "static int get_segment (CPUState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw, int type) { target_phys_addr_t sdr, hash, mask, sdr_mask; target_ulong sr, vsid, vsid_mask, pgidx, page_mask; #if defined(TARGET_PPC64) int attr; #endif int ds, nx, vsid_sh, sdr_sh; int ret, ret2; #if defined(TARGET_PPC64) if (env->mmu_model == POWERPC_MMU_64B || env->mmu_model == POWERPC_MMU_64BRIDGE) { ret = slb_lookup(env, eaddr, &vsid, &page_mask, &attr); if (ret < 0) return ret; ctx->key = ((attr & 0x40) && msr_pr == 1) || ((attr & 0x80) && msr_pr == 0) ? 1 : 0; ds = 0; nx = attr & 0x20 ? 1 : 0; vsid_mask = 0x00003FFFFFFFFF80ULL; vsid_sh = 7; sdr_sh = 18; sdr_mask = 0x3FF80; } else #endif /* defined(TARGET_PPC64) */ { sr = env->sr[eaddr >> 28]; page_mask = 0x0FFFFFFF; ctx->key = (((sr & 0x20000000) && msr_pr == 1) || ((sr & 0x40000000) && msr_pr == 0)) ? 1 : 0; ds = sr & 0x80000000 ? 1 : 0; nx = sr & 0x10000000 ? 1 : 0; vsid = sr & 0x00FFFFFF; vsid_mask = 0x01FFFFC0; vsid_sh = 6; sdr_sh = 16; sdr_mask = 0xFFC0; #if defined (DEBUG_MMU) if (loglevel != 0) { fprintf(logfile, \"Check segment v=0x\" ADDRX \" %d 0x\" ADDRX \" nip=0x\" ADDRX \" lr=0x\" ADDRX \" ir=%d dr=%d pr=%d %d t=%d\\n\", eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, msr_ir, msr_dr, msr_pr, rw, type); } if (!ds && loglevel != 0) { fprintf(logfile, \"pte segment: key=%d n=0x\" ADDRX \"\\n\", ctx->key, sr & 0x10000000); } #endif } ret = -1; if (!ds) { /* Check if instruction fetch is allowed, if needed */ if (type != ACCESS_CODE || nx == 0) { /* Page address translation */ pgidx = (eaddr & page_mask) >> TARGET_PAGE_BITS; hash = ((vsid ^ pgidx) << vsid_sh) & vsid_mask; /* Primary table address */ sdr = env->sdr1; mask = ((sdr & 0x000001FF) << sdr_sh) | sdr_mask; ctx->pg_addr[0] = get_pgaddr(sdr, sdr_sh, hash, mask); /* Secondary table address */ hash = (~hash) & vsid_mask; ctx->pg_addr[1] = get_pgaddr(sdr, sdr_sh, hash, mask); #if defined(TARGET_PPC64) if (env->mmu_model == POWERPC_MMU_64B || env->mmu_model == POWERPC_MMU_64BRIDGE) { /* Only 5 bits of the page index are used in the AVPN */ ctx->ptem = (vsid << 12) | ((pgidx >> 4) & 0x0F80); } else #endif { ctx->ptem = (vsid << 7) | (pgidx >> 10); } /* Initialize real address with an invalid value */ ctx->raddr = (target_ulong)-1; if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx || env->mmu_model == POWERPC_MMU_SOFT_74xx)) { /* Software TLB search */ ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); } else { #if defined (DEBUG_MMU) if (loglevel != 0) { fprintf(logfile, \"0 sdr1=0x\" PADDRX \" vsid=0x%06x \" \"api=0x%04x hash=0x%07x pg_addr=0x\" PADDRX \"\\n\", sdr, (uint32_t)vsid, (uint32_t)pgidx, (uint32_t)hash, ctx->pg_addr[0]); } #endif /* Primary table lookup */ ret = find_pte(env, ctx, 0, rw); if (ret < 0) { /* Secondary table lookup */ #if defined (DEBUG_MMU) if (eaddr != 0xEFFFFFFF && loglevel != 0) { fprintf(logfile, \"1 sdr1=0x\" PADDRX 
\" vsid=0x%06x api=0x%04x \" \"hash=0x%05x pg_addr=0x\" PADDRX \"\\n\", sdr, (uint32_t)vsid, (uint32_t)pgidx, (uint32_t)hash, ctx->pg_addr[1]); } #endif ret2 = find_pte(env, ctx, 1, rw); if (ret2 != -1) ret = ret2; } } } else { #if defined (DEBUG_MMU) if (loglevel != 0) fprintf(logfile, \"No access allowed\\n\"); #endif ret = -3; } } else { #if defined (DEBUG_MMU) if (loglevel != 0) fprintf(logfile, \"direct store...\\n\"); #endif /* Direct-store segment : absolutely *BUGGY* for now */ switch (type) { case ACCESS_INT: /* Integer load/store : only access allowed */ break; case ACCESS_CODE: /* No code fetch is allowed in direct-store areas */ return -4; case ACCESS_FLOAT: /* Floating point load/store */ return -4; case ACCESS_RES: /* lwarx, ldarx or srwcx. */ return -4; case ACCESS_CACHE: /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */ /* Should make the instruction do no-op. * As it already do no-op, it's quite easy :-) */ ctx->raddr = eaddr; return 0; case ACCESS_EXT: /* eciwx or ecowx */ return -4; default: if (logfile) { fprintf(logfile, \"ERROR: instruction should not need \" \"address translation\\n\"); } return -4; } if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) { ctx->raddr = eaddr; ret = 2; } else { ret = -2; } } return ret; }"} {"target": 1, "idx": 15312, "func": "static void handle_satn_stop(ESPState *s) { if (s->dma && !s->dma_enabled) { s->dma_cb = handle_satn_stop; return; } s->cmdlen = get_cmd(s, s->cmdbuf); if (s->cmdlen) { trace_esp_handle_satn_stop(s->cmdlen); s->do_cmd = 1; s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; s->rregs[ESP_RINTR] = INTR_BS | INTR_FC; s->rregs[ESP_RSEQ] = SEQ_CD; esp_raise_irq(s); } }"} {"target": 1, "idx": 15319, "func": "static int xv_write_trailer(AVFormatContext *s) { XVContext *xv = s->priv_data; XShmDetach(xv->display, &xv->yuv_shminfo); shmdt(xv->yuv_image->data); XFree(xv->yuv_image); XCloseDisplay(xv->display); return 0; }"} {"target": 0, "idx": 15330, "func": "void ff_biweight_h264_pixels16_8_msa(uint8_t *dst, uint8_t *src, int stride, int height, int log2_denom, int weight_dst, int weight_src, int offset) { avc_biwgt_16width_msa(src, stride, dst, stride, height, log2_denom, weight_src, weight_dst, offset); }"} {"target": 0, "idx": 15337, "func": "static void vt82c686b_write_config(PCIDevice * d, uint32_t address, uint32_t val, int len) { VT82C686BState *vt686 = DO_UPCAST(VT82C686BState, dev, d); DPRINTF(\"vt82c686b_write_config address 0x%x val 0x%x len 0x%x \\n\", address, val, len); pci_default_write_config(d, address, val, len); if (address == 0x85) { /* enable or disable super IO configure */ if (val & 0x2) { /* floppy also uses 0x3f0 and 0x3f1. * But we do not emulate flopy,so just set it here. 
*/ isa_unassign_ioport(0x3f0, 2); register_ioport_read(0x3f0, 2, 1, superio_ioport_readb, &vt686->superio_conf); register_ioport_write(0x3f0, 2, 1, superio_ioport_writeb, &vt686->superio_conf); } else { isa_unassign_ioport(0x3f0, 2); } } }"} {"target": 0, "idx": 15356, "func": "e1000_receive(VLANClientState *nc, const uint8_t *buf, size_t size) { E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque; struct e1000_rx_desc desc; target_phys_addr_t base; unsigned int n, rdt; uint32_t rdh_start; uint16_t vlan_special = 0; uint8_t vlan_status = 0, vlan_offset = 0; if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) return -1; if (size > s->rxbuf_size) { DBGOUT(RX, \"packet too large for buffers (%lu > %d)\\n\", (unsigned long)size, s->rxbuf_size); return -1; } if (!receive_filter(s, buf, size)) return size; if (vlan_enabled(s) && is_vlan_packet(s, buf)) { vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14))); memmove((uint8_t *)buf + 4, buf, 12); vlan_status = E1000_RXD_STAT_VP; vlan_offset = 4; size -= 4; } rdh_start = s->mac_reg[RDH]; size += 4; // for the header do { if (s->mac_reg[RDH] == s->mac_reg[RDT] && s->check_rxov) { set_ics(s, 0, E1000_ICS_RXO); return -1; } base = ((uint64_t)s->mac_reg[RDBAH] << 32) + s->mac_reg[RDBAL] + sizeof(desc) * s->mac_reg[RDH]; cpu_physical_memory_read(base, (void *)&desc, sizeof(desc)); desc.special = vlan_special; desc.status |= (vlan_status | E1000_RXD_STAT_DD); if (desc.buffer_addr) { cpu_physical_memory_write(le64_to_cpu(desc.buffer_addr), (void *)(buf + vlan_offset), size); desc.length = cpu_to_le16(size); desc.status |= E1000_RXD_STAT_EOP|E1000_RXD_STAT_IXSM; } else // as per intel docs; skip descriptors with null buf addr DBGOUT(RX, \"Null RX descriptor!!\\n\"); cpu_physical_memory_write(base, (void *)&desc, sizeof(desc)); if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN]) s->mac_reg[RDH] = 0; s->check_rxov = 1; /* see comment in start_xmit; same here */ if (s->mac_reg[RDH] == rdh_start) { DBGOUT(RXERR, \"RDH wraparound @%x, RDT %x, RDLEN %x\\n\", rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]); set_ics(s, 0, E1000_ICS_RXO); return -1; } } while (desc.buffer_addr == 0); s->mac_reg[GPRC]++; s->mac_reg[TPR]++; n = s->mac_reg[TORL]; if ((s->mac_reg[TORL] += size) < n) s->mac_reg[TORH]++; n = E1000_ICS_RXT0; if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH]) rdt += s->mac_reg[RDLEN] / sizeof(desc); if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >> s->rxbuf_min_shift) n |= E1000_ICS_RXDMT0; set_ics(s, 0, n); return size; }"} {"target": 0, "idx": 15358, "func": "void stl_le_phys(target_phys_addr_t addr, uint32_t val) { stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN); }"} {"target": 1, "idx": 15397, "func": "static void kvm_mce_broadcast_rest(CPUState *env) { CPUState *cenv; int family, model, cpuver = env->cpuid_version; family = (cpuver >> 8) & 0xf; model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0xf); /* Broadcast MCA signal for processor version 06H_EH and above */ if ((family == 6 && model >= 14) || family > 6) { for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) { if (cenv == env) { continue; } kvm_inject_x86_mce(cenv, 1, MCI_STATUS_VAL | MCI_STATUS_UC, MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0, 1); } } }"} {"target": 0, "idx": 15403, "func": "void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h) { if (h&3) { ff_put_dirac_pixels32_c(dst, src, stride, h); } else { ff_put_pixels16_sse2(dst , src[0] , stride, h); ff_put_pixels16_sse2(dst+16, src[0]+16, stride, h); } }"} {"target": 0, 
"idx": 15421, "func": "void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs) { int translation, lba_detected = 0; int cylinders, heads, secs; uint64_t nb_sectors; /* if a geometry hint is available, use it */ bdrv_get_geometry(bs, &nb_sectors); bdrv_get_geometry_hint(bs, &cylinders, &heads, &secs); translation = bdrv_get_translation_hint(bs); if (cylinders != 0) { *pcyls = cylinders; *pheads = heads; *psecs = secs; } else { if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) { if (heads > 16) { /* if heads > 16, it means that a BIOS LBA translation was active, so the default hardware geometry is OK */ lba_detected = 1; goto default_geometry; } else { *pcyls = cylinders; *pheads = heads; *psecs = secs; /* disable any translation to be in sync with the logical geometry */ if (translation == BIOS_ATA_TRANSLATION_AUTO) { bdrv_set_translation_hint(bs, BIOS_ATA_TRANSLATION_NONE); } } } else { default_geometry: /* if no geometry, use a standard physical disk geometry */ cylinders = nb_sectors / (16 * 63); if (cylinders > 16383) cylinders = 16383; else if (cylinders < 2) cylinders = 2; *pcyls = cylinders; *pheads = 16; *psecs = 63; if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) { if ((*pcyls * *pheads) <= 131072) { bdrv_set_translation_hint(bs, BIOS_ATA_TRANSLATION_LARGE); } else { bdrv_set_translation_hint(bs, BIOS_ATA_TRANSLATION_LBA); } } } bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs); } }"} {"target": 0, "idx": 15445, "func": "static void glib_pollfds_fill(uint32_t *cur_timeout) { GMainContext *context = g_main_context_default(); int timeout = 0; int n; g_main_context_prepare(context, &max_priority); glib_pollfds_idx = gpollfds->len; n = glib_n_poll_fds; do { GPollFD *pfds; glib_n_poll_fds = n; g_array_set_size(gpollfds, glib_pollfds_idx + glib_n_poll_fds); pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx); n = g_main_context_query(context, max_priority, &timeout, pfds, glib_n_poll_fds); } while (n != glib_n_poll_fds); if (timeout >= 0 && timeout < *cur_timeout) { *cur_timeout = timeout; } }"} {"target": 0, "idx": 15458, "func": "long do_sigreturn(CPUMBState *env) { struct target_signal_frame *frame; abi_ulong frame_addr; target_sigset_t target_set; sigset_t set; int i; frame_addr = env->regs[R_SP]; /* Make sure the guest isn't playing games. */ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) goto badframe; /* Restore blocked signals */ if (__get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask)) goto badframe; for(i = 1; i < TARGET_NSIG_WORDS; i++) { if (__get_user(target_set.sig[i], &frame->extramask[i - 1])) goto badframe; } target_to_host_sigset_internal(&set, &target_set); sigprocmask(SIG_SETMASK, &set, NULL); restore_sigcontext(&frame->uc.tuc_mcontext, env); /* We got here through a sigreturn syscall, our path back is via an rtb insn so setup r14 for that. */ env->regs[14] = env->sregs[SR_PC]; unlock_user_struct(frame, frame_addr, 0); return env->regs[10]; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); }"} {"target": 0, "idx": 15463, "func": "static void vc1_put_signed_blocks_clamped(VC1Context *v) { MpegEncContext *s = &v->s; int topleft_mb_pos, top_mb_pos; int stride_y, fieldtx; int v_dist; /* The put pixels loop is always one MB row behind the decoding loop, * because we can only put pixels when overlap filtering is done, and * for filtering of the bottom edge of a MB, we need the next MB row * present as well. 
* Within the row, the put pixels loop is also one MB col behind the * decoding loop. The reason for this is again, because for filtering * of the right MB edge, we need the next MB present. */ if (!s->first_slice_line) { if (s->mb_x) { topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1; fieldtx = v->fieldtx_plane[topleft_mb_pos]; stride_y = s->linesize << fieldtx; v_dist = (16 - fieldtx) >> (fieldtx == 0); s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0], s->dest[0] - 16 * s->linesize - 16, stride_y); s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1], s->dest[0] - 16 * s->linesize - 8, stride_y); s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2], s->dest[0] - v_dist * s->linesize - 16, stride_y); s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3], s->dest[0] - v_dist * s->linesize - 8, stride_y); s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4], s->dest[1] - 8 * s->uvlinesize - 8, s->uvlinesize); s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5], s->dest[2] - 8 * s->uvlinesize - 8, s->uvlinesize); } if (s->mb_x == s->mb_width - 1) { top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x; fieldtx = v->fieldtx_plane[top_mb_pos]; stride_y = s->linesize << fieldtx; v_dist = fieldtx ? 15 : 8; s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0], s->dest[0] - 16 * s->linesize, stride_y); s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1], s->dest[0] - 16 * s->linesize + 8, stride_y); s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2], s->dest[0] - v_dist * s->linesize, stride_y); s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3], s->dest[0] - v_dist * s->linesize + 8, stride_y); s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4], s->dest[1] - 8 * s->uvlinesize, s->uvlinesize); s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5], s->dest[2] - 8 * s->uvlinesize, s->uvlinesize); } } #define inc_blk_idx(idx) do { \\ idx++; \\ if (idx >= v->n_allocated_blks) \\ idx = 0; \\ } while (0) inc_blk_idx(v->topleft_blk_idx); inc_blk_idx(v->top_blk_idx); inc_blk_idx(v->left_blk_idx); inc_blk_idx(v->cur_blk_idx); }"} {"target": 0, "idx": 15472, "func": "static void scsi_device_destroy(SCSIDevice *s) { SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s); if (sc->destroy) { sc->destroy(s); } }"} {"target": 0, "idx": 15476, "func": "int ff_h264_get_profile(SPS *sps) { int profile = sps->profile_idc; switch (sps->profile_idc) { case FF_PROFILE_H264_BASELINE: // constraint_set1_flag set to 1 profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0; break; case FF_PROFILE_H264_HIGH_10: case FF_PROFILE_H264_HIGH_422: case FF_PROFILE_H264_HIGH_444_PREDICTIVE: // constraint_set3_flag set to 1 profile |= (sps->constraint_set_flags & 1 << 3) ? 
FF_PROFILE_H264_INTRA : 0; break; } return profile; }"} {"target": 0, "idx": 15489, "func": "void net_tx_pkt_setup_vlan_header(struct NetTxPkt *pkt, uint16_t vlan) { bool is_new; assert(pkt); eth_setup_vlan_headers(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base, vlan, &is_new); /* update l2hdrlen */ if (is_new) { pkt->hdr_len += sizeof(struct vlan_header); pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len += sizeof(struct vlan_header); } }"} {"target": 1, "idx": 15499, "func": "int pcnet_common_init(DeviceState *dev, PCNetState *s, NetClientInfo *info) { int i; uint16_t checksum; s->poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pcnet_poll_timer, s); qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(info, &s->conf, object_get_typename(OBJECT(dev)), dev->id, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); /* Initialize the PROM */ /* Datasheet: http://pdfdata.datasheetsite.com/web/24528/AM79C970A.pdf page 95 */ memcpy(s->prom, s->conf.macaddr.a, 6); /* Reserved Location: must be 00h */ s->prom[6] = s->prom[7] = 0x00; /* Reserved Location: must be 00h */ s->prom[8] = 0x00; /* Hardware ID: must be 11h if compatibility to AMD drivers is desired */ s->prom[9] = 0x11; /* User programmable space, init with 0 */ s->prom[10] = s->prom[11] = 0x00; /* LSByte of two-byte checksum, which is the sum of bytes 00h-0Bh and bytes 0Eh and 0Fh, must therefore be initialized with 0! */ s->prom[12] = s->prom[13] = 0x00; /* Must be ASCII W (57h) if compatibility to AMD driver software is desired */ s->prom[14] = s->prom[15] = 0x57; for (i = 0, checksum = 0; i < 16; i++) { checksum += s->prom[i]; } *(uint16_t *)&s->prom[12] = cpu_to_le16(checksum); s->lnkst = 0x40; /* initial link state: up */ return 0; }"} {"target": 1, "idx": 15501, "func": "static int query_memdev(Object *obj, void *opaque) { MemdevList **list = opaque; Error *err = NULL; if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { MemdevList *m = g_malloc0(sizeof(*m)); m->value = g_malloc0(sizeof(*m->value)); m->value->size = object_property_get_int(obj, \"size\", &err); if (err) { goto error; } m->value->merge = object_property_get_bool(obj, \"merge\", &err); if (err) { goto error; } m->value->dump = object_property_get_bool(obj, \"dump\", &err); if (err) { goto error; } m->value->prealloc = object_property_get_bool(obj, \"prealloc\", &err); if (err) { goto error; } m->value->policy = object_property_get_enum(obj, \"policy\", HostMemPolicy_lookup, &err); if (err) { goto error; } object_property_get_uint16List(obj, \"host-nodes\", &m->value->host_nodes, &err); if (err) { goto error; } m->next = *list; *list = m; } return 0; error: return -1; }"} {"target": 1, "idx": 15514, "func": "static int decode_iccp_chunk(PNGDecContext *s, int length, AVFrame *f) { int ret, cnt = 0; uint8_t *data, profile_name[82]; AVBPrint bp; AVFrameSideData *sd; while ((profile_name[cnt++] = bytestream2_get_byte(&s->gb)) && cnt < 81); if (cnt > 80) { av_log(s->avctx, AV_LOG_ERROR, \"iCCP with invalid name!\\n\"); return AVERROR_INVALIDDATA; } length = FFMAX(length - cnt, 0); if (bytestream2_get_byte(&s->gb) != 0) { av_log(s->avctx, AV_LOG_ERROR, \"iCCP with invalid compression!\\n\"); return AVERROR_INVALIDDATA; } length = FFMAX(length - 1, 0); if ((ret = decode_zbuf(&bp, s->gb.buffer, s->gb.buffer + length)) < 0) return ret; av_bprint_finalize(&bp, (char **)&data); sd = av_frame_new_side_data(f, AV_FRAME_DATA_ICC_PROFILE, bp.len); if (!sd) { av_free(data); } av_dict_set(&sd->metadata, \"name\", profile_name, 0); memcpy(sd->data, data, bp.len); 
av_free(data); /* ICC compressed data and CRC */ bytestream2_skip(&s->gb, length + 4); return 0; }"} {"target": 0, "idx": 15523, "func": "uint64_t helper_fres(CPUPPCState *env, uint64_t arg) { CPU_DoubleU farg; float32 f32; farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN reciprocal */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); } farg.d = float64_div(float64_one, farg.d, &env->fp_status); f32 = float64_to_float32(farg.d, &env->fp_status); farg.d = float32_to_float64(f32, &env->fp_status); return farg.ll; }"} {"target": 0, "idx": 15526, "func": "void HELPER(wsr_lend)(uint32_t v) { if (env->sregs[LEND] != v) { tb_invalidate_phys_page_range( env->sregs[LEND] - 1, env->sregs[LEND], 0); env->sregs[LEND] = v; tb_invalidate_phys_page_range( env->sregs[LEND] - 1, env->sregs[LEND], 0); } }"} {"target": 0, "idx": 15550, "func": "static int aio_epoll(AioContext *ctx, GPollFD *pfds, unsigned npfd, int64_t timeout) { assert(false); }"} {"target": 0, "idx": 15567, "func": "static int decode_buffering_period(H264Context *h) { unsigned int sps_id; int sched_sel_idx; SPS *sps; sps_id = get_ue_golomb_31(&h->gb); if (sps_id > 31 || !h->sps_buffers[sps_id]) { av_log(h->avctx, AV_LOG_ERROR, \"non-existing SPS %d referenced in buffering period\\n\", sps_id); return AVERROR_INVALIDDATA; } sps = h->sps_buffers[sps_id]; // NOTE: This is really so duplicated in the standard... See H.264, D.1.1 if (sps->nal_hrd_parameters_present_flag) { for (sched_sel_idx = 0; sched_sel_idx < sps->cpb_cnt; sched_sel_idx++) { h->initial_cpb_removal_delay[sched_sel_idx] = get_bits(&h->gb, sps->initial_cpb_removal_delay_length); // initial_cpb_removal_delay_offset skip_bits(&h->gb, sps->initial_cpb_removal_delay_length); } } if (sps->vcl_hrd_parameters_present_flag) { for (sched_sel_idx = 0; sched_sel_idx < sps->cpb_cnt; sched_sel_idx++) { h->initial_cpb_removal_delay[sched_sel_idx] = get_bits(&h->gb, sps->initial_cpb_removal_delay_length); // initial_cpb_removal_delay_offset skip_bits(&h->gb, sps->initial_cpb_removal_delay_length); } } h->sei_buffering_period_present = 1; return 0; }"} {"target": 1, "idx": 15577, "func": "static int ism_write_packet(AVFormatContext *s, AVPacket *pkt) { SmoothStreamingContext *c = s->priv_data; AVStream *st = s->streams[pkt->stream_index]; OutputStream *os = &c->streams[pkt->stream_index]; int64_t end_dts = (c->nb_fragments + 1) * c->min_frag_duration; int ret; if (st->first_dts == AV_NOPTS_VALUE) st->first_dts = pkt->dts; if ((!c->has_video || st->codec->codec_type == AVMEDIA_TYPE_VIDEO) && av_compare_ts(pkt->dts - st->first_dts, st->time_base, end_dts, AV_TIME_BASE_Q) >= 0 && pkt->flags & AV_PKT_FLAG_KEY && os->packets_written) { if ((ret = ism_flush(s, 0)) < 0) return ret; c->nb_fragments++; } os->packets_written++; return ff_write_chained(os->ctx, 0, pkt, s); }"} {"target": 0, "idx": 15578, "func": "static void avc_luma_midh_qrt_and_aver_dst_4w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height, uint8_t horiz_offset) { uint32_t row; v16i8 src0, src1, src2, src3, src4, src5, src6; v16u8 dst0, dst1, res; v8i16 vt_res0, vt_res1, vt_res2, vt_res3; v4i32 hz_res0, hz_res1; v8i16 res0, res1; v8i16 shf_vec0, shf_vec1, shf_vec2, shf_vec3, shf_vec4, shf_vec5; v8i16 mask0 = { 0, 5, 1, 6, 2, 7, 3, 8 }; v8i16 mask1 = { 1, 4, 2, 5, 3, 6, 4, 7 }; v8i16 mask2 = { 2, 3, 3, 4, 4, 5, 5, 6 }; v8i16 minus5h = __msa_ldi_h(-5); v8i16 plus20h = __msa_ldi_h(20); v8i16 zeros = { 0 }; LD_SB5(src, src_stride, src0, src1, src2, src3, src4); src += (5 * 
src_stride); XORI_B5_128_SB(src0, src1, src2, src3, src4); for (row = (height >> 1); row--;) { LD_SB2(src, src_stride, src5, src6); src += (2 * src_stride); XORI_B2_128_SB(src5, src6); LD_UB2(dst, dst_stride, dst0, dst1); dst0 = (v16u8) __msa_ilvr_w((v4i32) dst1, (v4i32) dst0); AVC_CALC_DPADD_B_6PIX_2COEFF_SH(src0, src1, src2, src3, src4, src5, vt_res0, vt_res1); AVC_CALC_DPADD_B_6PIX_2COEFF_SH(src1, src2, src3, src4, src5, src6, vt_res2, vt_res3); VSHF_H3_SH(vt_res0, vt_res1, vt_res0, vt_res1, vt_res0, vt_res1, mask0, mask1, mask2, shf_vec0, shf_vec1, shf_vec2); VSHF_H3_SH(vt_res2, vt_res3, vt_res2, vt_res3, vt_res2, vt_res3, mask0, mask1, mask2, shf_vec3, shf_vec4, shf_vec5); hz_res0 = __msa_hadd_s_w(shf_vec0, shf_vec0); DPADD_SH2_SW(shf_vec1, shf_vec2, minus5h, plus20h, hz_res0, hz_res0); hz_res1 = __msa_hadd_s_w(shf_vec3, shf_vec3); DPADD_SH2_SW(shf_vec4, shf_vec5, minus5h, plus20h, hz_res1, hz_res1); SRARI_W2_SW(hz_res0, hz_res1, 10); SAT_SW2_SW(hz_res0, hz_res1, 7); res0 = __msa_srari_h(shf_vec2, 5); res1 = __msa_srari_h(shf_vec5, 5); SAT_SH2_SH(res0, res1, 7); if (horiz_offset) { res0 = __msa_ilvod_h(zeros, res0); res1 = __msa_ilvod_h(zeros, res1); } else { ILVEV_H2_SH(res0, zeros, res1, zeros, res0, res1); } hz_res0 = __msa_aver_s_w(hz_res0, (v4i32) res0); hz_res1 = __msa_aver_s_w(hz_res1, (v4i32) res1); res0 = __msa_pckev_h((v8i16) hz_res1, (v8i16) hz_res0); res = PCKEV_XORI128_UB(res0, res0); dst0 = __msa_aver_u_b(res, dst0); ST4x2_UB(dst0, dst, dst_stride); dst += (2 * dst_stride); src0 = src2; src1 = src3; src2 = src4; src3 = src5; src4 = src6; } }"} {"target": 1, "idx": 15583, "func": "static int onenand_initfn(SysBusDevice *sbd) { DeviceState *dev = DEVICE(sbd); OneNANDState *s = ONE_NAND(dev); uint32_t size = 1 << (24 + ((s->id.dev >> 4) & 7)); void *ram; s->base = (hwaddr)-1; s->rdy = NULL; s->blocks = size >> BLOCK_SHIFT; s->secs = size >> 9; s->blockwp = g_malloc(s->blocks); s->density_mask = (s->id.dev & 0x08) ? 
(1 << (6 + ((s->id.dev >> 4) & 7))) : 0; memory_region_init_io(&s->iomem, OBJECT(s), &onenand_ops, s, \"onenand\", 0x10000 << s->shift); if (!s->blk) { s->image = memset(g_malloc(size + (size >> 5)), 0xff, size + (size >> 5)); } else { if (blk_is_read_only(s->blk)) { error_report(\"Can't use a read-only drive\"); return -1; } s->blk_cur = s->blk; } s->otp = memset(g_malloc((64 + 2) << PAGE_SHIFT), 0xff, (64 + 2) << PAGE_SHIFT); memory_region_init_ram(&s->ram, OBJECT(s), \"onenand.ram\", 0xc000 << s->shift, &error_abort); vmstate_register_ram_global(&s->ram); ram = memory_region_get_ram_ptr(&s->ram); s->boot[0] = ram + (0x0000 << s->shift); s->boot[1] = ram + (0x8000 << s->shift); s->data[0][0] = ram + ((0x0200 + (0 << (PAGE_SHIFT - 1))) << s->shift); s->data[0][1] = ram + ((0x8010 + (0 << (PAGE_SHIFT - 6))) << s->shift); s->data[1][0] = ram + ((0x0200 + (1 << (PAGE_SHIFT - 1))) << s->shift); s->data[1][1] = ram + ((0x8010 + (1 << (PAGE_SHIFT - 6))) << s->shift); onenand_mem_setup(s); sysbus_init_irq(sbd, &s->intr); sysbus_init_mmio(sbd, &s->container); vmstate_register(dev, ((s->shift & 0x7f) << 24) | ((s->id.man & 0xff) << 16) | ((s->id.dev & 0xff) << 8) | (s->id.ver & 0xff), &vmstate_onenand, s); return 0; }"} {"target": 0, "idx": 15612, "func": "static void sdl_resize(DisplayState *ds) { int flags; // printf(\"resizing to %d %d\\n\", w, h); flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL; if (gui_fullscreen) flags |= SDL_FULLSCREEN; if (gui_noframe) flags |= SDL_NOFRAME; again: real_screen = SDL_SetVideoMode(ds_get_width(ds), ds_get_height(ds), 0, flags); if (!real_screen) { fprintf(stderr, \"Could not open SDL display\\n\"); exit(1); } if (!real_screen->pixels && (flags & SDL_HWSURFACE) && (flags & SDL_FULLSCREEN)) { flags &= ~SDL_HWSURFACE; goto again; } if (!real_screen->pixels) { fprintf(stderr, \"Could not open SDL display\\n\"); exit(1); } sdl_setdata(ds); }"} {"target": 1, "idx": 15622, "func": "void do_subfeo (void) { T2 = T0; T0 = T1 + ~T0 + xer_ca; if (likely(!((~T2 ^ T1 ^ (-1)) & (~T2 ^ T0) & (1 << 31)))) { xer_ov = 0; } else { xer_so = 1; xer_ov = 1; } if (likely(T0 >= T1 && (xer_ca == 0 || T0 != T1))) { xer_ca = 0; } else { xer_ca = 1; } }"} {"target": 0, "idx": 15628, "func": "static bool object_is_type(Object *obj, const char *typename) { TypeImpl *target_type = type_get_by_name(typename); TypeImpl *type = obj->class->type; GSList *i; /* Check if typename is a direct ancestor of type */ while (type) { if (type == target_type) { return true; } type = type_get_parent(type); } /* Check if obj has an interface of typename */ for (i = obj->interfaces; i; i = i->next) { Interface *iface = i->data; if (object_is_type(OBJECT(iface), typename)) { return true; } } return false; }"} {"target": 0, "idx": 15635, "func": "static int truespeech_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { TSContext *c = avctx->priv_data; int i; short *samples = data; int consumed = 0; int16_t out_buf[240]; if (!buf_size) return 0; while (consumed < buf_size) { truespeech_read_frame(c, buf + consumed); consumed += 32; truespeech_correlate_filter(c); truespeech_filters_merge(c); memset(out_buf, 0, 240 * 2); for(i = 0; i < 4; i++) { truespeech_apply_twopoint_filter(c, i); truespeech_place_pulses(c, out_buf + i * 60, i); truespeech_update_filters(c, out_buf + i * 60, i); truespeech_synth(c, out_buf + i * 60, i); } truespeech_save_prevvec(c); /* finally output decoded frame */ for(i = 0; i < 240; i++) *samples++ = out_buf[i]; } *data_size = consumed * 15; 
return buf_size; }"} {"target": 0, "idx": 15660, "func": "static uint64_t dchip_read(void *opaque, target_phys_addr_t addr, unsigned size) { /* Skip this. It's all related to DRAM timing and setup. */ return 0; }"} {"target": 0, "idx": 15661, "func": "static void omap_prcm_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct omap_prcm_s *s = (struct omap_prcm_s *) opaque; if (size != 4) { return omap_badwidth_write32(opaque, addr, value); } switch (addr) { case 0x000: /* PRCM_REVISION */ case 0x054: /* PRCM_VOLTST */ case 0x084: /* PRCM_CLKCFG_STATUS */ case 0x1e4: /* PM_PWSTST_MPU */ case 0x220: /* CM_IDLEST1_CORE */ case 0x224: /* CM_IDLEST2_CORE */ case 0x22c: /* CM_IDLEST4_CORE */ case 0x2c8: /* PM_WKDEP_CORE */ case 0x2e4: /* PM_PWSTST_CORE */ case 0x320: /* CM_IDLEST_GFX */ case 0x3e4: /* PM_PWSTST_GFX */ case 0x420: /* CM_IDLEST_WKUP */ case 0x520: /* CM_IDLEST_CKGEN */ case 0x820: /* CM_IDLEST_DSP */ case 0x8e4: /* PM_PWSTST_DSP */ OMAP_RO_REG(addr); return; case 0x010: /* PRCM_SYSCONFIG */ s->sysconfig = value & 1; break; case 0x018: /* PRCM_IRQSTATUS_MPU */ s->irqst[0] &= ~value; omap_prcm_int_update(s, 0); break; case 0x01c: /* PRCM_IRQENABLE_MPU */ s->irqen[0] = value & 0x3f; omap_prcm_int_update(s, 0); break; case 0x050: /* PRCM_VOLTCTRL */ s->voltctrl = value & 0xf1c3; break; case 0x060: /* PRCM_CLKSRC_CTRL */ s->clksrc[0] = value & 0xdb; /* TODO update clocks */ break; case 0x070: /* PRCM_CLKOUT_CTRL */ s->clkout[0] = value & 0xbbbb; /* TODO update clocks */ break; case 0x078: /* PRCM_CLKEMUL_CTRL */ s->clkemul[0] = value & 1; /* TODO update clocks */ break; case 0x080: /* PRCM_CLKCFG_CTRL */ break; case 0x090: /* PRCM_VOLTSETUP */ s->setuptime[0] = value & 0xffff; break; case 0x094: /* PRCM_CLKSSETUP */ s->setuptime[1] = value & 0xffff; break; case 0x098: /* PRCM_POLCTRL */ s->clkpol[0] = value & 0x701; break; case 0x0b0: /* GENERAL_PURPOSE1 */ case 0x0b4: /* GENERAL_PURPOSE2 */ case 0x0b8: /* GENERAL_PURPOSE3 */ case 0x0bc: /* GENERAL_PURPOSE4 */ case 0x0c0: /* GENERAL_PURPOSE5 */ case 0x0c4: /* GENERAL_PURPOSE6 */ case 0x0c8: /* GENERAL_PURPOSE7 */ case 0x0cc: /* GENERAL_PURPOSE8 */ case 0x0d0: /* GENERAL_PURPOSE9 */ case 0x0d4: /* GENERAL_PURPOSE10 */ case 0x0d8: /* GENERAL_PURPOSE11 */ case 0x0dc: /* GENERAL_PURPOSE12 */ case 0x0e0: /* GENERAL_PURPOSE13 */ case 0x0e4: /* GENERAL_PURPOSE14 */ case 0x0e8: /* GENERAL_PURPOSE15 */ case 0x0ec: /* GENERAL_PURPOSE16 */ case 0x0f0: /* GENERAL_PURPOSE17 */ case 0x0f4: /* GENERAL_PURPOSE18 */ case 0x0f8: /* GENERAL_PURPOSE19 */ case 0x0fc: /* GENERAL_PURPOSE20 */ s->scratch[(addr - 0xb0) >> 2] = value; break; case 0x140: /* CM_CLKSEL_MPU */ s->clksel[0] = value & 0x1f; /* TODO update clocks */ break; case 0x148: /* CM_CLKSTCTRL_MPU */ s->clkctrl[0] = value & 0x1f; break; case 0x158: /* RM_RSTST_MPU */ s->rst[0] &= ~value; break; case 0x1c8: /* PM_WKDEP_MPU */ s->wkup[0] = value & 0x15; break; case 0x1d4: /* PM_EVGENCTRL_MPU */ s->ev = value & 0x1f; break; case 0x1d8: /* PM_EVEGENONTIM_MPU */ s->evtime[0] = value; break; case 0x1dc: /* PM_EVEGENOFFTIM_MPU */ s->evtime[1] = value; break; case 0x1e0: /* PM_PWSTCTRL_MPU */ s->power[0] = value & 0xc0f; break; case 0x200: /* CM_FCLKEN1_CORE */ s->clken[0] = value & 0xbfffffff; /* TODO update clocks */ /* The EN_EAC bit only gets/puts func_96m_clk. 
*/ break; case 0x204: /* CM_FCLKEN2_CORE */ s->clken[1] = value & 0x00000007; /* TODO update clocks */ break; case 0x210: /* CM_ICLKEN1_CORE */ s->clken[2] = value & 0xfffffff9; /* TODO update clocks */ /* The EN_EAC bit only gets/puts core_l4_iclk. */ break; case 0x214: /* CM_ICLKEN2_CORE */ s->clken[3] = value & 0x00000007; /* TODO update clocks */ break; case 0x21c: /* CM_ICLKEN4_CORE */ s->clken[4] = value & 0x0000001f; /* TODO update clocks */ break; case 0x230: /* CM_AUTOIDLE1_CORE */ s->clkidle[0] = value & 0xfffffff9; /* TODO update clocks */ break; case 0x234: /* CM_AUTOIDLE2_CORE */ s->clkidle[1] = value & 0x00000007; /* TODO update clocks */ break; case 0x238: /* CM_AUTOIDLE3_CORE */ s->clkidle[2] = value & 0x00000007; /* TODO update clocks */ break; case 0x23c: /* CM_AUTOIDLE4_CORE */ s->clkidle[3] = value & 0x0000001f; /* TODO update clocks */ break; case 0x240: /* CM_CLKSEL1_CORE */ s->clksel[1] = value & 0x0fffbf7f; /* TODO update clocks */ break; case 0x244: /* CM_CLKSEL2_CORE */ s->clksel[2] = value & 0x00fffffc; /* TODO update clocks */ break; case 0x248: /* CM_CLKSTCTRL_CORE */ s->clkctrl[1] = value & 0x7; break; case 0x2a0: /* PM_WKEN1_CORE */ s->wken[0] = value & 0x04667ff8; break; case 0x2a4: /* PM_WKEN2_CORE */ s->wken[1] = value & 0x00000005; break; case 0x2b0: /* PM_WKST1_CORE */ s->wkst[0] &= ~value; break; case 0x2b4: /* PM_WKST2_CORE */ s->wkst[1] &= ~value; break; case 0x2e0: /* PM_PWSTCTRL_CORE */ s->power[1] = (value & 0x00fc3f) | (1 << 2); break; case 0x300: /* CM_FCLKEN_GFX */ s->clken[5] = value & 6; /* TODO update clocks */ break; case 0x310: /* CM_ICLKEN_GFX */ s->clken[6] = value & 1; /* TODO update clocks */ break; case 0x340: /* CM_CLKSEL_GFX */ s->clksel[3] = value & 7; /* TODO update clocks */ break; case 0x348: /* CM_CLKSTCTRL_GFX */ s->clkctrl[2] = value & 1; break; case 0x350: /* RM_RSTCTRL_GFX */ s->rstctrl[0] = value & 1; /* TODO: reset */ break; case 0x358: /* RM_RSTST_GFX */ s->rst[1] &= ~value; break; case 0x3c8: /* PM_WKDEP_GFX */ s->wkup[1] = value & 0x13; break; case 0x3e0: /* PM_PWSTCTRL_GFX */ s->power[2] = (value & 0x00c0f) | (3 << 2); break; case 0x400: /* CM_FCLKEN_WKUP */ s->clken[7] = value & 0xd; /* TODO update clocks */ break; case 0x410: /* CM_ICLKEN_WKUP */ s->clken[8] = value & 0x3f; /* TODO update clocks */ break; case 0x430: /* CM_AUTOIDLE_WKUP */ s->clkidle[4] = value & 0x0000003f; /* TODO update clocks */ break; case 0x440: /* CM_CLKSEL_WKUP */ s->clksel[4] = value & 3; /* TODO update clocks */ break; case 0x450: /* RM_RSTCTRL_WKUP */ /* TODO: reset */ if (value & 2) qemu_system_reset_request(); break; case 0x454: /* RM_RSTTIME_WKUP */ s->rsttime_wkup = value & 0x1fff; break; case 0x458: /* RM_RSTST_WKUP */ s->rst[2] &= ~value; break; case 0x4a0: /* PM_WKEN_WKUP */ s->wken[2] = value & 0x00000005; break; case 0x4b0: /* PM_WKST_WKUP */ s->wkst[2] &= ~value; break; case 0x500: /* CM_CLKEN_PLL */ if (value & 0xffffff30) fprintf(stderr, \"%s: write 0s in CM_CLKEN_PLL for \" \"future compatibility\\n\", __FUNCTION__); if ((s->clken[9] ^ value) & 0xcc) { s->clken[9] &= ~0xcc; s->clken[9] |= value & 0xcc; omap_prcm_apll_update(s); } if ((s->clken[9] ^ value) & 3) { s->clken[9] &= ~3; s->clken[9] |= value & 3; omap_prcm_dpll_update(s); } break; case 0x530: /* CM_AUTOIDLE_PLL */ s->clkidle[5] = value & 0x000000cf; /* TODO update clocks */ break; case 0x540: /* CM_CLKSEL1_PLL */ if (value & 0xfc4000d7) fprintf(stderr, \"%s: write 0s in CM_CLKSEL1_PLL for \" \"future compatibility\\n\", __FUNCTION__); if ((s->clksel[5] ^ value) & 
0x003fff00) { s->clksel[5] = value & 0x03bfff28; omap_prcm_dpll_update(s); } /* TODO update the other clocks */ s->clksel[5] = value & 0x03bfff28; break; case 0x544: /* CM_CLKSEL2_PLL */ if (value & ~3) fprintf(stderr, \"%s: write 0s in CM_CLKSEL2_PLL[31:2] for \" \"future compatibility\\n\", __FUNCTION__); if (s->clksel[6] != (value & 3)) { s->clksel[6] = value & 3; omap_prcm_dpll_update(s); } break; case 0x800: /* CM_FCLKEN_DSP */ s->clken[10] = value & 0x501; /* TODO update clocks */ break; case 0x810: /* CM_ICLKEN_DSP */ s->clken[11] = value & 0x2; /* TODO update clocks */ break; case 0x830: /* CM_AUTOIDLE_DSP */ s->clkidle[6] = value & 0x2; /* TODO update clocks */ break; case 0x840: /* CM_CLKSEL_DSP */ s->clksel[7] = value & 0x3fff; /* TODO update clocks */ break; case 0x848: /* CM_CLKSTCTRL_DSP */ s->clkctrl[3] = value & 0x101; break; case 0x850: /* RM_RSTCTRL_DSP */ /* TODO: reset */ break; case 0x858: /* RM_RSTST_DSP */ s->rst[3] &= ~value; break; case 0x8c8: /* PM_WKDEP_DSP */ s->wkup[2] = value & 0x13; break; case 0x8e0: /* PM_PWSTCTRL_DSP */ s->power[3] = (value & 0x03017) | (3 << 2); break; case 0x8f0: /* PRCM_IRQSTATUS_DSP */ s->irqst[1] &= ~value; omap_prcm_int_update(s, 1); break; case 0x8f4: /* PRCM_IRQENABLE_DSP */ s->irqen[1] = value & 0x7; omap_prcm_int_update(s, 1); break; case 0x8f8: /* PRCM_IRQSTATUS_IVA */ s->irqst[2] &= ~value; omap_prcm_int_update(s, 2); break; case 0x8fc: /* PRCM_IRQENABLE_IVA */ s->irqen[2] = value & 0x7; omap_prcm_int_update(s, 2); break; default: OMAP_BAD_REG(addr); return; } }"} {"target": 1, "idx": 15686, "func": "static void init_proc_755 (CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_7xx(env); gen_spr_G2_755(env); /* Time base */ gen_tbl(env); /* L2 cache control */ /* XXX : not implemented */ spr_register(env, SPR_L2CR, \"L2CR\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, NULL, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_L2PMCR, \"L2PMCR\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Thermal management */ gen_spr_thrm(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, \"HID0\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, \"HID1\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID2, \"HID2\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ gen_low_BATs(env); gen_high_BATs(env); gen_6xx_7xx_soft_tlb(env, 64, 2); init_excp_7x5(env); env->dcache_line_size = 32; env->icache_line_size = 32; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env); }"} {"target": 0, "idx": 15693, "func": "static int gxf_write_trailer(AVFormatContext *s) { GXFContext *gxf = s->priv_data; AVIOContext *pb = s->pb; int64_t end; int i; ff_audio_interleave_close(s); gxf_write_eos_packet(pb); end = avio_tell(pb); avio_seek(pb, 0, SEEK_SET); /* overwrite map, flt and umf packets with new values */ gxf_write_map_packet(s, 1); gxf_write_flt_packet(s); gxf_write_umf_packet(s); avio_flush(pb); /* update duration in all map packets */ for (i = 1; i < gxf->map_offsets_nb; i++) { avio_seek(pb, gxf->map_offsets[i], SEEK_SET); gxf_write_map_packet(s, 1); avio_flush(pb); } avio_seek(pb, end, SEEK_SET); av_freep(&gxf->flt_entries); av_freep(&gxf->map_offsets); return 0; }"} {"target": 0, "idx": 15719, "func": "static int 
bdrv_prwv_co(BdrvChild *child, int64_t offset, QEMUIOVector *qiov, bool is_write, BdrvRequestFlags flags) { Coroutine *co; RwCo rwco = { .child = child, .offset = offset, .qiov = qiov, .is_write = is_write, .ret = NOT_DONE, .flags = flags, }; if (qemu_in_coroutine()) { /* Fast-path if already in coroutine context */ bdrv_rw_co_entry(&rwco); } else { AioContext *aio_context = bdrv_get_aio_context(child->bs); co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco); qemu_coroutine_enter(co); while (rwco.ret == NOT_DONE) { aio_poll(aio_context, true); } } return rwco.ret; }"} {"target": 0, "idx": 15731, "func": "static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr, unsigned size) { VirtIOPCIProxy *proxy = opaque; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); uint32_t val = 0; int i; switch (addr) { case VIRTIO_PCI_COMMON_DFSELECT: val = proxy->dfselect; break; case VIRTIO_PCI_COMMON_DF: if (proxy->dfselect <= 1) { val = vdev->host_features >> (32 * proxy->dfselect); } break; case VIRTIO_PCI_COMMON_GFSELECT: val = proxy->gfselect; break; case VIRTIO_PCI_COMMON_GF: if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) { val = proxy->guest_features[proxy->gfselect]; } break; case VIRTIO_PCI_COMMON_MSIX: val = vdev->config_vector; break; case VIRTIO_PCI_COMMON_NUMQ: for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) { if (virtio_queue_get_num(vdev, i)) { val = i + 1; } } break; case VIRTIO_PCI_COMMON_STATUS: val = vdev->status; break; case VIRTIO_PCI_COMMON_CFGGENERATION: val = vdev->generation; break; case VIRTIO_PCI_COMMON_Q_SELECT: val = vdev->queue_sel; break; case VIRTIO_PCI_COMMON_Q_SIZE: val = virtio_queue_get_num(vdev, vdev->queue_sel); break; case VIRTIO_PCI_COMMON_Q_MSIX: val = virtio_queue_vector(vdev, vdev->queue_sel); break; case VIRTIO_PCI_COMMON_Q_ENABLE: val = proxy->vqs[vdev->queue_sel].enabled; break; case VIRTIO_PCI_COMMON_Q_NOFF: /* Simply map queues in order */ val = vdev->queue_sel; break; case VIRTIO_PCI_COMMON_Q_DESCLO: val = proxy->vqs[vdev->queue_sel].desc[0]; break; case VIRTIO_PCI_COMMON_Q_DESCHI: val = proxy->vqs[vdev->queue_sel].desc[1]; break; case VIRTIO_PCI_COMMON_Q_AVAILLO: val = proxy->vqs[vdev->queue_sel].avail[0]; break; case VIRTIO_PCI_COMMON_Q_AVAILHI: val = proxy->vqs[vdev->queue_sel].avail[1]; break; case VIRTIO_PCI_COMMON_Q_USEDLO: val = proxy->vqs[vdev->queue_sel].used[0]; break; case VIRTIO_PCI_COMMON_Q_USEDHI: val = proxy->vqs[vdev->queue_sel].used[1]; break; default: val = 0; } return val; }"} {"target": 0, "idx": 15748, "func": "void in_asm_used_var_warning_killer() { volatile int i= yCoeff+vrCoeff+ubCoeff+vgCoeff+ugCoeff+bF8+bFC+w400+w80+w10+ bm00001111+bm00000111+bm11111000+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+asm_yalpha1+ asm_uvalpha1+ M24A+M24B+M24C+w02 + b5Dither+g5Dither+r5Dither+g6Dither+dither4[0]+dither8[0]; if(i) i=0; }"} {"target": 0, "idx": 15750, "func": "static double setup_compress_thresh(double threshold) { if ((threshold > DBL_EPSILON) && (threshold < (1.0 - DBL_EPSILON))) { double current_threshold = threshold; double step_size = 1.0; while (step_size > DBL_EPSILON) { while ((current_threshold + step_size > current_threshold) && (bound(current_threshold + step_size, 1.0) <= threshold)) { current_threshold += step_size; } step_size /= 2.0; } return current_threshold; } else { return threshold; } }"} {"target": 1, "idx": 15754, "func": "static av_always_inline int get_decoded_frame(AVFormatContext *fmt_ctx, AVFrame *frame, int *got_frame, AVPacket *pkt) { AVCodecContext *dec_ctx = 
fmt_ctx->streams[pkt->stream_index]->codec; int ret = 0; *got_frame = 0; if (dec_ctx->codec) { switch (dec_ctx->codec_type) { case AVMEDIA_TYPE_VIDEO: ret = avcodec_decode_video2(dec_ctx, frame, got_frame, pkt); break; case AVMEDIA_TYPE_AUDIO: ret = avcodec_decode_audio4(dec_ctx, frame, got_frame, pkt); break; return ret;"} {"target": 1, "idx": 15768, "func": "static void xilinx_axidma_realize(DeviceState *dev, Error **errp) { XilinxAXIDMA *s = XILINX_AXI_DMA(dev); XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(&s->rx_data_dev); XilinxAXIDMAStreamSlave *cs = XILINX_AXI_DMA_CONTROL_STREAM( &s->rx_control_dev); Error *local_err = NULL; object_property_add_link(OBJECT(ds), \"dma\", TYPE_XILINX_AXI_DMA, (Object **)&ds->dma, object_property_allow_set_link, OBJ_PROP_LINK_UNREF_ON_RELEASE, &local_err); object_property_add_link(OBJECT(cs), \"dma\", TYPE_XILINX_AXI_DMA, (Object **)&cs->dma, object_property_allow_set_link, OBJ_PROP_LINK_UNREF_ON_RELEASE, &local_err); if (local_err) { goto xilinx_axidma_realize_fail; } object_property_set_link(OBJECT(ds), OBJECT(s), \"dma\", &local_err); object_property_set_link(OBJECT(cs), OBJECT(s), \"dma\", &local_err); if (local_err) { goto xilinx_axidma_realize_fail; } int i; for (i = 0; i < 2; i++) { struct Stream *st = &s->streams[i]; st->nr = i; st->bh = qemu_bh_new(timer_hit, st); st->ptimer = ptimer_init(st->bh, PTIMER_POLICY_DEFAULT); ptimer_set_freq(st->ptimer, s->freqhz); } return; xilinx_axidma_realize_fail: if (!*errp) { *errp = local_err; } }"} {"target": 0, "idx": 15778, "func": "BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, const char *child_name, const BdrvChildRole *child_role, Error **errp) { BdrvChild *child = bdrv_root_attach_child(child_bs, child_name, child_role, parent_bs); QLIST_INSERT_HEAD(&parent_bs->children, child, next); return child; }"} {"target": 0, "idx": 15815, "func": "static void test_visitor_in_native_list_number(TestInputVisitorData *data, const void *unused) { UserDefNativeListUnion *cvalue = NULL; numberList *elem = NULL; Error *err = NULL; Visitor *v; GString *gstr_list = g_string_new(\"\"); GString *gstr_union = g_string_new(\"\"); int i; for (i = 0; i < 32; i++) { g_string_append_printf(gstr_list, \"%f\", (double)i / 3); if (i != 31) { g_string_append(gstr_list, \", \"); } } g_string_append_printf(gstr_union, \"{ 'type': 'number', 'data': [ %s ] }\", gstr_list->str); v = visitor_input_test_init_raw(data, gstr_union->str); visit_type_UserDefNativeListUnion(v, &cvalue, NULL, &err); g_assert(err == NULL); g_assert(cvalue != NULL); g_assert_cmpint(cvalue->type, ==, USER_DEF_NATIVE_LIST_UNION_KIND_NUMBER); for (i = 0, elem = cvalue->u.number; elem; elem = elem->next, i++) { GString *double_expected = g_string_new(\"\"); GString *double_actual = g_string_new(\"\"); g_string_printf(double_expected, \"%.6f\", (double)i / 3); g_string_printf(double_actual, \"%.6f\", elem->value); g_assert_cmpstr(double_expected->str, ==, double_actual->str); g_string_free(double_expected, true); g_string_free(double_actual, true); } g_string_free(gstr_union, true); g_string_free(gstr_list, true); qapi_free_UserDefNativeListUnion(cvalue); }"} {"target": 0, "idx": 15820, "func": "static int mpegps_probe(AVProbeData *p) { uint32_t code= -1; int sys=0, pspack=0, priv1=0, vid=0, audio=0, invalid=0; int i; int score=0; for(i=0; ibuf_size; i++){ code = (code<<8) + p->buf[i]; if ((code & 0xffffff00) == 0x100) { int len= p->buf[i+1] << 8 | p->buf[i+2]; int pes= check_pes(p->buf+i, p->buf+p->buf_size); 
if(code == SYSTEM_HEADER_START_CODE) sys++; else if(code == PACK_START_CODE) pspack++; else if((code & 0xf0) == VIDEO_ID && pes) vid++; // skip pes payload to avoid start code emulation for private // and audio streams else if((code & 0xe0) == AUDIO_ID && pes) {audio++; i+=len;} else if(code == PRIVATE_STREAM_1 && pes) {priv1++; i+=len;} else if((code & 0xf0) == VIDEO_ID && !pes) invalid++; else if((code & 0xe0) == AUDIO_ID && !pes) invalid++; else if(code == PRIVATE_STREAM_1 && !pes) invalid++; } } if(vid+audio > invalid) /* invalid VDR files nd short PES streams */ score= AVPROBE_SCORE_MAX/4; //av_log(NULL, AV_LOG_ERROR, \"%d %d %d %d %d %d len:%d\\n\", sys, priv1, pspack,vid, audio, invalid, p->buf_size); if(sys>invalid && sys*9 <= pspack*10) return pspack > 2 ? AVPROBE_SCORE_MAX/2+2 : AVPROBE_SCORE_MAX/4; // +1 for .mpg if(pspack > invalid && (priv1+vid+audio)*10 >= pspack*9) return pspack > 2 ? AVPROBE_SCORE_MAX/2+2 : AVPROBE_SCORE_MAX/4; // +1 for .mpg if((!!vid ^ !!audio) && (audio > 4 || vid > 1) && !sys && !pspack && p->buf_size>2048 && vid + audio > invalid) /* PES stream */ return (audio > 12 || vid > 3) ? AVPROBE_SCORE_MAX/2+2 : AVPROBE_SCORE_MAX/4; //02-Penguin.flac has sys:0 priv1:0 pspack:0 vid:0 audio:1 //mp3_misidentified_2.mp3 has sys:0 priv1:0 pspack:0 vid:0 audio:6 return score; }"} {"target": 0, "idx": 15835, "func": "static void migrate_fd_cleanup(MigrationState *s) { int ret = 0; if (s->file) { DPRINTF(\"closing file\\n\"); ret = qemu_fclose(s->file); s->file = NULL; } assert(s->fd == -1); if (ret < 0 && s->state == MIG_STATE_ACTIVE) { s->state = MIG_STATE_ERROR; } if (s->state != MIG_STATE_ACTIVE) { qemu_savevm_state_cancel(); } }"} {"target": 1, "idx": 15844, "func": "unsigned long init_guest_space(unsigned long host_start, unsigned long host_size, unsigned long guest_start, bool fixed) { unsigned long current_start, real_start; int flags; assert(host_start || host_size); /* If just a starting address is given, then just verify that * address. */ if (host_start && !host_size) { if (guest_validate_base(host_start)) { return host_start; } else { return (unsigned long)-1; } } /* Setup the initial flags and start address. */ current_start = host_start & qemu_host_page_mask; flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE; if (fixed) { flags |= MAP_FIXED; } /* Otherwise, a non-zero size region of memory needs to be mapped * and validated. */ while (1) { /* Do not use mmap_find_vma here because that is limited to the * guest address space. We are going to make the * guest address space fit whatever we're given. */ real_start = (unsigned long) mmap((void *)current_start, host_size, PROT_NONE, flags, -1, 0); if (real_start == (unsigned long)-1) { return (unsigned long)-1; } if ((real_start == current_start) && guest_validate_base(real_start - guest_start)) { break; } /* That address didn't work. Unmap and try a different one. * The address the host picked because is typically right at * the top of the host address space and leaves the guest with * no usable address space. Resort to a linear search. We * already compensated for mmap_min_addr, so this should not * happen often. Probably means we got unlucky and host * address space randomization put a shared library somewhere * inconvenient. */ munmap((void *)real_start, host_size); current_start += qemu_host_page_size; if (host_start == current_start) { /* Theoretically possible if host doesn't have any suitably * aligned areas. Normally the first mmap will fail. 
*/ return (unsigned long)-1; } } return real_start; }"} {"target": 1, "idx": 15852, "func": "static void qmp_input_start_struct(Visitor *v, const char *name, void **obj, size_t size, Error **errp) { QmpInputVisitor *qiv = to_qiv(v); QObject *qobj = qmp_input_get_object(qiv, name, true); Error *err = NULL; if (!qobj || qobject_type(qobj) != QTYPE_QDICT) { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : \"null\", \"QDict\"); return; qmp_input_push(qiv, qobj, &err); if (err) { error_propagate(errp, err); return; *obj = g_malloc0(size);"} {"target": 1, "idx": 15855, "func": "static void ehci_advance_state(EHCIState *ehci, int async) { EHCIQueue *q = NULL; int again; int iter = 0; do { if (ehci_get_state(ehci, async) == EST_FETCHQH) { iter++; /* if we are roaming a lot of QH without executing a qTD * something is wrong with the linked list. TO-DO: why is * this hack needed? */ assert(iter < MAX_ITERATIONS); #if 0 if (iter > MAX_ITERATIONS) { DPRINTF(\"\\n*** advance_state: bailing on MAX ITERATIONS***\\n\"); ehci_set_state(ehci, async, EST_ACTIVE); break; } #endif } switch(ehci_get_state(ehci, async)) { case EST_WAITLISTHEAD: again = ehci_state_waitlisthead(ehci, async); break; case EST_FETCHENTRY: again = ehci_state_fetchentry(ehci, async); break; case EST_FETCHQH: q = ehci_state_fetchqh(ehci, async); again = q ? 1 : 0; break; case EST_FETCHITD: again = ehci_state_fetchitd(ehci, async); break; case EST_FETCHSITD: again = ehci_state_fetchsitd(ehci, async); break; case EST_ADVANCEQUEUE: again = ehci_state_advqueue(q, async); break; case EST_FETCHQTD: again = ehci_state_fetchqtd(q, async); break; case EST_HORIZONTALQH: again = ehci_state_horizqh(q, async); break; case EST_EXECUTE: iter = 0; again = ehci_state_execute(q, async); break; case EST_EXECUTING: assert(q != NULL); again = ehci_state_executing(q, async); break; case EST_WRITEBACK: assert(q != NULL); again = ehci_state_writeback(q, async); break; default: fprintf(stderr, \"Bad state!\\n\"); again = -1; assert(0); break; } if (again < 0) { fprintf(stderr, \"processing error - resetting ehci HC\\n\"); ehci_reset(ehci); again = 0; assert(0); } } while (again); ehci_commit_interrupt(ehci); }"} {"target": 1, "idx": 15857, "func": "void acpi_build(PcGuestInfo *guest_info, AcpiBuildTables *tables) { GArray *table_offsets; unsigned facs, ssdt, dsdt, rsdt; AcpiCpuInfo cpu; AcpiPmInfo pm; AcpiMiscInfo misc; AcpiMcfgInfo mcfg; PcPciInfo pci; uint8_t *u; size_t aml_len = 0; acpi_get_cpu_info(&cpu); acpi_get_pm_info(&pm); acpi_get_dsdt(&misc); acpi_get_misc_info(&misc); acpi_get_pci_info(&pci); table_offsets = g_array_new(false, true /* clear */, sizeof(uint32_t)); ACPI_BUILD_DPRINTF(3, \"init ACPI tables\\n\"); bios_linker_loader_alloc(tables->linker, ACPI_BUILD_TABLE_FILE, 64 /* Ensure FACS is aligned */, false /* high memory */); /* * FACS is pointed to by FADT. * We place it first since it's the only table that has alignment * requirements. */ facs = tables->table_data->len; build_facs(tables->table_data, tables->linker, guest_info); /* DSDT is pointed to by FADT */ dsdt = tables->table_data->len; build_dsdt(tables->table_data, tables->linker, &misc); /* Count the size of the DSDT and SSDT, we will need it for legacy * sizing of ACPI tables. 
*/ aml_len += tables->table_data->len - dsdt; /* ACPI tables pointed to by RSDT */ acpi_add_table(table_offsets, tables->table_data); build_fadt(tables->table_data, tables->linker, &pm, facs, dsdt); ssdt = tables->table_data->len; acpi_add_table(table_offsets, tables->table_data); build_ssdt(tables->table_data, tables->linker, &cpu, &pm, &misc, &pci, guest_info); aml_len += tables->table_data->len - ssdt; acpi_add_table(table_offsets, tables->table_data); build_madt(tables->table_data, tables->linker, &cpu, guest_info); if (misc.has_hpet) { acpi_add_table(table_offsets, tables->table_data); build_hpet(tables->table_data, tables->linker); } if (guest_info->numa_nodes) { acpi_add_table(table_offsets, tables->table_data); build_srat(tables->table_data, tables->linker, &cpu, guest_info); } if (acpi_get_mcfg(&mcfg)) { acpi_add_table(table_offsets, tables->table_data); build_mcfg_q35(tables->table_data, tables->linker, &mcfg); } /* Add tables supplied by user (if any) */ for (u = acpi_table_first(); u; u = acpi_table_next(u)) { unsigned len = acpi_table_len(u); acpi_add_table(table_offsets, tables->table_data); g_array_append_vals(tables->table_data, u, len); } /* RSDT is pointed to by RSDP */ rsdt = tables->table_data->len; build_rsdt(tables->table_data, tables->linker, table_offsets); /* RSDP is in FSEG memory, so allocate it separately */ build_rsdp(tables->rsdp, tables->linker, rsdt); /* We'll expose it all to Guest so we want to reduce * chance of size changes. * RSDP is small so it's easy to keep it immutable, no need to * bother with alignment. * * We used to align the tables to 4k, but of course this would * too simple to be enough. 4k turned out to be too small an * alignment very soon, and in fact it is almost impossible to * keep the table size stable for all (max_cpus, max_memory_slots) * combinations. So the table size is always 64k for pc-i440fx-2.1 * and we give an error if the table grows beyond that limit. * * We still have the problem of migrating from \"-M pc-i440fx-2.0\". For * that, we exploit the fact that QEMU 2.1 generates _smaller_ tables * than 2.0 and we can always pad the smaller tables with zeros. We can * then use the exact size of the 2.0 tables. * * All this is for PIIX4, since QEMU 2.0 didn't support Q35 migration. */ if (guest_info->legacy_acpi_table_size) { /* Subtracting aml_len gives the size of fixed tables. Then add the * size of the PIIX4 DSDT/SSDT in QEMU 2.0. */ int legacy_aml_len = guest_info->legacy_acpi_table_size + ACPI_BUILD_LEGACY_CPU_AML_SIZE * max_cpus; int legacy_table_size = ROUND_UP(tables->table_data->len - aml_len + legacy_aml_len, ACPI_BUILD_ALIGN_SIZE); if (tables->table_data->len > legacy_table_size) { /* Should happen only with PCI bridges and -M pc-i440fx-2.0. */ error_report(\"Warning: migration to QEMU 2.0 may not work.\"); } g_array_set_size(tables->table_data, legacy_table_size); } else { if (tables->table_data->len > ACPI_BUILD_TABLE_SIZE) { /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */ error_report(\"ACPI tables are larger than 64k. Please remove\"); error_report(\"CPUs, NUMA nodes, memory slots or PCI bridges.\"); exit(1); } g_array_set_size(tables->table_data, ACPI_BUILD_TABLE_SIZE); } acpi_align_size(tables->linker, ACPI_BUILD_ALIGN_SIZE); /* Cleanup memory that's no longer used. 
*/ g_array_free(table_offsets, true); }"} {"target": 0, "idx": 15869, "func": "int l4_register_io_memory(CPUReadMemoryFunc * const *mem_read, CPUWriteMemoryFunc * const *mem_write, void *opaque) { omap_l4_io_entry[omap_l4_io_entries].mem_read = mem_read; omap_l4_io_entry[omap_l4_io_entries].mem_write = mem_write; omap_l4_io_entry[omap_l4_io_entries].opaque = opaque; return omap_l4_io_entries ++; }"} {"target": 0, "idx": 15877, "func": "static int vhdx_create_new_headers(BlockDriverState *bs, uint64_t image_size, uint32_t log_size) { int ret = 0; VHDXHeader *hdr = NULL; hdr = g_new0(VHDXHeader, 1); hdr->signature = VHDX_HEADER_SIGNATURE; hdr->sequence_number = g_random_int(); hdr->log_version = 0; hdr->version = 1; hdr->log_length = log_size; hdr->log_offset = VHDX_HEADER_SECTION_END; vhdx_guid_generate(&hdr->file_write_guid); vhdx_guid_generate(&hdr->data_write_guid); ret = vhdx_write_header(bs, hdr, VHDX_HEADER1_OFFSET, false); if (ret < 0) { goto exit; } hdr->sequence_number++; ret = vhdx_write_header(bs, hdr, VHDX_HEADER2_OFFSET, false); if (ret < 0) { goto exit; } exit: g_free(hdr); return ret; }"} {"target": 0, "idx": 15902, "func": "static int usb_net_handle_data(USBDevice *dev, USBPacket *p) { USBNetState *s = (USBNetState *) dev; int ret = 0; switch(p->pid) { case USB_TOKEN_IN: switch (p->devep) { case 1: ret = usb_net_handle_statusin(s, p); break; case 2: ret = usb_net_handle_datain(s, p); break; default: goto fail; } break; case USB_TOKEN_OUT: switch (p->devep) { case 2: ret = usb_net_handle_dataout(s, p); break; default: goto fail; } break; default: fail: ret = USB_RET_STALL; break; } if (ret == USB_RET_STALL) fprintf(stderr, \"usbnet: failed data transaction: \" \"pid 0x%x ep 0x%x len 0x%zx\\n\", p->pid, p->devep, p->iov.size); return ret; }"} {"target": 0, "idx": 15918, "func": "static void migrate_fd_monitor_suspend(MigrationState *s, Monitor *mon) { if (monitor_suspend(mon) == 0) { DPRINTF(\"suspending monitor\\n\"); } else { monitor_printf(mon, \"terminal does not allow synchronous \" \"migration, continuing detached\\n\"); } }"} {"target": 0, "idx": 15922, "func": "static inline void vmsvga_update_rect(struct vmsvga_state_s *s, int x, int y, int w, int h) { int line; int bypl; int width; int start; uint8_t *src; uint8_t *dst; if (x + w > s->width) { fprintf(stderr, \"%s: update width too large x: %d, w: %d\\n\", __FUNCTION__, x, w); x = MIN(x, s->width); w = s->width - x; } if (y + h > s->height) { fprintf(stderr, \"%s: update height too large y: %d, h: %d\\n\", __FUNCTION__, y, h); y = MIN(y, s->height); h = s->height - y; } line = h; bypl = s->bypp * s->width; width = s->bypp * w; start = s->bypp * x + bypl * y; src = s->vga.vram_ptr + start; dst = ds_get_data(s->vga.ds) + start; for (; line > 0; line --, src += bypl, dst += bypl) memcpy(dst, src, width); dpy_gfx_update(s->vga.ds, x, y, w, h); }"} {"target": 0, "idx": 15928, "func": "static void qemu_tcg_init_vcpu(void *_env) { CPUState *env = _env; /* share a single thread for all cpus with TCG */ if (!tcg_cpu_thread) { env->thread = qemu_mallocz(sizeof(QemuThread)); env->halt_cond = qemu_mallocz(sizeof(QemuCond)); qemu_cond_init(env->halt_cond); qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env); while (env->created == 0) qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100); tcg_cpu_thread = env->thread; tcg_halt_cond = env->halt_cond; } else { env->thread = tcg_cpu_thread; env->halt_cond = tcg_halt_cond; } }"} {"target": 0, "idx": 15931, "func": "static int64_t http_seek(URLContext *h, int64_t off, 
int whence) { HTTPContext *s = h->priv_data; URLContext *old_hd = s->hd; int64_t old_off = s->off; uint8_t old_buf[BUFFER_SIZE]; int old_buf_size; AVDictionary *options = NULL; if (whence == AVSEEK_SIZE) return s->filesize; else if ((whence == SEEK_CUR && off == 0) || (whence == SEEK_SET && off == s->off)) return s->off; else if ((s->filesize == -1 && whence == SEEK_END) || h->is_streamed) return -1; /* we save the old context in case the seek fails */ old_buf_size = s->buf_end - s->buf_ptr; memcpy(old_buf, s->buf_ptr, old_buf_size); s->hd = NULL; if (whence == SEEK_CUR) off += s->off; else if (whence == SEEK_END) off += s->filesize; s->off = off; /* if it fails, continue on old connection */ av_dict_copy(&options, s->chained_options, 0); if (http_open_cnx(h, &options) < 0) { av_dict_free(&options); memcpy(s->buffer, old_buf, old_buf_size); s->buf_ptr = s->buffer; s->buf_end = s->buffer + old_buf_size; s->hd = old_hd; s->off = old_off; return -1; } av_dict_free(&options); ffurl_close(old_hd); return off; }"} {"target": 0, "idx": 15952, "func": "void qdict_del(QDict *qdict, const char *key) { QDictEntry *entry; entry = qdict_find(qdict, key, tdb_hash(key) % QDICT_HASH_SIZE); if (entry) { LIST_REMOVE(entry, next); qentry_destroy(entry); qdict->size--; } }"} {"target": 0, "idx": 15954, "func": "static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length) { migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(migration_bitmap, start, length); }"} {"target": 0, "idx": 15966, "func": "static void xhci_port_write(void *ptr, hwaddr reg, uint64_t val, unsigned size) { XHCIPort *port = ptr; uint32_t portsc, notify; trace_usb_xhci_port_write(port->portnr, reg, val); switch (reg) { case 0x00: /* PORTSC */ /* write-1-to-start bits */ if (val & PORTSC_PR) { xhci_port_reset(port); break; } portsc = port->portsc; notify = 0; /* write-1-to-clear bits*/ portsc &= ~(val & (PORTSC_CSC|PORTSC_PEC|PORTSC_WRC|PORTSC_OCC| PORTSC_PRC|PORTSC_PLC|PORTSC_CEC)); if (val & PORTSC_LWS) { /* overwrite PLS only when LWS=1 */ uint32_t old_pls = get_field(port->portsc, PORTSC_PLS); uint32_t new_pls = get_field(val, PORTSC_PLS); switch (new_pls) { case PLS_U0: if (old_pls != PLS_U0) { set_field(&portsc, new_pls, PORTSC_PLS); trace_usb_xhci_port_link(port->portnr, new_pls); notify = PORTSC_PLC; } break; case PLS_U3: if (old_pls < PLS_U3) { set_field(&portsc, new_pls, PORTSC_PLS); trace_usb_xhci_port_link(port->portnr, new_pls); } break; case PLS_RESUME: /* windows does this for some reason, don't spam stderr */ break; default: fprintf(stderr, \"%s: ignore pls write (old %d, new %d)\\n\", __func__, old_pls, new_pls); break; } } /* read/write bits */ portsc &= ~(PORTSC_PP|PORTSC_WCE|PORTSC_WDE|PORTSC_WOE); portsc |= (val & (PORTSC_PP|PORTSC_WCE|PORTSC_WDE|PORTSC_WOE)); port->portsc = portsc; if (notify) { xhci_port_notify(port, notify); } break; case 0x04: /* PORTPMSC */ case 0x08: /* PORTLI */ default: trace_usb_xhci_unimplemented(\"port write\", reg); } }"} {"target": 0, "idx": 15973, "func": "static int pci_unin_map_irq(PCIDevice *pci_dev, int irq_num) { int retval; int devfn = pci_dev->devfn & 0x00FFFFFF; retval = (((devfn >> 11) & 0x1F) + irq_num) & 3; return retval; }"} {"target": 0, "idx": 15976, "func": "static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, void **refcount_table, int64_t *refcount_table_size, int64_t l2_offset, int flags) { BDRVQcow2State *s = bs->opaque; uint64_t *l2_table, l2_entry; uint64_t next_contiguous_offset = 0; int i, l2_size, nb_csectors, ret; /* 
Read L2 table from disk */ l2_size = s->l2_size * sizeof(uint64_t); l2_table = g_malloc(l2_size); ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size); if (ret < 0) { fprintf(stderr, \"ERROR: I/O error in check_refcounts_l2\\n\"); res->check_errors++; goto fail; } /* Do the actual checks */ for(i = 0; i < s->l2_size; i++) { l2_entry = be64_to_cpu(l2_table[i]); switch (qcow2_get_cluster_type(l2_entry)) { case QCOW2_CLUSTER_COMPRESSED: /* Compressed clusters don't have QCOW_OFLAG_COPIED */ if (l2_entry & QCOW_OFLAG_COPIED) { fprintf(stderr, \"ERROR: cluster %\" PRId64 \": \" \"copied flag must never be set for compressed \" \"clusters\\n\", l2_entry >> s->cluster_bits); l2_entry &= ~QCOW_OFLAG_COPIED; res->corruptions++; } /* Mark cluster as used */ nb_csectors = ((l2_entry >> s->csize_shift) & s->csize_mask) + 1; l2_entry &= s->cluster_offset_mask; ret = inc_refcounts(bs, res, refcount_table, refcount_table_size, l2_entry & ~511, nb_csectors * 512); if (ret < 0) { goto fail; } if (flags & CHECK_FRAG_INFO) { res->bfi.allocated_clusters++; res->bfi.compressed_clusters++; /* Compressed clusters are fragmented by nature. Since they * take up sub-sector space but we only have sector granularity * I/O we need to re-read the same sectors even for adjacent * compressed clusters. */ res->bfi.fragmented_clusters++; } break; case QCOW2_CLUSTER_ZERO: if ((l2_entry & L2E_OFFSET_MASK) == 0) { break; } /* fall through */ case QCOW2_CLUSTER_NORMAL: { uint64_t offset = l2_entry & L2E_OFFSET_MASK; if (flags & CHECK_FRAG_INFO) { res->bfi.allocated_clusters++; if (next_contiguous_offset && offset != next_contiguous_offset) { res->bfi.fragmented_clusters++; } next_contiguous_offset = offset + s->cluster_size; } /* Mark cluster as used */ ret = inc_refcounts(bs, res, refcount_table, refcount_table_size, offset, s->cluster_size); if (ret < 0) { goto fail; } /* Correct offsets are cluster aligned */ if (offset_into_cluster(s, offset)) { fprintf(stderr, \"ERROR offset=%\" PRIx64 \": Cluster is not \" \"properly aligned; L2 entry corrupted.\\n\", offset); res->corruptions++; } break; } case QCOW2_CLUSTER_UNALLOCATED: break; default: abort(); } } g_free(l2_table); return 0; fail: g_free(l2_table); return ret; }"} {"target": 0, "idx": 15986, "func": "static int opus_packet(AVFormatContext *avf, int idx) { struct ogg *ogg = avf->priv_data; struct ogg_stream *os = &ogg->streams[idx]; AVStream *st = avf->streams[idx]; struct oggopus_private *priv = os->private; uint8_t *packet = os->buf + os->pstart; if (!os->psize) return AVERROR_INVALIDDATA; if ((!os->lastpts || os->lastpts == AV_NOPTS_VALUE) && !(os->flags & OGG_FLAG_EOS)) { int seg, d; int duration; uint8_t *last_pkt = os->buf + os->pstart; uint8_t *next_pkt = last_pkt; duration = 0; seg = os->segp; d = opus_duration(last_pkt, os->psize); if (d < 0) { os->pflags |= AV_PKT_FLAG_CORRUPT; return 0; } duration += d; last_pkt = next_pkt = next_pkt + os->psize; for (; seg < os->nsegs; seg++) { if (os->segments[seg] < 255) { int d = opus_duration(last_pkt, os->segments[seg]); if (d < 0) { duration = os->granule; break; } duration += d; last_pkt = next_pkt + os->segments[seg]; } next_pkt += os->segments[seg]; } os->lastpts = os->lastdts = os->granule - duration; } os->pduration = opus_duration(packet, os->psize); if (os->lastpts != AV_NOPTS_VALUE) { if (st->start_time == AV_NOPTS_VALUE) st->start_time = os->lastpts; priv->cur_dts = os->lastdts = os->lastpts -= priv->pre_skip; } priv->cur_dts += os->pduration; if ((os->flags & OGG_FLAG_EOS)) { int64_t skip = priv->cur_dts 
- os->granule + priv->pre_skip; skip = FFMIN(skip, os->pduration); if (skip > 0) { os->pduration = skip < os->pduration ? os->pduration - skip : 1; os->end_trimming = skip; av_log(avf, AV_LOG_DEBUG, \"Last packet was truncated to %d due to end trimming.\\n\", os->pduration); } } return 0; }"} {"target": 0, "idx": 15993, "func": "static int nsv_read_chunk(AVFormatContext *s, int fill_header) { NSVContext *nsv = s->priv_data; AVIOContext *pb = s->pb; AVStream *st[2] = {NULL, NULL}; NSVStream *nst; AVPacket *pkt; int i, err = 0; uint8_t auxcount; /* number of aux metadata, also 4 bits of vsize */ uint32_t vsize; uint16_t asize; uint16_t auxsize; av_dlog(s, \"%s(%d)\\n\", __FUNCTION__, fill_header); if (nsv->ahead[0].data || nsv->ahead[1].data) return 0; //-1; /* hey! eat what you've in your plate first! */ null_chunk_retry: if (avio_feof(pb)) return -1; for (i = 0; i < NSV_MAX_RESYNC_TRIES && nsv->state < NSV_FOUND_NSVS && !err; i++) err = nsv_resync(s); if (err < 0) return err; if (nsv->state == NSV_FOUND_NSVS) err = nsv_parse_NSVs_header(s); if (err < 0) return err; if (nsv->state != NSV_HAS_READ_NSVS && nsv->state != NSV_FOUND_BEEF) return -1; auxcount = avio_r8(pb); vsize = avio_rl16(pb); asize = avio_rl16(pb); vsize = (vsize << 4) | (auxcount >> 4); auxcount &= 0x0f; av_dlog(s, \"NSV CHUNK %d aux, %u bytes video, %d bytes audio\\n\", auxcount, vsize, asize); /* skip aux stuff */ for (i = 0; i < auxcount; i++) { uint32_t av_unused auxtag; auxsize = avio_rl16(pb); auxtag = avio_rl32(pb); av_dlog(s, \"NSV aux data: '%c%c%c%c', %d bytes\\n\", (auxtag & 0x0ff), ((auxtag >> 8) & 0x0ff), ((auxtag >> 16) & 0x0ff), ((auxtag >> 24) & 0x0ff), auxsize); avio_skip(pb, auxsize); vsize -= auxsize + sizeof(uint16_t) + sizeof(uint32_t); /* that's becoming braindead */ } if (avio_feof(pb)) return -1; if (!vsize && !asize) { nsv->state = NSV_UNSYNC; goto null_chunk_retry; } /* map back streams to v,a */ if (s->nb_streams > 0) st[s->streams[0]->id] = s->streams[0]; if (s->nb_streams > 1) st[s->streams[1]->id] = s->streams[1]; if (vsize && st[NSV_ST_VIDEO]) { int ret; nst = st[NSV_ST_VIDEO]->priv_data; pkt = &nsv->ahead[NSV_ST_VIDEO]; if ((ret = av_get_packet(pb, pkt, vsize)) < 0) return ret; pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO; pkt->dts = nst->frame_offset; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ for (i = 0; i < FFMIN(8, vsize); i++) av_dlog(s, \"NSV video: [%d] = %02x\\n\", i, pkt->data[i]); } if(st[NSV_ST_VIDEO]) ((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset++; if (asize && st[NSV_ST_AUDIO]) { nst = st[NSV_ST_AUDIO]->priv_data; pkt = &nsv->ahead[NSV_ST_AUDIO]; /* read raw audio specific header on the first audio chunk... */ /* on ALL audio chunks ?? seems so! */ if (asize && st[NSV_ST_AUDIO]->codec->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) { uint8_t bps; uint8_t channels; uint16_t samplerate; bps = avio_r8(pb); channels = avio_r8(pb); samplerate = avio_rl16(pb); asize-=4; av_dlog(s, \"NSV RAWAUDIO: bps %d, nchan %d, srate %d\\n\", bps, channels, samplerate); if (fill_header) { st[NSV_ST_AUDIO]->need_parsing = AVSTREAM_PARSE_NONE; /* we know everything */ if (bps != 16) { av_dlog(s, \"NSV AUDIO bit/sample != 16 (%d)!!!\\n\", bps); } if(channels) bps /= channels; // ??? else av_log(s, AV_LOG_WARNING, \"Channels is 0\\n\"); if (bps == 8) st[NSV_ST_AUDIO]->codec->codec_id = AV_CODEC_ID_PCM_U8; samplerate /= 4;/* UGH ??? 
XXX */ channels = 1; st[NSV_ST_AUDIO]->codec->channels = channels; st[NSV_ST_AUDIO]->codec->sample_rate = samplerate; av_dlog(s, \"NSV RAWAUDIO: bps %d, nchan %d, srate %d\\n\", bps, channels, samplerate); } } av_get_packet(pb, pkt, asize); pkt->stream_index = st[NSV_ST_AUDIO]->index;//NSV_ST_AUDIO; pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? AV_PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */ if( nsv->state == NSV_HAS_READ_NSVS && st[NSV_ST_VIDEO] ) { /* on a nsvs frame we have new information on a/v sync */ pkt->dts = (((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset-1); pkt->dts *= (int64_t)1000 * nsv->framerate.den; pkt->dts += (int64_t)nsv->avsync * nsv->framerate.num; av_dlog(s, \"NSV AUDIO: sync:%d, dts:%\"PRId64, nsv->avsync, pkt->dts); } nst->frame_offset++; } nsv->state = NSV_UNSYNC; return 0; }"} {"target": 1, "idx": 16003, "func": "static void virtio_pci_common_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { VirtIOPCIProxy *proxy = opaque; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); switch (addr) { case VIRTIO_PCI_COMMON_DFSELECT: proxy->dfselect = val; break; case VIRTIO_PCI_COMMON_GFSELECT: proxy->gfselect = val; break; case VIRTIO_PCI_COMMON_GF: if (proxy->gfselect <= ARRAY_SIZE(proxy->guest_features)) { proxy->guest_features[proxy->gfselect] = val; virtio_set_features(vdev, (((uint64_t)proxy->guest_features[1]) << 32) | proxy->guest_features[0]); } break; case VIRTIO_PCI_COMMON_MSIX: msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); /* Make it possible for guest to discover an error took place. */ if (msix_vector_use(&proxy->pci_dev, val) < 0) { val = VIRTIO_NO_VECTOR; } vdev->config_vector = val; break; case VIRTIO_PCI_COMMON_STATUS: if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { virtio_pci_stop_ioeventfd(proxy); } virtio_set_status(vdev, val & 0xFF); if (val & VIRTIO_CONFIG_S_DRIVER_OK) { virtio_pci_start_ioeventfd(proxy); } if (vdev->status == 0) { virtio_reset(vdev); msix_unuse_all_vectors(&proxy->pci_dev); } break; case VIRTIO_PCI_COMMON_Q_SELECT: if (val < VIRTIO_QUEUE_MAX) { vdev->queue_sel = val; } break; case VIRTIO_PCI_COMMON_Q_SIZE: proxy->vqs[vdev->queue_sel].num = val; break; case VIRTIO_PCI_COMMON_Q_MSIX: msix_vector_unuse(&proxy->pci_dev, virtio_queue_vector(vdev, vdev->queue_sel)); /* Make it possible for guest to discover an error took place. */ if (msix_vector_use(&proxy->pci_dev, val) < 0) { val = VIRTIO_NO_VECTOR; } virtio_queue_set_vector(vdev, vdev->queue_sel, val); break; case VIRTIO_PCI_COMMON_Q_ENABLE: /* TODO: need a way to put num back on reset. 
*/ virtio_queue_set_num(vdev, vdev->queue_sel, proxy->vqs[vdev->queue_sel].num); virtio_queue_set_rings(vdev, vdev->queue_sel, ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 | proxy->vqs[vdev->queue_sel].desc[0], ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 | proxy->vqs[vdev->queue_sel].avail[0], ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 | proxy->vqs[vdev->queue_sel].used[0]); break; case VIRTIO_PCI_COMMON_Q_DESCLO: proxy->vqs[vdev->queue_sel].desc[0] = val; break; case VIRTIO_PCI_COMMON_Q_DESCHI: proxy->vqs[vdev->queue_sel].desc[1] = val; break; case VIRTIO_PCI_COMMON_Q_AVAILLO: proxy->vqs[vdev->queue_sel].avail[0] = val; break; case VIRTIO_PCI_COMMON_Q_AVAILHI: proxy->vqs[vdev->queue_sel].avail[1] = val; break; case VIRTIO_PCI_COMMON_Q_USEDLO: proxy->vqs[vdev->queue_sel].used[0] = val; break; case VIRTIO_PCI_COMMON_Q_USEDHI: proxy->vqs[vdev->queue_sel].used[1] = val; break; default: break; } }"} {"target": 0, "idx": 16032, "func": "void tcp_start_outgoing_migration(MigrationState *s, const char *host_port, Error **errp) { Error *err = NULL; SocketAddress *saddr = tcp_build_address(host_port, &err); if (!err) { socket_start_outgoing_migration(s, saddr, &err); } error_propagate(errp, err); }"} {"target": 0, "idx": 16036, "func": "static int ast2400_rambits(AspeedSDMCState *s) { switch (s->ram_size >> 20) { case 64: return ASPEED_SDMC_DRAM_64MB; case 128: return ASPEED_SDMC_DRAM_128MB; case 256: return ASPEED_SDMC_DRAM_256MB; case 512: return ASPEED_SDMC_DRAM_512MB; default: break; } /* use a common default */ error_report(\"warning: Invalid RAM size 0x%\" PRIx64 \". Using default 256M\", s->ram_size); s->ram_size = 256 << 20; return ASPEED_SDMC_DRAM_256MB; }"} {"target": 0, "idx": 16063, "func": "static int decode_slice(MpegEncContext *s){ const int part_mask= s->partitioned_frame ? 
(ER_AC_END|ER_AC_ERROR) : 0x7F; const int mb_size= 16>>s->avctx->lowres; int ret; s->last_resync_gb= s->gb; s->first_slice_line= 1; s->resync_mb_x= s->mb_x; s->resync_mb_y= s->mb_y; ff_set_qscale(s, s->qscale); if (s->avctx->hwaccel) { const uint8_t *start= s->gb.buffer + get_bits_count(&s->gb)/8; const uint8_t *end = ff_h263_find_resync_marker(start + 1, s->gb.buffer_end); skip_bits_long(&s->gb, 8*(end - start)); return s->avctx->hwaccel->decode_slice(s->avctx, start, end - start); } if(s->partitioned_frame){ const int qscale= s->qscale; if(CONFIG_MPEG4_DECODER && s->codec_id==AV_CODEC_ID_MPEG4){ if ((ret = ff_mpeg4_decode_partitions(s)) < 0) return ret; } /* restore variables which were modified */ s->first_slice_line=1; s->mb_x= s->resync_mb_x; s->mb_y= s->resync_mb_y; ff_set_qscale(s, qscale); } for(; s->mb_y < s->mb_height; s->mb_y++) { /* per-row end of slice checks */ if(s->msmpeg4_version){ if(s->resync_mb_y + s->slice_height == s->mb_y){ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END); return 0; } } if(s->msmpeg4_version==1){ s->last_dc[0]= s->last_dc[1]= s->last_dc[2]= 128; } ff_init_block_index(s); for(; s->mb_x < s->mb_width; s->mb_x++) { int ret; ff_update_block_index(s); if(s->resync_mb_x == s->mb_x && s->resync_mb_y+1 == s->mb_y){ s->first_slice_line=0; } /* DCT & quantize */ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; // s->mb_skipped = 0; av_dlog(s, \"%d %d %06X\\n\", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24)); ret= s->decode_mb(s, s->block); if (s->pict_type!=AV_PICTURE_TYPE_B) ff_h263_update_motion_val(s); if(ret<0){ const int xy= s->mb_x + s->mb_y*s->mb_stride; if(ret==SLICE_END){ ff_MPV_decode_mb(s, s->block); if(s->loop_filter) ff_h263_loop_filter(s); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END&part_mask); s->padding_bug_score--; if(++s->mb_x >= s->mb_width){ s->mb_x=0; ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size); ff_MPV_report_decode_progress(s); s->mb_y++; } return 0; }else if(ret==SLICE_NOEND){ av_log(s->avctx, AV_LOG_ERROR, \"Slice mismatch at MB: %d\\n\", xy); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x+1, s->mb_y, ER_MB_END&part_mask); return AVERROR_INVALIDDATA; } av_log(s->avctx, AV_LOG_ERROR, \"Error at MB: %d\\n\", xy); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR&part_mask); return AVERROR_INVALIDDATA; } ff_MPV_decode_mb(s, s->block); if(s->loop_filter) ff_h263_loop_filter(s); } ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size); ff_MPV_report_decode_progress(s); s->mb_x= 0; } assert(s->mb_x==0 && s->mb_y==s->mb_height); if(s->codec_id==AV_CODEC_ID_MPEG4 && (s->workaround_bugs&FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >= 48 && show_bits(&s->gb, 24)==0x4010 && !s->data_partitioning) s->padding_bug_score+=32; /* try to detect the padding bug */ if( s->codec_id==AV_CODEC_ID_MPEG4 && (s->workaround_bugs&FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >=0 && get_bits_left(&s->gb) < 137 // && !s->resync_marker && !s->data_partitioning){ const int bits_count= get_bits_count(&s->gb); const int bits_left = s->gb.size_in_bits - bits_count; if(bits_left==0){ s->padding_bug_score+=16; } else if(bits_left != 1){ int v= show_bits(&s->gb, 8); v|= 0x7F >> (7-(bits_count&7)); if(v==0x7F && bits_left<=8) s->padding_bug_score--; else if(v==0x7F && ((get_bits_count(&s->gb)+8)&8) && bits_left<=16) s->padding_bug_score+= 4; else s->padding_bug_score++; } } if(s->workaround_bugs&FF_BUG_AUTODETECT){ if(s->padding_bug_score > -2 && 
!s->data_partitioning /*&& (s->divx_version>=0 || !s->resync_marker)*/) s->workaround_bugs |= FF_BUG_NO_PADDING; else s->workaround_bugs &= ~FF_BUG_NO_PADDING; } // handle formats which don't have unique end markers if(s->msmpeg4_version || (s->workaround_bugs&FF_BUG_NO_PADDING)){ //FIXME perhaps solve this more cleanly int left= get_bits_left(&s->gb); int max_extra=7; /* no markers in M$ crap */ if(s->msmpeg4_version && s->pict_type==AV_PICTURE_TYPE_I) max_extra+= 17; /* buggy padding but the frame should still end approximately at the bitstream end */ if((s->workaround_bugs&FF_BUG_NO_PADDING) && (s->err_recognition&(AV_EF_BUFFER|AV_EF_AGGRESSIVE))) max_extra+= 48; else if((s->workaround_bugs&FF_BUG_NO_PADDING)) max_extra+= 256*256*256*64; if(left>max_extra){ av_log(s->avctx, AV_LOG_ERROR, \"discarding %d junk bits at end, next would be %X\\n\", left, show_bits(&s->gb, 24)); } else if(left<0){ av_log(s->avctx, AV_LOG_ERROR, \"overreading %d bits\\n\", -left); }else ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END); return 0; } av_log(s->avctx, AV_LOG_ERROR, \"slice end not reached but screenspace end (%d left %06X, score= %d)\\n\", get_bits_left(&s->gb), show_bits(&s->gb, 24), s->padding_bug_score); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END&part_mask); return AVERROR_INVALIDDATA; }"} {"target": 0, "idx": 16074, "func": "static void get_fcb_param(FCBParam *optim, int16_t *impulse_resp, int16_t *buf, int pulse_cnt, int pitch_lag) { FCBParam param; int16_t impulse_r[SUBFRAME_LEN]; int16_t temp_corr[SUBFRAME_LEN]; int16_t impulse_corr[SUBFRAME_LEN]; int ccr1[SUBFRAME_LEN]; int ccr2[SUBFRAME_LEN]; int amp, err, max, max_amp_index, min, scale, i, j, k, l; int64_t temp; /* Update impulse response */ memcpy(impulse_r, impulse_resp, sizeof(int16_t) * SUBFRAME_LEN); param.dirac_train = 0; if (pitch_lag < SUBFRAME_LEN - 2) { param.dirac_train = 1; gen_dirac_train(impulse_r, pitch_lag); } for (i = 0; i < SUBFRAME_LEN; i++) temp_corr[i] = impulse_r[i] >> 1; /* Compute impulse response autocorrelation */ temp = dot_product(temp_corr, temp_corr, SUBFRAME_LEN); scale = normalize_bits_int32(temp); impulse_corr[0] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16; for (i = 1; i < SUBFRAME_LEN; i++) { temp = dot_product(temp_corr + i, temp_corr, SUBFRAME_LEN - i); impulse_corr[i] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16; } /* Compute crosscorrelation of impulse response with residual signal */ scale -= 4; for (i = 0; i < SUBFRAME_LEN; i++){ temp = dot_product(buf + i, impulse_r, SUBFRAME_LEN - i); if (scale < 0) ccr1[i] = temp >> -scale; else ccr1[i] = av_clipl_int32(temp << scale); } /* Search loop */ for (i = 0; i < GRID_SIZE; i++) { /* Maximize the crosscorrelation */ max = 0; for (j = i; j < SUBFRAME_LEN; j += GRID_SIZE) { temp = FFABS(ccr1[j]); if (temp >= max) { max = temp; param.pulse_pos[0] = j; } } /* Quantize the gain (max crosscorrelation/impulse_corr[0]) */ amp = max; min = 1 << 30; max_amp_index = GAIN_LEVELS - 2; for (j = max_amp_index; j >= 2; j--) { temp = av_clipl_int32((int64_t)fixed_cb_gain[j] * impulse_corr[0] << 1); temp = FFABS(temp - amp); if (temp < min) { min = temp; max_amp_index = j; } } max_amp_index--; /* Select additional gain values */ for (j = 1; j < 5; j++) { for (k = i; k < SUBFRAME_LEN; k += GRID_SIZE) { temp_corr[k] = 0; ccr2[k] = ccr1[k]; } param.amp_index = max_amp_index + j - 2; amp = fixed_cb_gain[param.amp_index]; param.pulse_sign[0] = (ccr2[param.pulse_pos[0]] < 0) ? 
-amp : amp; temp_corr[param.pulse_pos[0]] = 1; for (k = 1; k < pulse_cnt; k++) { max = -1 << 30; for (l = i; l < SUBFRAME_LEN; l += GRID_SIZE) { if (temp_corr[l]) continue; temp = impulse_corr[FFABS(l - param.pulse_pos[k - 1])]; temp = av_clipl_int32((int64_t)temp * param.pulse_sign[k - 1] << 1); ccr2[l] -= temp; temp = FFABS(ccr2[l]); if (temp > max) { max = temp; param.pulse_pos[k] = l; } } param.pulse_sign[k] = (ccr2[param.pulse_pos[k]] < 0) ? -amp : amp; temp_corr[param.pulse_pos[k]] = 1; } /* Create the error vector */ memset(temp_corr, 0, sizeof(int16_t) * SUBFRAME_LEN); for (k = 0; k < pulse_cnt; k++) temp_corr[param.pulse_pos[k]] = param.pulse_sign[k]; for (k = SUBFRAME_LEN - 1; k >= 0; k--) { temp = 0; for (l = 0; l <= k; l++) { int prod = av_clipl_int32((int64_t)temp_corr[l] * impulse_r[k - l] << 1); temp = av_clipl_int32(temp + prod); } temp_corr[k] = temp << 2 >> 16; } /* Compute square of error */ err = 0; for (k = 0; k < SUBFRAME_LEN; k++) { int64_t prod; prod = av_clipl_int32((int64_t)buf[k] * temp_corr[k] << 1); err = av_clipl_int32(err - prod); prod = av_clipl_int32((int64_t)temp_corr[k] * temp_corr[k]); err = av_clipl_int32(err + prod); } /* Minimize */ if (err < optim->min_err) { optim->min_err = err; optim->grid_index = i; optim->amp_index = param.amp_index; optim->dirac_train = param.dirac_train; for (k = 0; k < pulse_cnt; k++) { optim->pulse_sign[k] = param.pulse_sign[k]; optim->pulse_pos[k] = param.pulse_pos[k]; } } } } }"} {"target": 0, "idx": 16076, "func": "static ssize_t proxy_readlink(FsContext *fs_ctx, V9fsPath *fs_path, char *buf, size_t bufsz) { int retval; retval = v9fs_request(fs_ctx->private, T_READLINK, buf, \"sd\", fs_path, bufsz); if (retval < 0) { errno = -retval; return -1; } return strlen(buf); }"} {"target": 0, "idx": 16086, "func": "static void gt64120_writel (void *opaque, target_phys_addr_t addr, uint32_t val) { GT64120State *s = opaque; uint32_t saddr; #ifdef TARGET_WORDS_BIGENDIAN val = bswap32(val); #endif saddr = (addr & 0xfff) >> 2; switch (saddr) { /* CPU Configuration */ case GT_CPU: s->regs[GT_CPU] = val; break; case GT_MULTI: /* Read-only register as only one GT64xxx is present on the CPU bus */ break; /* CPU Address Decode */ case GT_PCI0IOLD: s->regs[GT_PCI0IOLD] = val & 0x00007fff; s->regs[GT_PCI0IOREMAP] = val & 0x000007ff; gt64120_pci_mapping(s); break; case GT_PCI0M0LD: s->regs[GT_PCI0M0LD] = val & 0x00007fff; s->regs[GT_PCI0M0REMAP] = val & 0x000007ff; break; case GT_PCI0M1LD: s->regs[GT_PCI0M1LD] = val & 0x00007fff; s->regs[GT_PCI0M1REMAP] = val & 0x000007ff; break; case GT_PCI1IOLD: s->regs[GT_PCI1IOLD] = val & 0x00007fff; s->regs[GT_PCI1IOREMAP] = val & 0x000007ff; break; case GT_PCI1M0LD: s->regs[GT_PCI1M0LD] = val & 0x00007fff; s->regs[GT_PCI1M0REMAP] = val & 0x000007ff; break; case GT_PCI1M1LD: s->regs[GT_PCI1M1LD] = val & 0x00007fff; s->regs[GT_PCI1M1REMAP] = val & 0x000007ff; break; case GT_PCI0IOHD: s->regs[saddr] = val & 0x0000007f; gt64120_pci_mapping(s); break; case GT_PCI0M0HD: case GT_PCI0M1HD: case GT_PCI1IOHD: case GT_PCI1M0HD: case GT_PCI1M1HD: s->regs[saddr] = val & 0x0000007f; break; case GT_ISD: s->regs[saddr] = val & 0x00007fff; gt64120_isd_mapping(s); break; case GT_PCI0IOREMAP: case GT_PCI0M0REMAP: case GT_PCI0M1REMAP: case GT_PCI1IOREMAP: case GT_PCI1M0REMAP: case GT_PCI1M1REMAP: s->regs[saddr] = val & 0x000007ff; break; /* CPU Error Report */ case GT_CPUERR_ADDRLO: case GT_CPUERR_ADDRHI: case GT_CPUERR_DATALO: case GT_CPUERR_DATAHI: case GT_CPUERR_PARITY: /* Read-only registers, do nothing */ break; /* CPU 
Sync Barrier */ case GT_PCI0SYNC: case GT_PCI1SYNC: /* Read-only registers, do nothing */ break; /* SDRAM and Device Address Decode */ case GT_SCS0LD: case GT_SCS0HD: case GT_SCS1LD: case GT_SCS1HD: case GT_SCS2LD: case GT_SCS2HD: case GT_SCS3LD: case GT_SCS3HD: case GT_CS0LD: case GT_CS0HD: case GT_CS1LD: case GT_CS1HD: case GT_CS2LD: case GT_CS2HD: case GT_CS3LD: case GT_CS3HD: case GT_BOOTLD: case GT_BOOTHD: case GT_ADERR: /* SDRAM Configuration */ case GT_SDRAM_CFG: case GT_SDRAM_OPMODE: case GT_SDRAM_BM: case GT_SDRAM_ADDRDECODE: /* Accept and ignore SDRAM interleave configuration */ s->regs[saddr] = val; break; /* Device Parameters */ case GT_DEV_B0: case GT_DEV_B1: case GT_DEV_B2: case GT_DEV_B3: case GT_DEV_BOOT: /* Not implemented */ dprintf (\"Unimplemented device register offset 0x%x\\n\", saddr << 2); break; /* ECC */ case GT_ECC_ERRDATALO: case GT_ECC_ERRDATAHI: case GT_ECC_MEM: case GT_ECC_CALC: case GT_ECC_ERRADDR: /* Read-only registers, do nothing */ break; /* DMA Record */ case GT_DMA0_CNT: case GT_DMA1_CNT: case GT_DMA2_CNT: case GT_DMA3_CNT: case GT_DMA0_SA: case GT_DMA1_SA: case GT_DMA2_SA: case GT_DMA3_SA: case GT_DMA0_DA: case GT_DMA1_DA: case GT_DMA2_DA: case GT_DMA3_DA: case GT_DMA0_NEXT: case GT_DMA1_NEXT: case GT_DMA2_NEXT: case GT_DMA3_NEXT: case GT_DMA0_CUR: case GT_DMA1_CUR: case GT_DMA2_CUR: case GT_DMA3_CUR: /* Not implemented */ dprintf (\"Unimplemented DMA register offset 0x%x\\n\", saddr << 2); break; /* DMA Channel Control */ case GT_DMA0_CTRL: case GT_DMA1_CTRL: case GT_DMA2_CTRL: case GT_DMA3_CTRL: /* Not implemented */ dprintf (\"Unimplemented DMA register offset 0x%x\\n\", saddr << 2); break; /* DMA Arbiter */ case GT_DMA_ARB: /* Not implemented */ dprintf (\"Unimplemented DMA register offset 0x%x\\n\", saddr << 2); break; /* Timer/Counter */ case GT_TC0: case GT_TC1: case GT_TC2: case GT_TC3: case GT_TC_CONTROL: /* Not implemented */ dprintf (\"Unimplemented timer register offset 0x%x\\n\", saddr << 2); break; /* PCI Internal */ case GT_PCI0_CMD: case GT_PCI1_CMD: s->regs[saddr] = val & 0x0401fc0f; break; case GT_PCI0_TOR: case GT_PCI0_BS_SCS10: case GT_PCI0_BS_SCS32: case GT_PCI0_BS_CS20: case GT_PCI0_BS_CS3BT: case GT_PCI1_IACK: case GT_PCI0_IACK: case GT_PCI0_BARE: case GT_PCI0_PREFMBR: case GT_PCI0_SCS10_BAR: case GT_PCI0_SCS32_BAR: case GT_PCI0_CS20_BAR: case GT_PCI0_CS3BT_BAR: case GT_PCI0_SSCS10_BAR: case GT_PCI0_SSCS32_BAR: case GT_PCI0_SCS3BT_BAR: case GT_PCI1_TOR: case GT_PCI1_BS_SCS10: case GT_PCI1_BS_SCS32: case GT_PCI1_BS_CS20: case GT_PCI1_BS_CS3BT: case GT_PCI1_BARE: case GT_PCI1_PREFMBR: case GT_PCI1_SCS10_BAR: case GT_PCI1_SCS32_BAR: case GT_PCI1_CS20_BAR: case GT_PCI1_CS3BT_BAR: case GT_PCI1_SSCS10_BAR: case GT_PCI1_SSCS32_BAR: case GT_PCI1_SCS3BT_BAR: case GT_PCI1_CFGADDR: case GT_PCI1_CFGDATA: /* not implemented */ break; case GT_PCI0_CFGADDR: s->pci->config_reg = val & 0x80fffffc; break; case GT_PCI0_CFGDATA: if (s->pci->config_reg & (1u << 31)) pci_host_data_writel(s->pci, 0, val); break; /* Interrupts */ case GT_INTRCAUSE: /* not really implemented */ s->regs[saddr] = ~(~(s->regs[saddr]) | ~(val & 0xfffffffe)); s->regs[saddr] |= !!(s->regs[saddr] & 0xfffffffe); dprintf(\"INTRCAUSE %x\\n\", val); break; case GT_INTRMASK: s->regs[saddr] = val & 0x3c3ffffe; dprintf(\"INTRMASK %x\\n\", val); break; case GT_PCI0_ICMASK: s->regs[saddr] = val & 0x03fffffe; dprintf(\"ICMASK %x\\n\", val); break; case GT_PCI0_SERR0MASK: s->regs[saddr] = val & 0x0000003f; dprintf(\"SERR0MASK %x\\n\", val); break; /* Reserved when only PCI_0 is 
configured. */ case GT_HINTRCAUSE: case GT_CPU_INTSEL: case GT_PCI0_INTSEL: case GT_HINTRMASK: case GT_PCI0_HICMASK: case GT_PCI1_SERR1MASK: /* not implemented */ break; /* SDRAM Parameters */ case GT_SDRAM_B0: case GT_SDRAM_B1: case GT_SDRAM_B2: case GT_SDRAM_B3: /* We don't simulate electrical parameters of the SDRAM. Accept, but ignore the values. */ s->regs[saddr] = val; break; default: dprintf (\"Bad register offset 0x%x\\n\", (int)addr); break; } }"} {"target": 1, "idx": 16109, "func": "static void buffer_reset(Buffer *buffer) { buffer->offset = 0; }"} {"target": 1, "idx": 16122, "func": "static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) { int i, res; VP9Context *s = dst->priv_data, *ssrc = src->priv_data; // FIXME scalability, size, etc. for (i = 0; i < 2; i++) { if (s->frames[i].tf.f->data[0]) vp9_unref_frame(dst, &s->frames[i]); if (ssrc->frames[i].tf.f->data[0]) { if ((res = vp9_ref_frame(dst, &s->frames[i], &ssrc->frames[i])) < 0) return res; } } for (i = 0; i < 8; i++) { if (s->refs[i].f->data[0]) ff_thread_release_buffer(dst, &s->refs[i]); if (ssrc->next_refs[i].f->data[0]) { if ((res = ff_thread_ref_frame(&s->refs[i], &ssrc->next_refs[i])) < 0) return res; } } s->invisible = ssrc->invisible; s->keyframe = ssrc->keyframe; s->uses_2pass = ssrc->uses_2pass; memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx)); memcpy(&s->lf_delta, &ssrc->lf_delta, sizeof(s->lf_delta)); if (ssrc->segmentation.enabled) { memcpy(&s->segmentation.feat, &ssrc->segmentation.feat, sizeof(s->segmentation.feat)); } return 0; }"} {"target": 1, "idx": 16124, "func": "static void test_hash_speed(const void *opaque) { size_t chunk_size = (size_t)opaque; uint8_t *in = NULL, *out = NULL; size_t out_len = 0; double total = 0.0; struct iovec iov; int ret; in = g_new0(uint8_t, chunk_size); memset(in, g_test_rand_int(), chunk_size); iov.iov_base = (char *)in; iov.iov_len = chunk_size; g_test_timer_start(); do { ret = qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256, &iov, 1, &out, &out_len, NULL); g_assert(ret == 0); total += chunk_size; } while (g_test_timer_elapsed() < 5.0); total /= 1024 * 1024; /* to MB */ g_print(\"sha256: \"); g_print(\"Testing chunk_size %ld bytes \", chunk_size); g_print(\"done: %.2f MB in %.2f secs: \", total, g_test_timer_last()); g_print(\"%.2f MB/sec\\n\", total / g_test_timer_last()); g_free(out); g_free(in); }"} {"target": 1, "idx": 16126, "func": "static int vhost_kernel_memslots_limit(struct vhost_dev *dev) { int limit = 64; char *s; if (g_file_get_contents(\"/sys/module/vhost/parameters/max_mem_regions\", &s, NULL, NULL)) { uint64_t val = g_ascii_strtoull(s, NULL, 10); if (!((val == G_MAXUINT64 || !val) && errno)) { return val; } error_report(\"ignoring invalid max_mem_regions value in vhost module:\" \" %s\", s); } return limit; }"} {"target": 1, "idx": 16132, "func": "static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift) { MotionEstContext * const c= &s->me; const int size= 1; const int h=8; int block; int P[10][2]; int dmin_sum=0, mx4_sum=0, my4_sum=0; int same=1; const int stride= c->stride; uint8_t *mv_penalty= c->current_mv_penalty; init_mv4_ref(c); for(block=0; block<4; block++){ int mx4, my4; int pred_x4, pred_y4; int dmin4; static const int off[4]= {2, 1, 1, -1}; const int mot_stride = s->b8_stride; const int mot_xy = s->block_index[block]; P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0]; P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1]; if(P_LEFT[0] > 
(c->xmax<xmax<first_slice_line && block<2) { c->pred_x= pred_x4= P_LEFT[0]; c->pred_y= pred_y4= P_LEFT[1]; } else { P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0]; P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1]; P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][0]; P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][1]; if(P_TOP[1] > (c->ymax<ymax<xmin<xmin< (c->xmax<xmax< (c->ymax<ymax<pred_x= pred_x4 = P_MEDIAN[0]; c->pred_y= pred_y4 = P_MEDIAN[1]; } P_MV1[0]= mx; P_MV1[1]= my; dmin4 = epzs_motion_search4(s, &mx4, &my4, P, block, block, s->p_mv_table, (1<<16)>>shift); dmin4= c->sub_motion_search(s, &mx4, &my4, dmin4, block, block, size, h); if(s->dsp.me_sub_cmp[0] != s->dsp.mb_cmp[0]){ int dxy; const int offset= ((block&1) + (block>>1)*stride)*8; uint8_t *dest_y = c->scratchpad + offset; if(s->quarter_sample){ uint8_t *ref= c->ref[block][0] + (mx4>>2) + (my4>>2)*stride; dxy = ((my4 & 3) << 2) | (mx4 & 3); if(s->no_rounding) s->dsp.put_no_rnd_qpel_pixels_tab[1][dxy](dest_y , ref , stride); else s->dsp.put_qpel_pixels_tab [1][dxy](dest_y , ref , stride); }else{ uint8_t *ref= c->ref[block][0] + (mx4>>1) + (my4>>1)*stride; dxy = ((my4 & 1) << 1) | (mx4 & 1); if(s->no_rounding) s->hdsp.put_no_rnd_pixels_tab[1][dxy](dest_y , ref , stride, h); else s->hdsp.put_pixels_tab [1][dxy](dest_y , ref , stride, h); } dmin_sum+= (mv_penalty[mx4-pred_x4] + mv_penalty[my4-pred_y4])*c->mb_penalty_factor; }else dmin_sum+= dmin4; if(s->quarter_sample){ mx4_sum+= mx4/2; my4_sum+= my4/2; }else{ mx4_sum+= mx4; my4_sum+= my4; } s->current_picture.motion_val[0][s->block_index[block]][0] = mx4; s->current_picture.motion_val[0][s->block_index[block]][1] = my4; if(mx4 != mx || my4 != my) same=0; } if(same) return INT_MAX; if(s->dsp.me_sub_cmp[0] != s->dsp.mb_cmp[0]){ dmin_sum += s->dsp.mb_cmp[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*16*stride, c->scratchpad, stride, 16); } if(c->avctx->mb_cmp&FF_CMP_CHROMA){ int dxy; int mx, my; int offset; mx= ff_h263_round_chroma(mx4_sum); my= ff_h263_round_chroma(my4_sum); dxy = ((my & 1) << 1) | (mx & 1); offset= (s->mb_x*8 + (mx>>1)) + (s->mb_y*8 + (my>>1))*s->uvlinesize; if(s->no_rounding){ s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8); s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8); }else{ s->hdsp.put_pixels_tab [1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8); s->hdsp.put_pixels_tab [1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8); } dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad , s->uvlinesize, 8); dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad+8, s->uvlinesize, 8); } c->pred_x= mx; c->pred_y= my; switch(c->avctx->mb_cmp&0xFF){ /*case FF_CMP_SSE: return dmin_sum+ 32*s->qscale*s->qscale;*/ case FF_CMP_RD: return dmin_sum; default: return dmin_sum+ 11*c->mb_penalty_factor; } }"} {"target": 0, "idx": 16138, "func": "static inline void powerpc_excp(CPUState *env, int excp_model, int excp) { target_ulong msr, new_msr, vector; int srr0, srr1, asrr0, asrr1; int lpes0, lpes1, lev; if (0) { /* XXX: find a suitable condition to enable the hypervisor mode */ lpes0 = (env->spr[SPR_LPCR] >> 1) & 1; lpes1 = (env->spr[SPR_LPCR] >> 2) & 1; } else { /* Those 
values ensure we won't enter the hypervisor mode */ lpes0 = 0; lpes1 = 1; } qemu_log_mask(CPU_LOG_INT, \"Raise exception at \" TARGET_FMT_lx \" => %08x (%02x)\\n\", env->nip, excp, env->error_code); /* new srr1 value excluding must-be-zero bits */ msr = env->msr & ~0x783f0000ULL; /* new interrupt handler msr */ new_msr = env->msr & ((target_ulong)1 << MSR_ME); /* target registers */ srr0 = SPR_SRR0; srr1 = SPR_SRR1; asrr0 = -1; asrr1 = -1; switch (excp) { case POWERPC_EXCP_NONE: /* Should never happen */ return; case POWERPC_EXCP_CRITICAL: /* Critical input */ switch (excp_model) { case POWERPC_EXCP_40x: srr0 = SPR_40x_SRR2; srr1 = SPR_40x_SRR3; break; case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_CSRR0; srr1 = SPR_BOOKE_CSRR1; break; case POWERPC_EXCP_G2: break; default: goto excp_invalid; } goto store_next; case POWERPC_EXCP_MCHECK: /* Machine check exception */ if (msr_me == 0) { /* Machine check exception is not enabled. * Enter checkstop state. */ if (qemu_log_enabled()) { qemu_log(\"Machine check while not allowed. \" \"Entering checkstop state\\n\"); } else { fprintf(stderr, \"Machine check while not allowed. \" \"Entering checkstop state\\n\"); } env->halted = 1; env->interrupt_request |= CPU_INTERRUPT_EXITTB; } if (0) { /* XXX: find a suitable condition to enable the hypervisor mode */ new_msr |= (target_ulong)MSR_HVB; } /* machine check exceptions don't have ME set */ new_msr &= ~((target_ulong)1 << MSR_ME); /* XXX: should also have something loaded in DAR / DSISR */ switch (excp_model) { case POWERPC_EXCP_40x: srr0 = SPR_40x_SRR2; srr1 = SPR_40x_SRR3; break; case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_MCSRR0; srr1 = SPR_BOOKE_MCSRR1; asrr0 = SPR_BOOKE_CSRR0; asrr1 = SPR_BOOKE_CSRR1; break; default: break; } goto store_next; case POWERPC_EXCP_DSI: /* Data storage exception */ LOG_EXCP(\"DSI exception: DSISR=\" TARGET_FMT_lx\" DAR=\" TARGET_FMT_lx \"\\n\", env->spr[SPR_DSISR], env->spr[SPR_DAR]); if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_ISI: /* Instruction storage exception */ LOG_EXCP(\"ISI exception: msr=\" TARGET_FMT_lx \", nip=\" TARGET_FMT_lx \"\\n\", msr, env->nip); if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; msr |= env->error_code; goto store_next; case POWERPC_EXCP_EXTERNAL: /* External input */ if (lpes0 == 1) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_ALIGN: /* Alignment exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; /* XXX: this is false */ /* Get rS/rD and rA from faulting opcode */ env->spr[SPR_DSISR] |= (ldl_code((env->nip - 4)) & 0x03FF0000) >> 16; goto store_current; case POWERPC_EXCP_PROGRAM: /* Program exception */ switch (env->error_code & ~0xF) { case POWERPC_EXCP_FP: if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) { LOG_EXCP(\"Ignore floating point exception\\n\"); env->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; return; } if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; msr |= 0x00100000; if (msr_fe0 == msr_fe1) goto store_next; msr |= 0x00010000; break; case POWERPC_EXCP_INVAL: LOG_EXCP(\"Invalid instruction at \" TARGET_FMT_lx \"\\n\", env->nip); if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; msr |= 0x00080000; break; case POWERPC_EXCP_PRIV: if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; msr |= 0x00040000; break; case POWERPC_EXCP_TRAP: if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; msr |= 0x00020000; break; default: /* Should never occur */ cpu_abort(env, \"Invalid program exception %d. 
Aborting\\n\", env->error_code); break; } goto store_current; case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_current; case POWERPC_EXCP_SYSCALL: /* System call exception */ /* NOTE: this is a temporary hack to support graphics OSI calls from the MOL driver */ /* XXX: To be removed */ if (env->gpr[3] == 0x113724fa && env->gpr[4] == 0x77810f9b && env->osi_call) { if (env->osi_call(env) != 0) { env->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; return; } } dump_syscall(env); lev = env->error_code; if (lev == 1 || (lpes0 == 0 && lpes1 == 0)) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ goto store_current; case POWERPC_EXCP_DECR: /* Decrementer exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ /* FIT on 4xx */ LOG_EXCP(\"FIT exception\\n\"); goto store_next; case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ LOG_EXCP(\"WDT exception\\n\"); switch (excp_model) { case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_CSRR0; srr1 = SPR_BOOKE_CSRR1; break; default: break; } goto store_next; case POWERPC_EXCP_DTLB: /* Data TLB error */ goto store_next; case POWERPC_EXCP_ITLB: /* Instruction TLB error */ goto store_next; case POWERPC_EXCP_DEBUG: /* Debug interrupt */ switch (excp_model) { case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_DSRR0; srr1 = SPR_BOOKE_DSRR1; asrr0 = SPR_BOOKE_CSRR0; asrr1 = SPR_BOOKE_CSRR1; break; default: break; } /* XXX: TODO */ cpu_abort(env, \"Debug exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */ goto store_current; case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ /* XXX: TODO */ cpu_abort(env, \"Embedded floating point data exception \" \"is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ /* XXX: TODO */ cpu_abort(env, \"Embedded floating point round exception \" \"is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */ /* XXX: TODO */ cpu_abort(env, \"Performance counter exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ /* XXX: TODO */ cpu_abort(env, \"Embedded doorbell interrupt is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ switch (excp_model) { case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_CSRR0; srr1 = SPR_BOOKE_CSRR1; break; default: break; } /* XXX: TODO */ cpu_abort(env, \"Embedded doorbell critical interrupt \" \"is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_RESET: /* System reset exception */ if (msr_pow) { /* indicate that we resumed from power save mode */ msr |= 0x10000; } else { new_msr &= ~((target_ulong)1 << MSR_ME); } if (0) { /* XXX: find a suitable condition to enable the hypervisor mode */ new_msr |= (target_ulong)MSR_HVB; } goto store_next; case POWERPC_EXCP_DSEG: /* Data segment exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_ISEG: /* Instruction segment exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & 
((target_ulong)1 << MSR_RI); goto store_next; case POWERPC_EXCP_TRACE: /* Trace exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); goto store_next; case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */ srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); goto store_next; case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); goto store_next; case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */ srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); goto store_next; case POWERPC_EXCP_VPU: /* Vector unavailable exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_current; case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ LOG_EXCP(\"PIT exception\\n\"); goto store_next; case POWERPC_EXCP_IO: /* IO error exception */ /* XXX: TODO */ cpu_abort(env, \"601 IO error exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_RUNM: /* Run mode exception */ /* XXX: TODO */ cpu_abort(env, \"601 run mode exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_EMUL: /* Emulation trap exception */ /* XXX: TODO */ cpu_abort(env, \"602 emulation trap exception \" \"is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ if (lpes1 == 0) /* XXX: check this */ new_msr |= (target_ulong)MSR_HVB; switch (excp_model) { case POWERPC_EXCP_602: case POWERPC_EXCP_603: case POWERPC_EXCP_603E: case POWERPC_EXCP_G2: goto tlb_miss_tgpr; case POWERPC_EXCP_7x5: goto tlb_miss; case POWERPC_EXCP_74xx: goto tlb_miss_74xx; default: cpu_abort(env, \"Invalid instruction TLB miss exception\\n\"); break; } break; case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ if (lpes1 == 0) /* XXX: check this */ new_msr |= (target_ulong)MSR_HVB; switch (excp_model) { case POWERPC_EXCP_602: case POWERPC_EXCP_603: case POWERPC_EXCP_603E: case POWERPC_EXCP_G2: goto tlb_miss_tgpr; case POWERPC_EXCP_7x5: goto tlb_miss; case POWERPC_EXCP_74xx: goto tlb_miss_74xx; default: cpu_abort(env, \"Invalid data load TLB miss exception\\n\"); break; } break; case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ if (lpes1 == 0) /* XXX: check this */ new_msr |= (target_ulong)MSR_HVB; switch (excp_model) { case POWERPC_EXCP_602: case POWERPC_EXCP_603: case POWERPC_EXCP_603E: case POWERPC_EXCP_G2: tlb_miss_tgpr: /* Swap temporary saved registers with GPRs */ if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) { new_msr |= (target_ulong)1 << MSR_TGPR; hreg_swap_gpr_tgpr(env); } goto tlb_miss; case POWERPC_EXCP_7x5: tlb_miss: #if defined (DEBUG_SOFTWARE_TLB) if (qemu_log_enabled()) { const char *es; target_ulong *miss, *cmp; int en; if (excp == POWERPC_EXCP_IFTLB) { es = \"I\"; en = 'I'; miss = &env->spr[SPR_IMISS]; cmp = &env->spr[SPR_ICMP]; } else { if (excp == POWERPC_EXCP_DLTLB) es = \"DL\"; else es = \"DS\"; en = 'D'; miss = &env->spr[SPR_DMISS]; cmp = &env->spr[SPR_DCMP]; } qemu_log(\"6xx %sTLB miss: %cM \" TARGET_FMT_lx \" %cC \" TARGET_FMT_lx \" H1 \" TARGET_FMT_lx \" H2 \" TARGET_FMT_lx \" %08x\\n\", es, en, *miss, en, *cmp, 
env->spr[SPR_HASH1], env->spr[SPR_HASH2], env->error_code); } #endif msr |= env->crf[0] << 28; msr |= env->error_code; /* key, D/I, S/L bits */ /* Set way using a LRU mechanism */ msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; break; case POWERPC_EXCP_74xx: tlb_miss_74xx: #if defined (DEBUG_SOFTWARE_TLB) if (qemu_log_enabled()) { const char *es; target_ulong *miss, *cmp; int en; if (excp == POWERPC_EXCP_IFTLB) { es = \"I\"; en = 'I'; miss = &env->spr[SPR_TLBMISS]; cmp = &env->spr[SPR_PTEHI]; } else { if (excp == POWERPC_EXCP_DLTLB) es = \"DL\"; else es = \"DS\"; en = 'D'; miss = &env->spr[SPR_TLBMISS]; cmp = &env->spr[SPR_PTEHI]; } qemu_log(\"74xx %sTLB miss: %cM \" TARGET_FMT_lx \" %cC \" TARGET_FMT_lx \" %08x\\n\", es, en, *miss, en, *cmp, env->error_code); } #endif msr |= env->error_code; /* key bit */ break; default: cpu_abort(env, \"Invalid data store TLB miss exception\\n\"); break; } goto store_next; case POWERPC_EXCP_FPA: /* Floating-point assist exception */ /* XXX: TODO */ cpu_abort(env, \"Floating point assist exception \" \"is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_DABR: /* Data address breakpoint */ /* XXX: TODO */ cpu_abort(env, \"DABR exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ /* XXX: TODO */ cpu_abort(env, \"IABR exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_SMI: /* System management interrupt */ /* XXX: TODO */ cpu_abort(env, \"SMI exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_THERM: /* Thermal interrupt */ /* XXX: TODO */ cpu_abort(env, \"Thermal management exception \" \"is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; /* XXX: TODO */ cpu_abort(env, \"Performance counter exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_VPUA: /* Vector assist exception */ /* XXX: TODO */ cpu_abort(env, \"VPU assist exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_SOFTP: /* Soft patch exception */ /* XXX: TODO */ cpu_abort(env, \"970 soft-patch exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_MAINT: /* Maintenance exception */ /* XXX: TODO */ cpu_abort(env, \"970 maintenance exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ /* XXX: TODO */ cpu_abort(env, \"Maskable external exception \" \"is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ /* XXX: TODO */ cpu_abort(env, \"Non maskable external exception \" \"is not implemented yet !\\n\"); goto store_next; default: excp_invalid: cpu_abort(env, \"Invalid PowerPC exception %d. 
Aborting\\n\", excp); break; store_current: /* save current instruction location */ env->spr[srr0] = env->nip - 4; break; store_next: /* save next instruction location */ env->spr[srr0] = env->nip; break; } /* Save MSR */ env->spr[srr1] = msr; /* If any alternate SRR register are defined, duplicate saved values */ if (asrr0 != -1) env->spr[asrr0] = env->spr[srr0]; if (asrr1 != -1) env->spr[asrr1] = env->spr[srr1]; /* If we disactivated any translation, flush TLBs */ if (new_msr & ((1 << MSR_IR) | (1 << MSR_DR))) tlb_flush(env, 1); if (msr_ile) { new_msr |= (target_ulong)1 << MSR_LE; } /* Jump to handler */ vector = env->excp_vectors[excp]; if (vector == (target_ulong)-1ULL) { cpu_abort(env, \"Raised an exception without defined vector %d\\n\", excp); } vector |= env->excp_prefix; #if defined(TARGET_PPC64) if (excp_model == POWERPC_EXCP_BOOKE) { if (!msr_icm) { vector = (uint32_t)vector; } else { new_msr |= (target_ulong)1 << MSR_CM; } } else { if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) { vector = (uint32_t)vector; } else { new_msr |= (target_ulong)1 << MSR_SF; } } #endif /* XXX: we don't use hreg_store_msr here as already have treated * any special case that could occur. Just store MSR and update hflags */ env->msr = new_msr & env->msr_mask; hreg_compute_hflags(env); env->nip = vector; /* Reset exception state */ env->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; if (env->mmu_model == POWERPC_MMU_BOOKE) { /* XXX: The BookE changes address space when switching modes, we should probably implement that as different MMU indexes, but for the moment we do it the slow way and flush all. */ tlb_flush(env, 1); } }"} {"target": 0, "idx": 16140, "func": "static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) { BlockDriverState *bs = self->bs; BdrvTrackedRequest *req; bool retry; bool waited = false; if (!bs->serialising_in_flight) { return false; } do { retry = false; QLIST_FOREACH(req, &bs->tracked_requests, list) { if (req == self || (!req->serialising && !self->serialising)) { continue; } if (tracked_request_overlaps(req, self->overlap_offset, self->overlap_bytes)) { /* Hitting this means there was a reentrant request, for * example, a block driver issuing nested requests. This must * never happen since it means deadlock. */ assert(qemu_coroutine_self() != req->co); /* If the request is already (indirectly) waiting for us, or * will wait for us as soon as it wakes up, then just go on * (instead of producing a deadlock in the former case). 
*/ if (!req->waiting_for) { self->waiting_for = req; qemu_co_queue_wait(&req->wait_queue); self->waiting_for = NULL; retry = true; waited = true; break; } } } } while (retry); return waited; }"} {"target": 0, "idx": 16168, "func": "static int usb_device_init(USBDevice *dev) { USBDeviceClass *klass = USB_DEVICE_GET_CLASS(dev); if (klass->init) { return klass->init(dev); } return 0; }"} {"target": 0, "idx": 16184, "func": "static int url_alloc_for_protocol(URLContext **puc, struct URLProtocol *up, const char *filename, int flags, const AVIOInterruptCB *int_cb) { URLContext *uc; int err; #if CONFIG_NETWORK if (up->flags & URL_PROTOCOL_FLAG_NETWORK && !ff_network_init()) return AVERROR(EIO); #endif uc = av_mallocz(sizeof(URLContext) + strlen(filename) + 1); if (!uc) { err = AVERROR(ENOMEM); goto fail; } uc->av_class = &ffurl_context_class; uc->filename = (char *)&uc[1]; strcpy(uc->filename, filename); uc->prot = up; uc->flags = flags; uc->is_streamed = 0; /* default = not streamed */ uc->max_packet_size = 0; /* default: stream file */ if (up->priv_data_size) { uc->priv_data = av_mallocz(up->priv_data_size); if (!uc->priv_data) { err = AVERROR(ENOMEM); goto fail; } if (up->priv_data_class) { *(const AVClass **)uc->priv_data = up->priv_data_class; av_opt_set_defaults(uc->priv_data); } } if (int_cb) uc->interrupt_callback = *int_cb; *puc = uc; return 0; fail: *puc = NULL; if (uc) av_freep(&uc->priv_data); av_freep(&uc); #if CONFIG_NETWORK if (up->flags & URL_PROTOCOL_FLAG_NETWORK) ff_network_close(); #endif return err; }"} {"target": 0, "idx": 16185, "func": "static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int buf_size2) { RVDecContext *rv = avctx->priv_data; MpegEncContext *s = &rv->m; int mb_count, mb_pos, left, start_mb_x, active_bits_size, ret; active_bits_size = buf_size * 8; init_get_bits(&s->gb, buf, FFMAX(buf_size, buf_size2) * 8); if (s->codec_id == AV_CODEC_ID_RV10) mb_count = rv10_decode_picture_header(s); else mb_count = rv20_decode_picture_header(rv); if (mb_count < 0) { av_log(s->avctx, AV_LOG_ERROR, \"HEADER ERROR\\n\"); return AVERROR_INVALIDDATA; } if (s->mb_x >= s->mb_width || s->mb_y >= s->mb_height) { av_log(s->avctx, AV_LOG_ERROR, \"POS ERROR %d %d\\n\", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } mb_pos = s->mb_y * s->mb_width + s->mb_x; left = s->mb_width * s->mb_height - mb_pos; if (mb_count > left) { av_log(s->avctx, AV_LOG_ERROR, \"COUNT ERROR\\n\"); return AVERROR_INVALIDDATA; } if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr == NULL) { // FIXME write parser so we always have complete frames? 
if (s->current_picture_ptr) { ff_er_frame_end(&s->er); ff_mpv_frame_end(s); s->mb_x = s->mb_y = s->resync_mb_x = s->resync_mb_y = 0; } if ((ret = ff_mpv_frame_start(s, avctx)) < 0) return ret; ff_mpeg_er_frame_start(s); } else { if (s->current_picture_ptr->f->pict_type != s->pict_type) { av_log(s->avctx, AV_LOG_ERROR, \"Slice type mismatch\\n\"); return AVERROR_INVALIDDATA; } } av_dlog(avctx, \"qscale=%d\\n\", s->qscale); /* default quantization values */ if (s->codec_id == AV_CODEC_ID_RV10) { if (s->mb_y == 0) s->first_slice_line = 1; } else { s->first_slice_line = 1; s->resync_mb_x = s->mb_x; } start_mb_x = s->mb_x; s->resync_mb_y = s->mb_y; if (s->h263_aic) { s->y_dc_scale_table = s->c_dc_scale_table = ff_aic_dc_scale_table; } else { s->y_dc_scale_table = s->c_dc_scale_table = ff_mpeg1_dc_scale_table; } if (s->modified_quant) s->chroma_qscale_table = ff_h263_chroma_qscale_table; ff_set_qscale(s, s->qscale); s->rv10_first_dc_coded[0] = 0; s->rv10_first_dc_coded[1] = 0; s->rv10_first_dc_coded[2] = 0; s->block_wrap[0] = s->block_wrap[1] = s->block_wrap[2] = s->block_wrap[3] = s->b8_stride; s->block_wrap[4] = s->block_wrap[5] = s->mb_stride; ff_init_block_index(s); /* decode each macroblock */ for (s->mb_num_left = mb_count; s->mb_num_left > 0; s->mb_num_left--) { int ret; ff_update_block_index(s); av_dlog(avctx, \"**mb x=%d y=%d\\n\", s->mb_x, s->mb_y); s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; ret = ff_h263_decode_mb(s, s->block); // Repeat the slice end check from ff_h263_decode_mb with our active // bitstream size if (ret != SLICE_ERROR) { int v = show_bits(&s->gb, 16); if (get_bits_count(&s->gb) + 16 > active_bits_size) v >>= get_bits_count(&s->gb) + 16 - active_bits_size; if (!v) ret = SLICE_END; } if (ret != SLICE_ERROR && active_bits_size < get_bits_count(&s->gb) && 8 * buf_size2 >= get_bits_count(&s->gb)) { active_bits_size = buf_size2 * 8; av_log(avctx, AV_LOG_DEBUG, \"update size from %d to %d\\n\", 8 * buf_size, active_bits_size); ret = SLICE_OK; } if (ret == SLICE_ERROR || active_bits_size < get_bits_count(&s->gb)) { av_log(s->avctx, AV_LOG_ERROR, \"ERROR at MB %d %d\\n\", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } if (s->pict_type != AV_PICTURE_TYPE_B) ff_h263_update_motion_val(s); ff_mpv_decode_mb(s, s->block); if (s->loop_filter) ff_h263_loop_filter(s); if (++s->mb_x == s->mb_width) { s->mb_x = 0; s->mb_y++; ff_init_block_index(s); } if (s->mb_x == s->resync_mb_x) s->first_slice_line = 0; if (ret == SLICE_END) break; } ff_er_add_slice(&s->er, start_mb_x, s->resync_mb_y, s->mb_x - 1, s->mb_y, ER_MB_END); return active_bits_size; }"} {"target": 1, "idx": 16214, "func": "int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) { BdrvIoctlCoData data = { .bs = bs, .req = req, .buf = buf, .ret = -EINPROGRESS, }; if (qemu_in_coroutine()) { /* Fast-path if already in coroutine context */ bdrv_co_ioctl_entry(&data); } else { Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry); qemu_coroutine_enter(co, &data); while (data.ret == -EINPROGRESS) { aio_poll(bdrv_get_aio_context(bs), true); } } return data.ret; }"} {"target": 0, "idx": 16235, "func": "static void pxa2xx_timer_write(void *opaque, target_phys_addr_t offset, uint32_t value) { int i, tm = 0; PXA2xxTimerInfo *s = (PXA2xxTimerInfo *) opaque; switch (offset) { case OSMR3: tm ++; case OSMR2: tm ++; case OSMR1: tm ++; case OSMR0: s->timer[tm].value = value; pxa2xx_timer_update(s, qemu_get_clock(vm_clock)); break; case OSMR11: tm ++; case OSMR10: tm ++; case OSMR9: tm ++; case OSMR8: tm ++; 
case OSMR7: tm ++; case OSMR6: tm ++; case OSMR5: tm ++; case OSMR4: if (!pxa2xx_timer_has_tm4(s)) goto badreg; s->tm4[tm].tm.value = value; pxa2xx_timer_update4(s, qemu_get_clock(vm_clock), tm); break; case OSCR: s->oldclock = s->clock; s->lastload = qemu_get_clock(vm_clock); s->clock = value; pxa2xx_timer_update(s, s->lastload); break; case OSCR11: tm ++; case OSCR10: tm ++; case OSCR9: tm ++; case OSCR8: tm ++; case OSCR7: tm ++; case OSCR6: tm ++; case OSCR5: tm ++; case OSCR4: if (!pxa2xx_timer_has_tm4(s)) goto badreg; s->tm4[tm].oldclock = s->tm4[tm].clock; s->tm4[tm].lastload = qemu_get_clock(vm_clock); s->tm4[tm].clock = value; pxa2xx_timer_update4(s, s->tm4[tm].lastload, tm); break; case OIER: s->irq_enabled = value & 0xfff; break; case OSSR: /* Status register */ s->events &= ~value; for (i = 0; i < 4; i ++, value >>= 1) { if (s->timer[i].level && (value & 1)) { s->timer[i].level = 0; qemu_irq_lower(s->timer[i].irq); } } if (pxa2xx_timer_has_tm4(s)) { for (i = 0; i < 8; i ++, value >>= 1) if (s->tm4[i].tm.level && (value & 1)) s->tm4[i].tm.level = 0; if (!(s->events & 0xff0)) qemu_irq_lower(s->tm4->tm.irq); } break; case OWER: /* XXX: Reset on OSMR3 match? */ s->reset3 = value; break; case OMCR7: tm ++; case OMCR6: tm ++; case OMCR5: tm ++; case OMCR4: if (!pxa2xx_timer_has_tm4(s)) goto badreg; s->tm4[tm].control = value & 0x0ff; /* XXX Stop if running (shouldn't happen) */ if ((value & (1 << 7)) || tm == 0) s->tm4[tm].freq = pxa2xx_timer4_freq[value & 7]; else { s->tm4[tm].freq = 0; pxa2xx_timer_update4(s, qemu_get_clock(vm_clock), tm); } break; case OMCR11: tm ++; case OMCR10: tm ++; case OMCR9: tm ++; case OMCR8: tm += 4; if (!pxa2xx_timer_has_tm4(s)) goto badreg; s->tm4[tm].control = value & 0x3ff; /* XXX Stop if running (shouldn't happen) */ if ((value & (1 << 7)) || !(tm & 1)) s->tm4[tm].freq = pxa2xx_timer4_freq[(value & (1 << 8)) ? 0 : (value & 7)]; else { s->tm4[tm].freq = 0; pxa2xx_timer_update4(s, qemu_get_clock(vm_clock), tm); } break; default: badreg: hw_error(\"pxa2xx_timer_write: Bad offset \" REG_FMT \"\\n\", offset); } }"} {"target": 0, "idx": 16243, "func": "static void mipsnet_ioport_write(void *opaque, hwaddr addr, uint64_t val, unsigned int size) { MIPSnetState *s = opaque; addr &= 0x3f; trace_mipsnet_write(addr, val); switch (addr) { case MIPSNET_TX_DATA_COUNT: s->tx_count = (val <= MAX_ETH_FRAME_SIZE) ? val : 0; s->tx_written = 0; break; case MIPSNET_INT_CTL: if (val & MIPSNET_INTCTL_TXDONE) { s->intctl &= ~MIPSNET_INTCTL_TXDONE; } else if (val & MIPSNET_INTCTL_RXDONE) { s->intctl &= ~MIPSNET_INTCTL_RXDONE; } else if (val & MIPSNET_INTCTL_TESTBIT) { mipsnet_reset(s); s->intctl |= MIPSNET_INTCTL_TESTBIT; } else if (!val) { /* ACK testbit interrupt, flag was cleared on read. */ } s->busy = !!s->intctl; mipsnet_update_irq(s); if (mipsnet_can_receive(s->nic->ncs)) { qemu_flush_queued_packets(qemu_get_queue(s->nic)); } break; case MIPSNET_TX_DATA_BUFFER: s->tx_buffer[s->tx_written++] = val; if (s->tx_written == s->tx_count) { /* Send buffer. 
*/ trace_mipsnet_send(s->tx_count); qemu_send_packet(qemu_get_queue(s->nic), s->tx_buffer, s->tx_count); s->tx_count = s->tx_written = 0; s->intctl |= MIPSNET_INTCTL_TXDONE; s->busy = 1; mipsnet_update_irq(s); } break; /* Read-only registers */ case MIPSNET_DEV_ID: case MIPSNET_BUSY: case MIPSNET_RX_DATA_COUNT: case MIPSNET_INTERRUPT_INFO: case MIPSNET_RX_DATA_BUFFER: default: break; } }"} {"target": 0, "idx": 16247, "func": "void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra) { CRW crw; uint64_t addr; int cc; CPUS390XState *env = &cpu->env; uint8_t ar; addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra); return; } cc = css_do_stcrw(&crw); /* 0 - crw stored, 1 - zeroes stored */ if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) { setcc(cpu, cc); } else if (cc == 0) { /* Write failed: requeue CRW since STCRW is a suppressing instruction */ css_undo_stcrw(&crw); } }"} {"target": 0, "idx": 16252, "func": "static void migrate_put_status(QDict *qdict, const char *name, uint64_t trans, uint64_t rem, uint64_t total) { QObject *obj; obj = qobject_from_jsonf(\"{ 'transferred': %\" PRId64 \", \" \"'remaining': %\" PRId64 \", \" \"'total': %\" PRId64 \" }\", trans, rem, total); assert(obj != NULL); qdict_put_obj(qdict, name, obj); }"} {"target": 0, "idx": 16254, "func": "static int parse_pixel_format(AVCodecContext *avctx) { DDSContext *ctx = avctx->priv_data; GetByteContext *gbc = &ctx->gbc; char buf[32]; uint32_t flags, fourcc, gimp_tag; enum DDSDXGIFormat dxgi; int size, bpp, r, g, b, a; int alpha_exponent, ycocg_classic, ycocg_scaled, normal_map, array; /* Alternative DDS implementations use reserved1 as custom header. */ bytestream2_skip(gbc, 4 * 3); gimp_tag = bytestream2_get_le32(gbc); alpha_exponent = gimp_tag == MKTAG('A', 'E', 'X', 'P'); ycocg_classic = gimp_tag == MKTAG('Y', 'C', 'G', '1'); ycocg_scaled = gimp_tag == MKTAG('Y', 'C', 'G', '2'); bytestream2_skip(gbc, 4 * 7); /* Now the real DDPF starts. 
*/ size = bytestream2_get_le32(gbc); if (size != 32) { av_log(avctx, AV_LOG_ERROR, \"Invalid pixel format header %d.\\n\", size); return AVERROR_INVALIDDATA; } flags = bytestream2_get_le32(gbc); ctx->compressed = flags & DDPF_FOURCC; ctx->paletted = flags & DDPF_PALETTE; normal_map = flags & DDPF_NORMALMAP; fourcc = bytestream2_get_le32(gbc); if (ctx->compressed && ctx->paletted) { av_log(avctx, AV_LOG_WARNING, \"Disabling invalid palette flag for compressed dds.\\n\"); ctx->paletted = 0; } bpp = bytestream2_get_le32(gbc); // rgbbitcount r = bytestream2_get_le32(gbc); // rbitmask g = bytestream2_get_le32(gbc); // gbitmask b = bytestream2_get_le32(gbc); // bbitmask a = bytestream2_get_le32(gbc); // abitmask bytestream2_skip(gbc, 4); // caps bytestream2_skip(gbc, 4); // caps2 bytestream2_skip(gbc, 4); // caps3 bytestream2_skip(gbc, 4); // caps4 bytestream2_skip(gbc, 4); // reserved2 av_get_codec_tag_string(buf, sizeof(buf), fourcc); av_log(avctx, AV_LOG_VERBOSE, \"fourcc %s bpp %d \" \"r 0x%x g 0x%x b 0x%x a 0x%x\\n\", buf, bpp, r, g, b, a); if (gimp_tag) { av_get_codec_tag_string(buf, sizeof(buf), gimp_tag); av_log(avctx, AV_LOG_VERBOSE, \"and GIMP-DDS tag %s\\n\", buf); } if (ctx->compressed) avctx->pix_fmt = AV_PIX_FMT_RGBA; if (ctx->compressed) { switch (fourcc) { case MKTAG('D', 'X', 'T', '1'): ctx->tex_ratio = 8; ctx->tex_funct = ctx->texdsp.dxt1a_block; break; case MKTAG('D', 'X', 'T', '2'): ctx->tex_ratio = 16; ctx->tex_funct = ctx->texdsp.dxt2_block; break; case MKTAG('D', 'X', 'T', '3'): ctx->tex_ratio = 16; ctx->tex_funct = ctx->texdsp.dxt3_block; break; case MKTAG('D', 'X', 'T', '4'): ctx->tex_ratio = 16; ctx->tex_funct = ctx->texdsp.dxt4_block; break; case MKTAG('D', 'X', 'T', '5'): ctx->tex_ratio = 16; if (ycocg_scaled) ctx->tex_funct = ctx->texdsp.dxt5ys_block; else if (ycocg_classic) ctx->tex_funct = ctx->texdsp.dxt5y_block; else ctx->tex_funct = ctx->texdsp.dxt5_block; break; case MKTAG('R', 'X', 'G', 'B'): ctx->tex_ratio = 16; ctx->tex_funct = ctx->texdsp.dxt5_block; /* This format may be considered as a normal map, * but it is handled differently in a separate postproc. */ ctx->postproc = DDS_SWIZZLE_RXGB; normal_map = 0; break; case MKTAG('A', 'T', 'I', '1'): case MKTAG('B', 'C', '4', 'U'): ctx->tex_ratio = 8; ctx->tex_funct = ctx->texdsp.rgtc1u_block; break; case MKTAG('B', 'C', '4', 'S'): ctx->tex_ratio = 8; ctx->tex_funct = ctx->texdsp.rgtc1s_block; break; case MKTAG('A', 'T', 'I', '2'): /* RGT2 variant with swapped R and G (3Dc)*/ ctx->tex_ratio = 16; ctx->tex_funct = ctx->texdsp.dxn3dc_block; break; case MKTAG('B', 'C', '5', 'U'): ctx->tex_ratio = 16; ctx->tex_funct = ctx->texdsp.rgtc2u_block; break; case MKTAG('B', 'C', '5', 'S'): ctx->tex_ratio = 16; ctx->tex_funct = ctx->texdsp.rgtc2s_block; break; case MKTAG('U', 'Y', 'V', 'Y'): ctx->compressed = 0; avctx->pix_fmt = AV_PIX_FMT_UYVY422; break; case MKTAG('Y', 'U', 'Y', '2'): ctx->compressed = 0; avctx->pix_fmt = AV_PIX_FMT_YUYV422; break; case MKTAG('P', '8', ' ', ' '): /* ATI Palette8, same as normal palette */ ctx->compressed = 0; ctx->paletted = 1; avctx->pix_fmt = AV_PIX_FMT_PAL8; break; case MKTAG('D', 'X', '1', '0'): /* DirectX 10 extra header */ dxgi = bytestream2_get_le32(gbc); bytestream2_skip(gbc, 4); // resourceDimension bytestream2_skip(gbc, 4); // miscFlag array = bytestream2_get_le32(gbc); bytestream2_skip(gbc, 4); // miscFlag2 if (array != 0) av_log(avctx, AV_LOG_VERBOSE, \"Found array of size %d (ignored).\\n\", array); /* Only BC[1-5] are actually compressed. 
*/ ctx->compressed = (dxgi >= 70) && (dxgi <= 84); av_log(avctx, AV_LOG_VERBOSE, \"DXGI format %d.\\n\", dxgi); switch (dxgi) { /* RGB types. */ case DXGI_FORMAT_R16G16B16A16_TYPELESS: case DXGI_FORMAT_R16G16B16A16_FLOAT: case DXGI_FORMAT_R16G16B16A16_UNORM: case DXGI_FORMAT_R16G16B16A16_UINT: case DXGI_FORMAT_R16G16B16A16_SNORM: case DXGI_FORMAT_R16G16B16A16_SINT: avctx->pix_fmt = AV_PIX_FMT_BGRA64; break; case DXGI_FORMAT_R8G8B8A8_UNORM_SRGB: avctx->colorspace = AVCOL_SPC_RGB; case DXGI_FORMAT_R8G8B8A8_TYPELESS: case DXGI_FORMAT_R8G8B8A8_UNORM: case DXGI_FORMAT_R8G8B8A8_UINT: case DXGI_FORMAT_R8G8B8A8_SNORM: case DXGI_FORMAT_R8G8B8A8_SINT: avctx->pix_fmt = AV_PIX_FMT_BGRA; break; case DXGI_FORMAT_B8G8R8A8_UNORM_SRGB: avctx->colorspace = AVCOL_SPC_RGB; case DXGI_FORMAT_B8G8R8A8_TYPELESS: case DXGI_FORMAT_B8G8R8A8_UNORM: avctx->pix_fmt = AV_PIX_FMT_RGBA; break; case DXGI_FORMAT_B8G8R8X8_UNORM_SRGB: avctx->colorspace = AVCOL_SPC_RGB; case DXGI_FORMAT_B8G8R8X8_TYPELESS: case DXGI_FORMAT_B8G8R8X8_UNORM: avctx->pix_fmt = AV_PIX_FMT_RGBA; // opaque break; case DXGI_FORMAT_B5G6R5_UNORM: avctx->pix_fmt = AV_PIX_FMT_RGB565LE; break; /* Texture types. */ case DXGI_FORMAT_BC1_UNORM_SRGB: avctx->colorspace = AVCOL_SPC_RGB; case DXGI_FORMAT_BC1_TYPELESS: case DXGI_FORMAT_BC1_UNORM: ctx->tex_ratio = 8; ctx->tex_funct = ctx->texdsp.dxt1a_block; break; case DXGI_FORMAT_BC2_UNORM_SRGB: avctx->colorspace = AVCOL_SPC_RGB; case DXGI_FORMAT_BC2_TYPELESS: case DXGI_FORMAT_BC2_UNORM: ctx->tex_ratio = 16; ctx->tex_funct = ctx->texdsp.dxt3_block; break; case DXGI_FORMAT_BC3_UNORM_SRGB: avctx->colorspace = AVCOL_SPC_RGB; case DXGI_FORMAT_BC3_TYPELESS: case DXGI_FORMAT_BC3_UNORM: ctx->tex_ratio = 16; ctx->tex_funct = ctx->texdsp.dxt5_block; break; case DXGI_FORMAT_BC4_TYPELESS: case DXGI_FORMAT_BC4_UNORM: ctx->tex_ratio = 8; ctx->tex_funct = ctx->texdsp.rgtc1u_block; break; case DXGI_FORMAT_BC4_SNORM: ctx->tex_ratio = 8; ctx->tex_funct = ctx->texdsp.rgtc1s_block; break; case DXGI_FORMAT_BC5_TYPELESS: case DXGI_FORMAT_BC5_UNORM: ctx->tex_ratio = 16; ctx->tex_funct = ctx->texdsp.rgtc2u_block; break; case DXGI_FORMAT_BC5_SNORM: ctx->tex_ratio = 16; ctx->tex_funct = ctx->texdsp.rgtc2s_block; break; default: av_log(avctx, AV_LOG_ERROR, \"Unsupported DXGI format %d.\\n\", dxgi); return AVERROR_INVALIDDATA; } break; default: av_log(avctx, AV_LOG_ERROR, \"Unsupported %s fourcc.\\n\", buf); return AVERROR_INVALIDDATA; } } else if (ctx->paletted) { if (bpp == 8) { avctx->pix_fmt = AV_PIX_FMT_PAL8; } else { av_log(avctx, AV_LOG_ERROR, \"Unsupported palette bpp %d.\\n\", bpp); return AVERROR_INVALIDDATA; } } else { /* 8 bpp */ if (bpp == 8 && r == 0xff && g == 0 && b == 0 && a == 0) avctx->pix_fmt = AV_PIX_FMT_GRAY8; else if (bpp == 8 && r == 0 && g == 0 && b == 0 && a == 0xff) avctx->pix_fmt = AV_PIX_FMT_GRAY8; /* 16 bpp */ else if (bpp == 16 && r == 0xff && g == 0 && b == 0 && a == 0xff00) avctx->pix_fmt = AV_PIX_FMT_YA8; else if (bpp == 16 && r == 0xffff && g == 0 && b == 0 && a == 0) avctx->pix_fmt = AV_PIX_FMT_GRAY16LE; else if (bpp == 16 && r == 0x7c00 && g == 0x3e0 && b == 0x1f && a == 0) avctx->pix_fmt = AV_PIX_FMT_RGB555LE; else if (bpp == 16 && r == 0x7c00 && g == 0x3e0 && b == 0x1f && a == 0x8000) avctx->pix_fmt = AV_PIX_FMT_RGB555LE; // alpha ignored else if (bpp == 16 && r == 0xf800 && g == 0x7e0 && b == 0x1f && a == 0) avctx->pix_fmt = AV_PIX_FMT_RGB565LE; /* 24 bpp */ else if (bpp == 24 && r == 0xff0000 && g == 0xff00 && b == 0xff && a == 0) avctx->pix_fmt = AV_PIX_FMT_BGR24; /* 32 bpp */ else if (bpp == 32 && r 
== 0xff0000 && g == 0xff00 && b == 0xff && a == 0) avctx->pix_fmt = AV_PIX_FMT_BGRA; // opaque else if (bpp == 32 && r == 0xff && g == 0xff00 && b == 0xff0000 && a == 0) avctx->pix_fmt = AV_PIX_FMT_RGBA; // opaque else if (bpp == 32 && r == 0xff0000 && g == 0xff00 && b == 0xff && a == 0xff000000) avctx->pix_fmt = AV_PIX_FMT_BGRA; else if (bpp == 32 && r == 0xff && g == 0xff00 && b == 0xff0000 && a == 0xff000000) avctx->pix_fmt = AV_PIX_FMT_RGBA; /* give up */ else { av_log(avctx, AV_LOG_ERROR, \"Unknown pixel format \" \"[bpp %d r 0x%x g 0x%x b 0x%x a 0x%x].\\n\", bpp, r, g, b, a); return AVERROR_INVALIDDATA; } } /* Set any remaining post-proc that should happen before frame is ready. */ if (alpha_exponent) ctx->postproc = DDS_ALPHA_EXP; else if (normal_map) ctx->postproc = DDS_NORMAL_MAP; else if (ycocg_classic && !ctx->compressed) ctx->postproc = DDS_RAW_YCOCG; else if (avctx->pix_fmt == AV_PIX_FMT_YA8) ctx->postproc = DDS_SWAP_ALPHA; /* ATI/NVidia variants sometimes add swizzling in bpp. */ switch (bpp) { case MKTAG('A', '2', 'X', 'Y'): ctx->postproc = DDS_SWIZZLE_A2XY; break; case MKTAG('x', 'G', 'B', 'R'): ctx->postproc = DDS_SWIZZLE_XGBR; break; case MKTAG('x', 'R', 'B', 'G'): ctx->postproc = DDS_SWIZZLE_XRBG; break; case MKTAG('R', 'B', 'x', 'G'): ctx->postproc = DDS_SWIZZLE_RBXG; break; case MKTAG('R', 'G', 'x', 'B'): ctx->postproc = DDS_SWIZZLE_RGXB; break; case MKTAG('R', 'x', 'B', 'G'): ctx->postproc = DDS_SWIZZLE_RXBG; break; case MKTAG('x', 'G', 'x', 'R'): ctx->postproc = DDS_SWIZZLE_XGXR; break; case MKTAG('A', '2', 'D', '5'): ctx->postproc = DDS_NORMAL_MAP; break; } return 0; }"} {"target": 0, "idx": 16257, "func": "static int svc_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { SVCContext *s = avctx->priv_data; SBufferInfo info = { 0 }; uint8_t* ptrs[3]; int linesize[3]; AVFrame *avframe = data; DECODING_STATE state; state = (*s->decoder)->DecodeFrame2(s->decoder, avpkt->data, avpkt->size, ptrs, &info); if (state != dsErrorFree) { av_log(avctx, AV_LOG_ERROR, \"DecodeFrame2 failed\\n\"); return AVERROR_UNKNOWN; } if (info.iBufferStatus != 1) { av_log(avctx, AV_LOG_DEBUG, \"No frame produced\\n\"); return avpkt->size; } ff_set_dimensions(avctx, info.UsrData.sSystemBuffer.iWidth, info.UsrData.sSystemBuffer.iHeight); // The decoder doesn't (currently) support decoding into a user // provided buffer, so do a copy instead. 
if (ff_get_buffer(avctx, avframe, 0) < 0) { av_log(avctx, AV_LOG_ERROR, \"Unable to allocate buffer\\n\"); return AVERROR(ENOMEM); } linesize[0] = info.UsrData.sSystemBuffer.iStride[0]; linesize[1] = linesize[2] = info.UsrData.sSystemBuffer.iStride[1]; av_image_copy(avframe->data, avframe->linesize, (const uint8_t **) ptrs, linesize, avctx->pix_fmt, avctx->width, avctx->height); avframe->pts = avpkt->pts; avframe->pkt_dts = avpkt->dts; #if FF_API_PKT_PTS FF_DISABLE_DEPRECATION_WARNINGS avframe->pkt_pts = avpkt->pts; FF_ENABLE_DEPRECATION_WARNINGS #endif *got_frame = 1; return avpkt->size; }"} {"target": 0, "idx": 16262, "func": "PCIBus *pci_apb_init(target_phys_addr_t special_base, target_phys_addr_t mem_base, qemu_irq *ivec_irqs, PCIBus **bus2, PCIBus **bus3, qemu_irq **pbm_irqs) { DeviceState *dev; SysBusDevice *s; APBState *d; PCIDevice *pci_dev; PCIBridge *br; /* Ultrasparc PBM main bus */ dev = qdev_create(NULL, \"pbm\"); qdev_init_nofail(dev); s = sysbus_from_qdev(dev); /* apb_config */ sysbus_mmio_map(s, 0, special_base); /* PCI configuration space */ sysbus_mmio_map(s, 1, special_base + 0x1000000ULL); /* pci_ioport */ sysbus_mmio_map(s, 2, special_base + 0x2000000ULL); d = FROM_SYSBUS(APBState, s); memory_region_init(&d->pci_mmio, \"pci-mmio\", 0x100000000ULL); memory_region_add_subregion(get_system_memory(), mem_base, &d->pci_mmio); d->bus = pci_register_bus(&d->busdev.qdev, \"pci\", pci_apb_set_irq, pci_pbm_map_irq, d, &d->pci_mmio, get_system_io(), 0, 32); *pbm_irqs = d->pbm_irqs; d->ivec_irqs = ivec_irqs; pci_create_simple(d->bus, 0, \"pbm-pci\"); /* APB secondary busses */ pci_dev = pci_create_multifunction(d->bus, PCI_DEVFN(1, 0), true, \"pbm-bridge\"); br = DO_UPCAST(PCIBridge, dev, pci_dev); pci_bridge_map_irq(br, \"Advanced PCI Bus secondary bridge 1\", pci_apb_map_irq); qdev_init_nofail(&pci_dev->qdev); *bus2 = pci_bridge_get_sec_bus(br); pci_dev = pci_create_multifunction(d->bus, PCI_DEVFN(1, 1), true, \"pbm-bridge\"); br = DO_UPCAST(PCIBridge, dev, pci_dev); pci_bridge_map_irq(br, \"Advanced PCI Bus secondary bridge 2\", pci_apb_map_irq); qdev_init_nofail(&pci_dev->qdev); *bus3 = pci_bridge_get_sec_bus(br); return d->bus; }"} {"target": 0, "idx": 16270, "func": "static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { /* Perform I/O through a temporary buffer so that users who scribble over * their read buffer while the operation is in progress do not end up * modifying the image file. This is critical for zero-copy guest I/O * where anything might happen inside guest memory. */ void *bounce_buffer; BlockDriver *drv = bs->drv; struct iovec iov; QEMUIOVector bounce_qiov; int64_t cluster_sector_num; int cluster_nb_sectors; size_t skip_bytes; int ret; /* Cover entire cluster so no additional backing file I/O is required when * allocating cluster in the image file. 
*/ bdrv_round_to_clusters(bs, sector_num, nb_sectors, &cluster_sector_num, &cluster_nb_sectors); trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, cluster_sector_num, cluster_nb_sectors); iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE; iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len); if (bounce_buffer == NULL) { ret = -ENOMEM; goto err; } qemu_iovec_init_external(&bounce_qiov, &iov, 1); ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors, &bounce_qiov); if (ret < 0) { goto err; } if (drv->bdrv_co_write_zeroes && buffer_is_zero(bounce_buffer, iov.iov_len)) { ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num, cluster_nb_sectors, 0); } else { /* This does not change the data on the disk, it is not necessary * to flush even in cache=writethrough mode. */ ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors, &bounce_qiov); } if (ret < 0) { /* It might be okay to ignore write errors for guest requests. If this * is a deliberate copy-on-read then we don't want to ignore the error. * Simply report it in all cases. */ goto err; } skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE; qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, nb_sectors * BDRV_SECTOR_SIZE); err: qemu_vfree(bounce_buffer); return ret; }"} {"target": 0, "idx": 16277, "func": "static void qmp_input_stack_object_free(StackObject *tos) { if (tos->h) { g_hash_table_unref(tos->h); } g_free(tos); }"} {"target": 0, "idx": 16279, "func": "bool timer_expired(QEMUTimer *timer_head, int64_t current_time) { return timer_expired_ns(timer_head, current_time * timer_head->scale); }"} {"target": 1, "idx": 16283, "func": "static void check_cmd(AHCIState *s, int port) { AHCIPortRegs *pr = &s->dev[port].port_regs; int slot; if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) { for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) { if ((pr->cmd_issue & (1 << slot)) && !handle_cmd(s, port, slot)) { pr->cmd_issue &= ~(1 << slot); } } } }"} {"target": 1, "idx": 16290, "func": "static av_always_inline void iadst4_1d(const dctcoef *in, ptrdiff_t stride, dctcoef *out, int pass) { int t0, t1, t2, t3; t0 = 5283 * IN(0) + 15212 * IN(2) + 9929 * IN(3); t1 = 9929 * IN(0) - 5283 * IN(2) - 15212 * IN(3); t2 = 13377 * (IN(0) - IN(2) + IN(3)); t3 = 13377 * IN(1); out[0] = (t0 + t3 + (1 << 13)) >> 14; out[1] = (t1 + t3 + (1 << 13)) >> 14; out[2] = (t2 + (1 << 13)) >> 14; out[3] = (t0 + t1 - t3 + (1 << 13)) >> 14; }"} {"target": 1, "idx": 16294, "func": "static void cleanup_infolist(CommandLineParameterInfoList *head) { CommandLineParameterInfoList *pre_entry, *cur, *del_entry; cur = head; while (cur->next) { pre_entry = head; while (pre_entry != cur->next) { if (!strcmp(pre_entry->value->name, cur->next->value->name)) { del_entry = cur->next; cur->next = cur->next->next; g_free(del_entry); break; } pre_entry = pre_entry->next; } cur = cur->next; } }"} {"target": 0, "idx": 16305, "func": "static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask) { int l1 = gen_new_label(); TCGv t0 = tcg_temp_new(); TCGv_i32 t1, t2; /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); tcg_gen_andi_tl(t0, EA, mask); tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); t1 = tcg_const_i32(POWERPC_EXCP_ALIGN); t2 = tcg_const_i32(0); gen_helper_raise_exception_err(cpu_env, t1, t2); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); gen_set_label(l1); tcg_temp_free(t0); }"} {"target": 0, "idx": 16307, "func": 
"_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode) #endif #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat) _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode) #endif #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16) _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname, uid_t,owner,gid_t,group,int,flags) #endif #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \\ defined(__NR_fstatat64) _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname, struct stat *,buf,int,flags) #endif #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat) _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname, const struct timeval *,times) #endif #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \\ defined(__NR_newfstatat) _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname, struct stat *,buf,int,flags) #endif #if defined(TARGET_NR_linkat) && defined(__NR_linkat) _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath, int,newdirfd,const char *,newpath,int,flags) #endif #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat) _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode) #endif #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat) _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname, mode_t,mode,dev_t,dev) #endif #if defined(TARGET_NR_openat) && defined(__NR_openat) _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode) #endif #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat) _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname, char *,buf,size_t,bufsize) #endif #if defined(TARGET_NR_renameat) && defined(__NR_renameat) _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath, int,newdirfd,const char *,newpath) #endif #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat) _syscall3(int,sys_symlinkat,const char *,oldpath, int,newdirfd,const char *,newpath) #endif #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat) _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags) #endif #endif /* CONFIG_ATFILE */ #ifdef CONFIG_UTIMENSAT static int sys_utimensat(int dirfd, const char *pathname, const struct timespec times[2], int flags) { if (pathname == NULL) return futimens(dirfd, times); else return utimensat(dirfd, pathname, times, flags); }"} {"target": 0, "idx": 16319, "func": "int64_t bdrv_nb_sectors(BlockDriverState *bs) { BlockDriver *drv = bs->drv; if (!drv) return -ENOMEDIUM; if (drv->has_variable_length) { int ret = refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { return ret; } } return bs->total_sectors; }"} {"target": 1, "idx": 16339, "func": "static void udp_chr_update_read_handler(CharDriverState *chr) { NetCharDriver *s = chr->opaque; if (s->tag) { g_source_remove(s->tag); s->tag = 0; } if (s->chan) { s->tag = io_add_watch_poll(s->chan, udp_chr_read_poll, udp_chr_read, chr); } }"} {"target": 1, "idx": 16348, "func": "void qemu_system_guest_panicked(GuestPanicInformation *info) { if (current_cpu) { current_cpu->crash_occurred = true; } qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE, !!info, info, &error_abort); vm_stop(RUN_STATE_GUEST_PANICKED); if (!no_shutdown) { qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_POWEROFF, !!info, info, &error_abort); qemu_system_shutdown_request(); } if (info) { if (info->type == GUEST_PANIC_INFORMATION_KIND_HYPER_V) { qemu_log_mask(LOG_GUEST_ERROR, \"HV crash parameters: (%#\"PRIx64 \" %#\"PRIx64\" 
%#\"PRIx64\" %#\"PRIx64\" %#\"PRIx64\")\\n\", info->u.hyper_v.data->arg1, info->u.hyper_v.data->arg2, info->u.hyper_v.data->arg3, info->u.hyper_v.data->arg4, info->u.hyper_v.data->arg5); } qapi_free_GuestPanicInformation(info); } }"} {"target": 1, "idx": 16358, "func": "static void vp8_decode_flush(AVCodecContext *avctx) { vp8_decode_flush_impl(avctx, 0, 0); }"} {"target": 1, "idx": 16361, "func": "static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s, AVFrame *frame) { int x, y; unsigned char P[4]; /* 4-color encoding */ bytestream2_get_buffer(&s->stream_ptr, P, 4); if (P[0] <= P[1]) { if (P[2] <= P[3]) { /* 1 of 4 colors for each pixel, need 16 more bytes */ for (y = 0; y < 8; y++) { /* get the next set of 8 2-bit flags */ int flags = bytestream2_get_le16(&s->stream_ptr); for (x = 0; x < 8; x++, flags >>= 2) *s->pixel_ptr++ = P[flags & 0x03]; s->pixel_ptr += s->line_inc; } else { uint32_t flags; /* 1 of 4 colors for each 2x2 block, need 4 more bytes */ flags = bytestream2_get_le32(&s->stream_ptr); for (y = 0; y < 8; y += 2) { for (x = 0; x < 8; x += 2, flags >>= 2) { s->pixel_ptr[x ] = s->pixel_ptr[x + 1 ] = s->pixel_ptr[x + s->stride] = s->pixel_ptr[x + 1 + s->stride] = P[flags & 0x03]; s->pixel_ptr += s->stride * 2; } else { uint64_t flags; /* 1 of 4 colors for each 2x1 or 1x2 block, need 8 more bytes */ flags = bytestream2_get_le64(&s->stream_ptr); if (P[2] <= P[3]) { for (y = 0; y < 8; y++) { for (x = 0; x < 8; x += 2, flags >>= 2) { s->pixel_ptr[x ] = s->pixel_ptr[x + 1] = P[flags & 0x03]; s->pixel_ptr += s->stride; } else { for (y = 0; y < 8; y += 2) { for (x = 0; x < 8; x++, flags >>= 2) { s->pixel_ptr[x ] = s->pixel_ptr[x + s->stride] = P[flags & 0x03]; s->pixel_ptr += s->stride * 2; /* report success */ return 0;"} {"target": 0, "idx": 16396, "func": "static void pc_compat_0_13(MachineState *machine) { pc_compat_1_2(machine); kvmclock_enabled = false; }"} {"target": 0, "idx": 16408, "func": "static void do_gdbserver(int argc, const char **argv) { int port; port = DEFAULT_GDBSTUB_PORT; if (argc >= 2) port = atoi(argv[1]); if (gdbserver_start(port) < 0) { qemu_printf(\"Could not open gdbserver socket on port %d\\n\", port); } else { qemu_printf(\"Waiting gdb connection on port %d\\n\", port); } }"} {"target": 0, "idx": 16409, "func": "sofcantsendmore(struct socket *so) { if ((so->so_state & SS_NOFDREF) == 0) { shutdown(so->s,1); /* send FIN to fhost */ if (global_readfds) { FD_CLR(so->s,global_readfds); } if (global_xfds) { FD_CLR(so->s,global_xfds); } } so->so_state &= ~(SS_ISFCONNECTING); if (so->so_state & SS_FCANTRCVMORE) { so->so_state &= SS_PERSISTENT_MASK; so->so_state |= SS_NOFDREF; /* as above */ } else { so->so_state |= SS_FCANTSENDMORE; } }"} {"target": 0, "idx": 16411, "func": "static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel) { const char *rn = \"invalid\"; if (sel != 0) check_insn(env, ctx, ISA_MIPS32); switch (reg) { case 0: switch (sel) { case 0: gen_op_mtc0_index(); rn = \"Index\"; break; case 1: check_mips_mt(env, ctx); gen_op_mtc0_mvpcontrol(); rn = \"MVPControl\"; break; case 2: check_mips_mt(env, ctx); /* ignored */ rn = \"MVPConf0\"; break; case 3: check_mips_mt(env, ctx); /* ignored */ rn = \"MVPConf1\"; break; default: goto die; } break; case 1: switch (sel) { case 0: /* ignored */ rn = \"Random\"; break; case 1: check_mips_mt(env, ctx); gen_op_mtc0_vpecontrol(); rn = \"VPEControl\"; break; case 2: check_mips_mt(env, ctx); gen_op_mtc0_vpeconf0(); rn = \"VPEConf0\"; break; case 3: check_mips_mt(env, ctx); 
gen_op_mtc0_vpeconf1(); rn = \"VPEConf1\"; break; case 4: check_mips_mt(env, ctx); gen_op_mtc0_yqmask(); rn = \"YQMask\"; break; case 5: check_mips_mt(env, ctx); gen_op_mtc0_vpeschedule(); rn = \"VPESchedule\"; break; case 6: check_mips_mt(env, ctx); gen_op_mtc0_vpeschefback(); rn = \"VPEScheFBack\"; break; case 7: check_mips_mt(env, ctx); gen_op_mtc0_vpeopt(); rn = \"VPEOpt\"; break; default: goto die; } break; case 2: switch (sel) { case 0: gen_op_mtc0_entrylo0(); rn = \"EntryLo0\"; break; case 1: check_mips_mt(env, ctx); gen_op_mtc0_tcstatus(); rn = \"TCStatus\"; break; case 2: check_mips_mt(env, ctx); gen_op_mtc0_tcbind(); rn = \"TCBind\"; break; case 3: check_mips_mt(env, ctx); gen_op_mtc0_tcrestart(); rn = \"TCRestart\"; break; case 4: check_mips_mt(env, ctx); gen_op_mtc0_tchalt(); rn = \"TCHalt\"; break; case 5: check_mips_mt(env, ctx); gen_op_mtc0_tccontext(); rn = \"TCContext\"; break; case 6: check_mips_mt(env, ctx); gen_op_mtc0_tcschedule(); rn = \"TCSchedule\"; break; case 7: check_mips_mt(env, ctx); gen_op_mtc0_tcschefback(); rn = \"TCScheFBack\"; break; default: goto die; } break; case 3: switch (sel) { case 0: gen_op_mtc0_entrylo1(); rn = \"EntryLo1\"; break; default: goto die; } break; case 4: switch (sel) { case 0: gen_op_mtc0_context(); rn = \"Context\"; break; case 1: // gen_op_mtc0_contextconfig(); /* SmartMIPS ASE */ rn = \"ContextConfig\"; // break; default: goto die; } break; case 5: switch (sel) { case 0: gen_op_mtc0_pagemask(); rn = \"PageMask\"; break; case 1: check_insn(env, ctx, ISA_MIPS32R2); gen_op_mtc0_pagegrain(); rn = \"PageGrain\"; break; default: goto die; } break; case 6: switch (sel) { case 0: gen_op_mtc0_wired(); rn = \"Wired\"; break; case 1: check_insn(env, ctx, ISA_MIPS32R2); gen_op_mtc0_srsconf0(); rn = \"SRSConf0\"; break; case 2: check_insn(env, ctx, ISA_MIPS32R2); gen_op_mtc0_srsconf1(); rn = \"SRSConf1\"; break; case 3: check_insn(env, ctx, ISA_MIPS32R2); gen_op_mtc0_srsconf2(); rn = \"SRSConf2\"; break; case 4: check_insn(env, ctx, ISA_MIPS32R2); gen_op_mtc0_srsconf3(); rn = \"SRSConf3\"; break; case 5: check_insn(env, ctx, ISA_MIPS32R2); gen_op_mtc0_srsconf4(); rn = \"SRSConf4\"; break; default: goto die; } break; case 7: switch (sel) { case 0: check_insn(env, ctx, ISA_MIPS32R2); gen_op_mtc0_hwrena(); rn = \"HWREna\"; break; default: goto die; } break; case 8: /* ignored */ rn = \"BadVaddr\"; break; case 9: switch (sel) { case 0: gen_op_mtc0_count(); rn = \"Count\"; break; /* 6,7 are implementation dependent */ default: goto die; } /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; case 10: switch (sel) { case 0: gen_op_mtc0_entryhi(); rn = \"EntryHi\"; break; default: goto die; } break; case 11: switch (sel) { case 0: gen_op_mtc0_compare(); rn = \"Compare\"; break; /* 6,7 are implementation dependent */ default: goto die; } /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; case 12: switch (sel) { case 0: gen_op_mtc0_status(); /* BS_STOP isn't good enough here, hflags may have changed. 
*/ gen_save_pc(ctx->pc + 4); ctx->bstate = BS_EXCP; rn = \"Status\"; break; case 1: check_insn(env, ctx, ISA_MIPS32R2); gen_op_mtc0_intctl(); /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = \"IntCtl\"; break; case 2: check_insn(env, ctx, ISA_MIPS32R2); gen_op_mtc0_srsctl(); /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = \"SRSCtl\"; break; case 3: check_insn(env, ctx, ISA_MIPS32R2); gen_op_mtc0_srsmap(); /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = \"SRSMap\"; break; default: goto die; } break; case 13: switch (sel) { case 0: gen_op_mtc0_cause(); rn = \"Cause\"; break; default: goto die; } /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; case 14: switch (sel) { case 0: gen_op_mtc0_epc(); rn = \"EPC\"; break; default: goto die; } break; case 15: switch (sel) { case 0: /* ignored */ rn = \"PRid\"; break; case 1: check_insn(env, ctx, ISA_MIPS32R2); gen_op_mtc0_ebase(); rn = \"EBase\"; break; default: goto die; } break; case 16: switch (sel) { case 0: gen_op_mtc0_config0(); rn = \"Config\"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; case 1: /* ignored, read only */ rn = \"Config1\"; break; case 2: gen_op_mtc0_config2(); rn = \"Config2\"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; case 3: /* ignored, read only */ rn = \"Config3\"; break; /* 4,5 are reserved */ /* 6,7 are implementation dependent */ case 6: /* ignored */ rn = \"Config6\"; break; case 7: /* ignored */ rn = \"Config7\"; break; default: rn = \"Invalid config selector\"; goto die; } break; case 17: switch (sel) { case 0: /* ignored */ rn = \"LLAddr\"; break; default: goto die; } break; case 18: switch (sel) { case 0 ... 7: gen_op_mtc0_watchlo(sel); rn = \"WatchLo\"; break; default: goto die; } break; case 19: switch (sel) { case 0 ... 7: gen_op_mtc0_watchhi(sel); rn = \"WatchHi\"; break; default: goto die; } break; case 20: switch (sel) { case 0: #if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64) check_insn(env, ctx, ISA_MIPS3); gen_op_mtc0_xcontext(); rn = \"XContext\"; break; #endif default: goto die; } break; case 21: /* Officially reserved, but sel 0 is used for R1x000 framemask */ switch (sel) { case 0: gen_op_mtc0_framemask(); rn = \"Framemask\"; break; default: goto die; } break; case 22: /* ignored */ rn = \"Diagnostic\"; /* implementation dependent */ break; case 23: switch (sel) { case 0: gen_op_mtc0_debug(); /* EJTAG support */ /* BS_STOP isn't good enough here, hflags may have changed. 
*/ gen_save_pc(ctx->pc + 4); ctx->bstate = BS_EXCP; rn = \"Debug\"; break; case 1: // gen_op_mtc0_tracecontrol(); /* PDtrace support */ rn = \"TraceControl\"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; // break; case 2: // gen_op_mtc0_tracecontrol2(); /* PDtrace support */ rn = \"TraceControl2\"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; // break; case 3: /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; // gen_op_mtc0_usertracedata(); /* PDtrace support */ rn = \"UserTraceData\"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; // break; case 4: // gen_op_mtc0_debug(); /* PDtrace support */ /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = \"TraceBPC\"; // break; default: goto die; } break; case 24: switch (sel) { case 0: gen_op_mtc0_depc(); /* EJTAG support */ rn = \"DEPC\"; break; default: goto die; } break; case 25: switch (sel) { case 0: gen_op_mtc0_performance0(); rn = \"Performance0\"; break; case 1: // gen_op_mtc0_performance1(); rn = \"Performance1\"; // break; case 2: // gen_op_mtc0_performance2(); rn = \"Performance2\"; // break; case 3: // gen_op_mtc0_performance3(); rn = \"Performance3\"; // break; case 4: // gen_op_mtc0_performance4(); rn = \"Performance4\"; // break; case 5: // gen_op_mtc0_performance5(); rn = \"Performance5\"; // break; case 6: // gen_op_mtc0_performance6(); rn = \"Performance6\"; // break; case 7: // gen_op_mtc0_performance7(); rn = \"Performance7\"; // break; default: goto die; } break; case 26: /* ignored */ rn = \"ECC\"; break; case 27: switch (sel) { case 0 ... 3: /* ignored */ rn = \"CacheErr\"; break; default: goto die; } break; case 28: switch (sel) { case 0: case 2: case 4: case 6: gen_op_mtc0_taglo(); rn = \"TagLo\"; break; case 1: case 3: case 5: case 7: gen_op_mtc0_datalo(); rn = \"DataLo\"; break; default: goto die; } break; case 29: switch (sel) { case 0: case 2: case 4: case 6: gen_op_mtc0_taghi(); rn = \"TagHi\"; break; case 1: case 3: case 5: case 7: gen_op_mtc0_datahi(); rn = \"DataHi\"; break; default: rn = \"invalid sel\"; goto die; } break; case 30: switch (sel) { case 0: gen_op_mtc0_errorepc(); rn = \"ErrorEPC\"; break; default: goto die; } break; case 31: switch (sel) { case 0: gen_op_mtc0_desave(); /* EJTAG support */ rn = \"DESAVE\"; break; default: goto die; } /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; default: goto die; } #if defined MIPS_DEBUG_DISAS if (loglevel & CPU_LOG_TB_IN_ASM) { fprintf(logfile, \"mtc0 %s (reg %d sel %d)\\n\", rn, reg, sel); } #endif return; die: #if defined MIPS_DEBUG_DISAS if (loglevel & CPU_LOG_TB_IN_ASM) { fprintf(logfile, \"mtc0 %s (reg %d sel %d)\\n\", rn, reg, sel); } #endif generate_exception(ctx, EXCP_RI); }"} {"target": 0, "idx": 16414, "func": "static void thread_pool_init(void) { thread_pool_init_one(&global_pool, NULL); }"} {"target": 1, "idx": 16425, "func": "static void handle_notify(EventNotifier *e) { VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane, host_notifier); VirtQueueElement *elem; VirtIOBlockReq *req; int ret; MultiReqBuffer mrb = { .num_writes = 0, }; event_notifier_test_and_clear(&s->host_notifier); bdrv_io_plug(s->blk->conf.bs); for (;;) { /* Disable guest->host notifies to avoid unnecessary vmexits */ vring_disable_notification(s->vdev, &s->vring); for (;;) { ret = vring_pop(s->vdev, &s->vring, 
&elem); if (ret < 0) { assert(elem == NULL); break; /* no more requests */ } trace_virtio_blk_data_plane_process_request(s, elem->out_num, elem->in_num, elem->index); req = g_slice_new(VirtIOBlockReq); req->dev = VIRTIO_BLK(s->vdev); req->elem = elem; virtio_blk_handle_request(req, &mrb); } virtio_submit_multiwrite(s->blk->conf.bs, &mrb); if (likely(ret == -EAGAIN)) { /* vring emptied */ /* Re-enable guest->host notifies and stop processing the vring. * But if the guest has snuck in more descriptors, keep processing. */ if (vring_enable_notification(s->vdev, &s->vring)) { break; } } else { /* fatal error */ break; } } bdrv_io_unplug(s->blk->conf.bs); }"} {"target": 1, "idx": 16427, "func": "static int get_int32_equal(QEMUFile *f, void *pv, size_t size, VMStateField *field) { int32_t *v = pv; int32_t v2; qemu_get_sbe32s(f, &v2); if (*v == v2) { return 0; error_report(\"%\" PRIx32 \" != %\" PRIx32, *v, v2); return -EINVAL;"} {"target": 1, "idx": 16435, "func": "static int select_input_picture(MpegEncContext *s) { int i, ret; for (i = 1; i < MAX_PICTURE_COUNT; i++) s->reordered_input_picture[i - 1] = s->reordered_input_picture[i]; s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL; /* set next picture type & ordering */ if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) { if (/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr == NULL || s->intra_only) { s->reordered_input_picture[0] = s->input_picture[0]; s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I; s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++; } else { int b_frames; if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) { if (s->picture_in_gop_number < s->gop_size && skip_check(s, s->input_picture[0], s->next_picture_ptr)) { // FIXME check that te gop check above is +-1 correct av_frame_unref(&s->input_picture[0]->f); emms_c(); ff_vbv_update(s, 0); goto no_output_pic; } } if (s->flags & CODEC_FLAG_PASS2) { for (i = 0; i < s->max_b_frames + 1; i++) { int pict_num = s->input_picture[0]->f.display_picture_number + i; if (pict_num >= s->rc_context.num_entries) break; if (!s->input_picture[i]) { s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P; break; } s->input_picture[i]->f.pict_type = s->rc_context.entry[pict_num].new_pict_type; } } if (s->avctx->b_frame_strategy == 0) { b_frames = s->max_b_frames; while (b_frames && !s->input_picture[b_frames]) b_frames--; } else if (s->avctx->b_frame_strategy == 1) { for (i = 1; i < s->max_b_frames + 1; i++) { if (s->input_picture[i] && s->input_picture[i]->b_frame_score == 0) { s->input_picture[i]->b_frame_score = get_intra_count(s, s->input_picture[i ]->f.data[0], s->input_picture[i - 1]->f.data[0], s->linesize) + 1; } } for (i = 0; i < s->max_b_frames + 1; i++) { if (s->input_picture[i] == NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num / s->avctx->b_sensitivity) break; } b_frames = FFMAX(0, i - 1); /* reset scores */ for (i = 0; i < b_frames + 1; i++) { s->input_picture[i]->b_frame_score = 0; } } else if (s->avctx->b_frame_strategy == 2) { b_frames = estimate_best_b_count(s); } else { av_log(s->avctx, AV_LOG_ERROR, \"illegal b frame strategy\\n\"); b_frames = 0; } emms_c(); for (i = b_frames - 1; i >= 0; i--) { int type = s->input_picture[i]->f.pict_type; if (type && type != AV_PICTURE_TYPE_B) b_frames = i; } if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B && b_frames == s->max_b_frames) { av_log(s->avctx, AV_LOG_ERROR, \"warning, too many b frames in a 
row\\n\"); } if (s->picture_in_gop_number + b_frames >= s->gop_size) { if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) && s->gop_size > s->picture_in_gop_number) { b_frames = s->gop_size - s->picture_in_gop_number - 1; } else { if (s->flags & CODEC_FLAG_CLOSED_GOP) b_frames = 0; s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I; } } if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames && s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I) b_frames--; s->reordered_input_picture[0] = s->input_picture[b_frames]; if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I) s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P; s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++; for (i = 0; i < b_frames; i++) { s->reordered_input_picture[i + 1] = s->input_picture[i]; s->reordered_input_picture[i + 1]->f.pict_type = AV_PICTURE_TYPE_B; s->reordered_input_picture[i + 1]->f.coded_picture_number = s->coded_picture_number++; } } } no_output_pic: if (s->reordered_input_picture[0]) { s->reordered_input_picture[0]->reference = s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_B ? 3 : 0; ff_mpeg_unref_picture(s, &s->new_picture); if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0]))) return ret; if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) { // input is a shared pix, so we can't modifiy it -> alloc a new // one & ensure that the shared one is reuseable Picture *pic; int i = ff_find_unused_picture(s, 0); if (i < 0) return i; pic = &s->picture[i]; pic->reference = s->reordered_input_picture[0]->reference; if (ff_alloc_picture(s, pic, 0) < 0) { return -1; } ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f); if (ret < 0) return ret; /* mark us unused / free shared pic */ av_frame_unref(&s->reordered_input_picture[0]->f); s->reordered_input_picture[0]->shared = 0; s->current_picture_ptr = pic; } else { // input is not a shared pix -> reuse buffer for current_pix s->current_picture_ptr = s->reordered_input_picture[0]; for (i = 0; i < 4; i++) { s->new_picture.f.data[i] += INPLACE_OFFSET; } } ff_mpeg_unref_picture(s, &s->current_picture); if ((ret = ff_mpeg_ref_picture(s, &s->current_picture, s->current_picture_ptr)) < 0) return ret; s->picture_number = s->new_picture.f.display_picture_number; } else { ff_mpeg_unref_picture(s, &s->new_picture); } return 0; }"} {"target": 1, "idx": 16464, "func": "float ff_amr_set_fixed_gain(float fixed_gain_factor, float fixed_mean_energy, float *prediction_error, float energy_mean, const float *pred_table) { // Equations 66-69: // ^g_c = ^gamma_gc * 100.05 (predicted dB + mean dB - dB of fixed vector) // Note 10^(0.05 * -10log(average x2)) = 1/sqrt((average x2)). float val = fixed_gain_factor * ff_exp10(0.05 * (avpriv_scalarproduct_float_c(pred_table, prediction_error, 4) + energy_mean)) / sqrtf(fixed_mean_energy); // update quantified prediction error energy history memmove(&prediction_error[0], &prediction_error[1], 3 * sizeof(prediction_error[0])); prediction_error[3] = 20.0 * log10f(fixed_gain_factor); return val; }"} {"target": 1, "idx": 16468, "func": "static void cpu_sh4_reset(CPUSH4State * env) { #if defined(CONFIG_USER_ONLY) env->sr = 0; #else env->sr = SR_MD | SR_RB | SR_BL | SR_I3 | SR_I2 | SR_I1 | SR_I0; #endif env->vbr = 0; env->pc = 0xA0000000; #if defined(CONFIG_USER_ONLY) env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */ set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! 
*/ #else env->fpscr = 0x00040001; /* CPU reset value according to SH4 manual */ set_float_rounding_mode(float_round_to_zero, &env->fp_status); #endif env->mmucr = 0;"} {"target": 0, "idx": 16484, "func": "static void spapr_vlan_cleanup(NetClientState *nc) { VIOsPAPRVLANDevice *dev = qemu_get_nic_opaque(nc); dev->nic = NULL; }"} {"target": 0, "idx": 16487, "func": "BlockDriverAIOCB *win32_aio_submit(BlockDriverState *bs, QEMUWin32AIOState *aio, HANDLE hfile, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque, int type) { struct QEMUWin32AIOCB *waiocb; uint64_t offset = sector_num * 512; DWORD rc; waiocb = qemu_aio_get(&win32_aio_pool, bs, cb, opaque); waiocb->nbytes = nb_sectors * 512; waiocb->qiov = qiov; waiocb->is_read = (type == QEMU_AIO_READ); if (qiov->niov > 1) { waiocb->buf = qemu_blockalign(bs, qiov->size); if (type & QEMU_AIO_WRITE) { char *p = waiocb->buf; int i; for (i = 0; i < qiov->niov; ++i) { memcpy(p, qiov->iov[i].iov_base, qiov->iov[i].iov_len); p += qiov->iov[i].iov_len; } } waiocb->is_linear = false; } else { waiocb->buf = qiov->iov[0].iov_base; waiocb->is_linear = true; } waiocb->ov = (OVERLAPPED) { .Offset = (DWORD) offset, .OffsetHigh = (DWORD) (offset >> 32), .hEvent = event_notifier_get_handle(&aio->e) }; aio->count++; if (type & QEMU_AIO_READ) { rc = ReadFile(hfile, waiocb->buf, waiocb->nbytes, NULL, &waiocb->ov); } else { rc = WriteFile(hfile, waiocb->buf, waiocb->nbytes, NULL, &waiocb->ov); } if(rc == 0 && GetLastError() != ERROR_IO_PENDING) { goto out_dec_count; } return &waiocb->common; out_dec_count: aio->count--; qemu_aio_release(waiocb); return NULL; }"} {"target": 0, "idx": 16500, "func": "static int blkdebug_open(BlockDriverState *bs, const char *filename, int flags) { BDRVBlkdebugState *s = bs->opaque; int ret; char *config, *c; /* Parse the blkdebug: prefix */ if (strncmp(filename, \"blkdebug:\", strlen(\"blkdebug:\"))) { return -EINVAL; } filename += strlen(\"blkdebug:\"); /* Read rules from config file */ c = strchr(filename, ':'); if (c == NULL) { return -EINVAL; } config = strdup(filename); config[c - filename] = '\\0'; ret = read_config(s, config); free(config); if (ret < 0) { return ret; } filename = c + 1; /* Set initial state */ s->vars.state = 1; /* Open the backing file */ ret = bdrv_file_open(&bs->file, filename, flags); if (ret < 0) { return ret; } return 0; }"} {"target": 0, "idx": 16503, "func": "static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size) { Mpeg1Context *s = avctx->priv_data; MpegEncContext *s2 = &s->mpeg_enc_ctx; const uint8_t *buf_ptr = buf; const uint8_t *buf_end = buf + buf_size; int ret, input_size; int last_code = 0, skip_frame = 0; for (;;) { /* find next start code */ uint32_t start_code = -1; buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code); if (start_code > 0x1ff) { if (!skip_frame) { if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) && !avctx->hwaccel) { int i; avctx->execute(avctx, slice_decode_thread, &s2->thread_context[0], NULL, s->slice_count, sizeof(void *)); for (i = 0; i < s->slice_count; i++) s2->er.error_count += s2->thread_context[i]->er.error_count; } ret = slice_end(avctx, picture); if (ret < 0) return ret; else if (ret) { // FIXME: merge with the stuff in mpeg_decode_slice if (s2->last_picture_ptr || s2->low_delay) *got_output = 1; } } s2->pict_type = 0; return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index); } input_size = buf_end - buf_ptr; if 
(avctx->debug & FF_DEBUG_STARTCODE) av_log(avctx, AV_LOG_DEBUG, \"%3\"PRIX32\" at %td left %d\\n\", start_code, buf_ptr - buf, input_size); /* prepare data for next start code */ switch (start_code) { case SEQ_START_CODE: if (last_code == 0) { mpeg1_decode_sequence(avctx, buf_ptr, input_size); s->sync = 1; } else { av_log(avctx, AV_LOG_ERROR, \"ignoring SEQ_START_CODE after %X\\n\", last_code); if (avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } break; case PICTURE_START_CODE: if (s2->width <= 0 || s2->height <= 0) { av_log(avctx, AV_LOG_ERROR, \"Invalid frame dimensions %dx%d.\\n\", s2->width, s2->height); return AVERROR_INVALIDDATA; } if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) && !avctx->hwaccel && s->slice_count) { int i; avctx->execute(avctx, slice_decode_thread, s2->thread_context, NULL, s->slice_count, sizeof(void *)); for (i = 0; i < s->slice_count; i++) s2->er.error_count += s2->thread_context[i]->er.error_count; s->slice_count = 0; } if (last_code == 0 || last_code == SLICE_MIN_START_CODE) { ret = mpeg_decode_postinit(avctx); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"mpeg_decode_postinit() failure\\n\"); return ret; } /* We have a complete image: we try to decompress it. */ if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0) s2->pict_type = 0; s->first_slice = 1; last_code = PICTURE_START_CODE; } else { av_log(avctx, AV_LOG_ERROR, \"ignoring pic after %X\\n\", last_code); if (avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } break; case EXT_START_CODE: init_get_bits(&s2->gb, buf_ptr, input_size * 8); switch (get_bits(&s2->gb, 4)) { case 0x1: if (last_code == 0) { mpeg_decode_sequence_extension(s); } else { av_log(avctx, AV_LOG_ERROR, \"ignoring seq ext after %X\\n\", last_code); if (avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } break; case 0x2: mpeg_decode_sequence_display_extension(s); break; case 0x3: mpeg_decode_quant_matrix_extension(s2); break; case 0x7: mpeg_decode_picture_display_extension(s); break; case 0x8: if (last_code == PICTURE_START_CODE) { mpeg_decode_picture_coding_extension(s); } else { av_log(avctx, AV_LOG_ERROR, \"ignoring pic cod ext after %X\\n\", last_code); if (avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } break; } break; case USER_START_CODE: mpeg_decode_user_data(avctx, buf_ptr, input_size); break; case GOP_START_CODE: if (last_code == 0) { s2->first_field = 0; mpeg_decode_gop(avctx, buf_ptr, input_size); s->sync = 1; } else { av_log(avctx, AV_LOG_ERROR, \"ignoring GOP_START_CODE after %X\\n\", last_code); if (avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } break; default: if (start_code >= SLICE_MIN_START_CODE && start_code <= SLICE_MAX_START_CODE && last_code != 0) { const int field_pic = s2->picture_structure != PICT_FRAME; int mb_y = (start_code - SLICE_MIN_START_CODE) << field_pic; last_code = SLICE_MIN_START_CODE; if (s2->picture_structure == PICT_BOTTOM_FIELD) mb_y++; if (mb_y >= s2->mb_height) { av_log(s2->avctx, AV_LOG_ERROR, \"slice below image (%d >= %d)\\n\", mb_y, s2->mb_height); return -1; } if (s2->last_picture_ptr == NULL) { /* Skip B-frames if we do not have reference frames and * GOP is not closed. */ if (s2->pict_type == AV_PICTURE_TYPE_B) { if (!s->closed_gop) { skip_frame = 1; break; } } } if (s2->pict_type == AV_PICTURE_TYPE_I) s->sync = 1; if (s2->next_picture_ptr == NULL) { /* Skip P-frames if we do not have a reference frame or * we have an invalid header. 
*/ if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) { skip_frame = 1; break; } } if ((avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type == AV_PICTURE_TYPE_B) || (avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type != AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) { skip_frame = 1; break; } if (!s->mpeg_enc_ctx_allocated) break; if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) { if (mb_y < avctx->skip_top || mb_y >= s2->mb_height - avctx->skip_bottom) break; } if (!s2->pict_type) { av_log(avctx, AV_LOG_ERROR, \"Missing picture start code\\n\"); if (avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; break; } if (s->first_slice) { skip_frame = 0; s->first_slice = 0; if (mpeg_field_start(s2, buf, buf_size) < 0) return -1; } if (!s2->current_picture_ptr) { av_log(avctx, AV_LOG_ERROR, \"current_picture not initialized\\n\"); return AVERROR_INVALIDDATA; } if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) && !avctx->hwaccel) { int threshold = (s2->mb_height * s->slice_count + s2->slice_context_count / 2) / s2->slice_context_count; if (threshold <= mb_y) { MpegEncContext *thread_context = s2->thread_context[s->slice_count]; thread_context->start_mb_y = mb_y; thread_context->end_mb_y = s2->mb_height; if (s->slice_count) { s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y; ret = ff_update_duplicate_context(thread_context, s2); if (ret < 0) return ret; } init_get_bits(&thread_context->gb, buf_ptr, input_size * 8); s->slice_count++; } buf_ptr += 2; // FIXME add minimum number of bytes per slice } else { ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size); emms_c(); if (ret < 0) { if (avctx->err_recognition & AV_EF_EXPLODE) return ret; if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0) ff_er_add_slice(&s2->er, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR); } else { ff_er_add_slice(&s2->er, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x - 1, s2->mb_y, ER_AC_END | ER_DC_END | ER_MV_END); } } } break; } } }"} {"target": 0, "idx": 16508, "func": "void cpu_loop(CPUPPCState *env) { target_siginfo_t info; int trapnr; uint32_t ret; for(;;) { trapnr = cpu_ppc_exec(env); switch(trapnr) { case POWERPC_EXCP_NONE: /* Just go on */ break; case POWERPC_EXCP_CRITICAL: /* Critical input */ cpu_abort(env, \"Critical interrupt while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_MCHECK: /* Machine check exception */ cpu_abort(env, \"Machine check exception while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_DSI: /* Data storage exception */ EXCP_DUMP(env, \"Invalid data memory access: 0x\" ADDRX \"\\n\", env->spr[SPR_DAR]); /* XXX: check this. Seems bugged */ switch (env->error_code & 0xFF000000) { case 0x40000000: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; break; case 0x04000000: info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_ILLADR; break; case 0x08000000: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_ACCERR; break; default: /* Let's send a regular segfault... 
*/ EXCP_DUMP(env, \"Invalid segfault errno (%02x)\\n\", env->error_code); info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; break; } info._sifields._sigfault._addr = env->nip; queue_signal(info.si_signo, &info); break; case POWERPC_EXCP_ISI: /* Instruction storage exception */ EXCP_DUMP(env, \"Invalid instruction fetch: 0x\\n\" ADDRX \"\\n\", env->spr[SPR_SRR0]); /* XXX: check this */ switch (env->error_code & 0xFF000000) { case 0x40000000: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; break; case 0x10000000: case 0x08000000: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_ACCERR; break; default: /* Let's send a regular segfault... */ EXCP_DUMP(env, \"Invalid segfault errno (%02x)\\n\", env->error_code); info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; break; } info._sifields._sigfault._addr = env->nip - 4; queue_signal(info.si_signo, &info); break; case POWERPC_EXCP_EXTERNAL: /* External input */ cpu_abort(env, \"External interrupt while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_ALIGN: /* Alignment exception */ EXCP_DUMP(env, \"Unaligned memory access\\n\"); /* XXX: check this */ info.si_signo = TARGET_SIGBUS; info.si_errno = 0; info.si_code = TARGET_BUS_ADRALN; info._sifields._sigfault._addr = env->nip - 4; queue_signal(info.si_signo, &info); break; case POWERPC_EXCP_PROGRAM: /* Program exception */ /* XXX: check this */ switch (env->error_code & ~0xF) { case POWERPC_EXCP_FP: EXCP_DUMP(env, \"Floating point program exception\\n\"); info.si_signo = TARGET_SIGFPE; info.si_errno = 0; switch (env->error_code & 0xF) { case POWERPC_EXCP_FP_OX: info.si_code = TARGET_FPE_FLTOVF; break; case POWERPC_EXCP_FP_UX: info.si_code = TARGET_FPE_FLTUND; break; case POWERPC_EXCP_FP_ZX: case POWERPC_EXCP_FP_VXZDZ: info.si_code = TARGET_FPE_FLTDIV; break; case POWERPC_EXCP_FP_XX: info.si_code = TARGET_FPE_FLTRES; break; case POWERPC_EXCP_FP_VXSOFT: info.si_code = TARGET_FPE_FLTINV; break; case POWERPC_EXCP_FP_VXSNAN: case POWERPC_EXCP_FP_VXISI: case POWERPC_EXCP_FP_VXIDI: case POWERPC_EXCP_FP_VXIMZ: case POWERPC_EXCP_FP_VXVC: case POWERPC_EXCP_FP_VXSQRT: case POWERPC_EXCP_FP_VXCVI: info.si_code = TARGET_FPE_FLTSUB; break; default: EXCP_DUMP(env, \"Unknown floating point exception (%02x)\\n\", env->error_code); break; } break; case POWERPC_EXCP_INVAL: EXCP_DUMP(env, \"Invalid instruction\\n\"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; switch (env->error_code & 0xF) { case POWERPC_EXCP_INVAL_INVAL: info.si_code = TARGET_ILL_ILLOPC; break; case POWERPC_EXCP_INVAL_LSWX: info.si_code = TARGET_ILL_ILLOPN; break; case POWERPC_EXCP_INVAL_SPR: info.si_code = TARGET_ILL_PRVREG; break; case POWERPC_EXCP_INVAL_FP: info.si_code = TARGET_ILL_COPROC; break; default: EXCP_DUMP(env, \"Unknown invalid operation (%02x)\\n\", env->error_code & 0xF); info.si_code = TARGET_ILL_ILLADR; break; } break; case POWERPC_EXCP_PRIV: EXCP_DUMP(env, \"Privilege violation\\n\"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; switch (env->error_code & 0xF) { case POWERPC_EXCP_PRIV_OPC: info.si_code = TARGET_ILL_PRVOPC; break; case POWERPC_EXCP_PRIV_REG: info.si_code = TARGET_ILL_PRVREG; break; default: EXCP_DUMP(env, \"Unknown privilege violation (%02x)\\n\", env->error_code & 0xF); info.si_code = TARGET_ILL_PRVOPC; break; } break; case POWERPC_EXCP_TRAP: cpu_abort(env, \"Tried to call a TRAP\\n\"); break; default: /* Should not happen ! 
*/ cpu_abort(env, \"Unknown program exception (%02x)\\n\", env->error_code); break; } info._sifields._sigfault._addr = env->nip - 4; queue_signal(info.si_signo, &info); break; case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ EXCP_DUMP(env, \"No floating point allowed\\n\"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_COPROC; info._sifields._sigfault._addr = env->nip - 4; queue_signal(info.si_signo, &info); break; case POWERPC_EXCP_SYSCALL: /* System call exception */ cpu_abort(env, \"Syscall exception while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ EXCP_DUMP(env, \"No APU instruction allowed\\n\"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_COPROC; info._sifields._sigfault._addr = env->nip - 4; queue_signal(info.si_signo, &info); break; case POWERPC_EXCP_DECR: /* Decrementer exception */ cpu_abort(env, \"Decrementer interrupt while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ cpu_abort(env, \"Fix interval timer interrupt while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ cpu_abort(env, \"Watchdog timer interrupt while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_DTLB: /* Data TLB error */ cpu_abort(env, \"Data TLB exception while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_ITLB: /* Instruction TLB error */ cpu_abort(env, \"Instruction TLB exception while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_DEBUG: /* Debug interrupt */ /* XXX: check this */ { int sig; sig = gdb_handlesig(env, TARGET_SIGTRAP); if (sig) { info.si_signo = sig; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(info.si_signo, &info); } } break; case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */ EXCP_DUMP(env, \"No SPE/floating-point instruction allowed\\n\"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_COPROC; info._sifields._sigfault._addr = env->nip - 4; queue_signal(info.si_signo, &info); break; case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */ cpu_abort(env, \"Embedded floating-point data IRQ not handled\\n\"); break; case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */ cpu_abort(env, \"Embedded floating-point round IRQ not handled\\n\"); break; case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */ cpu_abort(env, \"Performance monitor exception not handled\\n\"); break; case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ cpu_abort(env, \"Doorbell interrupt while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ cpu_abort(env, \"Doorbell critical interrupt while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_RESET: /* System reset exception */ cpu_abort(env, \"Reset interrupt while in user mode. \" \"Aborting\\n\"); break; #if defined(TARGET_PPC64) && !defined(TARGET_ABI32) /* PowerPC 64 */ case POWERPC_EXCP_DSEG: /* Data segment exception */ cpu_abort(env, \"Data segment exception while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_ISEG: /* Instruction segment exception */ cpu_abort(env, \"Instruction segment exception \" \"while in user mode. 
Aborting\\n\"); break; #endif /* defined(TARGET_PPC64) && !defined(TARGET_ABI32) */ #if defined(TARGET_PPC64H) && !defined(TARGET_ABI32) /* PowerPC 64 with hypervisor mode support */ case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ cpu_abort(env, \"Hypervisor decrementer interrupt \" \"while in user mode. Aborting\\n\"); break; #endif /* defined(TARGET_PPC64H) && !defined(TARGET_ABI32) */ case POWERPC_EXCP_TRACE: /* Trace exception */ /* Nothing to do: * we use this exception to emulate step-by-step execution mode. */ break; #if defined(TARGET_PPC64H) && !defined(TARGET_ABI32) /* PowerPC 64 with hypervisor mode support */ case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ cpu_abort(env, \"Hypervisor data storage exception \" \"while in user mode. Aborting\\n\"); break; case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */ cpu_abort(env, \"Hypervisor instruction storage exception \" \"while in user mode. Aborting\\n\"); break; case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ cpu_abort(env, \"Hypervisor data segment exception \" \"while in user mode. Aborting\\n\"); break; case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */ cpu_abort(env, \"Hypervisor instruction segment exception \" \"while in user mode. Aborting\\n\"); break; #endif /* defined(TARGET_PPC64H) && !defined(TARGET_ABI32) */ case POWERPC_EXCP_VPU: /* Vector unavailable exception */ EXCP_DUMP(env, \"No Altivec instructions allowed\\n\"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_COPROC; info._sifields._sigfault._addr = env->nip - 4; queue_signal(info.si_signo, &info); break; case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */ cpu_abort(env, \"Programable interval timer interrupt \" \"while in user mode. Aborting\\n\"); break; case POWERPC_EXCP_IO: /* IO error exception */ cpu_abort(env, \"IO error exception while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_RUNM: /* Run mode exception */ cpu_abort(env, \"Run mode exception while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_EMUL: /* Emulation trap exception */ cpu_abort(env, \"Emulation trap exception not handled\\n\"); break; case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ cpu_abort(env, \"Instruction fetch TLB exception \" \"while in user-mode. Aborting\"); break; case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ cpu_abort(env, \"Data load TLB exception while in user-mode. \" \"Aborting\"); break; case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ cpu_abort(env, \"Data store TLB exception while in user-mode. \" \"Aborting\"); break; case POWERPC_EXCP_FPA: /* Floating-point assist exception */ cpu_abort(env, \"Floating-point assist exception not handled\\n\"); break; case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ cpu_abort(env, \"Instruction address breakpoint exception \" \"not handled\\n\"); break; case POWERPC_EXCP_SMI: /* System management interrupt */ cpu_abort(env, \"System management interrupt while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_THERM: /* Thermal interrupt */ cpu_abort(env, \"Thermal interrupt interrupt while in user mode. 
\" \"Aborting\\n\"); break; case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */ cpu_abort(env, \"Performance monitor exception not handled\\n\"); break; case POWERPC_EXCP_VPUA: /* Vector assist exception */ cpu_abort(env, \"Vector assist exception not handled\\n\"); break; case POWERPC_EXCP_SOFTP: /* Soft patch exception */ cpu_abort(env, \"Soft patch exception not handled\\n\"); break; case POWERPC_EXCP_MAINT: /* Maintenance exception */ cpu_abort(env, \"Maintenance exception while in user mode. \" \"Aborting\\n\"); break; case POWERPC_EXCP_STOP: /* stop translation */ /* We did invalidate the instruction cache. Go on */ break; case POWERPC_EXCP_BRANCH: /* branch instruction: */ /* We just stopped because of a branch. Go on */ break; case POWERPC_EXCP_SYSCALL_USER: /* system call in user-mode emulation */ /* WARNING: * PPC ABI uses overflow flag in cr0 to signal an error * in syscalls. */ #if 0 printf(\"syscall %d 0x%08x 0x%08x 0x%08x 0x%08x\\n\", env->gpr[0], env->gpr[3], env->gpr[4], env->gpr[5], env->gpr[6]); #endif env->crf[0] &= ~0x1; ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4], env->gpr[5], env->gpr[6], env->gpr[7], env->gpr[8]); if (ret > (uint32_t)(-515)) { env->crf[0] |= 0x1; ret = -ret; } env->gpr[3] = ret; #if 0 printf(\"syscall returned 0x%08x (%d)\\n\", ret, ret); #endif break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; default: cpu_abort(env, \"Unknown exception 0x%d. Aborting\\n\", trapnr); break; } process_pending_signals(env); } }"} {"target": 0, "idx": 16510, "func": "static inline int seek_to_sector(BlockDriverState *bs, int64_t sector_num) { BDRVBochsState *s = bs->opaque; int64_t offset = sector_num * 512; int64_t extent_index, extent_offset, bitmap_offset, block_offset; char bitmap_entry; // seek to sector extent_index = offset / s->extent_size; extent_offset = (offset % s->extent_size) / 512; if (s->catalog_bitmap[extent_index] == 0xffffffff) { // fprintf(stderr, \"page not allocated [%x - %x:%x]\\n\", // sector_num, extent_index, extent_offset); return -1; // not allocated } bitmap_offset = s->data_offset + (512 * s->catalog_bitmap[extent_index] * (s->extent_blocks + s->bitmap_blocks)); block_offset = bitmap_offset + (512 * (s->bitmap_blocks + extent_offset)); // fprintf(stderr, \"sect: %x [ext i: %x o: %x] -> %x bitmap: %x block: %x\\n\", // sector_num, extent_index, extent_offset, // le32_to_cpu(s->catalog_bitmap[extent_index]), // bitmap_offset, block_offset); // read in bitmap for current extent lseek(s->fd, bitmap_offset + (extent_offset / 8), SEEK_SET); if (read(s->fd, &bitmap_entry, 1) != 1) return -1; if (!((bitmap_entry >> (extent_offset % 8)) & 1)) { // fprintf(stderr, \"sector (%x) in bitmap not allocated\\n\", // sector_num); return -1; // not allocated } lseek(s->fd, block_offset, SEEK_SET); return 0; }"} {"target": 0, "idx": 16511, "func": "static void vnc_handshake_io(void *opaque) { struct VncState *vs = (struct VncState *)opaque; VNC_DEBUG(\"Handshake IO continue\\n\"); vnc_continue_handshake(vs); }"} {"target": 0, "idx": 16515, "func": "static int local_remove(FsContext *ctx, const char *path) { int err; struct stat stbuf; char buffer[PATH_MAX]; if (ctx->export_flags & V9FS_SM_MAPPED_FILE) { err = lstat(rpath(ctx, path, buffer), &stbuf); if (err) { goto err_out; } /* * If directory remove .virtfs_metadata contained in the * directory */ if (S_ISDIR(stbuf.st_mode)) { snprintf(buffer, ARRAY_SIZE(buffer), \"%s/%s/%s\", ctx->fs_root, path, VIRTFS_META_DIR); err = remove(buffer); if 
(err < 0 && errno != ENOENT) { /* * We didn't had the .virtfs_metadata file. May be file created * in non-mapped mode ?. Ignore ENOENT. */ goto err_out; } } /* * Now remove the name from parent directory * .virtfs_metadata directory */ err = remove(local_mapped_attr_path(ctx, path, buffer)); if (err < 0 && errno != ENOENT) { /* * We didn't had the .virtfs_metadata file. May be file created * in non-mapped mode ?. Ignore ENOENT. */ goto err_out; } } return remove(rpath(ctx, path, buffer)); err_out: return err; }"} {"target": 0, "idx": 16527, "func": "static void qed_unplug_allocating_write_reqs(BDRVQEDState *s) { assert(s->allocating_write_reqs_plugged); s->allocating_write_reqs_plugged = false; qemu_co_enter_next(&s->allocating_write_reqs); }"} {"target": 0, "idx": 16533, "func": "static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, uint32_t real_offset, uint32_t *data) { uint32_t reg_field = 0; int index; index = xen_pt_bar_offset_to_index(reg->offset); if (index < 0 || index >= PCI_NUM_REGIONS) { XEN_PT_ERR(&s->dev, \"Internal error: Invalid BAR index [%d].\\n\", index); return -1; } /* set BAR flag */ s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, reg); if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) { reg_field = XEN_PT_INVALID_REG; } *data = reg_field; return 0; }"} {"target": 0, "idx": 16541, "func": "static void ehci_frame_timer(void *opaque) { EHCIState *ehci = opaque; int schedules = 0; int64_t expire_time, t_now; uint64_t ns_elapsed; int frames; int i; int skipped_frames = 0; t_now = qemu_get_clock_ns(vm_clock); ns_elapsed = t_now - ehci->last_run_ns; frames = ns_elapsed / FRAME_TIMER_NS; if (ehci_periodic_enabled(ehci) || ehci->pstate != EST_INACTIVE) { schedules++; expire_time = t_now + (get_ticks_per_sec() / FRAME_TIMER_FREQ); for (i = 0; i < frames; i++) { ehci_update_frindex(ehci, 1); if (frames - i > ehci->maxframes) { skipped_frames++; } else { ehci_advance_periodic_state(ehci); } ehci->last_run_ns += FRAME_TIMER_NS; } } else { if (ehci->async_stepdown < ehci->maxframes / 2) { ehci->async_stepdown++; } expire_time = t_now + (get_ticks_per_sec() * ehci->async_stepdown / FRAME_TIMER_FREQ); ehci_update_frindex(ehci, frames); ehci->last_run_ns += FRAME_TIMER_NS * frames; } #if 0 if (skipped_frames) { DPRINTF(\"WARNING - EHCI skipped %d frames\\n\", skipped_frames); } #endif /* Async is not inside loop since it executes everything it can once * called */ if (ehci_async_enabled(ehci) || ehci->astate != EST_INACTIVE) { schedules++; qemu_bh_schedule(ehci->async_bh); } if (schedules) { qemu_mod_timer(ehci->frame_timer, expire_time); } }"} {"target": 0, "idx": 16558, "func": "void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, int dirty_flags) { CPUState *env; unsigned long length, start1; int i; start &= TARGET_PAGE_MASK; end = TARGET_PAGE_ALIGN(end); length = end - start; if (length == 0) return; cpu_physical_memory_mask_dirty_range(start, length, dirty_flags); /* we modify the TLB cache so that the dirty bit will be set again when accessing the range */ start1 = (unsigned long)qemu_safe_ram_ptr(start); /* Chek that we don't span multiple blocks - this breaks the address comparisons below. 
*/ if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1 != (end - 1) - start) { abort(); } for(env = first_cpu; env != NULL; env = env->next_cpu) { int mmu_idx; for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { for(i = 0; i < CPU_TLB_SIZE; i++) tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], start1, length); } } }"} {"target": 0, "idx": 16569, "func": "static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb, uint16_t leaf) { /* Wildly overreserve - it doesn't matter much. */ phys_map_node_reserve(3 * P_L2_LEVELS); phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); }"} {"target": 0, "idx": 16570, "func": "void qemu_chr_fe_printf(CharDriverState *s, const char *fmt, ...) { char buf[READ_BUF_LEN]; va_list ap; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); qemu_chr_fe_write(s, (uint8_t *)buf, strlen(buf)); va_end(ap); }"} {"target": 0, "idx": 16577, "func": "int hw_device_setup_for_decode(InputStream *ist) { enum AVHWDeviceType type; HWDevice *dev; int err; if (ist->hwaccel_device) { dev = hw_device_get_by_name(ist->hwaccel_device); if (!dev) { char *tmp; type = hw_device_match_type_by_hwaccel(ist->hwaccel_id); if (type == AV_HWDEVICE_TYPE_NONE) { // No match - this isn't necessarily invalid, though, // because an explicit device might not be needed or // the hwaccel setup could be handled elsewhere. return 0; } tmp = av_asprintf(\"%s:%s\", av_hwdevice_get_type_name(type), ist->hwaccel_device); if (!tmp) return AVERROR(ENOMEM); err = hw_device_init_from_string(tmp, &dev); av_free(tmp); if (err < 0) return err; } } else { if (ist->hwaccel_id != HWACCEL_NONE) type = hw_device_match_type_by_hwaccel(ist->hwaccel_id); else type = hw_device_match_type_in_name(ist->dec->name); if (type != AV_HWDEVICE_TYPE_NONE) { dev = hw_device_get_by_type(type); if (!dev) { hw_device_init_from_string(av_hwdevice_get_type_name(type), &dev); } } else { // No device required. 
return 0; } } if (!dev) { av_log(ist->dec_ctx, AV_LOG_WARNING, \"No device available \" \"for decoder (device type %s for codec %s).\\n\", av_hwdevice_get_type_name(type), ist->dec->name); return 0; } ist->dec_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref); if (!ist->dec_ctx->hw_device_ctx) return AVERROR(ENOMEM); return 0; }"} {"target": 0, "idx": 16578, "func": "static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd) { uint8_t data[4096]; struct mfi_config_data *info; int num_pd_disks = 0, array_offset, ld_offset; BusChild *kid; if (cmd->iov_size > 4096) { return MFI_STAT_INVALID_PARAMETER; } QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { num_pd_disks++; } info = (struct mfi_config_data *)&data; /* * Array mapping: * - One array per SCSI device * - One logical drive per SCSI device * spanning the entire device */ info->array_count = num_pd_disks; info->array_size = sizeof(struct mfi_array) * num_pd_disks; info->log_drv_count = num_pd_disks; info->log_drv_size = sizeof(struct mfi_ld_config) * num_pd_disks; info->spares_count = 0; info->spares_size = sizeof(struct mfi_spare); info->size = sizeof(struct mfi_config_data) + info->array_size + info->log_drv_size; if (info->size > 4096) { return MFI_STAT_INVALID_PARAMETER; } array_offset = sizeof(struct mfi_config_data); ld_offset = array_offset + sizeof(struct mfi_array) * num_pd_disks; QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { SCSIDevice *sdev = DO_UPCAST(SCSIDevice, qdev, kid->child); BlockConf *conf = &sdev->conf; uint16_t sdev_id = ((sdev->id & 0xFF) >> 8) | (sdev->lun & 0xFF); struct mfi_array *array; struct mfi_ld_config *ld; uint64_t pd_size; int i; array = (struct mfi_array *)(data + array_offset); bdrv_get_geometry(conf->bs, &pd_size); array->size = cpu_to_le64(pd_size); array->num_drives = 1; array->array_ref = cpu_to_le16(sdev_id); array->pd[0].ref.v.device_id = cpu_to_le16(sdev_id); array->pd[0].ref.v.seq_num = 0; array->pd[0].fw_state = MFI_PD_STATE_ONLINE; array->pd[0].encl.pd = 0xFF; array->pd[0].encl.slot = (sdev->id & 0xFF); for (i = 1; i < MFI_MAX_ROW_SIZE; i++) { array->pd[i].ref.v.device_id = 0xFFFF; array->pd[i].ref.v.seq_num = 0; array->pd[i].fw_state = MFI_PD_STATE_UNCONFIGURED_GOOD; array->pd[i].encl.pd = 0xFF; array->pd[i].encl.slot = 0xFF; } array_offset += sizeof(struct mfi_array); ld = (struct mfi_ld_config *)(data + ld_offset); memset(ld, 0, sizeof(struct mfi_ld_config)); ld->properties.ld.v.target_id = (sdev->id & 0xFF); ld->properties.default_cache_policy = MR_LD_CACHE_READ_AHEAD | MR_LD_CACHE_READ_ADAPTIVE; ld->properties.current_cache_policy = MR_LD_CACHE_READ_AHEAD | MR_LD_CACHE_READ_ADAPTIVE; ld->params.state = MFI_LD_STATE_OPTIMAL; ld->params.stripe_size = 3; ld->params.num_drives = 1; ld->params.span_depth = 1; ld->params.is_consistent = 1; ld->span[0].start_block = 0; ld->span[0].num_blocks = cpu_to_le64(pd_size); ld->span[0].array_ref = cpu_to_le16(sdev_id); ld_offset += sizeof(struct mfi_ld_config); } cmd->iov_size -= dma_buf_read((uint8_t *)data, info->size, &cmd->qsg); return MFI_STAT_OK; }"} {"target": 0, "idx": 16582, "func": "timer_read(void *opaque, target_phys_addr_t addr, unsigned int size) { struct etrax_timer *t = opaque; uint32_t r = 0; switch (addr) { case R_TMR0_DATA: r = ptimer_get_count(t->ptimer_t0); break; case R_TMR1_DATA: r = ptimer_get_count(t->ptimer_t1); break; case R_TIME: r = qemu_get_clock_ns(vm_clock) / 10; break; case RW_INTR_MASK: r = t->rw_intr_mask; break; case R_MASKED_INTR: r = t->r_intr & t->rw_intr_mask; break; default: D(printf (\"%s 
%x\\n\", __func__, addr)); break; } return r; }"} {"target": 0, "idx": 16584, "func": "static void virt_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); mc->desc = \"ARM Virtual Machine\", mc->init = machvirt_init; mc->max_cpus = 8; mc->has_dynamic_sysbus = true; mc->block_default_type = IF_VIRTIO; mc->no_cdrom = 1; }"} {"target": 0, "idx": 16596, "func": "static GArray *nvdimm_build_device_structure(void) { GSList *device_list = nvdimm_get_plugged_device_list(); GArray *structures = g_array_new(false, true /* clear */, 1); for (; device_list; device_list = device_list->next) { DeviceState *dev = device_list->data; /* build System Physical Address Range Structure. */ nvdimm_build_structure_spa(structures, dev); /* * build Memory Device to System Physical Address Range Mapping * Structure. */ nvdimm_build_structure_memdev(structures, dev); /* build NVDIMM Control Region Structure. */ nvdimm_build_structure_dcr(structures, dev); } g_slist_free(device_list); return structures; }"} {"target": 0, "idx": 16597, "func": "static void pflash_timer (void *opaque) { pflash_t *pfl = opaque; DPRINTF(\"%s: command %02x done\\n\", __func__, pfl->cmd); /* Reset flash */ pfl->status ^= 0x80; if (pfl->bypass) { pfl->wcycle = 2; } else { memory_region_rom_device_set_readable(&pfl->mem, true); pfl->wcycle = 0; } pfl->cmd = 0; }"} {"target": 0, "idx": 16624, "func": "void helper_evaluate_flags_mcp(void) { uint32_t src; uint32_t dst; uint32_t res; uint32_t flags = 0; src = env->cc_src; dst = env->cc_dest; res = env->cc_result; if ((res & 0x80000000L) != 0L) { flags |= N_FLAG; if (((src & 0x80000000L) == 0L) && ((dst & 0x80000000L) == 0L)) { flags |= V_FLAG; } else if (((src & 0x80000000L) != 0L) && ((dst & 0x80000000L) != 0L)) { flags |= R_FLAG; } } else { if (res == 0L) flags |= Z_FLAG; if (((src & 0x80000000L) != 0L) && ((dst & 0x80000000L) != 0L)) flags |= V_FLAG; if ((dst & 0x80000000L) != 0L || (src & 0x80000000L) != 0L) flags |= R_FLAG; } evaluate_flags_writeback(flags); }"} {"target": 0, "idx": 16633, "func": "static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, TCGv_i64 inaddr, int size, int is_pair) { /* if (env->exclusive_addr == addr && env->exclusive_val == [addr] * && (!is_pair || env->exclusive_high == [addr + datasize])) { * [addr] = {Rt}; * if (is_pair) { * [addr + datasize] = {Rt2}; * } * {Rd} = 0; * } else { * {Rd} = 1; * } * env->exclusive_addr = -1; */ int fail_label = gen_new_label(); int done_label = gen_new_label(); TCGv_i64 addr = tcg_temp_local_new_i64(); TCGv_i64 tmp; /* Copy input into a local temp so it is not trashed when the * basic block ends at the branch insn. 
*/ tcg_gen_mov_i64(addr, inaddr); tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label); tmp = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), MO_TE + size); tcg_gen_brcond_i64(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label); tcg_temp_free_i64(tmp); if (is_pair) { TCGv_i64 addrhi = tcg_temp_new_i64(); TCGv_i64 tmphi = tcg_temp_new_i64(); tcg_gen_addi_i64(addrhi, addr, 1 << size); tcg_gen_qemu_ld_i64(tmphi, addrhi, get_mem_index(s), MO_TE + size); tcg_gen_brcond_i64(TCG_COND_NE, tmphi, cpu_exclusive_high, fail_label); tcg_temp_free_i64(tmphi); tcg_temp_free_i64(addrhi); } /* We seem to still have the exclusive monitor, so do the store */ tcg_gen_qemu_st_i64(cpu_reg(s, rt), addr, get_mem_index(s), MO_TE + size); if (is_pair) { TCGv_i64 addrhi = tcg_temp_new_i64(); tcg_gen_addi_i64(addrhi, addr, 1 << size); tcg_gen_qemu_st_i64(cpu_reg(s, rt2), addrhi, get_mem_index(s), MO_TE + size); tcg_temp_free_i64(addrhi); } tcg_temp_free_i64(addr); tcg_gen_movi_i64(cpu_reg(s, rd), 0); tcg_gen_br(done_label); gen_set_label(fail_label); tcg_gen_movi_i64(cpu_reg(s, rd), 1); gen_set_label(done_label); tcg_gen_movi_i64(cpu_exclusive_addr, -1); }"} {"target": 0, "idx": 16648, "func": "QError *qerror_new(void) { QError *qerr; qerr = g_malloc0(sizeof(*qerr)); QOBJECT_INIT(qerr, &qerror_type); return qerr; }"} {"target": 1, "idx": 16659, "func": "static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x, const uint8_t **bs, const uint8_t *buf_end){ int mode; int i; int8_t dc[6]; mode = bytestream_get_byte(bs); if (mode>buf_end-*bs) { av_log(s->avctx, AV_LOG_ERROR, \"truncated macroblock\\n\"); return; } if (mode>12) { GetBitContext gb; init_get_bits(&gb, *bs, mode*8); for(i=0; i<6; i++) tgq_decode_block(s, s->block[i], &gb); tgq_idct_put_mb(s, s->block, mb_x, mb_y); }else{ if (mode==3) { memset(dc, (*bs)[0], 4); dc[4] = (*bs)[1]; dc[5] = (*bs)[2]; }else if (mode==6) { memcpy(dc, *bs, 6); }else if (mode==12) { for(i=0; i<6; i++) dc[i] = (*bs)[i*2]; }else{ av_log(s->avctx, AV_LOG_ERROR, \"unsupported mb mode %i\\n\", mode); } tgq_idct_put_mb_dconly(s, mb_x, mb_y, dc); } *bs += mode; }"} {"target": 1, "idx": 16660, "func": "void ff_prores_idct(DCTELEM *block, const int16_t *qmat) { int i; for (i = 0; i < 64; i++) block[i] *= qmat[i]; for (i = 0; i < 8; i++) idctRowCondDC_10(block + i*8); for (i = 0; i < 64; i++) block[i] >>= 2; for (i = 0; i < 8; i++) idctSparseCol_10(block + i); }"} {"target": 1, "idx": 16685, "func": "int bdrv_write(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { BlockDriver *drv = bs->drv; if (!bs->drv) return -ENOMEDIUM; if (bs->read_only) return -EACCES; if (drv->bdrv_pwrite) { int ret, len, count = 0; len = nb_sectors * 512; do { ret = drv->bdrv_pwrite(bs, sector_num * 512, buf, len - count); if (ret < 0) { printf(\"bdrv_write ret=%d\\n\", ret); return ret; } count += ret; buf += ret; } while (count != len); bs->wr_bytes += (unsigned) len; bs->wr_ops ++; return 0; } return drv->bdrv_write(bs, sector_num, buf, nb_sectors); }"} {"target": 1, "idx": 16698, "func": "static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvChipClass *k = PNV_CHIP_CLASS(klass); k->cpu_model = \"POWER8NVL\"; k->chip_type = PNV_CHIP_POWER8NVL; k->chip_cfam_id = 0x120d304980000000ull; /* P8 Naples DD1.0 */ k->cores_mask = POWER8_CORE_MASK; k->core_pir = pnv_chip_core_pir_p8; dc->desc = \"PowerNV Chip POWER8NVL\"; }"} {"target": 1, "idx": 16704, "func": "static inline void idct_col 
(int16_t * col, int offset) { #define T1 13036 #define T2 27146 #define T3 43790 #define C4 23170 static const short _T1[] ATTR_ALIGN(8) = {T1,T1,T1,T1}; static const short _T2[] ATTR_ALIGN(8) = {T2,T2,T2,T2}; static const short _T3[] ATTR_ALIGN(8) = {T3,T3,T3,T3}; static const short _C4[] ATTR_ALIGN(8) = {C4,C4,C4,C4}; /* column code adapted from Peter Gubanov */ /* http://www.elecard.com/peter/idct.shtml */ movq_m2r (*_T1, mm0); // mm0 = T1 movq_m2r (*(col+offset+1*8), mm1); // mm1 = x1 movq_r2r (mm0, mm2); // mm2 = T1 movq_m2r (*(col+offset+7*8), mm4); // mm4 = x7 pmulhw_r2r (mm1, mm0); // mm0 = T1*x1 movq_m2r (*_T3, mm5); // mm5 = T3 pmulhw_r2r (mm4, mm2); // mm2 = T1*x7 movq_m2r (*(col+offset+5*8), mm6); // mm6 = x5 movq_r2r (mm5, mm7); // mm7 = T3-1 movq_m2r (*(col+offset+3*8), mm3); // mm3 = x3 psubsw_r2r (mm4, mm0); // mm0 = v17 movq_m2r (*_T2, mm4); // mm4 = T2 pmulhw_r2r (mm3, mm5); // mm5 = (T3-1)*x3 paddsw_r2r (mm2, mm1); // mm1 = u17 pmulhw_r2r (mm6, mm7); // mm7 = (T3-1)*x5 /* slot */ movq_r2r (mm4, mm2); // mm2 = T2 paddsw_r2r (mm3, mm5); // mm5 = T3*x3 pmulhw_m2r (*(col+offset+2*8), mm4);// mm4 = T2*x2 paddsw_r2r (mm6, mm7); // mm7 = T3*x5 psubsw_r2r (mm6, mm5); // mm5 = v35 paddsw_r2r (mm3, mm7); // mm7 = u35 movq_m2r (*(col+offset+6*8), mm3); // mm3 = x6 movq_r2r (mm0, mm6); // mm6 = v17 pmulhw_r2r (mm3, mm2); // mm2 = T2*x6 psubsw_r2r (mm5, mm0); // mm0 = b3 psubsw_r2r (mm3, mm4); // mm4 = v26 paddsw_r2r (mm6, mm5); // mm5 = v12 movq_r2m (mm0, *(col+offset+3*8)); // save b3 in scratch0 movq_r2r (mm1, mm6); // mm6 = u17 paddsw_m2r (*(col+offset+2*8), mm2);// mm2 = u26 paddsw_r2r (mm7, mm6); // mm6 = b0 psubsw_r2r (mm7, mm1); // mm1 = u12 movq_r2r (mm1, mm7); // mm7 = u12 movq_m2r (*(col+offset+0*8), mm3); // mm3 = x0 paddsw_r2r (mm5, mm1); // mm1 = u12+v12 movq_m2r (*_C4, mm0); // mm0 = C4/2 psubsw_r2r (mm5, mm7); // mm7 = u12-v12 movq_r2m (mm6, *(col+offset+5*8)); // save b0 in scratch1 pmulhw_r2r (mm0, mm1); // mm1 = b1/2 movq_r2r (mm4, mm6); // mm6 = v26 pmulhw_r2r (mm0, mm7); // mm7 = b2/2 movq_m2r (*(col+offset+4*8), mm5); // mm5 = x4 movq_r2r (mm3, mm0); // mm0 = x0 psubsw_r2r (mm5, mm3); // mm3 = v04 paddsw_r2r (mm5, mm0); // mm0 = u04 paddsw_r2r (mm3, mm4); // mm4 = a1 movq_r2r (mm0, mm5); // mm5 = u04 psubsw_r2r (mm6, mm3); // mm3 = a2 paddsw_r2r (mm2, mm5); // mm5 = a0 paddsw_r2r (mm1, mm1); // mm1 = b1 psubsw_r2r (mm2, mm0); // mm0 = a3 paddsw_r2r (mm7, mm7); // mm7 = b2 movq_r2r (mm3, mm2); // mm2 = a2 movq_r2r (mm4, mm6); // mm6 = a1 paddsw_r2r (mm7, mm3); // mm3 = a2+b2 psraw_i2r (COL_SHIFT, mm3); // mm3 = y2 paddsw_r2r (mm1, mm4); // mm4 = a1+b1 psraw_i2r (COL_SHIFT, mm4); // mm4 = y1 psubsw_r2r (mm1, mm6); // mm6 = a1-b1 movq_m2r (*(col+offset+5*8), mm1); // mm1 = b0 psubsw_r2r (mm7, mm2); // mm2 = a2-b2 psraw_i2r (COL_SHIFT, mm6); // mm6 = y6 movq_r2r (mm5, mm7); // mm7 = a0 movq_r2m (mm4, *(col+offset+1*8)); // save y1 psraw_i2r (COL_SHIFT, mm2); // mm2 = y5 movq_r2m (mm3, *(col+offset+2*8)); // save y2 paddsw_r2r (mm1, mm5); // mm5 = a0+b0 movq_m2r (*(col+offset+3*8), mm4); // mm4 = b3 psubsw_r2r (mm1, mm7); // mm7 = a0-b0 psraw_i2r (COL_SHIFT, mm5); // mm5 = y0 movq_r2r (mm0, mm3); // mm3 = a3 movq_r2m (mm2, *(col+offset+5*8)); // save y5 psubsw_r2r (mm4, mm3); // mm3 = a3-b3 psraw_i2r (COL_SHIFT, mm7); // mm7 = y7 paddsw_r2r (mm0, mm4); // mm4 = a3+b3 movq_r2m (mm5, *(col+offset+0*8)); // save y0 psraw_i2r (COL_SHIFT, mm3); // mm3 = y4 movq_r2m (mm6, *(col+offset+6*8)); // save y6 psraw_i2r (COL_SHIFT, mm4); // mm4 = y3 movq_r2m (mm7, 
*(col+offset+7*8)); // save y7 movq_r2m (mm3, *(col+offset+4*8)); // save y4 movq_r2m (mm4, *(col+offset+3*8)); // save y3 #undef T1 #undef T2 #undef T3 #undef C4 }"} {"target": 1, "idx": 16709, "func": "static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count) { int16_t weight[12][64]; int16_t orig[12][64]; const int mb_x = s->mb_x; const int mb_y = s->mb_y; int i; int skip_dct[12]; int dct_offset = s->linesize * 8; // default for progressive frames int uv_dct_offset = s->uvlinesize * 8; uint8_t *ptr_y, *ptr_cb, *ptr_cr; int wrap_y, wrap_c; for (i = 0; i < mb_block_count; i++) skip_dct[i] = s->skipdct; if (s->adaptive_quant) { const int last_qp = s->qscale; const int mb_xy = mb_x + mb_y * s->mb_stride; s->lambda = s->lambda_table[mb_xy]; update_qscale(s); if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) { s->qscale = s->current_picture_ptr->qscale_table[mb_xy]; s->dquant = s->qscale - last_qp; if (s->out_format == FMT_H263) { s->dquant = av_clip(s->dquant, -2, 2); if (s->codec_id == AV_CODEC_ID_MPEG4) { if (!s->mb_intra) { if (s->pict_type == AV_PICTURE_TYPE_B) { if (s->dquant & 1 || s->mv_dir & MV_DIRECT) s->dquant = 0; } if (s->mv_type == MV_TYPE_8X8) s->dquant = 0; } } } } ff_set_qscale(s, last_qp + s->dquant); } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD) ff_set_qscale(s, s->qscale + s->dquant); wrap_y = s->linesize; wrap_c = s->uvlinesize; ptr_y = s->new_picture.f.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16; ptr_cb = s->new_picture.f.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width; ptr_cr = s->new_picture.f.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width; if((mb_x*16+16 > s->width || mb_y*16+16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){ uint8_t *ebuf = s->edge_emu_buffer + 32; int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift; int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift; s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16, mb_y * 16, s->width, s->height); ptr_y = ebuf; s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, mb_block_width, mb_block_height, mb_x * mb_block_width, mb_y * mb_block_height, cw, ch); ptr_cb = ebuf + 18 * wrap_y; s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 16, ptr_cr, wrap_c, mb_block_width, mb_block_height, mb_x * mb_block_width, mb_y * mb_block_height, cw, ch); ptr_cr = ebuf + 18 * wrap_y + 16; } if (s->mb_intra) { if (s->flags & CODEC_FLAG_INTERLACED_DCT) { int progressive_score, interlaced_score; s->interlaced_dct = 0; progressive_score = s->dsp.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) + s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8, NULL, wrap_y, 8) - 400; if (progressive_score > 0) { interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y, NULL, wrap_y * 2, 8) + s->dsp.ildct_cmp[4](s, ptr_y + wrap_y, NULL, wrap_y * 2, 8); if (progressive_score > interlaced_score) { s->interlaced_dct = 1; dct_offset = wrap_y; uv_dct_offset = wrap_c; wrap_y <<= 1; if (s->chroma_format == CHROMA_422 || s->chroma_format == CHROMA_444) wrap_c <<= 1; } } } s->dsp.get_pixels(s->block[0], ptr_y , wrap_y); s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y); s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y); s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y); if (s->flags & CODEC_FLAG_GRAY) { skip_dct[4] = 1; skip_dct[5] = 1; } else { s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c); s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c); if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */ 
s->dsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c); s->dsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c); } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */ s->dsp.get_pixels(s->block[6], ptr_cb + 8, wrap_c); s->dsp.get_pixels(s->block[7], ptr_cr + 8, wrap_c); s->dsp.get_pixels(s->block[8], ptr_cb + uv_dct_offset, wrap_c); s->dsp.get_pixels(s->block[9], ptr_cr + uv_dct_offset, wrap_c); s->dsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c); s->dsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c); } } } else { op_pixels_func (*op_pix)[4]; qpel_mc_func (*op_qpix)[16]; uint8_t *dest_y, *dest_cb, *dest_cr; dest_y = s->dest[0]; dest_cb = s->dest[1]; dest_cr = s->dest[2]; if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) { op_pix = s->hdsp.put_pixels_tab; op_qpix = s->dsp.put_qpel_pixels_tab; } else { op_pix = s->hdsp.put_no_rnd_pixels_tab; op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab; } if (s->mv_dir & MV_DIR_FORWARD) { ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix); op_pix = s->hdsp.avg_pixels_tab; op_qpix = s->dsp.avg_qpel_pixels_tab; } if (s->mv_dir & MV_DIR_BACKWARD) { ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix); } if (s->flags & CODEC_FLAG_INTERLACED_DCT) { int progressive_score, interlaced_score; s->interlaced_dct = 0; progressive_score = s->dsp.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) + s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8, ptr_y + wrap_y * 8, wrap_y, 8) - 400; if (s->avctx->ildct_cmp == FF_CMP_VSSE) progressive_score -= 400; if (progressive_score > 0) { interlaced_score = s->dsp.ildct_cmp[0](s, dest_y, ptr_y, wrap_y * 2, 8) + s->dsp.ildct_cmp[0](s, dest_y + wrap_y, ptr_y + wrap_y, wrap_y * 2, 8); if (progressive_score > interlaced_score) { s->interlaced_dct = 1; dct_offset = wrap_y; uv_dct_offset = wrap_c; wrap_y <<= 1; if (s->chroma_format == CHROMA_422) wrap_c <<= 1; } } } s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y); s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y); s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset, dest_y + dct_offset, wrap_y); s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y); if (s->flags & CODEC_FLAG_GRAY) { skip_dct[4] = 1; skip_dct[5] = 1; } else { s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c); s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c); if (!s->chroma_y_shift) { /* 422 */ s->dsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset, dest_cb + uv_dct_offset, wrap_c); s->dsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset, dest_cr + uv_dct_offset, wrap_c); } } /* pre quantization */ if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] < 2 * s->qscale * s->qscale) { // FIXME optimize if (s->dsp.sad[1](NULL, ptr_y , dest_y, wrap_y, 8) < 20 * s->qscale) skip_dct[0] = 1; if (s->dsp.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale) skip_dct[1] = 1; if (s->dsp.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale) skip_dct[2] = 1; if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y, 8) < 20 * s->qscale) skip_dct[3] = 1; if (s->dsp.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale) skip_dct[4] = 1; if (s->dsp.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale) skip_dct[5] = 1; if (!s->chroma_y_shift) { /* 422 */ if (s->dsp.sad[1](NULL, ptr_cb + uv_dct_offset, dest_cb + uv_dct_offset, wrap_c, 8) < 20 * s->qscale) skip_dct[6] = 1; if 
(s->dsp.sad[1](NULL, ptr_cr + uv_dct_offset, dest_cr + uv_dct_offset, wrap_c, 8) < 20 * s->qscale) skip_dct[7] = 1; } } } if (s->quantizer_noise_shaping) { if (!skip_dct[0]) get_visual_weight(weight[0], ptr_y , wrap_y); if (!skip_dct[1]) get_visual_weight(weight[1], ptr_y + 8, wrap_y); if (!skip_dct[2]) get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y); if (!skip_dct[3]) get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y); if (!skip_dct[4]) get_visual_weight(weight[4], ptr_cb , wrap_c); if (!skip_dct[5]) get_visual_weight(weight[5], ptr_cr , wrap_c); if (!s->chroma_y_shift) { /* 422 */ if (!skip_dct[6]) get_visual_weight(weight[6], ptr_cb + uv_dct_offset, wrap_c); if (!skip_dct[7]) get_visual_weight(weight[7], ptr_cr + uv_dct_offset, wrap_c); } memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count); } /* DCT & quantize */ av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8); { for (i = 0; i < mb_block_count; i++) { if (!skip_dct[i]) { int overflow; s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow); // FIXME we could decide to change to quantizer instead of // clipping // JS: I don't think that would be a good idea it could lower // quality instead of improve it. Just INTRADC clipping // deserves changes in quantizer if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]); } else s->block_last_index[i] = -1; } if (s->quantizer_noise_shaping) { for (i = 0; i < mb_block_count; i++) { if (!skip_dct[i]) { s->block_last_index[i] = dct_quantize_refine(s, s->block[i], weight[i], orig[i], i, s->qscale); } } } if (s->luma_elim_threshold && !s->mb_intra) for (i = 0; i < 4; i++) dct_single_coeff_elimination(s, i, s->luma_elim_threshold); if (s->chroma_elim_threshold && !s->mb_intra) for (i = 4; i < mb_block_count; i++) dct_single_coeff_elimination(s, i, s->chroma_elim_threshold); if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) { for (i = 0; i < mb_block_count; i++) { if (s->block_last_index[i] == -1) s->coded_score[i] = INT_MAX / 256; } } } if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) { s->block_last_index[4] = s->block_last_index[5] = 0; s->block[4][0] = s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale; if (!s->chroma_y_shift) { /* 422 / 444 */ for (i=6; i<12; i++) { s->block_last_index[i] = 0; s->block[i][0] = s->block[4][0]; } } } // non c quantize code returns incorrect block_last_index FIXME if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) { for (i = 0; i < mb_block_count; i++) { int j; if (s->block_last_index[i] > 0) { for (j = 63; j > 0; j--) { if (s->block[i][s->intra_scantable.permutated[j]]) break; } s->block_last_index[i] = j; } } } /* huffman encode */ switch(s->codec_id){ //FIXME funct ptr could be slightly faster case AV_CODEC_ID_MPEG1VIDEO: case AV_CODEC_ID_MPEG2VIDEO: if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y); break; case AV_CODEC_ID_MPEG4: if (CONFIG_MPEG4_ENCODER) ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y); break; case AV_CODEC_ID_MSMPEG4V2: case AV_CODEC_ID_MSMPEG4V3: case AV_CODEC_ID_WMV1: if (CONFIG_MSMPEG4_ENCODER) ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break; case AV_CODEC_ID_WMV2: if (CONFIG_WMV2_ENCODER) ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break; case AV_CODEC_ID_H261: if (CONFIG_H261_ENCODER) ff_h261_encode_mb(s, s->block, motion_x, motion_y); break; case AV_CODEC_ID_H263: case AV_CODEC_ID_H263P: case AV_CODEC_ID_FLV1: case AV_CODEC_ID_RV10: case AV_CODEC_ID_RV20: if 
(CONFIG_H263_ENCODER) ff_h263_encode_mb(s, s->block, motion_x, motion_y); break; case AV_CODEC_ID_MJPEG: case AV_CODEC_ID_AMV: if (CONFIG_MJPEG_ENCODER) ff_mjpeg_encode_mb(s, s->block); break; default: av_assert1(0); } }"} {"target": 1, "idx": 16721, "func": "static void gd_menu_switch_vc(GtkMenuItem *item, void *opaque) { GtkDisplayState *s = opaque; VirtualConsole *vc = gd_vc_find_by_menu(s); GtkNotebook *nb = GTK_NOTEBOOK(s->notebook); gint page; gtk_release_modifiers(s); if (vc) { page = gtk_notebook_page_num(nb, vc->tab_item); gtk_notebook_set_current_page(nb, page); gtk_widget_grab_focus(vc->focus); } }"} {"target": 0, "idx": 16731, "func": "void blockdev_mark_auto_del(BlockDriverState *bs) { BlockBackend *blk = bs->blk; DriveInfo *dinfo = blk_legacy_dinfo(blk); if (dinfo && !dinfo->enable_auto_del) { return; } if (bs->job) { block_job_cancel(bs->job); } if (dinfo) { dinfo->auto_del = 1; } }"} {"target": 0, "idx": 16737, "func": "static void create_gic(VirtBoardInfo *vbi, qemu_irq *pic, bool secure) { /* We create a standalone GIC v2 */ DeviceState *gicdev; SysBusDevice *gicbusdev; const char *gictype; int i; gictype = gic_class_name(); gicdev = qdev_create(NULL, gictype); qdev_prop_set_uint32(gicdev, \"revision\", 2); qdev_prop_set_uint32(gicdev, \"num-cpu\", smp_cpus); /* Note that the num-irq property counts both internal and external * interrupts; there are always 32 of the former (mandated by GIC spec). */ qdev_prop_set_uint32(gicdev, \"num-irq\", NUM_IRQS + 32); if (!kvm_irqchip_in_kernel()) { qdev_prop_set_bit(gicdev, \"has-security-extensions\", secure); } qdev_init_nofail(gicdev); gicbusdev = SYS_BUS_DEVICE(gicdev); sysbus_mmio_map(gicbusdev, 0, vbi->memmap[VIRT_GIC_DIST].base); sysbus_mmio_map(gicbusdev, 1, vbi->memmap[VIRT_GIC_CPU].base); /* Wire the outputs from each CPU's generic timer to the * appropriate GIC PPI inputs, and the GIC's IRQ output to * the CPU's IRQ input. */ for (i = 0; i < smp_cpus; i++) { DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS; int irq; /* Mapping from the output timer irq lines from the CPU to the * GIC PPI inputs we use for the virt board. */ const int timer_irq[] = { [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ, [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, }; for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { qdev_connect_gpio_out(cpudev, irq, qdev_get_gpio_in(gicdev, ppibase + timer_irq[irq])); } sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); sysbus_connect_irq(gicbusdev, i + smp_cpus, qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); } for (i = 0; i < NUM_IRQS; i++) { pic[i] = qdev_get_gpio_in(gicdev, i); } fdt_add_gic_node(vbi); create_v2m(vbi, pic); }"} {"target": 0, "idx": 16744, "func": "static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) { int rd, rn, rm; int op; int nregs; int interleave; int spacing; int stride; int size; int reg; int pass; int load; int shift; int n; TCGv_i32 addr; TCGv_i32 tmp; TCGv_i32 tmp2; TCGv_i64 tmp64; /* FIXME: this access check should not take precedence over UNDEF * for invalid encodings; we will generate incorrect syndrome information * for attempts to execute invalid vfp/neon encodings with FP disabled. 
*/ if (!s->cpacr_fpen) { gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, s->thumb), default_exception_el(s)); return 0; } if (!s->vfp_enabled) return 1; VFP_DREG_D(rd, insn); rn = (insn >> 16) & 0xf; rm = insn & 0xf; load = (insn & (1 << 21)) != 0; if ((insn & (1 << 23)) == 0) { /* Load store all elements. */ op = (insn >> 8) & 0xf; size = (insn >> 6) & 3; if (op > 10) return 1; /* Catch UNDEF cases for bad values of align field */ switch (op & 0xc) { case 4: if (((insn >> 5) & 1) == 1) { return 1; } break; case 8: if (((insn >> 4) & 3) == 3) { return 1; } break; default: break; } nregs = neon_ls_element_type[op].nregs; interleave = neon_ls_element_type[op].interleave; spacing = neon_ls_element_type[op].spacing; if (size == 3 && (interleave | spacing) != 1) return 1; addr = tcg_temp_new_i32(); load_reg_var(s, addr, rn); stride = (1 << size) * interleave; for (reg = 0; reg < nregs; reg++) { if (interleave > 2 || (interleave == 2 && nregs == 2)) { load_reg_var(s, addr, rn); tcg_gen_addi_i32(addr, addr, (1 << size) * reg); } else if (interleave == 2 && nregs == 4 && reg == 2) { load_reg_var(s, addr, rn); tcg_gen_addi_i32(addr, addr, 1 << size); } if (size == 3) { tmp64 = tcg_temp_new_i64(); if (load) { gen_aa32_ld64(tmp64, addr, get_mem_index(s)); neon_store_reg64(tmp64, rd); } else { neon_load_reg64(tmp64, rd); gen_aa32_st64(tmp64, addr, get_mem_index(s)); } tcg_temp_free_i64(tmp64); tcg_gen_addi_i32(addr, addr, stride); } else { for (pass = 0; pass < 2; pass++) { if (size == 2) { if (load) { tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, get_mem_index(s)); neon_store_reg(rd, pass, tmp); } else { tmp = neon_load_reg(rd, pass); gen_aa32_st32(tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); } tcg_gen_addi_i32(addr, addr, stride); } else if (size == 1) { if (load) { tmp = tcg_temp_new_i32(); gen_aa32_ld16u(tmp, addr, get_mem_index(s)); tcg_gen_addi_i32(addr, addr, stride); tmp2 = tcg_temp_new_i32(); gen_aa32_ld16u(tmp2, addr, get_mem_index(s)); tcg_gen_addi_i32(addr, addr, stride); tcg_gen_shli_i32(tmp2, tmp2, 16); tcg_gen_or_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); neon_store_reg(rd, pass, tmp); } else { tmp = neon_load_reg(rd, pass); tmp2 = tcg_temp_new_i32(); tcg_gen_shri_i32(tmp2, tmp, 16); gen_aa32_st16(tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); tcg_gen_addi_i32(addr, addr, stride); gen_aa32_st16(tmp2, addr, get_mem_index(s)); tcg_temp_free_i32(tmp2); tcg_gen_addi_i32(addr, addr, stride); } } else /* size == 0 */ { if (load) { TCGV_UNUSED_I32(tmp2); for (n = 0; n < 4; n++) { tmp = tcg_temp_new_i32(); gen_aa32_ld8u(tmp, addr, get_mem_index(s)); tcg_gen_addi_i32(addr, addr, stride); if (n == 0) { tmp2 = tmp; } else { tcg_gen_shli_i32(tmp, tmp, n * 8); tcg_gen_or_i32(tmp2, tmp2, tmp); tcg_temp_free_i32(tmp); } } neon_store_reg(rd, pass, tmp2); } else { tmp2 = neon_load_reg(rd, pass); for (n = 0; n < 4; n++) { tmp = tcg_temp_new_i32(); if (n == 0) { tcg_gen_mov_i32(tmp, tmp2); } else { tcg_gen_shri_i32(tmp, tmp2, n * 8); } gen_aa32_st8(tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); tcg_gen_addi_i32(addr, addr, stride); } tcg_temp_free_i32(tmp2); } } } } rd += spacing; } tcg_temp_free_i32(addr); stride = nregs * 8; } else { size = (insn >> 10) & 3; if (size == 3) { /* Load single element to all lanes. 
*/ int a = (insn >> 4) & 1; if (!load) { return 1; } size = (insn >> 6) & 3; nregs = ((insn >> 8) & 3) + 1; if (size == 3) { if (nregs != 4 || a == 0) { return 1; } /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */ size = 2; } if (nregs == 1 && a == 1 && size == 0) { return 1; } if (nregs == 3 && a == 1) { return 1; } addr = tcg_temp_new_i32(); load_reg_var(s, addr, rn); if (nregs == 1) { /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */ tmp = gen_load_and_replicate(s, addr, size); tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0)); tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1)); if (insn & (1 << 5)) { tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0)); tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1)); } tcg_temp_free_i32(tmp); } else { /* VLD2/3/4 to all lanes: bit 5 indicates register stride */ stride = (insn & (1 << 5)) ? 2 : 1; for (reg = 0; reg < nregs; reg++) { tmp = gen_load_and_replicate(s, addr, size); tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0)); tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1)); tcg_temp_free_i32(tmp); tcg_gen_addi_i32(addr, addr, 1 << size); rd += stride; } } tcg_temp_free_i32(addr); stride = (1 << size) * nregs; } else { /* Single element. */ int idx = (insn >> 4) & 0xf; pass = (insn >> 7) & 1; switch (size) { case 0: shift = ((insn >> 5) & 3) * 8; stride = 1; break; case 1: shift = ((insn >> 6) & 1) * 16; stride = (insn & (1 << 5)) ? 2 : 1; break; case 2: shift = 0; stride = (insn & (1 << 6)) ? 2 : 1; break; default: abort(); } nregs = ((insn >> 8) & 3) + 1; /* Catch the UNDEF cases. This is unavoidably a bit messy. */ switch (nregs) { case 1: if (((idx & (1 << size)) != 0) || (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) { return 1; } break; case 3: if ((idx & 1) != 0) { return 1; } /* fall through */ case 2: if (size == 2 && (idx & 2) != 0) { return 1; } break; case 4: if ((size == 2) && ((idx & 3) == 3)) { return 1; } break; default: abort(); } if ((rd + stride * (nregs - 1)) > 31) { /* Attempts to write off the end of the register file * are UNPREDICTABLE; we choose to UNDEF because otherwise * the neon_load_reg() would write off the end of the array. */ return 1; } addr = tcg_temp_new_i32(); load_reg_var(s, addr, rn); for (reg = 0; reg < nregs; reg++) { if (load) { tmp = tcg_temp_new_i32(); switch (size) { case 0: gen_aa32_ld8u(tmp, addr, get_mem_index(s)); break; case 1: gen_aa32_ld16u(tmp, addr, get_mem_index(s)); break; case 2: gen_aa32_ld32u(tmp, addr, get_mem_index(s)); break; default: /* Avoid compiler warnings. */ abort(); } if (size != 2) { tmp2 = neon_load_reg(rd, pass); tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, size ? 
16 : 8); tcg_temp_free_i32(tmp2); } neon_store_reg(rd, pass, tmp); } else { /* Store */ tmp = neon_load_reg(rd, pass); if (shift) tcg_gen_shri_i32(tmp, tmp, shift); switch (size) { case 0: gen_aa32_st8(tmp, addr, get_mem_index(s)); break; case 1: gen_aa32_st16(tmp, addr, get_mem_index(s)); break; case 2: gen_aa32_st32(tmp, addr, get_mem_index(s)); break; } tcg_temp_free_i32(tmp); } rd += stride; tcg_gen_addi_i32(addr, addr, 1 << size); } tcg_temp_free_i32(addr); stride = nregs * (1 << size); } } if (rm != 15) { TCGv_i32 base; base = load_reg(s, rn); if (rm == 13) { tcg_gen_addi_i32(base, base, stride); } else { TCGv_i32 index; index = load_reg(s, rm); tcg_gen_add_i32(base, base, index); tcg_temp_free_i32(index); } store_reg(s, rn, base); } return 0; }"} {"target": 1, "idx": 16753, "func": "e1000_can_receive(NetClientState *nc) { E1000State *s = qemu_get_nic_opaque(nc); return (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1); }"} {"target": 1, "idx": 16758, "func": "static void rtce_init(VIOsPAPRDevice *dev) { size_t size = (dev->rtce_window_size >> SPAPR_VIO_TCE_PAGE_SHIFT) * sizeof(VIOsPAPR_RTCE); if (size) { dev->rtce_table = kvmppc_create_spapr_tce(dev->reg, dev->rtce_window_size, &dev->kvmtce_fd); if (!dev->rtce_table) { dev->rtce_table = g_malloc0(size); } } }"} {"target": 1, "idx": 16759, "func": "static void vnc_desktop_resize(VncState *vs) { DisplaySurface *ds = vs->vd->ds; if (vs->csock == -1 || !vnc_has_feature(vs, VNC_FEATURE_RESIZE)) { return; } if (vs->client_width == surface_width(ds) && vs->client_height == surface_height(ds)) { return; } vs->client_width = surface_width(ds); vs->client_height = surface_height(ds); vnc_lock_output(vs); vnc_write_u8(vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE); vnc_write_u8(vs, 0); vnc_write_u16(vs, 1); /* number of rects */ vnc_framebuffer_update(vs, 0, 0, vs->client_width, vs->client_height, VNC_ENCODING_DESKTOPRESIZE); vnc_unlock_output(vs); vnc_flush(vs); }"} {"target": 1, "idx": 16765, "func": "static void rtcp_send_sr(AVFormatContext *s1, int64_t ntp_time, int bye) { RTPMuxContext *s = s1->priv_data; uint32_t rtp_ts; av_log(s1, AV_LOG_TRACE, \"RTCP: %02x %\"PRIx64\" %x\\n\", s->payload_type, ntp_time, s->timestamp); s->last_rtcp_ntp_time = ntp_time; rtp_ts = av_rescale_q(ntp_time - s->first_rtcp_ntp_time, (AVRational){1, 1000000}, s1->streams[0]->time_base) + s->base_timestamp; avio_w8(s1->pb, RTP_VERSION << 6); avio_w8(s1->pb, RTCP_SR); avio_wb16(s1->pb, 6); /* length in words - 1 */ avio_wb32(s1->pb, s->ssrc); avio_wb64(s1->pb, NTP_TO_RTP_FORMAT(ntp_time)); avio_wb32(s1->pb, rtp_ts); avio_wb32(s1->pb, s->packet_count); avio_wb32(s1->pb, s->octet_count); if (s->cname) { int len = FFMIN(strlen(s->cname), 255); avio_w8(s1->pb, (RTP_VERSION << 6) + 1); avio_w8(s1->pb, RTCP_SDES); avio_wb16(s1->pb, (7 + len + 3) / 4); /* length in words - 1 */ avio_wb32(s1->pb, s->ssrc); avio_w8(s1->pb, 0x01); /* CNAME */ avio_w8(s1->pb, len); avio_write(s1->pb, s->cname, len); avio_w8(s1->pb, 0); /* END */ for (len = (7 + len) % 4; len % 4; len++) avio_w8(s1->pb, 0); } if (bye) { avio_w8(s1->pb, (RTP_VERSION << 6) | 1); avio_w8(s1->pb, RTCP_BYE); avio_wb16(s1->pb, 1); /* length in words - 1 */ avio_wb32(s1->pb, s->ssrc); } avio_flush(s1->pb); }"} {"target": 0, "idx": 16776, "func": "void stl_be_phys(target_phys_addr_t addr, uint32_t val) { stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN); }"} {"target": 0, "idx": 16777, "func": "static uint8_t *l2cap_bframe_out(struct bt_l2cap_conn_params_s *parm, int len) { struct l2cap_chan_s *chan = (struct 
l2cap_chan_s *) parm; if (len > chan->params.remote_mtu) { fprintf(stderr, \"%s: B-Frame for CID %04x longer than %i octets.\\n\", __func__, chan->remote_cid, chan->params.remote_mtu); exit(-1); } return l2cap_pdu_out(chan->l2cap, chan->remote_cid, len); }"} {"target": 0, "idx": 16804, "func": "static int do_open_tray(const char *device, bool force, Error **errp) { BlockBackend *blk; bool locked; blk = blk_by_name(device); if (!blk) { error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, \"Device '%s' not found\", device); return -ENODEV; } if (!blk_dev_has_removable_media(blk)) { error_setg(errp, \"Device '%s' is not removable\", device); return -ENOTSUP; } if (!blk_dev_has_tray(blk)) { /* Ignore this command on tray-less devices */ return ENOSYS; } if (blk_dev_is_tray_open(blk)) { return 0; } locked = blk_dev_is_medium_locked(blk); if (locked) { blk_dev_eject_request(blk, force); } if (!locked || force) { blk_dev_change_media_cb(blk, false); } if (locked && !force) { return EINPROGRESS; } return 0; }"} {"target": 0, "idx": 16816, "func": "static int read_config(BDRVBlkdebugState *s, const char *filename, QDict *options, Error **errp) { FILE *f = NULL; int ret; struct add_rule_data d; Error *local_err = NULL; if (filename) { f = fopen(filename, \"r\"); if (f == NULL) { error_setg_errno(errp, errno, \"Could not read blkdebug config file\"); return -errno; } ret = qemu_config_parse(f, config_groups, filename); if (ret < 0) { error_setg(errp, \"Could not parse blkdebug config file\"); ret = -EINVAL; goto fail; } } qemu_config_parse_qdict(options, config_groups, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto fail; } d.s = s; d.action = ACTION_INJECT_ERROR; qemu_opts_foreach(&inject_error_opts, add_rule, &d, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto fail; } d.action = ACTION_SET_STATE; qemu_opts_foreach(&set_state_opts, add_rule, &d, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto fail; } ret = 0; fail: qemu_opts_reset(&inject_error_opts); qemu_opts_reset(&set_state_opts); if (f) { fclose(f); } return ret; }"} {"target": 0, "idx": 16817, "func": "static void kvm_log_start(MemoryListener *listener, MemoryRegionSection *section) { int r; r = kvm_dirty_pages_log_change(section->offset_within_address_space, int128_get64(section->size), true); if (r < 0) { abort(); } }"} {"target": 0, "idx": 16821, "func": "int tcp_socket_outgoing_spec(const char *address_and_port) { return inet_connect(address_and_port, true, NULL); }"} {"target": 1, "idx": 16850, "func": "static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc) { switch (blkdev->protocol) { case BLKIF_PROTOCOL_NATIVE: memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc), sizeof(ioreq->req)); break; case BLKIF_PROTOCOL_X86_32: blkif_get_x86_32_req(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc)); break; case BLKIF_PROTOCOL_X86_64: blkif_get_x86_64_req(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc)); break; } return 0; }"} {"target": 0, "idx": 16856, "func": "static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic, GetByteContext *gb) { unsigned char rle_code; unsigned char extra_byte, odd_pixel; unsigned char stream_byte; unsigned int pixel_ptr = 0; int row_dec = pic->linesize[0]; int row_ptr = (avctx->height - 1) * row_dec; int frame_size = row_dec * avctx->height; int i; while (row_ptr >= 0) { if (bytestream2_get_bytes_left(gb) <= 0) { av_log(avctx, 
AV_LOG_ERROR, \"MS RLE: bytestream overrun, %d rows left\\n\", row_ptr); return AVERROR_INVALIDDATA; } rle_code = stream_byte = bytestream2_get_byteu(gb); if (rle_code == 0) { /* fetch the next byte to see how to handle escape code */ stream_byte = bytestream2_get_byte(gb); if (stream_byte == 0) { /* line is done, goto the next one */ row_ptr -= row_dec; pixel_ptr = 0; } else if (stream_byte == 1) { /* decode is done */ return 0; } else if (stream_byte == 2) { /* reposition frame decode coordinates */ stream_byte = bytestream2_get_byte(gb); pixel_ptr += stream_byte; stream_byte = bytestream2_get_byte(gb); row_ptr -= stream_byte * row_dec; } else { // copy pixels from encoded stream odd_pixel = stream_byte & 1; rle_code = (stream_byte + 1) / 2; extra_byte = rle_code & 0x01; if (row_ptr + pixel_ptr + stream_byte > frame_size || bytestream2_get_bytes_left(gb) < rle_code) { av_log(avctx, AV_LOG_ERROR, \"MS RLE: frame/stream ptr just went out of bounds (copy)\\n\"); return AVERROR_INVALIDDATA; } for (i = 0; i < rle_code; i++) { if (pixel_ptr >= avctx->width) break; stream_byte = bytestream2_get_byteu(gb); pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4; pixel_ptr++; if (i + 1 == rle_code && odd_pixel) break; if (pixel_ptr >= avctx->width) break; pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F; pixel_ptr++; } // if the RLE code is odd, skip a byte in the stream if (extra_byte) bytestream2_skip(gb, 1); } } else { // decode a run of data if (row_ptr + pixel_ptr + stream_byte > frame_size) { av_log(avctx, AV_LOG_ERROR, \"MS RLE: frame ptr just went out of bounds (run)\\n\"); return AVERROR_INVALIDDATA; } stream_byte = bytestream2_get_byte(gb); for (i = 0; i < rle_code; i++) { if (pixel_ptr >= avctx->width) break; if ((i & 1) == 0) pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4; else pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F; pixel_ptr++; } } } /* one last sanity check on the way out */ if (bytestream2_get_bytes_left(gb)) { av_log(avctx, AV_LOG_ERROR, \"MS RLE: ended frame decode with %d bytes left over\\n\", bytestream2_get_bytes_left(gb)); return AVERROR_INVALIDDATA; } return 0; }"} {"target": 0, "idx": 16868, "func": "void blk_remove_bs(BlockBackend *blk) { assert(blk->root->bs->blk == blk); notifier_list_notify(&blk->remove_bs_notifiers, blk); if (blk->public.throttle_state) { throttle_timers_detach_aio_context(&blk->public.throttle_timers); } blk_update_root_state(blk); blk->root->bs->blk = NULL; bdrv_root_unref_child(blk->root); blk->root = NULL; }"} {"target": 0, "idx": 16891, "func": "static int vnc_display_get_addresses(QemuOpts *opts, bool reverse, SocketAddressLegacy ***retsaddr, size_t *retnsaddr, SocketAddressLegacy ***retwsaddr, size_t *retnwsaddr, Error **errp) { SocketAddressLegacy *saddr = NULL; SocketAddressLegacy *wsaddr = NULL; QemuOptsIter addriter; const char *addr; int to = qemu_opt_get_number(opts, \"to\", 0); bool has_ipv4 = qemu_opt_get(opts, \"ipv4\"); bool has_ipv6 = qemu_opt_get(opts, \"ipv6\"); bool ipv4 = qemu_opt_get_bool(opts, \"ipv4\", false); bool ipv6 = qemu_opt_get_bool(opts, \"ipv6\", false); size_t i; int displaynum = -1; int ret = -1; *retsaddr = NULL; *retnsaddr = 0; *retwsaddr = NULL; *retnwsaddr = 0; addr = qemu_opt_get(opts, \"vnc\"); if (addr == NULL || g_str_equal(addr, \"none\")) { ret = 0; goto cleanup; } if (qemu_opt_get(opts, \"websocket\") && !qcrypto_hash_supports(QCRYPTO_HASH_ALG_SHA1)) { error_setg(errp, \"SHA1 hash support is required for websockets\"); goto cleanup; } qemu_opt_iter_init(&addriter, opts, \"vnc\"); 
while ((addr = qemu_opt_iter_next(&addriter)) != NULL) { int rv; rv = vnc_display_get_address(addr, false, reverse, 0, to, has_ipv4, has_ipv6, ipv4, ipv6, &saddr, errp); if (rv < 0) { goto cleanup; } /* Historical compat - first listen address can be used * to set the default websocket port */ if (displaynum == -1) { displaynum = rv; } *retsaddr = g_renew(SocketAddressLegacy *, *retsaddr, *retnsaddr + 1); (*retsaddr)[(*retnsaddr)++] = saddr; } /* If we had multiple primary displays, we don't do defaults * for websocket, and require explicit config instead. */ if (*retnsaddr > 1) { displaynum = -1; } qemu_opt_iter_init(&addriter, opts, \"websocket\"); while ((addr = qemu_opt_iter_next(&addriter)) != NULL) { if (vnc_display_get_address(addr, true, reverse, displaynum, to, has_ipv4, has_ipv6, ipv4, ipv6, &wsaddr, errp) < 0) { goto cleanup; } /* Historical compat - if only a single listen address was * provided, then this is used to set the default listen * address for websocket too */ if (*retnsaddr == 1 && (*retsaddr)[0]->type == SOCKET_ADDRESS_LEGACY_KIND_INET && wsaddr->type == SOCKET_ADDRESS_LEGACY_KIND_INET && g_str_equal(wsaddr->u.inet.data->host, \"\") && !g_str_equal((*retsaddr)[0]->u.inet.data->host, \"\")) { g_free(wsaddr->u.inet.data->host); wsaddr->u.inet.data->host = g_strdup((*retsaddr)[0]->u.inet.data->host); } *retwsaddr = g_renew(SocketAddressLegacy *, *retwsaddr, *retnwsaddr + 1); (*retwsaddr)[(*retnwsaddr)++] = wsaddr; } ret = 0; cleanup: if (ret < 0) { for (i = 0; i < *retnsaddr; i++) { qapi_free_SocketAddressLegacy((*retsaddr)[i]); } g_free(*retsaddr); for (i = 0; i < *retnwsaddr; i++) { qapi_free_SocketAddressLegacy((*retwsaddr)[i]); } g_free(*retwsaddr); *retsaddr = *retwsaddr = NULL; *retnsaddr = *retnwsaddr = 0; } return ret; }"} {"target": 0, "idx": 16894, "func": "static void gen_logicq_cc(TCGv val) { TCGv tmp = new_tmp(); gen_helper_logicq_cc(tmp, val); gen_logic_CC(tmp); dead_tmp(tmp); }"} {"target": 0, "idx": 16895, "func": "static int64_t nfs_client_open(NFSClient *client, QDict *options, int flags, int open_flags, Error **errp) { int ret = -EINVAL; QemuOpts *opts = NULL; Error *local_err = NULL; struct stat st; char *file = NULL, *strp = NULL; opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto fail; } client->path = g_strdup(qemu_opt_get(opts, \"path\")); if (!client->path) { ret = -EINVAL; error_setg(errp, \"No path was specified\"); goto fail; } strp = strrchr(client->path, '/'); if (strp == NULL) { error_setg(errp, \"Invalid URL specified\"); goto fail; } file = g_strdup(strp); *strp = 0; /* Pop the config into our state object, Exit if invalid */ client->server = nfs_config(options, errp); if (!client->server) { ret = -EINVAL; goto fail; } client->context = nfs_init_context(); if (client->context == NULL) { error_setg(errp, \"Failed to init NFS context\"); goto fail; } if (qemu_opt_get(opts, \"user\")) { client->uid = qemu_opt_get_number(opts, \"user\", 0); nfs_set_uid(client->context, client->uid); } if (qemu_opt_get(opts, \"group\")) { client->gid = qemu_opt_get_number(opts, \"group\", 0); nfs_set_gid(client->context, client->gid); } if (qemu_opt_get(opts, \"tcp-syn-count\")) { client->tcp_syncnt = qemu_opt_get_number(opts, \"tcp-syn-count\", 0); nfs_set_tcp_syncnt(client->context, client->tcp_syncnt); } #ifdef LIBNFS_FEATURE_READAHEAD if (qemu_opt_get(opts, \"readahead-size\")) { if (open_flags & BDRV_O_NOCACHE) { 
error_setg(errp, \"Cannot enable NFS readahead \" \"if cache.direct = on\"); goto fail; } client->readahead = qemu_opt_get_number(opts, \"readahead-size\", 0); if (client->readahead > QEMU_NFS_MAX_READAHEAD_SIZE) { error_report(\"NFS Warning: Truncating NFS readahead \" \"size to %d\", QEMU_NFS_MAX_READAHEAD_SIZE); client->readahead = QEMU_NFS_MAX_READAHEAD_SIZE; } nfs_set_readahead(client->context, client->readahead); #ifdef LIBNFS_FEATURE_PAGECACHE nfs_set_pagecache_ttl(client->context, 0); #endif client->cache_used = true; } #endif #ifdef LIBNFS_FEATURE_PAGECACHE if (qemu_opt_get(opts, \"page-cache-size\")) { if (open_flags & BDRV_O_NOCACHE) { error_setg(errp, \"Cannot enable NFS pagecache \" \"if cache.direct = on\"); goto fail; } client->pagecache = qemu_opt_get_number(opts, \"page-cache-size\", 0); if (client->pagecache > QEMU_NFS_MAX_PAGECACHE_SIZE) { error_report(\"NFS Warning: Truncating NFS pagecache \" \"size to %d pages\", QEMU_NFS_MAX_PAGECACHE_SIZE); client->pagecache = QEMU_NFS_MAX_PAGECACHE_SIZE; } nfs_set_pagecache(client->context, client->pagecache); nfs_set_pagecache_ttl(client->context, 0); client->cache_used = true; } #endif #ifdef LIBNFS_FEATURE_DEBUG if (qemu_opt_get(opts, \"debug\")) { client->debug = qemu_opt_get_number(opts, \"debug\", 0); /* limit the maximum debug level to avoid potential flooding * of our log files. */ if (client->debug > QEMU_NFS_MAX_DEBUG_LEVEL) { error_report(\"NFS Warning: Limiting NFS debug level \" \"to %d\", QEMU_NFS_MAX_DEBUG_LEVEL); client->debug = QEMU_NFS_MAX_DEBUG_LEVEL; } nfs_set_debug(client->context, client->debug); } #endif ret = nfs_mount(client->context, client->server->host, client->path); if (ret < 0) { error_setg(errp, \"Failed to mount nfs share: %s\", nfs_get_error(client->context)); goto fail; } if (flags & O_CREAT) { ret = nfs_creat(client->context, file, 0600, &client->fh); if (ret < 0) { error_setg(errp, \"Failed to create file: %s\", nfs_get_error(client->context)); goto fail; } } else { ret = nfs_open(client->context, file, flags, &client->fh); if (ret < 0) { error_setg(errp, \"Failed to open file : %s\", nfs_get_error(client->context)); goto fail; } } ret = nfs_fstat(client->context, client->fh, &st); if (ret < 0) { error_setg(errp, \"Failed to fstat file: %s\", nfs_get_error(client->context)); goto fail; } ret = DIV_ROUND_UP(st.st_size, BDRV_SECTOR_SIZE); client->st_blocks = st.st_blocks; client->has_zero_init = S_ISREG(st.st_mode); *strp = '/'; goto out; fail: nfs_client_close(client); out: qemu_opts_del(opts); g_free(file); return ret; }"} {"target": 0, "idx": 16897, "func": "static AVStream *new_pes_av_stream(PESContext *pes, uint32_t prog_reg_desc, uint32_t code) { AVStream *st = av_new_stream(pes->stream, pes->pid); if (!st) return NULL; av_set_pts_info(st, 33, 1, 90000); st->priv_data = pes; st->codec->codec_type = CODEC_TYPE_DATA; st->codec->codec_id = CODEC_ID_NONE; st->need_parsing = AVSTREAM_PARSE_FULL; pes->st = st; dprintf(pes->stream, \"stream_type=%x pid=%x prog_reg_desc=%.4s\\n\", pes->stream_type, pes->pid, (char*)&prog_reg_desc); st->codec->codec_tag = pes->stream_type; mpegts_find_stream_type(st, pes->stream_type, ISO_types); if (prog_reg_desc == AV_RL32(\"HDMV\") && st->codec->codec_id == CODEC_ID_NONE) { mpegts_find_stream_type(st, pes->stream_type, HDMV_types); if (pes->stream_type == 0x83) { // HDMV TrueHD streams also contain an AC3 coded version of the // audio track - add a second stream for this AVStream *sub_st; // priv_data cannot be shared between streams PESContext *sub_pes = 
av_malloc(sizeof(*sub_pes)); if (!sub_pes) return NULL; memcpy(sub_pes, pes, sizeof(*sub_pes)); sub_st = av_new_stream(pes->stream, pes->pid); if (!sub_st) { av_free(sub_pes); return NULL; } av_set_pts_info(sub_st, 33, 1, 90000); sub_st->priv_data = sub_pes; sub_st->codec->codec_type = CODEC_TYPE_AUDIO; sub_st->codec->codec_id = CODEC_ID_AC3; sub_st->need_parsing = AVSTREAM_PARSE_FULL; sub_pes->sub_st = pes->sub_st = sub_st; } } if (st->codec->codec_id == CODEC_ID_NONE) mpegts_find_stream_type(st, pes->stream_type, MISC_types); /* stream was not present in PMT, guess based on PES start code */ if (st->codec->codec_id == CODEC_ID_NONE) { if (code >= 0x1c0 && code <= 0x1df) { st->codec->codec_type = CODEC_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_MP2; } else if (code == 0x1bd) { st->codec->codec_type = CODEC_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_AC3; } } return st; }"} {"target": 0, "idx": 16898, "func": "BlockAIOCB *dma_bdrv_io( BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num, DMAIOFunc *io_func, BlockCompletionFunc *cb, void *opaque, DMADirection dir) { DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque); trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE)); dbs->acb = NULL; dbs->bs = bs; dbs->sg = sg; dbs->sector_num = sector_num; dbs->sg_cur_index = 0; dbs->sg_cur_byte = 0; dbs->dir = dir; dbs->io_func = io_func; dbs->bh = NULL; qemu_iovec_init(&dbs->iov, sg->nsg); dma_bdrv_cb(dbs, 0); return &dbs->common; }"} {"target": 0, "idx": 16909, "func": "void helper_ldl_l_raw(uint64_t t0, uint64_t t1) { env->lock = t1; ldl_raw(t1, t0); }"} {"target": 0, "idx": 16916, "func": "static void assign_storage(SCLPDevice *sclp, SCCB *sccb) { MemoryRegion *mr = NULL; uint64_t this_subregion_size; AssignStorage *assign_info = (AssignStorage *) sccb; sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev(); assert(mhd); ram_addr_t assign_addr = (assign_info->rn - 1) * mhd->rzm; MemoryRegion *sysmem = get_system_memory(); if ((assign_addr % MEM_SECTION_SIZE == 0) && (assign_addr >= mhd->padded_ram_size)) { /* Re-use existing memory region if found */ mr = memory_region_find(sysmem, assign_addr, 1).mr; memory_region_unref(mr); if (!mr) { MemoryRegion *standby_ram = g_new(MemoryRegion, 1); /* offset to align to standby_subregion_size for allocation */ ram_addr_t offset = assign_addr - (assign_addr - mhd->padded_ram_size) % mhd->standby_subregion_size; /* strlen(\"standby.ram\") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */ char id[16]; snprintf(id, 16, \"standby.ram%d\", (int)((offset - mhd->padded_ram_size) / mhd->standby_subregion_size) + 1); /* Allocate a subregion of the calculated standby_subregion_size */ if (offset + mhd->standby_subregion_size > mhd->padded_ram_size + mhd->standby_mem_size) { this_subregion_size = mhd->padded_ram_size + mhd->standby_mem_size - offset; } else { this_subregion_size = mhd->standby_subregion_size; } memory_region_init_ram(standby_ram, NULL, id, this_subregion_size, &error_abort); /* This is a hack to make memory hotunplug work again. Once we have * subdevices, we have to unparent them when unassigning memory, * instead of doing it via the ref count of the MemoryRegion. 
*/ object_ref(OBJECT(standby_ram)); object_unparent(OBJECT(standby_ram)); vmstate_register_ram_global(standby_ram); memory_region_add_subregion(sysmem, offset, standby_ram); } /* The specified subregion is no longer in standby */ mhd->standby_state_map[(assign_addr - mhd->padded_ram_size) / MEM_SECTION_SIZE] = 1; } sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION); }"} {"target": 0, "idx": 16917, "func": "static void dump_aml_files(test_data *data) { AcpiSdtTable *sdt; GError *error = NULL; gint fd; ssize_t ret; int i; for (i = 0; i < data->ssdt_tables->len; ++i) { sdt = &g_array_index(data->ssdt_tables, AcpiSdtTable, i); g_assert(sdt->aml); fd = g_file_open_tmp(\"aml-XXXXXX\", &sdt->aml_file, &error); g_assert_no_error(error); ret = qemu_write_full(fd, sdt, sizeof(AcpiTableHeader)); g_assert(ret == sizeof(AcpiTableHeader)); ret = qemu_write_full(fd, sdt->aml, sdt->aml_len); g_assert(ret == sdt->aml_len); close(fd); } }"} {"target": 1, "idx": 16921, "func": "void ff_er_frame_end(MpegEncContext *s){ int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error; int distance; int threshold_part[4]= {100,100,100}; int threshold= 50; int is_intra_likely; int size = s->b8_stride * 2 * s->mb_height; Picture *pic= s->current_picture_ptr; if(!s->error_recognition || s->error_count==0 || s->avctx->lowres || s->avctx->hwaccel || s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU || s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return; if(s->current_picture.motion_val[0] == NULL){ av_log(s->avctx, AV_LOG_ERROR, \"Warning MVs not available\\n\"); for(i=0; i<2; i++){ pic->ref_index[i]= av_mallocz(size * sizeof(uint8_t)); pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t)); pic->motion_val[i]= pic->motion_val_base[i]+4; } pic->motion_subsample_log2= 3; s->current_picture= *s->current_picture_ptr; } for(i=0; i<2; i++){ if(pic->ref_index[i]) memset(pic->ref_index[i], 0, size * sizeof(uint8_t)); } if(s->avctx->debug&FF_DEBUG_ER){ for(mb_y=0; mb_y<s->mb_height; mb_y++){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ int status= s->error_status_table[mb_x + mb_y*s->mb_stride]; av_log(s->avctx, AV_LOG_DEBUG, \"%2X \", status); } av_log(s->avctx, AV_LOG_DEBUG, \"\\n\"); } } #if 1 /* handle overlapping slices */ for(error_type=1; error_type<=3; error_type++){ int end_ok=0; for(i=s->mb_num-1; i>=0; i--){ const int mb_xy= s->mb_index2xy[i]; int error= s->error_status_table[mb_xy]; if(error&(1<<error_type)) end_ok=1; if(error&(8<<error_type)) end_ok=1; if(!end_ok) s->error_status_table[mb_xy]|= 1<<error_type; if(error&VP_START) end_ok=0; } } #endif #if 1 /* handle slices with partitions of different length */ if(s->partitioned_frame){ int end_ok=0; for(i=s->mb_num-1; i>=0; i--){ const int mb_xy= s->mb_index2xy[i]; int error= s->error_status_table[mb_xy]; if(error&AC_END) end_ok=0; if((error&MV_END) || (error&DC_END) || (error&AC_ERROR)) end_ok=1; if(!end_ok) s->error_status_table[mb_xy]|= AC_ERROR; if(error&VP_START) end_ok=0; } } #endif /* handle missing slices */ if(s->error_recognition>=4){ int end_ok=1; for(i=s->mb_num-2; i>=s->mb_width+100; i--){ //FIXME +100 hack const int mb_xy= s->mb_index2xy[i]; int error1= s->error_status_table[mb_xy ]; int error2= s->error_status_table[s->mb_index2xy[i+1]]; if(error1&VP_START) end_ok=1; if( error2==(VP_START|DC_ERROR|AC_ERROR|MV_ERROR|AC_END|DC_END|MV_END) && error1!=(VP_START|DC_ERROR|AC_ERROR|MV_ERROR|AC_END|DC_END|MV_END) && ((error1&AC_END) || (error1&DC_END) || (error1&MV_END))){ //end & uninit end_ok=0; } if(!end_ok) s->error_status_table[mb_xy]|= DC_ERROR|AC_ERROR|MV_ERROR; } } #if 1 /* backward mark errors */ distance=9999999; for(error_type=1; error_type<=3; error_type++){ for(i=s->mb_num-1; i>=0;
i--){ const int mb_xy= s->mb_index2xy[i]; int error= s->error_status_table[mb_xy]; if(!s->mbskip_table[mb_xy]) //FIXME partition specific distance++; if(error&(1<<error_type)) distance= 0; if(s->partitioned_frame){ if(distance < threshold_part[error_type-1]) s->error_status_table[mb_xy]|= 1<<error_type; }else{ if(distance < threshold) s->error_status_table[mb_xy]|= 1<<error_type; } if(error&VP_START) distance= 9999999; } } #endif /* forward mark errors */ error=0; for(i=0; i<s->mb_num; i++){ const int mb_xy= s->mb_index2xy[i]; int old_error= s->error_status_table[mb_xy]; if(old_error&VP_START) error= old_error& (DC_ERROR|AC_ERROR|MV_ERROR); else{ error|= old_error& (DC_ERROR|AC_ERROR|MV_ERROR); s->error_status_table[mb_xy]|= error; } } #if 1 /* handle not partitioned case */ if(!s->partitioned_frame){ for(i=0; i<s->mb_num; i++){ const int mb_xy= s->mb_index2xy[i]; error= s->error_status_table[mb_xy]; if(error&(AC_ERROR|DC_ERROR|MV_ERROR)) error|= AC_ERROR|DC_ERROR|MV_ERROR; s->error_status_table[mb_xy]= error; } } #endif dc_error= ac_error= mv_error=0; for(i=0; i<s->mb_num; i++){ const int mb_xy= s->mb_index2xy[i]; error= s->error_status_table[mb_xy]; if(error&DC_ERROR) dc_error ++; if(error&AC_ERROR) ac_error ++; if(error&MV_ERROR) mv_error ++; } av_log(s->avctx, AV_LOG_INFO, \"concealing %d DC, %d AC, %d MV errors\\n\", dc_error, ac_error, mv_error); is_intra_likely= is_intra_more_likely(s); /* set unknown mb-type to most likely */ for(i=0; i<s->mb_num; i++){ const int mb_xy= s->mb_index2xy[i]; error= s->error_status_table[mb_xy]; if(!((error&DC_ERROR) && (error&MV_ERROR))) continue; if(is_intra_likely) s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4; else s->current_picture.mb_type[mb_xy]= MB_TYPE_16x16 | MB_TYPE_L0; } // change inter to intra blocks if no reference frames are available if (!s->last_picture.data[0] && !s->next_picture.data[0]) for(i=0; i<s->mb_num; i++){ const int mb_xy= s->mb_index2xy[i]; if(!IS_INTRA(s->current_picture.mb_type[mb_xy])) s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4; } /* handle inter blocks with damaged AC */ for(mb_y=0; mb_y<s->mb_height; mb_y++){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ const int mb_xy= mb_x + mb_y * s->mb_stride; const int mb_type= s->current_picture.mb_type[mb_xy]; int dir = !s->last_picture.data[0]; error= s->error_status_table[mb_xy]; if(IS_INTRA(mb_type)) continue; //intra if(error&MV_ERROR) continue; //inter with damaged MV if(!(error&AC_ERROR)) continue; //undamaged inter s->mv_dir = dir ?
MV_DIR_BACKWARD : MV_DIR_FORWARD; s->mb_intra=0; s->mb_skipped=0; if(IS_8X8(mb_type)){ int mb_index= mb_x*2 + mb_y*2*s->b8_stride; int j; s->mv_type = MV_TYPE_8X8; for(j=0; j<4; j++){ s->mv[0][j][0] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][0]; s->mv[0][j][1] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][1]; } }else{ s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0]; s->mv[0][0][1] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1]; } s->dsp.clear_blocks(s->block[0]); s->mb_x= mb_x; s->mb_y= mb_y; decode_mb(s); } } /* guess MVs */ if(s->pict_type==FF_B_TYPE){ for(mb_y=0; mb_y<s->mb_height; mb_y++){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ int xy= mb_x*2 + mb_y*2*s->b8_stride; const int mb_xy= mb_x + mb_y * s->mb_stride; const int mb_type= s->current_picture.mb_type[mb_xy]; error= s->error_status_table[mb_xy]; if(IS_INTRA(mb_type)) continue; if(!(error&MV_ERROR)) continue; //inter with undamaged MV if(!(error&AC_ERROR)) continue; //undamaged inter s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD; if(!s->last_picture.data[0]) s->mv_dir &= ~MV_DIR_FORWARD; if(!s->next_picture.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD; s->mb_intra=0; s->mv_type = MV_TYPE_16X16; s->mb_skipped=0; if(s->pp_time){ int time_pp= s->pp_time; int time_pb= s->pb_time; s->mv[0][0][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp; s->mv[0][0][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp; s->mv[1][0][0] = s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp; s->mv[1][0][1] = s->next_picture.motion_val[0][xy][1]*(time_pb - time_pp)/time_pp; }else{ s->mv[0][0][0]= 0; s->mv[0][0][1]= 0; s->mv[1][0][0]= 0; s->mv[1][0][1]= 0; } s->dsp.clear_blocks(s->block[0]); s->mb_x= mb_x; s->mb_y= mb_y; decode_mb(s); } } }else guess_mv(s); /* the filters below are not XvMC compatible, skip them */ if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) goto ec_clean; /* fill DC for inter blocks */ for(mb_y=0; mb_y<s->mb_height; mb_y++){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ int dc, dcu, dcv, y, n; int16_t *dc_ptr; uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy= mb_x + mb_y * s->mb_stride; const int mb_type= s->current_picture.mb_type[mb_xy]; error= s->error_status_table[mb_xy]; if(IS_INTRA(mb_type) && s->partitioned_frame) continue; // if(error&MV_ERROR) continue; //inter data damaged FIXME is this good?
dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize; dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize; dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize; dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride]; for(n=0; n<4; n++){ dc=0; for(y=0; y<8; y++){ int x; for(x=0; x<8; x++){ dc+= dest_y[x + (n&1)*8 + (y + (n>>1)*8)*s->linesize]; } } dc_ptr[(n&1) + (n>>1)*s->b8_stride]= (dc+4)>>3; } dcu=dcv=0; for(y=0; y<8; y++){ int x; for(x=0; x<8; x++){ dcu+=dest_cb[x + y*(s->uvlinesize)]; dcv+=dest_cr[x + y*(s->uvlinesize)]; } } s->dc_val[1][mb_x + mb_y*s->mb_stride]= (dcu+4)>>3; s->dc_val[2][mb_x + mb_y*s->mb_stride]= (dcv+4)>>3; } } #if 1 /* guess DC for damaged blocks */ guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1); guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0); guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0); #endif /* filter luma DC */ filter181(s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride); #if 1 /* render DC only intra */ for(mb_y=0; mb_y<s->mb_height; mb_y++){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy= mb_x + mb_y * s->mb_stride; const int mb_type= s->current_picture.mb_type[mb_xy]; error= s->error_status_table[mb_xy]; if(IS_INTER(mb_type)) continue; if(!(error&AC_ERROR)) continue; //undamaged dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize; dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize; dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize; put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y); } } #endif if(s->avctx->error_concealment&FF_EC_DEBLOCK){ /* filter horizontal block boundaries */ h_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1); h_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0); h_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0); /* filter vertical block boundaries */ v_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1); v_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0); v_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0); } ec_clean: /* clean a few tables */ for(i=0; i<s->mb_num; i++){ const int mb_xy= s->mb_index2xy[i]; int error= s->error_status_table[mb_xy]; if(s->pict_type!=FF_B_TYPE && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){ s->mbskip_table[mb_xy]=0; } s->mbintra_table[mb_xy]=1; } }"} {"target": 1, "idx": 16932, "func": "static int matroska_deliver_packet(MatroskaDemuxContext *matroska, AVPacket *pkt) { if (matroska->num_packets > 0) { memcpy(pkt, matroska->packets[0], sizeof(AVPacket)); av_free(matroska->packets[0]); if (matroska->num_packets > 1) { memmove(&matroska->packets[0], &matroska->packets[1], (matroska->num_packets - 1) * sizeof(AVPacket *)); matroska->packets = av_realloc(matroska->packets, (matroska->num_packets - 1) * sizeof(AVPacket *)); } else { av_freep(&matroska->packets); } matroska->num_packets--; return 0; } return -1; }"} {"target": 0, "idx": 16942, "func": "static int vc1_parse_init(AVCodecParserContext *s) { VC1ParseContext *vpc = s->priv_data; vpc->v.s.slice_context_count = 1; return 0; }"} {"target": 0, "idx": 16943, "func": "static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame) { AVCodecInternal *avci = avctx->internal; DecodeSimpleContext *ds =
&avci->ds; AVPacket *pkt = ds->in_pkt; // copy to ensure we do not change pkt AVPacket tmp; int got_frame, actual_got_frame, did_split; int ret; if (!pkt->data && !avci->draining) { av_packet_unref(pkt); ret = ff_decode_get_packet(avctx, pkt); if (ret < 0 && ret != AVERROR_EOF) return ret; } // Some codecs (at least wma lossless) will crash when feeding drain packets // after EOF was signaled. if (avci->draining_done) return AVERROR_EOF; if (!pkt->data && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY || avctx->active_thread_type & FF_THREAD_FRAME)) return AVERROR_EOF; tmp = *pkt; #if FF_API_MERGE_SD FF_DISABLE_DEPRECATION_WARNINGS did_split = av_packet_split_side_data(&tmp); if (did_split) { ret = extract_packet_props(avctx->internal, &tmp); if (ret < 0) return ret; ret = apply_param_change(avctx, &tmp); if (ret < 0) return ret; } FF_ENABLE_DEPRECATION_WARNINGS #endif got_frame = 0; if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) { ret = ff_thread_decode_frame(avctx, frame, &got_frame, &tmp); } else { ret = avctx->codec->decode(avctx, frame, &got_frame, &tmp); if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) { if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS)) frame->pkt_dts = pkt->dts; if(!avctx->has_b_frames) frame->pkt_pos = pkt->pos; //FIXME these should be under if(!avctx->has_b_frames) /* get_buffer is supposed to set frame parameters */ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) { if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio; if (!frame->width) frame->width = avctx->width; if (!frame->height) frame->height = avctx->height; if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt; } } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) { frame->pkt_dts = pkt->dts; } } emms_c(); actual_got_frame = got_frame; if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) { if (frame->flags & AV_FRAME_FLAG_DISCARD) got_frame = 0; if (got_frame) frame->best_effort_timestamp = guess_correct_pts(avctx, frame->pts, frame->pkt_dts); } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) { uint8_t *side; int side_size; uint32_t discard_padding = 0; uint8_t skip_reason = 0; uint8_t discard_reason = 0; if (ret >= 0 && got_frame) { frame->best_effort_timestamp = guess_correct_pts(avctx, frame->pts, frame->pkt_dts); if (frame->format == AV_SAMPLE_FMT_NONE) frame->format = avctx->sample_fmt; if (!frame->channel_layout) frame->channel_layout = avctx->channel_layout; if (!frame->channels) frame->channels = avctx->channels; if (!frame->sample_rate) frame->sample_rate = avctx->sample_rate; } side= av_packet_get_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size); if(side && side_size>=10) { avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier; discard_padding = AV_RL32(side + 4); av_log(avctx, AV_LOG_DEBUG, \"skip %d / discard %d samples due to side data\\n\", avctx->internal->skip_samples, (int)discard_padding); skip_reason = AV_RL8(side + 8); discard_reason = AV_RL8(side + 9); } if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame && !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) { avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples); got_frame = 0; } if (avctx->internal->skip_samples > 0 && got_frame && !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) { if(frame->nb_samples <= avctx->internal->skip_samples){ got_frame = 0; avctx->internal->skip_samples -= frame->nb_samples; av_log(avctx, AV_LOG_DEBUG, \"skip whole frame, skip left: %d\\n\", 
avctx->internal->skip_samples); } else { av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples, frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format); if(avctx->pkt_timebase.num && avctx->sample_rate) { int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples, (AVRational){1, avctx->sample_rate}, avctx->pkt_timebase); if(frame->pts!=AV_NOPTS_VALUE) frame->pts += diff_ts; #if FF_API_PKT_PTS FF_DISABLE_DEPRECATION_WARNINGS if(frame->pkt_pts!=AV_NOPTS_VALUE) frame->pkt_pts += diff_ts; FF_ENABLE_DEPRECATION_WARNINGS #endif if(frame->pkt_dts!=AV_NOPTS_VALUE) frame->pkt_dts += diff_ts; if (frame->pkt_duration >= diff_ts) frame->pkt_duration -= diff_ts; } else { av_log(avctx, AV_LOG_WARNING, \"Could not update timestamps for skipped samples.\\n\"); } av_log(avctx, AV_LOG_DEBUG, \"skip %d/%d samples\\n\", avctx->internal->skip_samples, frame->nb_samples); frame->nb_samples -= avctx->internal->skip_samples; avctx->internal->skip_samples = 0; } } if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame && !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) { if (discard_padding == frame->nb_samples) { got_frame = 0; } else { if(avctx->pkt_timebase.num && avctx->sample_rate) { int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding, (AVRational){1, avctx->sample_rate}, avctx->pkt_timebase); frame->pkt_duration = diff_ts; } else { av_log(avctx, AV_LOG_WARNING, \"Could not update timestamps for discarded samples.\\n\"); } av_log(avctx, AV_LOG_DEBUG, \"discard %d/%d samples\\n\", (int)discard_padding, frame->nb_samples); frame->nb_samples -= discard_padding; } } if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) { AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10); if (fside) { AV_WL32(fside->data, avctx->internal->skip_samples); AV_WL32(fside->data + 4, discard_padding); AV_WL8(fside->data + 8, skip_reason); AV_WL8(fside->data + 9, discard_reason); avctx->internal->skip_samples = 0; } } } #if FF_API_MERGE_SD if (did_split) { av_packet_free_side_data(&tmp); if(ret == tmp.size) ret = pkt->size; } #endif if (avctx->codec->type == AVMEDIA_TYPE_AUDIO && !avci->showed_multi_packet_warning && ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) { av_log(avctx, AV_LOG_WARNING, \"Multiple frames in a packet.\\n\"); avci->showed_multi_packet_warning = 1; } if (!got_frame) av_frame_unref(frame); if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED)) ret = pkt->size; #if FF_API_AVCTX_TIMEBASE if (avctx->framerate.num > 0 && avctx->framerate.den > 0) avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1})); #endif /* do not stop draining when actual_got_frame != 0 or ret < 0 */ /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */ if (avctx->internal->draining && !actual_got_frame) { if (ret < 0) { /* prevent infinite loop if a decoder wrongly always return error on draining */ /* reasonable nb_errors_max = maximum b frames + thread count */ int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ? avctx->thread_count : 1); if (avci->nb_draining_errors++ >= nb_errors_max) { av_log(avctx, AV_LOG_ERROR, \"Too many errors when draining, this is a bug. 
\" \"Stop draining and force EOF.\\n\"); avci->draining_done = 1; ret = AVERROR_BUG; } } else { avci->draining_done = 1; } } avci->compat_decode_consumed += ret; if (ret >= pkt->size || ret < 0) { av_packet_unref(pkt); } else { int consumed = ret; pkt->data += consumed; pkt->size -= consumed; avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment. pkt->pts = AV_NOPTS_VALUE; pkt->dts = AV_NOPTS_VALUE; avci->last_pkt_props->pts = AV_NOPTS_VALUE; avci->last_pkt_props->dts = AV_NOPTS_VALUE; } if (got_frame) av_assert0(frame->buf[0]); return ret < 0 ? ret : 0; }"} {"target": 0, "idx": 16947, "func": "static void sd_erase(SDState *sd) { int i, start, end; if (!sd->erase_start || !sd->erase_end) { sd->card_status |= ERASE_SEQ_ERROR; return; } start = sd->erase_start >> (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT); end = sd->erase_end >> (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT); sd->erase_start = 0; sd->erase_end = 0; sd->csd[14] |= 0x40; for (i = start; i <= end; i ++) if (sd->wp_groups[i]) sd->card_status |= WP_ERASE_SKIP; }"} {"target": 0, "idx": 16948, "func": "static void tcg_init_vcpu(void *_env) { CPUState *env = _env; /* share a single thread for all cpus with TCG */ if (!tcg_cpu_thread) { env->thread = qemu_mallocz(sizeof(QemuThread)); env->halt_cond = qemu_mallocz(sizeof(QemuCond)); qemu_cond_init(env->halt_cond); qemu_thread_create(env->thread, tcg_cpu_thread_fn, env); while (env->created == 0) qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100); tcg_cpu_thread = env->thread; tcg_halt_cond = env->halt_cond; } else { env->thread = tcg_cpu_thread; env->halt_cond = tcg_halt_cond; } }"} {"target": 0, "idx": 16966, "func": "static int nbd_negotiate_handle_list(NBDClient *client, uint32_t length, Error **errp) { NBDExport *exp; if (length) { if (nbd_drop(client->ioc, length, errp) < 0) { return -EIO; } return nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_INVALID, NBD_OPT_LIST, errp, \"OPT_LIST should not have length\"); } /* For each export, send a NBD_REP_SERVER reply. */ QTAILQ_FOREACH(exp, &exports, next) { if (nbd_negotiate_send_rep_list(client->ioc, exp, errp)) { return -EINVAL; } } /* Finish with a NBD_REP_ACK. 
*/ return nbd_negotiate_send_rep(client->ioc, NBD_REP_ACK, NBD_OPT_LIST, errp); }"} {"target": 0, "idx": 16967, "func": "static void init_excp_620 (CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_DSEG] = 0x00000380; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_ISEG] = 0x00000480; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400; env->excp_prefix = 0xFFF00000UL; /* Hardware reset vector */ env->hreset_vector = 0x0000000000000100ULL; #endif }"} {"target": 1, "idx": 16969, "func": "static void json_print_chapter_header(WriterContext *wctx, const char *chapter) { JSONContext *json = wctx->priv; char *chapter_esc; if (wctx->nb_chapter) printf(\",\"); json->multiple_entries = !strcmp(chapter, \"packets\") || !strcmp(chapter, \"streams\"); chapter_esc = json_escape_str(chapter); printf(\"\\n \\\"%s\\\":%s\", chapter_esc ? chapter_esc : \"\", json->multiple_entries ? \" [\" : \" \"); av_free(chapter_esc); }"} {"target": 1, "idx": 16977, "func": "void qemu_macaddr_default_if_unset(MACAddr *macaddr) { static int index = 0; static const MACAddr zero = { .a = { 0,0,0,0,0,0 } }; if (memcmp(macaddr, &zero, sizeof(zero)) != 0) return; macaddr->a[0] = 0x52; macaddr->a[1] = 0x54; macaddr->a[2] = 0x00; macaddr->a[3] = 0x12; macaddr->a[4] = 0x34; macaddr->a[5] = 0x56 + index++; }"} {"target": 0, "idx": 16981, "func": "static int compute_bit_allocation(AC3EncodeContext *s, uint8_t bap[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS], uint8_t encoded_exp[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS], uint8_t exp_strategy[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS], int frame_bits) { int blk, ch; int coarse_snr_offset, fine_snr_offset; uint8_t bap1[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; int16_t psd[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; int16_t mask[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_CRITICAL_BANDS]; static const int frame_bits_inc[8] = { 0, 0, 2, 2, 2, 4, 2, 4 }; /* init default parameters */ s->slow_decay_code = 2; s->fast_decay_code = 1; s->slow_gain_code = 1; s->db_per_bit_code = 2; s->floor_code = 4; for (ch = 0; ch < s->channels; ch++) s->fast_gain_code[ch] = 4; /* compute real values */ s->bit_alloc.slow_decay = ff_ac3_slow_decay_tab[s->slow_decay_code] >> s->bit_alloc.sr_shift; s->bit_alloc.fast_decay = ff_ac3_fast_decay_tab[s->fast_decay_code] >> s->bit_alloc.sr_shift; s->bit_alloc.slow_gain = ff_ac3_slow_gain_tab[s->slow_gain_code]; s->bit_alloc.db_per_bit = ff_ac3_db_per_bit_tab[s->db_per_bit_code]; s->bit_alloc.floor = ff_ac3_floor_tab[s->floor_code]; /* header size */ frame_bits += 65; // if (s->channel_mode == 2) // frame_bits += 2; frame_bits += frame_bits_inc[s->channel_mode]; /* audio blocks */ for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { frame_bits += s->fbw_channels * 2 + 2; /* blksw * c, dithflag * c, dynrnge, cplstre */ if (s->channel_mode 
== AC3_CHMODE_STEREO) { frame_bits++; /* rematstr */ if (!blk) frame_bits += 4; } frame_bits += 2 * s->fbw_channels; /* chexpstr[2] * c */ if (s->lfe_on) frame_bits++; /* lfeexpstr */ for (ch = 0; ch < s->fbw_channels; ch++) { if (exp_strategy[blk][ch] != EXP_REUSE) frame_bits += 6 + 2; /* chbwcod[6], gainrng[2] */ } frame_bits++; /* baie */ frame_bits++; /* snr */ frame_bits += 2; /* delta / skip */ } frame_bits++; /* cplinu for block 0 */ /* bit alloc info */ /* sdcycod[2], fdcycod[2], sgaincod[2], dbpbcod[2], floorcod[3] */ /* csnroffset[6] */ /* (fsnoffset[4] + fgaincod[4]) * c */ frame_bits += 2*4 + 3 + 6 + s->channels * (4 + 3); /* auxdatae, crcrsv */ frame_bits += 2; /* CRC */ frame_bits += 16; /* calculate psd and masking curve before doing bit allocation */ bit_alloc_masking(s, encoded_exp, exp_strategy, psd, mask); /* now the big work begins : do the bit allocation. Modify the snr offset until we can pack everything in the requested frame size */ coarse_snr_offset = s->coarse_snr_offset; while (coarse_snr_offset >= 0 && bit_alloc(s, mask, psd, bap, frame_bits, coarse_snr_offset, 0) < 0) coarse_snr_offset -= SNR_INC1; if (coarse_snr_offset < 0) { av_log(NULL, AV_LOG_ERROR, \"Bit allocation failed. Try increasing the bitrate.\\n\"); return -1; } while (coarse_snr_offset + SNR_INC1 <= 63 && bit_alloc(s, mask, psd, bap1, frame_bits, coarse_snr_offset + SNR_INC1, 0) >= 0) { coarse_snr_offset += SNR_INC1; memcpy(bap, bap1, sizeof(bap1)); } while (coarse_snr_offset + 1 <= 63 && bit_alloc(s, mask, psd, bap1, frame_bits, coarse_snr_offset + 1, 0) >= 0) { coarse_snr_offset++; memcpy(bap, bap1, sizeof(bap1)); } fine_snr_offset = 0; while (fine_snr_offset + SNR_INC1 <= 15 && bit_alloc(s, mask, psd, bap1, frame_bits, coarse_snr_offset, fine_snr_offset + SNR_INC1) >= 0) { fine_snr_offset += SNR_INC1; memcpy(bap, bap1, sizeof(bap1)); } while (fine_snr_offset + 1 <= 15 && bit_alloc(s, mask, psd, bap1, frame_bits, coarse_snr_offset, fine_snr_offset + 1) >= 0) { fine_snr_offset++; memcpy(bap, bap1, sizeof(bap1)); } s->coarse_snr_offset = coarse_snr_offset; for (ch = 0; ch < s->channels; ch++) s->fine_snr_offset[ch] = fine_snr_offset; return 0; }"} {"target": 0, "idx": 16988, "func": "static int h264_frame_start(H264Context *h) { H264Picture *pic; int i, ret; const int pixel_shift = h->pixel_shift; ret = initialize_cur_frame(h); if (ret < 0) return ret; pic = h->cur_pic_ptr; pic->reference = h->droppable ? 0 : h->picture_structure; pic->f->coded_picture_number = h->coded_picture_number++; pic->field_picture = h->picture_structure != PICT_FRAME; pic->frame_num = h->frame_num; /* * Zero key_frame here; IDR markings per slice in frame or fields are ORed * in later. * See decode_nal_units(). 
*/ pic->f->key_frame = 0; pic->mmco_reset = 0; pic->recovered = 0; if (CONFIG_ERROR_RESILIENCE && h->enable_er) ff_er_frame_start(&h->slice_ctx[0].er); for (i = 0; i < 16; i++) { h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3); h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3); } for (i = 0; i < 16; i++) { h->block_offset[16 + i] = h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3); h->block_offset[48 + 16 + i] = h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3); } /* Some macroblocks can be accessed before they're available in case * of lost slices, MBAFF or threading. */ memset(h->slice_table, -1, (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table)); /* We mark the current picture as non-reference after allocating it, so * that if we break out due to an error it can be released automatically * in the next ff_mpv_frame_start(). */ h->cur_pic_ptr->reference = 0; h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX; h->next_output_pic = NULL; assert(h->cur_pic_ptr->long_ref == 0); return 0; }"} {"target": 0, "idx": 17006, "func": "void acpi_setup(void) { PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); AcpiBuildTables tables; AcpiBuildState *build_state; if (!pcms->fw_cfg) { ACPI_BUILD_DPRINTF(\"No fw cfg. Bailing out.\\n\"); return; } if (!pcmc->has_acpi_build) { ACPI_BUILD_DPRINTF(\"ACPI build disabled. Bailing out.\\n\"); return; } if (!acpi_enabled) { ACPI_BUILD_DPRINTF(\"ACPI disabled. Bailing out.\\n\"); return; } build_state = g_malloc0(sizeof *build_state); acpi_set_pci_info(); acpi_build_tables_init(&tables); acpi_build(&tables, MACHINE(pcms)); /* Now expose it all to Guest */ build_state->table_mr = acpi_add_rom_blob(build_state, tables.table_data, ACPI_BUILD_TABLE_FILE, ACPI_BUILD_TABLE_MAX_SIZE); assert(build_state->table_mr != NULL); build_state->linker_mr = acpi_add_rom_blob(build_state, tables.linker, \"etc/table-loader\", 0); fw_cfg_add_file(pcms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data, acpi_data_len(tables.tcpalog)); if (!pcmc->rsdp_in_ram) { /* * Keep for compatibility with old machine types. * Though RSDP is small, its contents isn't immutable, so * we'll update it along with the rest of tables on guest access. */ uint32_t rsdp_size = acpi_data_len(tables.rsdp); build_state->rsdp = g_memdup(tables.rsdp->data, rsdp_size); fw_cfg_add_file_callback(pcms->fw_cfg, ACPI_BUILD_RSDP_FILE, acpi_build_update, build_state, build_state->rsdp, rsdp_size); build_state->rsdp_mr = NULL; } else { build_state->rsdp = NULL; build_state->rsdp_mr = acpi_add_rom_blob(build_state, tables.rsdp, ACPI_BUILD_RSDP_FILE, 0); } qemu_register_reset(acpi_build_reset, build_state); acpi_build_reset(build_state); vmstate_register(NULL, 0, &vmstate_acpi_build, build_state); /* Cleanup tables but don't free the memory: we track it * in build_state. */ acpi_build_tables_cleanup(&tables, false); }"} {"target": 0, "idx": 17020, "func": "DECLARE_WEIGHT(sse2) DECLARE_WEIGHT(ssse3) /** @{ */ /** * Define one qpel function. * LOOPSIZE must be already set to the number of pixels processed per * iteration in the inner loop of the called functions. 
* COFF(x) must be already defined so as to provide the offset into any * array of coeffs used by the called function for the qpel position x. */ #define QPEL_FUNC_DECL(OP, SIZE, PH, PV, OPT) \\ static void OP ## rv40_qpel ##SIZE ##_mc ##PH ##PV ##OPT(uint8_t *dst, \\ uint8_t *src, \\ int stride) \\ { \\ int i; \\ if (PH && PV) { \\ DECLARE_ALIGNED(16, uint8_t, tmp)[SIZE * (SIZE + 5)]; \\ uint8_t *tmpptr = tmp + SIZE * 2; \\ src -= stride * 2; \\ \\ for (i = 0; i < SIZE; i += LOOPSIZE) \\ ff_put_rv40_qpel_h ##OPT(tmp + i, SIZE, src + i, stride, \\ SIZE + 5, HCOFF(PH)); \\ for (i = 0; i < SIZE; i += LOOPSIZE) \\ ff_ ##OP ##rv40_qpel_v ##OPT(dst + i, stride, tmpptr + i, \\ SIZE, SIZE, VCOFF(PV)); \\ } else if (PV) { \\ for (i = 0; i < SIZE; i += LOOPSIZE) \\ ff_ ##OP ##rv40_qpel_v ## OPT(dst + i, stride, src + i, \\ stride, SIZE, VCOFF(PV)); \\ } else { \\ for (i = 0; i < SIZE; i += LOOPSIZE) \\ ff_ ##OP ##rv40_qpel_h ## OPT(dst + i, stride, src + i, \\ stride, SIZE, HCOFF(PH)); \\ } \\ }; /** Declare functions for sizes 8 and 16 and given operations * and qpel position. */ #define QPEL_FUNCS_DECL(OP, PH, PV, OPT) \\ QPEL_FUNC_DECL(OP, 8, PH, PV, OPT) \\ QPEL_FUNC_DECL(OP, 16, PH, PV, OPT) /** Declare all functions for all sizes and qpel positions */ #define QPEL_MC_DECL(OP, OPT) \\ void ff_ ##OP ##rv40_qpel_h ##OPT(uint8_t *dst, ptrdiff_t dstStride, \\ const uint8_t *src, \\ ptrdiff_t srcStride, \\ int len, int m); \\ void ff_ ##OP ##rv40_qpel_v ##OPT(uint8_t *dst, ptrdiff_t dstStride, \\ const uint8_t *src, \\ ptrdiff_t srcStride, \\ int len, int m); \\ QPEL_FUNCS_DECL(OP, 0, 1, OPT) \\ QPEL_FUNCS_DECL(OP, 0, 3, OPT) \\ QPEL_FUNCS_DECL(OP, 1, 0, OPT) \\ QPEL_FUNCS_DECL(OP, 1, 1, OPT) \\ QPEL_FUNCS_DECL(OP, 1, 2, OPT) \\ QPEL_FUNCS_DECL(OP, 1, 3, OPT) \\ QPEL_FUNCS_DECL(OP, 2, 1, OPT) \\ QPEL_FUNCS_DECL(OP, 2, 2, OPT) \\ QPEL_FUNCS_DECL(OP, 2, 3, OPT) \\ QPEL_FUNCS_DECL(OP, 3, 0, OPT) \\ QPEL_FUNCS_DECL(OP, 3, 1, OPT) \\ QPEL_FUNCS_DECL(OP, 3, 2, OPT) /** @} */ #define LOOPSIZE 8 #define HCOFF(x) (32 * (x - 1)) #define VCOFF(x) (32 * (x - 1)) QPEL_MC_DECL(put_, _ssse3) QPEL_MC_DECL(avg_, _ssse3) #undef LOOPSIZE #undef HCOFF #undef VCOFF #define LOOPSIZE 8 #define HCOFF(x) (64 * (x - 1)) #define VCOFF(x) (64 * (x - 1)) QPEL_MC_DECL(put_, _sse2) QPEL_MC_DECL(avg_, _sse2) #if ARCH_X86_32 #undef LOOPSIZE #undef HCOFF #undef VCOFF #define LOOPSIZE 4 #define HCOFF(x) (64 * (x - 1)) #define VCOFF(x) (64 * (x - 1)) QPEL_MC_DECL(put_, _mmx) #define ff_put_rv40_qpel_h_mmx2 ff_put_rv40_qpel_h_mmx #define ff_put_rv40_qpel_v_mmx2 ff_put_rv40_qpel_v_mmx QPEL_MC_DECL(avg_, _mmx2) #define ff_put_rv40_qpel_h_3dnow ff_put_rv40_qpel_h_mmx #define ff_put_rv40_qpel_v_3dnow ff_put_rv40_qpel_v_mmx QPEL_MC_DECL(avg_, _3dnow) #endif /** @{ */ /** Set one function */ #define QPEL_FUNC_SET(OP, SIZE, PH, PV, OPT) \\ c-> OP ## pixels_tab[2 - SIZE / 8][4 * PV + PH] = OP ## rv40_qpel ##SIZE ## _mc ##PH ##PV ##OPT; /** Set functions put and avg for sizes 8 and 16 and a given qpel position */ #define QPEL_FUNCS_SET(OP, PH, PV, OPT) \\ QPEL_FUNC_SET(OP, 8, PH, PV, OPT) \\ QPEL_FUNC_SET(OP, 16, PH, PV, OPT) /** Set all functions for all sizes and qpel positions */ #define QPEL_MC_SET(OP, OPT) \\ QPEL_FUNCS_SET (OP, 0, 1, OPT) \\ QPEL_FUNCS_SET (OP, 0, 3, OPT) \\ QPEL_FUNCS_SET (OP, 1, 0, OPT) \\ QPEL_FUNCS_SET (OP, 1, 1, OPT) \\ QPEL_FUNCS_SET (OP, 1, 2, OPT) \\ QPEL_FUNCS_SET (OP, 1, 3, OPT) \\ QPEL_FUNCS_SET (OP, 2, 1, OPT) \\ QPEL_FUNCS_SET (OP, 2, 2, OPT) \\ QPEL_FUNCS_SET (OP, 2, 3, OPT) \\ QPEL_FUNCS_SET (OP, 3, 0, OPT) 
\\ QPEL_FUNCS_SET (OP, 3, 1, OPT) \\ QPEL_FUNCS_SET (OP, 3, 2, OPT) /** @} */ #endif /* HAVE_YASM */ void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp) { #if HAVE_YASM int mm_flags = av_get_cpu_flags(); if (mm_flags & AV_CPU_FLAG_MMX) { c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_mmx; c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_mmx; #if HAVE_INLINE_ASM c->put_pixels_tab[0][15] = ff_put_rv40_qpel16_mc33_mmx; c->put_pixels_tab[1][15] = ff_put_rv40_qpel8_mc33_mmx; c->avg_pixels_tab[0][15] = ff_avg_rv40_qpel16_mc33_mmx; c->avg_pixels_tab[1][15] = ff_avg_rv40_qpel8_mc33_mmx; #endif /* HAVE_INLINE_ASM */ #if ARCH_X86_32 QPEL_MC_SET(put_, _mmx) #endif } if (mm_flags & AV_CPU_FLAG_MMXEXT) { c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_mmx2; c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_mmx2; c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_mmx2; c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_mmx2; c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_mmx2; c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_mmx2; #if ARCH_X86_32 QPEL_MC_SET(avg_, _mmx2) #endif } else if (mm_flags & AV_CPU_FLAG_3DNOW) { c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_3dnow; c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow; #if ARCH_X86_32 QPEL_MC_SET(avg_, _3dnow) #endif } if (mm_flags & AV_CPU_FLAG_SSE2) { c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_sse2; c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_sse2; c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_sse2; c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_sse2; QPEL_MC_SET(put_, _sse2) QPEL_MC_SET(avg_, _sse2) } if (mm_flags & AV_CPU_FLAG_SSSE3) { c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_ssse3; c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_ssse3; c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_ssse3; c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_ssse3; QPEL_MC_SET(put_, _ssse3) QPEL_MC_SET(avg_, _ssse3) } #endif /* HAVE_YASM */ }"} {"target": 0, "idx": 17024, "func": "static void axidma_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { XilinxAXIDMA *d = opaque; struct Stream *s; int sid; sid = streamid_from_addr(addr); s = &d->streams[sid]; addr = addr % 0x30; addr >>= 2; switch (addr) { case R_DMACR: /* Tailptr mode is always on. */ value |= DMACR_TAILPTR_MODE; /* Remember our previous reset state. */ value |= (s->regs[addr] & DMACR_RESET); s->regs[addr] = value; if (value & DMACR_RESET) { stream_reset(s); } if ((value & 1) && !stream_resetting(s)) { /* Start processing. */ s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE); } stream_reload_complete_cnt(s); break; case R_DMASR: /* Mask away write to clear irq lines. */ value &= ~(value & DMASR_IRQ_MASK); s->regs[addr] = value; break; case R_TAILDESC: s->regs[addr] = value; s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle. 
*/ if (!sid) { stream_process_mem2s(s, d->tx_dev); } break; default: D(qemu_log(\"%s: ch=%d addr=\" TARGET_FMT_plx \" v=%x\\n\", __func__, sid, addr * 4, (unsigned)value)); s->regs[addr] = value; break; } if (sid == 1 && d->notify) { d->notify(d->notify_opaque); d->notify = NULL; } stream_update_irq(s); }"} {"target": 0, "idx": 17028, "func": "static uint32_t bmdma_readb(void *opaque, uint32_t addr) { BMDMAState *bm = opaque; PCIIDEState *pci_dev = pci_from_bm(bm); uint32_t val; switch(addr & 3) { case 0: val = bm->cmd; break; case 1: val = pci_dev->dev.config[MRDMODE]; break; case 2: val = bm->status; break; case 3: if (bm->unit == 0) { val = pci_dev->dev.config[UDIDETCR0]; } else { val = pci_dev->dev.config[UDIDETCR1]; } break; default: val = 0xff; break; } #ifdef DEBUG_IDE printf(\"bmdma: readb 0x%02x : 0x%02x\\n\", addr, val); #endif return val; }"} {"target": 0, "idx": 17033, "func": "query_params_append (struct QueryParams *ps, const char *name, const char *value) { if (ps->n >= ps->alloc) { ps->p = g_renew(QueryParam, ps->p, ps->alloc * 2); ps->alloc *= 2; } ps->p[ps->n].name = g_strdup(name); ps->p[ps->n].value = value ? g_strdup(value) : NULL; ps->p[ps->n].ignore = 0; ps->n++; return 0; }"} {"target": 0, "idx": 17036, "func": "static void nbd_parse_filename(const char *filename, QDict *options, Error **errp) { char *file; char *export_name; const char *host_spec; const char *unixpath; if (nbd_has_filename_options_conflict(options, errp)) { return; } if (strstr(filename, \"://\")) { int ret = nbd_parse_uri(filename, options); if (ret < 0) { error_setg(errp, \"No valid URL specified\"); } return; } file = g_strdup(filename); export_name = strstr(file, EN_OPTSTR); if (export_name) { if (export_name[strlen(EN_OPTSTR)] == 0) { goto out; } export_name[0] = 0; /* truncate 'file' */ export_name += strlen(EN_OPTSTR); qdict_put(options, \"export\", qstring_from_str(export_name)); } /* extract the host_spec - fail if it's not nbd:... */ if (!strstart(file, \"nbd:\", &host_spec)) { error_setg(errp, \"File name string for NBD must start with 'nbd:'\"); goto out; } if (!*host_spec) { goto out; } /* are we a UNIX or TCP socket? 
*/ if (strstart(host_spec, \"unix:\", &unixpath)) { qdict_put(options, \"server.type\", qstring_from_str(\"unix\")); qdict_put(options, \"server.data.path\", qstring_from_str(unixpath)); } else { InetSocketAddress *addr = NULL; addr = inet_parse(host_spec, errp); if (!addr) { goto out; } qdict_put(options, \"server.type\", qstring_from_str(\"inet\")); qdict_put(options, \"server.data.host\", qstring_from_str(addr->host)); qdict_put(options, \"server.data.port\", qstring_from_str(addr->port)); qapi_free_InetSocketAddress(addr); } out: g_free(file); }"} {"target": 1, "idx": 17056, "func": "static inline int range_get_symbol(APEContext * ctx, const uint32_t counts[], const uint16_t counts_diff[]) { int symbol, cf; cf = range_decode_culshift(ctx, 16); /* figure out the symbol inefficiently; a binary search would be much better */ for (symbol = 0; counts[symbol + 1] <= cf; symbol++); range_decode_update(ctx, counts_diff[symbol], counts[symbol]);"} {"target": 0, "idx": 17088, "func": "static TCGv new_tmp(void) { TCGv tmp; if (num_temps == MAX_TEMPS) abort(); if (GET_TCGV(temps[num_temps])) return temps[num_temps++]; tmp = tcg_temp_new(TCG_TYPE_I32); temps[num_temps++] = tmp; return tmp; }"} {"target": 0, "idx": 17091, "func": "void cpu_mips_store_status(CPUMIPSState *env, target_ulong val) { uint32_t mask = env->CP0_Status_rw_bitmask; target_ulong old = env->CP0_Status; if (env->insn_flags & ISA_MIPS32R6) { bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3; #if defined(TARGET_MIPS64) uint32_t ksux = (1 << CP0St_KX) & val; ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */ ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */ val = (val & ~(7 << CP0St_UX)) | ksux; #endif if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) { mask &= ~(3 << CP0St_KSU); } mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val); } env->CP0_Status = (old & ~mask) | (val & mask); #if defined(TARGET_MIPS64) if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) { /* Access to at least one of the 64-bit segments has been disabled */ cpu_mips_tlb_flush(env); } #endif if (env->CP0_Config3 & (1 << CP0C3_MT)) { sync_c0_status(env, env, env->current_tc); } else { compute_hflags(env); } }"} {"target": 0, "idx": 17140, "func": "static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options, char **export, Error **errp) { SocketAddress *saddr; if (qdict_haskey(options, \"path\") == qdict_haskey(options, \"host\")) { if (qdict_haskey(options, \"path\")) { error_setg(errp, \"path and host may not be used at the same time.\"); } else { error_setg(errp, \"one of path and host must be specified.\"); } return NULL; } saddr = g_new0(SocketAddress, 1); if (qdict_haskey(options, \"path\")) { UnixSocketAddress *q_unix; saddr->type = SOCKET_ADDRESS_KIND_UNIX; q_unix = saddr->u.q_unix = g_new0(UnixSocketAddress, 1); q_unix->path = g_strdup(qdict_get_str(options, \"path\")); qdict_del(options, \"path\"); } else { InetSocketAddress *inet; saddr->type = SOCKET_ADDRESS_KIND_INET; inet = saddr->u.inet = g_new0(InetSocketAddress, 1); inet->host = g_strdup(qdict_get_str(options, \"host\")); if (!qdict_get_try_str(options, \"port\")) { inet->port = g_strdup_printf(\"%d\", NBD_DEFAULT_PORT); } else { inet->port = g_strdup(qdict_get_str(options, \"port\")); } qdict_del(options, \"host\"); qdict_del(options, \"port\"); } s->client.is_unix = saddr->type == SOCKET_ADDRESS_KIND_UNIX; *export = g_strdup(qdict_get_try_str(options, \"export\")); if (*export) { qdict_del(options, \"export\"); } return saddr; }"} {"target": 0, 
"idx": 17144, "func": "static void gen_jmpcc(DisasContext *s, int cond, int l1) { TCGv tmp; /* TODO: Optimize compare/branch pairs rather than always flushing flag state to CC_OP_FLAGS. */ gen_flush_flags(s); switch (cond) { case 0: /* T */ tcg_gen_br(l1); break; case 1: /* F */ break; case 2: /* HI (!C && !Z) */ tmp = tcg_temp_new(); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); break; case 3: /* LS (C || Z) */ tmp = tcg_temp_new(); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C | CCF_Z); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); break; case 4: /* CC (!C) */ tmp = tcg_temp_new(); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); break; case 5: /* CS (C) */ tmp = tcg_temp_new(); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_C); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); break; case 6: /* NE (!Z) */ tmp = tcg_temp_new(); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); break; case 7: /* EQ (Z) */ tmp = tcg_temp_new(); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_Z); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); break; case 8: /* VC (!V) */ tmp = tcg_temp_new(); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); break; case 9: /* VS (V) */ tmp = tcg_temp_new(); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_V); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); break; case 10: /* PL (!N) */ tmp = tcg_temp_new(); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); break; case 11: /* MI (N) */ tmp = tcg_temp_new(); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); break; case 12: /* GE (!(N ^ V)) */ tmp = tcg_temp_new(); assert(CCF_V == (CCF_N >> 2)); tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2); tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST); tcg_gen_andi_i32(tmp, tmp, CCF_V); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); break; case 13: /* LT (N ^ V) */ tmp = tcg_temp_new(); assert(CCF_V == (CCF_N >> 2)); tcg_gen_shri_i32(tmp, QREG_CC_DEST, 2); tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST); tcg_gen_andi_i32(tmp, tmp, CCF_V); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); break; case 14: /* GT (!(Z || (N ^ V))) */ tmp = tcg_temp_new(); assert(CCF_V == (CCF_N >> 2)); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N); tcg_gen_shri_i32(tmp, tmp, 2); tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST); tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, l1); break; case 15: /* LE (Z || (N ^ V)) */ tmp = tcg_temp_new(); assert(CCF_V == (CCF_N >> 2)); tcg_gen_andi_i32(tmp, QREG_CC_DEST, CCF_N); tcg_gen_shri_i32(tmp, tmp, 2); tcg_gen_xor_i32(tmp, tmp, QREG_CC_DEST); tcg_gen_andi_i32(tmp, tmp, CCF_V | CCF_Z); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, l1); break; default: /* Should ever happen. 
*/ abort(); } }"} {"target": 0, "idx": 17157, "func": "static void add_pollfd(AioHandler *node) { if (npfd == nalloc) { if (nalloc == 0) { pollfds_cleanup_notifier.notify = pollfds_cleanup; qemu_thread_atexit_add(&pollfds_cleanup_notifier); nalloc = 8; } else { g_assert(nalloc <= INT_MAX); nalloc *= 2; } pollfds = g_renew(GPollFD, pollfds, nalloc); nodes = g_renew(AioHandler *, nodes, nalloc); } nodes[npfd] = node; pollfds[npfd] = (GPollFD) { .fd = node->pfd.fd, .events = node->pfd.events, }; npfd++; }"} {"target": 0, "idx": 17183, "func": "static void check_loopfilter() { LOCAL_ALIGNED_32(uint8_t, base0, [32 + 16 * 16 * 2]); LOCAL_ALIGNED_32(uint8_t, base1, [32 + 16 * 16 * 2]); VP9DSPContext dsp; int dir, wd, wd2, bit_depth; static const char *const dir_name[2] = { \"h\", \"v\" }; int E[2] = { 20, 28 }, I[2] = { 10, 16 }, H[2] = { 7, 11 }, F[2] = { 1, 1 }; declare_func(void, uint8_t *dst, ptrdiff_t stride, int E, int I, int H); for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) { ff_vp9dsp_init(&dsp, bit_depth, 0); for (dir = 0; dir < 2; dir++) { uint8_t *buf0, *buf1; int midoff = (dir ? 8 * 8 : 8) * SIZEOF_PIXEL; int midoff_aligned = (dir ? 8 * 8 : 16) * SIZEOF_PIXEL; buf0 = base0 + midoff_aligned; buf1 = base1 + midoff_aligned; for (wd = 0; wd < 3; wd++) { // 4/8/16wd_8px if (check_func(dsp.loop_filter_8[wd][dir], \"vp9_loop_filter_%s_%d_8_%dbpp\", dir_name[dir], 4 << wd, bit_depth)) { randomize_buffers(0, 0, 8); memcpy(buf1 - midoff, buf0 - midoff, 16 * 8 * SIZEOF_PIXEL); call_ref(buf0, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]); call_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]); if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 8 * SIZEOF_PIXEL)) fail(); bench_new(buf1, 16 * SIZEOF_PIXEL >> dir, E[0], I[0], H[0]); } } midoff = (dir ? 16 * 8 : 8) * SIZEOF_PIXEL; midoff_aligned = (dir ? 
16 * 8 : 16) * SIZEOF_PIXEL; // 16wd_16px loopfilter if (check_func(dsp.loop_filter_16[dir], \"vp9_loop_filter_%s_16_16_%dbpp\", dir_name[dir], bit_depth)) { randomize_buffers(0, 0, 16); randomize_buffers(0, 8, 16); memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL); call_ref(buf0, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]); call_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]); if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL)) fail(); bench_new(buf1, 16 * SIZEOF_PIXEL, E[0], I[0], H[0]); } for (wd = 0; wd < 2; wd++) { for (wd2 = 0; wd2 < 2; wd2++) { // mix2 loopfilter if (check_func(dsp.loop_filter_mix2[wd][wd2][dir], \"vp9_loop_filter_mix2_%s_%d%d_16_%dbpp\", dir_name[dir], 4 << wd, 4 << wd2, bit_depth)) { randomize_buffers(0, 0, 16); randomize_buffers(1, 8, 16); memcpy(buf1 - midoff, buf0 - midoff, 16 * 16 * SIZEOF_PIXEL); #define M(a) ((a[1] << 8) | a[0]) call_ref(buf0, 16 * SIZEOF_PIXEL, M(E), M(I), M(H)); call_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H)); if (memcmp(buf0 - midoff, buf1 - midoff, 16 * 16 * SIZEOF_PIXEL)) fail(); bench_new(buf1, 16 * SIZEOF_PIXEL, M(E), M(I), M(H)); #undef M } } } } } report(\"loopfilter\"); }"} {"target": 1, "idx": 17191, "func": "static int url_alloc_for_protocol (URLContext **puc, struct URLProtocol *up, const char *filename, int flags, const AVIOInterruptCB *int_cb) { URLContext *uc; int err; #if CONFIG_NETWORK if (up->flags & URL_PROTOCOL_FLAG_NETWORK && !ff_network_init()) return AVERROR(EIO); #endif if ((flags & AVIO_FLAG_READ) && !up->url_read) { av_log(NULL, AV_LOG_ERROR, \"Impossible to open the '%s' protocol for reading\\n\", up->name); return AVERROR(EIO); if ((flags & AVIO_FLAG_WRITE) && !up->url_write) { av_log(NULL, AV_LOG_ERROR, \"Impossible to open the '%s' protocol for writing\\n\", up->name); return AVERROR(EIO); uc = av_mallocz(sizeof(URLContext) + strlen(filename) + 1); if (!uc) { uc->av_class = &ffurl_context_class; uc->filename = (char *) &uc[1]; strcpy(uc->filename, filename); uc->prot = up; uc->flags = flags; uc->is_streamed = 0; /* default = not streamed */ uc->max_packet_size = 0; /* default: stream file */ if (up->priv_data_size) { uc->priv_data = av_mallocz(up->priv_data_size); if (up->priv_data_class) { int proto_len= strlen(up->name); char *start = strchr(uc->filename, ','); *(const AVClass**)uc->priv_data = up->priv_data_class; av_opt_set_defaults(uc->priv_data); if(!strncmp(up->name, uc->filename, proto_len) && uc->filename + proto_len == start){ int ret= 0; char *p= start; char sep= *++p; char *key, *val; p++; while(ret >= 0 && (key= strchr(p, sep)) && p<key && (val = strchr(key+1, sep))){ *val= *key= 0; ret= av_opt_set(uc->priv_data, p, key+1, 0); if (ret == AVERROR_OPTION_NOT_FOUND) av_log(uc, AV_LOG_ERROR, \"Key '%s' not found.\\n\", p); *val= *key= sep; p= val+1; if(ret<0 || p!=key){ av_log(uc, AV_LOG_ERROR, \"Error parsing options string %s\\n\", start); av_freep(&uc->priv_data); av_freep(&uc); err = AVERROR(EINVAL); memmove(start, key+1, strlen(key)); if (int_cb) uc->interrupt_callback = *int_cb; *puc = uc; return 0; fail: *puc = NULL; if (uc) av_freep(&uc->priv_data); av_freep(&uc); #if CONFIG_NETWORK if (up->flags & URL_PROTOCOL_FLAG_NETWORK) ff_network_close(); #endif return err;"} {"target": 1, "idx": 17193, "func": "static int flashsv_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; FlashSVContext *s = avctx->priv_data; int h_blocks, v_blocks, h_part, v_part, i, j; GetBitContext gb; /* no supplementary picture */ if (buf_size == 0) return 0; init_get_bits(&gb, buf, 
buf_size * 8); /* start to parse the bitstream */ s->block_width = 16* (get_bits(&gb, 4)+1); s->image_width = get_bits(&gb,12); s->block_height= 16* (get_bits(&gb, 4)+1); s->image_height= get_bits(&gb,12); /* calculate amount of blocks and the size of the border blocks */ h_blocks = s->image_width / s->block_width; h_part = s->image_width % s->block_width; v_blocks = s->image_height / s->block_height; v_part = s->image_height % s->block_height; /* the block size could change between frames, make sure the buffer * is large enough, if not, get a larger one */ if(s->block_size < s->block_width*s->block_height) { if (s->tmpblock != NULL) av_free(s->tmpblock); if ((s->tmpblock = av_malloc(3*s->block_width*s->block_height)) == NULL) { av_log(avctx, AV_LOG_ERROR, \"Can't allocate decompression buffer.\\n\"); } } s->block_size = s->block_width*s->block_height; /* init the image size once */ if((avctx->width==0) && (avctx->height==0)){ avctx->width = s->image_width; avctx->height = s->image_height; } /* check for changes of image width and image height */ if ((avctx->width != s->image_width) || (avctx->height != s->image_height)) { av_log(avctx, AV_LOG_ERROR, \"Frame width or height differs from first frames!\\n\"); av_log(avctx, AV_LOG_ERROR, \"fh = %d, fv %d vs ch = %d, cv = %d\\n\",avctx->height, avctx->width,s->image_height,s->image_width); } av_log(avctx, AV_LOG_DEBUG, \"image: %dx%d block: %dx%d num: %dx%d part: %dx%d\\n\", s->image_width, s->image_height, s->block_width, s->block_height, h_blocks, v_blocks, h_part, v_part); s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if(avctx->reget_buffer(avctx, &s->frame) < 0){ av_log(avctx, AV_LOG_ERROR, \"reget_buffer() failed\\n\"); } /* loop over all block columns */ for (j = 0; j < v_blocks + (v_part?1:0); j++) { int hp = j*s->block_height; // horiz position in frame int hs = (j<v_blocks)?s->block_height:v_part; // size of block /* loop over all block rows */ for (i = 0; i < h_blocks + (h_part?1:0); i++) { int wp = i*s->block_width; // vert position in frame int ws = (i<h_blocks)?s->block_width:h_part; // size of block /* get the size of the compressed zlib chunk */ int size = get_bits(&gb, 16); if (8 * size > get_bits_left(&gb)) { avctx->release_buffer(avctx, &s->frame); s->frame.data[0] = NULL; } if (size == 0) { /* no change, don't do anything */ } else { /* decompress block */ int ret = inflateReset(&(s->zstream)); if (ret != Z_OK) { av_log(avctx, AV_LOG_ERROR, \"error in decompression (reset) of block %dx%d\\n\", i, j); /* return -1; */ } s->zstream.next_in = buf+(get_bits_count(&gb)/8); s->zstream.avail_in = size; s->zstream.next_out = s->tmpblock; s->zstream.avail_out = s->block_size*3; ret = inflate(&(s->zstream), Z_FINISH); if (ret == Z_DATA_ERROR) { av_log(avctx, AV_LOG_ERROR, \"Zlib resync occurred\\n\"); inflateSync(&(s->zstream)); ret = inflate(&(s->zstream), Z_FINISH); } if ((ret != Z_OK) && (ret != Z_STREAM_END)) { av_log(avctx, AV_LOG_ERROR, \"error in decompression of block %dx%d: %d\\n\", i, j, ret); /* return -1; */ } copy_region(s->tmpblock, s->frame.data[0], s->image_height-(hp+hs+1), wp, hs, ws, s->frame.linesize[0]); skip_bits_long(&gb, 8*size); /* skip the consumed bits */ } } } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; if ((get_bits_count(&gb)/8) != buf_size) av_log(avctx, AV_LOG_ERROR, \"buffer not fully consumed (%d != %d)\\n\", buf_size, (get_bits_count(&gb)/8)); /* report that the buffer was completely consumed */ return buf_size; }"} {"target": 1, "idx": 
17195, "func": "static void xenfb_handle_events(struct XenFB *xenfb) { uint32_t prod, cons; struct xenfb_page *page = xenfb->c.page; prod = page->out_prod; if (prod == page->out_cons) return; xen_rmb(); /* ensure we see ring contents up to prod */ for (cons = page->out_cons; cons != prod; cons++) { union xenfb_out_event *event = &XENFB_OUT_RING_REF(page, cons); int x, y, w, h; switch (event->type) { case XENFB_TYPE_UPDATE: if (xenfb->up_count == UP_QUEUE) xenfb->up_fullscreen = 1; if (xenfb->up_fullscreen) break; x = MAX(event->update.x, 0); y = MAX(event->update.y, 0); w = MIN(event->update.width, xenfb->width - x); h = MIN(event->update.height, xenfb->height - y); if (w < 0 || h < 0) { xen_be_printf(&xenfb->c.xendev, 1, \"bogus update ignored\\n\"); break; } if (x != event->update.x || y != event->update.y || w != event->update.width || h != event->update.height) { xen_be_printf(&xenfb->c.xendev, 1, \"bogus update clipped\\n\"); } if (w == xenfb->width && h > xenfb->height / 2) { /* scroll detector: updated more than 50% of the lines, * don't bother keeping track of the rectangles then */ xenfb->up_fullscreen = 1; } else { xenfb->up_rects[xenfb->up_count].x = x; xenfb->up_rects[xenfb->up_count].y = y; xenfb->up_rects[xenfb->up_count].w = w; xenfb->up_rects[xenfb->up_count].h = h; xenfb->up_count++; } break; #ifdef XENFB_TYPE_RESIZE case XENFB_TYPE_RESIZE: if (xenfb_configure_fb(xenfb, xenfb->fb_len, event->resize.width, event->resize.height, event->resize.depth, xenfb->fb_len, event->resize.offset, event->resize.stride) < 0) break; xenfb_invalidate(xenfb); break; #endif } } xen_mb(); /* ensure we're done with ring contents */ page->out_cons = cons; }"} {"target": 1, "idx": 17224, "func": "static int find_video_stream_info(AVFormatContext *fmt_ctx, int decode) { int ret = 0; int i, done = 0; AVPacket pkt; av_init_packet(&pkt); while (!done) { AVCodecContext *codec_ctx = NULL; AVStream *st; if ((ret = av_read_frame(fmt_ctx, &pkt)) < 0) { av_log(fmt_ctx, AV_LOG_ERROR, \"Failed to read frame\\n\"); goto end; st = fmt_ctx->streams[pkt.stream_index]; codec_ctx = st->codec; /* Writing to AVStream.codec_info_nb_frames must not be done by * user applications. It is done here for testing purposing as * find_video_stream_info tries to mimic avformat_find_stream_info * which writes to this field. 
* */ if (codec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || st->codec_info_nb_frames++ > 0) { av_packet_unref(&pkt); continue; ret = try_decode_video_frame(codec_ctx, &pkt, decode); if (ret < 0) { av_log(fmt_ctx, AV_LOG_ERROR, \"Failed to decode video frame\\n\"); goto end; av_packet_unref(&pkt); /* check if all video streams have demuxed a packet */ done = 1; st = fmt_ctx->streams[i]; codec_ctx = st->codec; if (codec_ctx->codec_type != AVMEDIA_TYPE_VIDEO) continue; done &= st->codec_info_nb_frames > 0; end: av_packet_unref(&pkt); return ret < 0;"} {"target": 1, "idx": 17227, "func": "static int vdpau_mpeg4_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) { Mpeg4DecContext *ctx = avctx->priv_data; MpegEncContext * const s = &ctx->m; Picture *pic = s->current_picture_ptr; struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private; VdpPictureInfoMPEG4Part2 *info = &pic_ctx->info.mpeg4; VdpVideoSurface ref; int i; /* fill VdpPictureInfoMPEG4Part2 struct */ info->forward_reference = VDP_INVALID_HANDLE; info->backward_reference = VDP_INVALID_HANDLE; info->vop_coding_type = 0; switch (s->pict_type) { case AV_PICTURE_TYPE_B: ref = ff_vdpau_get_surface_id(&s->next_picture.f); assert(ref != VDP_INVALID_HANDLE); info->backward_reference = ref; info->vop_coding_type = 2; /* fall-through */ case AV_PICTURE_TYPE_P: ref = ff_vdpau_get_surface_id(&s->last_picture.f); assert(ref != VDP_INVALID_HANDLE); info->forward_reference = ref; } info->trd[0] = s->pp_time; info->trb[0] = s->pb_time; info->trd[1] = s->pp_field_time >> 1; info->trb[1] = s->pb_field_time >> 1; info->vop_time_increment_resolution = s->avctx->time_base.den; info->vop_fcode_forward = s->f_code; info->vop_fcode_backward = s->b_code; info->resync_marker_disable = !ctx->resync_marker; info->interlaced = !s->progressive_sequence; info->quant_type = s->mpeg_quant; info->quarter_sample = s->quarter_sample; info->short_video_header = avctx->codec->id == AV_CODEC_ID_H263; info->rounding_control = s->no_rounding; info->alternate_vertical_scan_flag = s->alternate_scan; info->top_field_first = s->top_field_first; for (i = 0; i < 64; ++i) { info->intra_quantizer_matrix[i] = s->intra_matrix[i]; info->non_intra_quantizer_matrix[i] = s->inter_matrix[i]; } ff_vdpau_common_start_frame(pic_ctx, buffer, size); return ff_vdpau_add_buffer(pic_ctx, buffer, size); }"} {"target": 0, "idx": 17245, "func": "static void calculate_geometry(int64_t total_sectors, uint16_t* cyls, uint8_t* heads, uint8_t* secs_per_cyl) { uint32_t cyls_times_heads; if (total_sectors > 65535 * 16 * 255) total_sectors = 65535 * 16 * 255; if (total_sectors > 65535 * 16 * 63) { *secs_per_cyl = 255; *heads = 16; cyls_times_heads = total_sectors / *secs_per_cyl; } else { *secs_per_cyl = 17; cyls_times_heads = total_sectors / *secs_per_cyl; *heads = (cyls_times_heads + 1023) / 1024; if (*heads < 4) *heads = 4; if (cyls_times_heads >= (*heads * 1024) || *heads > 16) { *secs_per_cyl = 31; *heads = 16; cyls_times_heads = total_sectors / *secs_per_cyl; } if (cyls_times_heads >= (*heads * 1024)) { *secs_per_cyl = 63; *heads = 16; cyls_times_heads = total_sectors / *secs_per_cyl; } } // Note: Rounding up deviates from the Virtual PC behaviour // However, we need this to avoid truncating images in qemu-img convert *cyls = (cyls_times_heads + *heads - 1) / *heads; }"} {"target": 0, "idx": 17248, "func": "static void h264_v_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) { h264_loop_filter_chroma_c(pix, stride, 1, alpha, beta, tc0); }"} 
{"target": 0, "idx": 17249, "func": "static int within_hwc_y_range(SM501State *state, int y, int crt) { int hwc_y = get_hwc_y(state, crt); return (hwc_y <= y && y < hwc_y + SM501_HWC_HEIGHT); }"} {"target": 0, "idx": 17251, "func": "static inline void gen_evmwumi(DisasContext *ctx) { TCGv_i64 t0, t1; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_APU); return; } t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); /* t0 := rA; t1 := rB */ #if defined(TARGET_PPC64) tcg_gen_ext32u_tl(t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32u_tl(t1, cpu_gpr[rB(ctx->opcode)]); #else tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]); tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]); #endif tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */ gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */ tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); }"} {"target": 0, "idx": 17259, "func": "static int nvdec_vp9_end_frame(AVCodecContext *avctx) { NVDECContext *ctx = avctx->internal->hwaccel_priv_data; int ret = ff_nvdec_end_frame(avctx); ctx->bitstream = NULL; return ret; }"} {"target": 0, "idx": 17289, "func": "SwsVector *sws_allocVec(int length) { SwsVector *vec = av_malloc(sizeof(SwsVector)); if (!vec) return NULL; vec->length = length; vec->coeff = av_malloc(sizeof(double) * length); if (!vec->coeff) av_freep(&vec); return vec; }"} {"target": 0, "idx": 17299, "func": "static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp, uint64_t off, uint32_t max_count) { ssize_t err; size_t offset = 7; int read_count; int64_t xattr_len; V9fsVirtioState *v = container_of(s, V9fsVirtioState, state); VirtQueueElement *elem = &v->elems[pdu->idx]; xattr_len = fidp->fs.xattr.len; read_count = xattr_len - off; if (read_count > max_count) { read_count = max_count; } else if (read_count < 0) { /* * read beyond XATTR value */ read_count = 0; } err = pdu_marshal(pdu, offset, \"d\", read_count); if (err < 0) { return err; } offset += err; err = v9fs_pack(elem->in_sg, elem->in_num, offset, ((char *)fidp->fs.xattr.value) + off, read_count); if (err < 0) { return err; } offset += err; return offset; }"} {"target": 0, "idx": 17305, "func": "static void virtio_blk_dma_restart_cb(void *opaque, int running, int reason) { VirtIOBlock *s = opaque; VirtIOBlockReq *req = s->rq; if (!running) return; s->rq = NULL; while (req) { virtio_blk_handle_write(req); req = req->next; } }"} {"target": 0, "idx": 17323, "func": "static void pic_ioport_write(void *opaque, target_phys_addr_t addr64, uint64_t val64, unsigned size) { PICCommonState *s = opaque; uint32_t addr = addr64; uint32_t val = val64; int priority, cmd, irq; DPRINTF(\"write: addr=0x%02x val=0x%02x\\n\", addr, val); if (addr == 0) { if (val & 0x10) { pic_init_reset(s); s->init_state = 1; s->init4 = val & 1; s->single_mode = val & 2; if (val & 0x08) { hw_error(\"level sensitive irq not supported\"); } } else if (val & 0x08) { if (val & 0x04) { s->poll = 1; } if (val & 0x02) { s->read_reg_select = val & 1; } if (val & 0x40) { s->special_mask = (val >> 5) & 1; } } else { cmd = val >> 5; switch (cmd) { case 0: case 4: s->rotate_on_auto_eoi = cmd >> 2; break; case 1: /* end of interrupt */ case 5: priority = get_priority(s, s->isr); if (priority != 8) { irq = (priority + s->priority_add) & 7; s->isr &= ~(1 << irq); if (cmd == 5) { s->priority_add = (irq + 1) & 7; } pic_update_irq(s); } break; case 3: irq = val & 7; s->isr &= ~(1 << irq); pic_update_irq(s); break; case 6: s->priority_add = (val + 1) & 7; pic_update_irq(s); break; case 7: irq = val & 7; s->isr &= ~(1 << 
irq); s->priority_add = (irq + 1) & 7; pic_update_irq(s); break; default: /* no operation */ break; } } } else { switch (s->init_state) { case 0: /* normal mode */ s->imr = val; pic_update_irq(s); break; case 1: s->irq_base = val & 0xf8; s->init_state = s->single_mode ? (s->init4 ? 3 : 0) : 2; break; case 2: if (s->init4) { s->init_state = 3; } else { s->init_state = 0; } break; case 3: s->special_fully_nested_mode = (val >> 4) & 1; s->auto_eoi = (val >> 1) & 1; s->init_state = 0; break; } } }"} {"target": 0, "idx": 17336, "func": "void ff_af_queue_log_state(AudioFrameQueue *afq) { AudioFrame *f; av_log(afq->avctx, AV_LOG_DEBUG, \"remaining delay = %d\\n\", afq->remaining_delay); av_log(afq->avctx, AV_LOG_DEBUG, \"remaining samples = %d\\n\", afq->remaining_samples); av_log(afq->avctx, AV_LOG_DEBUG, \"frames:\\n\"); f = afq->frame_queue; while (f) { av_log(afq->avctx, AV_LOG_DEBUG, \" [ pts=%9\"PRId64\" duration=%d ]\\n\", f->pts, f->duration); f = f->next; } }"} {"target": 1, "idx": 17338, "func": "static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd, int cmd, abi_long arg) { void *argptr; struct dm_ioctl *host_dm; abi_long guest_data; uint32_t guest_data_size; int target_size; const argtype *arg_type = ie->arg_type; abi_long ret; void *big_buf = NULL; char *host_data; arg_type++; target_size = thunk_type_size(arg_type, 0); argptr = lock_user(VERIFY_READ, arg, target_size, 1); thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST); unlock_user(argptr, arg, 0); /* buf_temp is too small, so fetch things into a bigger buffer */ big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2); memcpy(big_buf, buf_temp, target_size); buf_temp = big_buf; host_dm = big_buf; guest_data = arg + host_dm->data_start; if ((guest_data - arg) < 0) { ret = -EINVAL; guest_data_size = host_dm->data_size - host_dm->data_start; host_data = (char*)host_dm + host_dm->data_start; argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1); switch (ie->host_cmd) { case DM_REMOVE_ALL: case DM_LIST_DEVICES: case DM_DEV_CREATE: case DM_DEV_REMOVE: case DM_DEV_SUSPEND: case DM_DEV_STATUS: case DM_DEV_WAIT: case DM_TABLE_STATUS: case DM_TABLE_CLEAR: case DM_TABLE_DEPS: case DM_LIST_VERSIONS: /* no input data */ break; case DM_DEV_RENAME: case DM_DEV_SET_GEOMETRY: /* data contains only strings */ memcpy(host_data, argptr, guest_data_size); break; case DM_TARGET_MSG: memcpy(host_data, argptr, guest_data_size); *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr); break; case DM_TABLE_LOAD: { void *gspec = argptr; void *cur_data = host_data; const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; int spec_size = thunk_type_size(arg_type, 0); int i; for (i = 0; i < host_dm->target_count; i++) { struct dm_target_spec *spec = cur_data; uint32_t next; int slen; thunk_convert(spec, gspec, arg_type, THUNK_HOST); slen = strlen((char*)gspec + spec_size) + 1; next = spec->next; spec->next = sizeof(*spec) + slen; strcpy((char*)&spec[1], gspec + spec_size); gspec += next; cur_data += spec->next; break; default: ret = -TARGET_EINVAL; unlock_user(argptr, guest_data, 0); unlock_user(argptr, guest_data, 0); ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); if (!is_error(ret)) { guest_data = arg + host_dm->data_start; guest_data_size = host_dm->data_size - host_dm->data_start; argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0); switch (ie->host_cmd) { case DM_REMOVE_ALL: case DM_DEV_CREATE: case DM_DEV_REMOVE: case DM_DEV_RENAME: case DM_DEV_SUSPEND: case DM_DEV_STATUS: case 
DM_TABLE_LOAD: case DM_TABLE_CLEAR: case DM_TARGET_MSG: case DM_DEV_SET_GEOMETRY: /* no return data */ break; case DM_LIST_DEVICES: { struct dm_name_list *nl = (void*)host_dm + host_dm->data_start; uint32_t remaining_data = guest_data_size; void *cur_data = argptr; const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) }; int nl_size = 12; /* can't use thunk_size due to alignment */ while (1) { uint32_t next = nl->next; if (next) { nl->next = nl_size + (strlen(nl->name) + 1); if (remaining_data < nl->next) { host_dm->flags |= DM_BUFFER_FULL_FLAG; break; thunk_convert(cur_data, nl, arg_type, THUNK_TARGET); strcpy(cur_data + nl_size, nl->name); cur_data += nl->next; remaining_data -= nl->next; if (!next) { break; nl = (void*)nl + next; break; case DM_DEV_WAIT: case DM_TABLE_STATUS: { struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start; void *cur_data = argptr; const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) }; int spec_size = thunk_type_size(arg_type, 0); int i; for (i = 0; i < host_dm->target_count; i++) { uint32_t next = spec->next; int slen = strlen((char*)&spec[1]) + 1; spec->next = (cur_data - argptr) + spec_size + slen; if (guest_data_size < spec->next) { host_dm->flags |= DM_BUFFER_FULL_FLAG; break; thunk_convert(cur_data, spec, arg_type, THUNK_TARGET); strcpy(cur_data + spec_size, (char*)&spec[1]); cur_data = argptr + spec->next; spec = (void*)host_dm + host_dm->data_start + next; break; case DM_TABLE_DEPS: { void *hdata = (void*)host_dm + host_dm->data_start; int count = *(uint32_t*)hdata; uint64_t *hdev = hdata + 8; uint64_t *gdev = argptr + 8; int i; *(uint32_t*)argptr = tswap32(count); for (i = 0; i < count; i++) { *gdev = tswap64(*hdev); gdev++; hdev++; break; case DM_LIST_VERSIONS: { struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start; uint32_t remaining_data = guest_data_size; void *cur_data = argptr; const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) }; int vers_size = thunk_type_size(arg_type, 0); while (1) { uint32_t next = vers->next; if (next) { vers->next = vers_size + (strlen(vers->name) + 1); if (remaining_data < vers->next) { host_dm->flags |= DM_BUFFER_FULL_FLAG; break; thunk_convert(cur_data, vers, arg_type, THUNK_TARGET); strcpy(cur_data + vers_size, vers->name); cur_data += vers->next; remaining_data -= vers->next; if (!next) { break; vers = (void*)vers + next; break; default: unlock_user(argptr, guest_data, 0); ret = -TARGET_EINVAL; unlock_user(argptr, guest_data, guest_data_size); argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET); unlock_user(argptr, arg, target_size); out: g_free(big_buf); return ret;"} {"target": 1, "idx": 17355, "func": "static void arm1026_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); cpu->dtb_compatible = \"arm,arm1026\"; set_feature(&cpu->env, ARM_FEATURE_V5); set_feature(&cpu->env, ARM_FEATURE_VFP); set_feature(&cpu->env, ARM_FEATURE_AUXCR); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); cpu->midr = 0x4106a262; cpu->reset_fpsid = 0x410110a0; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00090078; cpu->reset_auxcr = 1; { /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */ ARMCPRegInfo ifar = { .name = \"IFAR\", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns), .resetvalue = 0 }; define_one_arm_cp_reg(cpu, &ifar); } }"} {"target": 0, "idx": 17358, "func": "static 
void opt_frame_pad_bottom(const char *arg) { frame_padbottom = atoi(arg); if (frame_padbottom < 0) { fprintf(stderr, \"Incorrect bottom pad size\\n\"); av_exit(1); } }"} {"target": 1, "idx": 17367, "func": "static void acpi_memory_hotplug_write(void *opaque, hwaddr addr, uint64_t data, unsigned int size) { MemHotplugState *mem_st = opaque; MemStatus *mdev; ACPIOSTInfo *info; DeviceState *dev = NULL; HotplugHandler *hotplug_ctrl = NULL; Error *local_err = NULL; if (!mem_st->dev_count) { return; } if (addr) { if (mem_st->selector >= mem_st->dev_count) { trace_mhp_acpi_invalid_slot_selected(mem_st->selector); return; } } switch (addr) { case 0x0: /* DIMM slot selector */ mem_st->selector = data; trace_mhp_acpi_write_slot(mem_st->selector); break; case 0x4: /* _OST event */ mdev = &mem_st->devs[mem_st->selector]; if (data == 1) { /* TODO: handle device insert OST event */ } else if (data == 3) { /* TODO: handle device remove OST event */ } mdev->ost_event = data; trace_mhp_acpi_write_ost_ev(mem_st->selector, mdev->ost_event); break; case 0x8: /* _OST status */ mdev = &mem_st->devs[mem_st->selector]; mdev->ost_status = data; trace_mhp_acpi_write_ost_status(mem_st->selector, mdev->ost_status); /* TODO: implement memory removal on guest signal */ info = acpi_memory_device_status(mem_st->selector, mdev); qapi_event_send_acpi_device_ost(info, &error_abort); qapi_free_ACPIOSTInfo(info); break; case 0x14: /* set is_* fields */ mdev = &mem_st->devs[mem_st->selector]; if (data & 2) { /* clear insert event */ mdev->is_inserting = false; trace_mhp_acpi_clear_insert_evt(mem_st->selector); } else if (data & 4) { mdev->is_removing = false; trace_mhp_acpi_clear_remove_evt(mem_st->selector); } else if (data & 8) { if (!mdev->is_enabled) { trace_mhp_acpi_ejecting_invalid_slot(mem_st->selector); break; } dev = DEVICE(mdev->dimm); hotplug_ctrl = qdev_get_hotplug_handler(dev); /* call pc-dimm unplug cb */ hotplug_handler_unplug(hotplug_ctrl, dev, &local_err); if (local_err) { trace_mhp_acpi_pc_dimm_delete_failed(mem_st->selector); qapi_event_send_mem_unplug_error(dev->id, error_get_pretty(local_err), &error_abort); break; } trace_mhp_acpi_pc_dimm_deleted(mem_st->selector); } break; default: break; } }"} {"target": 1, "idx": 17375, "func": "long do_rt_sigreturn(CPUState *env) { fprintf(stderr, \"do_rt_sigreturn: not implemented\\n\"); return -TARGET_ENOSYS; }"} {"target": 1, "idx": 17376, "func": "void vga_hw_screen_dump(const char *filename) { TextConsole *previous_active_console; previous_active_console = active_console; active_console = consoles[0]; /* There is currently no way of specifying which screen we want to dump, so always dump the first one. 
*/ if (consoles[0] && consoles[0]->hw_screen_dump) consoles[0]->hw_screen_dump(consoles[0]->hw, filename); active_console = previous_active_console; }"} {"target": 1, "idx": 17378, "func": "static int net_socket_udp_init(NetClientState *peer, const char *model, const char *name, const char *rhost, const char *lhost) { NetSocketState *s; int fd, ret; struct sockaddr_in laddr, raddr; if (parse_host_port(&laddr, lhost) < 0) { return -1; } if (parse_host_port(&raddr, rhost) < 0) { return -1; } fd = qemu_socket(PF_INET, SOCK_DGRAM, 0); if (fd < 0) { perror(\"socket(PF_INET, SOCK_DGRAM)\"); return -1; } ret = socket_set_fast_reuse(fd); if (ret < 0) { closesocket(fd); return -1; } ret = bind(fd, (struct sockaddr *)&laddr, sizeof(laddr)); if (ret < 0) { perror(\"bind\"); closesocket(fd); return -1; } qemu_set_nonblock(fd); s = net_socket_fd_init(peer, model, name, fd, 0); if (!s) { return -1; } s->dgram_dst = raddr; snprintf(s->nc.info_str, sizeof(s->nc.info_str), \"socket: udp=%s:%d\", inet_ntoa(raddr.sin_addr), ntohs(raddr.sin_port)); return 0; }"} {"target": 1, "idx": 17386, "func": "static void audio_init (PCIBus *pci_bus) { struct soundhw *c; int audio_enabled = 0; for (c = soundhw; !audio_enabled && c->name; ++c) { audio_enabled = c->enabled; } if (audio_enabled) { AudioState *s; s = AUD_init (); if (s) { for (c = soundhw; c->name; ++c) { if (c->enabled) c->init.init_pci (pci_bus, s); } } } }"} {"target": 1, "idx": 17388, "func": "static int query_codec(enum CodecID id, int std_compliance) { CodecMime *cm= ff_id3v2_mime_tags; while(cm->id != CODEC_ID_NONE) { if(id == cm->id) return MKTAG('A', 'P', 'I', 'C'); cm++; } return -1; }"} {"target": 0, "idx": 17398, "func": "DECLARE_LOOP_FILTER(mmxext) DECLARE_LOOP_FILTER(sse2) DECLARE_LOOP_FILTER(ssse3) DECLARE_LOOP_FILTER(sse4) #endif #define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) \\ c->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel ## SIZE ## _h6_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel ## SIZE ## _v6_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel ## SIZE ## _h6v6_ ## OPT #define VP8_MC_FUNC(IDX, SIZE, OPT) \\ c->put_vp8_epel_pixels_tab[IDX][0][1] = ff_put_vp8_epel ## SIZE ## _h4_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel ## SIZE ## _v4_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][1][1] = ff_put_vp8_epel ## SIZE ## _h4v4_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel ## SIZE ## _h6v4_ ## OPT; \\ c->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel ## SIZE ## _h4v6_ ## OPT; \\ VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) #define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT) \\ c->put_vp8_bilinear_pixels_tab[IDX][0][1] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][0][2] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \\ c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c) { mm_flags = mm_support(); #if HAVE_YASM if (mm_flags & FF_MM_MMX) { c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx; c->vp8_idct_dc_add4y = 
ff_vp8_idct_dc_add4y_mmx; c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx; c->vp8_idct_add = ff_vp8_idct_add_mmx; c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx; c->put_vp8_epel_pixels_tab[0][0][0] = c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx; c->put_vp8_epel_pixels_tab[1][0][0] = c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx; c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx; c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx; c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx; } /* note that 4-tap width=16 functions are missing because w=16 * is only used for luma, and luma is always a copy or sixtap. */ if (mm_flags & FF_MM_MMX2) { VP8_LUMA_MC_FUNC(0, 16, mmxext); VP8_MC_FUNC(1, 8, mmxext); VP8_MC_FUNC(2, 4, mmxext); VP8_BILINEAR_MC_FUNC(0, 16, mmxext); VP8_BILINEAR_MC_FUNC(1, 8, mmxext); VP8_BILINEAR_MC_FUNC(2, 4, mmxext); c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext; c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext; c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext; } if (mm_flags & FF_MM_SSE) { c->vp8_idct_add = ff_vp8_idct_add_sse; c->put_vp8_epel_pixels_tab[0][0][0] = c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse; } if (mm_flags & (FF_MM_SSE2|FF_MM_SSE2SLOW)) { VP8_LUMA_MC_FUNC(0, 16, sse2); VP8_MC_FUNC(1, 8, sse2); VP8_BILINEAR_MC_FUNC(0, 16, sse2); VP8_BILINEAR_MC_FUNC(1, 8, sse2); c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2; c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2; } if (mm_flags & FF_MM_SSE2) { c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2; c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2; } if (mm_flags & FF_MM_SSSE3) { VP8_LUMA_MC_FUNC(0, 16, ssse3); VP8_MC_FUNC(1, 8, ssse3); VP8_MC_FUNC(2, 4, ssse3); VP8_BILINEAR_MC_FUNC(0, 16, ssse3); VP8_BILINEAR_MC_FUNC(1, 8, ssse3); VP8_BILINEAR_MC_FUNC(2, 4, ssse3); c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3; 
c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3; c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_ssse3; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3; } if (mm_flags & FF_MM_SSE4) { c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse4; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse4; } #endif }"} {"target": 1, "idx": 17402, "func": "static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, MMUAccessType access_type, ARMMMUIdx mmu_idx, hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, target_ulong *page_size_ptr, ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) { ARMCPU *cpu = arm_env_get_cpu(env); CPUState *cs = CPU(cpu); /* Read an LPAE long-descriptor translation table. */ ARMFaultType fault_type = ARMFault_Translation; uint32_t level; uint32_t epd = 0; int32_t t0sz, t1sz; uint32_t tg; uint64_t ttbr; int ttbr_select; hwaddr descaddr, indexmask, indexmask_grainsize; uint32_t tableattrs; target_ulong page_size; uint32_t attrs; int32_t stride = 9; int32_t addrsize; int inputsize; int32_t tbi = 0; TCR *tcr = regime_tcr(env, mmu_idx); int ap, ns, xn, pxn; uint32_t el = regime_el(env, mmu_idx); bool ttbr1_valid = true; uint64_t descaddrmask; bool aarch64 = arm_el_is_aa64(env, el); /* TODO: * This code does not handle the different format TCR for VTCR_EL2. * This code also does not support shareability levels. * Attribute and permission bit handling should also be checked when adding * support for those page table walks. */ if (aarch64) { level = 0; addrsize = 64; if (el > 1) { if (mmu_idx != ARMMMUIdx_S2NS) { tbi = extract64(tcr->raw_tcr, 20, 1); } } else { if (extract64(address, 55, 1)) { tbi = extract64(tcr->raw_tcr, 38, 1); } else { tbi = extract64(tcr->raw_tcr, 37, 1); } } tbi *= 8; /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it * invalid. */ if (el > 1) { ttbr1_valid = false; } } else { level = 1; addrsize = 32; /* There is no TTBR1 for EL2 */ if (el == 2) { ttbr1_valid = false; } } /* Determine whether this address is in the region controlled by * TTBR0 or TTBR1 (or if it is in neither region and should fault). * This is a Non-secure PL0/1 stage 1 translation, so controlled by * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32: */ if (aarch64) { /* AArch64 translation. */ t0sz = extract32(tcr->raw_tcr, 0, 6); t0sz = MIN(t0sz, 39); t0sz = MAX(t0sz, 16); } else if (mmu_idx != ARMMMUIdx_S2NS) { /* AArch32 stage 1 translation. */ t0sz = extract32(tcr->raw_tcr, 0, 3); } else { /* AArch32 stage 2 translation. */ bool sext = extract32(tcr->raw_tcr, 4, 1); bool sign = extract32(tcr->raw_tcr, 3, 1); /* Address size is 40-bit for a stage 2 translation, * and t0sz can be negative (from -8 to 7), * so we need to adjust it to use the TTBR selecting logic below. */ addrsize = 40; t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8; /* If the sign-extend bit is not the same as t0sz[3], the result * is unpredictable. Flag this as a guest error. 
*/ if (sign != sext) { qemu_log_mask(LOG_GUEST_ERROR, \"AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\\n\"); } } t1sz = extract32(tcr->raw_tcr, 16, 6); if (aarch64) { t1sz = MIN(t1sz, 39); t1sz = MAX(t1sz, 16); } if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) { /* there is a ttbr0 region and we are in it (high bits all zero) */ ttbr_select = 0; } else if (ttbr1_valid && t1sz && !extract64(~address, addrsize - t1sz, t1sz - tbi)) { /* there is a ttbr1 region and we are in it (high bits all one) */ ttbr_select = 1; } else if (!t0sz) { /* ttbr0 region is \"everything not in the ttbr1 region\" */ ttbr_select = 0; } else if (!t1sz && ttbr1_valid) { /* ttbr1 region is \"everything not in the ttbr0 region\" */ ttbr_select = 1; } else { /* in the gap between the two regions, this is a Translation fault */ fault_type = ARMFault_Translation; goto do_fault; } /* Note that QEMU ignores shareability and cacheability attributes, * so we don't need to do anything with the SH, ORGN, IRGN fields * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently * implement any ASID-like capability so we can ignore it (instead * we will always flush the TLB any time the ASID is changed). */ if (ttbr_select == 0) { ttbr = regime_ttbr(env, mmu_idx, 0); if (el < 2) { epd = extract32(tcr->raw_tcr, 7, 1); } inputsize = addrsize - t0sz; tg = extract32(tcr->raw_tcr, 14, 2); if (tg == 1) { /* 64KB pages */ stride = 13; } if (tg == 2) { /* 16KB pages */ stride = 11; } } else { /* We should only be here if TTBR1 is valid */ assert(ttbr1_valid); ttbr = regime_ttbr(env, mmu_idx, 1); epd = extract32(tcr->raw_tcr, 23, 1); inputsize = addrsize - t1sz; tg = extract32(tcr->raw_tcr, 30, 2); if (tg == 3) { /* 64KB pages */ stride = 13; } if (tg == 1) { /* 16KB pages */ stride = 11; } } /* Here we should have set up all the parameters for the translation: * inputsize, ttbr, epd, stride, tbi */ if (epd) { /* Translation table walk disabled => Translation fault on TLB miss * Note: This is always 0 on 64-bit EL2 and EL3. */ goto do_fault; } if (mmu_idx != ARMMMUIdx_S2NS) { /* The starting level depends on the virtual address size (which can * be up to 48 bits) and the translation granule size. It indicates * the number of strides (stride bits at a time) needed to * consume the bits of the input address. In the pseudocode this is: * level = 4 - RoundUp((inputsize - grainsize) / stride) * where their 'inputsize' is our 'inputsize', 'grainsize' is * our 'stride + 3' and 'stride' is our 'stride'. * Applying the usual \"rounded up m/n is (m+n-1)/n\" and simplifying: * = 4 - (inputsize - stride - 3 + stride - 1) / stride * = 4 - (inputsize - 4) / stride; */ level = 4 - (inputsize - 4) / stride; } else { /* For stage 2 translations the starting level is specified by the * VTCR_EL2.SL0 field (whose interpretation depends on the page size) */ uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); uint32_t startlevel; bool ok; if (!aarch64 || stride == 9) { /* AArch32 or 4KB pages */ startlevel = 2 - sl0; } else { /* 16KB or 64KB pages */ startlevel = 3 - sl0; } /* Check that the starting level is valid. 
*/ ok = check_s2_mmu_setup(cpu, aarch64, startlevel, inputsize, stride); if (!ok) { fault_type = ARMFault_Translation; goto do_fault; } level = startlevel; } indexmask_grainsize = (1ULL << (stride + 3)) - 1; indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; /* Now we can extract the actual base address from the TTBR */ descaddr = extract64(ttbr, 0, 48); descaddr &= ~indexmask; /* The address field in the descriptor goes up to bit 39 for ARMv7 * but up to bit 47 for ARMv8, but we use the descaddrmask * up to bit 39 for AArch32, because we don't need other bits in that case * to construct next descriptor address (anyway they should be all zeroes). */ descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & ~indexmask_grainsize; /* Secure accesses start with the page table in secure memory and * can be downgraded to non-secure at any step. Non-secure accesses * remain non-secure. We implement this by just ORing in the NSTable/NS * bits at each step. */ tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); for (;;) { uint64_t descriptor; bool nstable; descaddr |= (address >> (stride * (4 - level))) & indexmask; descaddr &= ~7ULL; nstable = extract32(tableattrs, 4, 1); descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); if (fi->s1ptw) { goto do_fault; } if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) { /* Invalid, or the Reserved level 3 encoding */ goto do_fault; } descaddr = descriptor & descaddrmask; if ((descriptor & 2) && (level < 3)) { /* Table entry. The top five bits are attributes which may * propagate down through lower levels of the table (and * which are all arranged so that 0 means \"no effect\", so * we can gather them up by ORing in the bits at each level). */ tableattrs |= extract64(descriptor, 59, 5); level++; indexmask = indexmask_grainsize; continue; } /* Block entry at level 1 or 2, or page entry at level 3. * These are basically the same thing, although the number * of bits we pull in from the vaddr varies. */ page_size = (1ULL << ((stride * (4 - level)) + 3)); descaddr |= (address & (page_size - 1)); /* Extract attributes from the descriptor */ attrs = extract64(descriptor, 2, 10) | (extract64(descriptor, 52, 12) << 10); if (mmu_idx == ARMMMUIdx_S2NS) { /* Stage 2 table descriptors do not include any attribute fields */ break; } /* Merge in attributes from table descriptors */ attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */ /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 * means \"force PL1 access only\", which means forcing AP[1] to 0. */ if (extract32(tableattrs, 2, 1)) { attrs &= ~(1 << 4); } attrs |= nstable << 3; /* NS */ break; } /* Here descaddr is the final physical address, and attributes * are all in attrs. */ fault_type = ARMFault_AccessFlag; if ((attrs & (1 << 8)) == 0) { /* Access flag */ goto do_fault; } ap = extract32(attrs, 4, 2); xn = extract32(attrs, 12, 1); if (mmu_idx == ARMMMUIdx_S2NS) { ns = true; *prot = get_S2prot(env, ap, xn); } else { ns = extract32(attrs, 3, 1); pxn = extract32(attrs, 11, 1); *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); } fault_type = ARMFault_Permission; if (!(*prot & (1 << access_type))) { goto do_fault; } if (ns) { /* The NS bit will (as required by the architecture) have no effect if * the CPU doesn't support TZ or this is a non-secure translation * regime, because the attribute will already be non-secure. 
*/ txattrs->secure = false; } if (cacheattrs != NULL) { if (mmu_idx == ARMMMUIdx_S2NS) { cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4)); } else { /* Index into MAIR registers for cache attributes */ uint8_t attrindx = extract32(attrs, 0, 3); uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; assert(attrindx <= 7); cacheattrs->attrs = extract64(mair, attrindx * 8, 8); } cacheattrs->shareability = extract32(attrs, 6, 2); } *phys_ptr = descaddr; *page_size_ptr = page_size; return false; do_fault: fi->type = fault_type; fi->level = level; /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS); return true; }"} {"target": 1, "idx": 17407, "func": "void cuda_init (int *cuda_mem_index, qemu_irq irq) { struct tm tm; CUDAState *s = &cuda_state; s->irq = irq; s->timers[0].index = 0; s->timers[0].timer = qemu_new_timer(vm_clock, cuda_timer1, s); s->timers[1].index = 1; qemu_get_timedate(&tm, RTC_OFFSET); s->tick_offset = mktimegm(&tm); s->adb_poll_timer = qemu_new_timer(vm_clock, cuda_adb_poll, s); *cuda_mem_index = cpu_register_io_memory(0, cuda_read, cuda_write, s); register_savevm(\"cuda\", -1, 1, cuda_save, cuda_load, s); qemu_register_reset(cuda_reset, s); cuda_reset(s); }"} {"target": 1, "idx": 17447, "func": "static void qdict_crumple_test_recursive(void) { QDict *src, *dst, *rule, *vnc, *acl, *listen; QObject *res; QList *rules; src = qdict_new(); qdict_put(src, \"vnc.listen.addr\", qstring_from_str(\"127.0.0.1\")); qdict_put(src, \"vnc.listen.port\", qstring_from_str(\"5901\")); qdict_put(src, \"vnc.acl.rules.0.match\", qstring_from_str(\"fred\")); qdict_put(src, \"vnc.acl.rules.0.policy\", qstring_from_str(\"allow\")); qdict_put(src, \"vnc.acl.rules.1.match\", qstring_from_str(\"bob\")); qdict_put(src, \"vnc.acl.rules.1.policy\", qstring_from_str(\"deny\")); qdict_put(src, \"vnc.acl.default\", qstring_from_str(\"deny\")); qdict_put(src, \"vnc.acl..name\", qstring_from_str(\"acl0\")); qdict_put(src, \"vnc.acl.rule..name\", qstring_from_str(\"acl0\")); res = qdict_crumple(src, &error_abort); g_assert_cmpint(qobject_type(res), ==, QTYPE_QDICT); dst = qobject_to_qdict(res); g_assert_cmpint(qdict_size(dst), ==, 1); vnc = qdict_get_qdict(dst, \"vnc\"); g_assert(vnc); listen = qdict_get_qdict(vnc, \"listen\"); g_assert(listen); g_assert_cmpstr(\"127.0.0.1\", ==, qdict_get_str(listen, \"addr\")); g_assert_cmpstr(\"5901\", ==, qdict_get_str(listen, \"port\")); acl = qdict_get_qdict(vnc, \"acl\"); g_assert(acl); rules = qdict_get_qlist(acl, \"rules\"); g_assert(rules); g_assert_cmpint(qlist_size(rules), ==, 2); rule = qobject_to_qdict(qlist_pop(rules)); g_assert_cmpint(qdict_size(rule), ==, 2); g_assert_cmpstr(\"fred\", ==, qdict_get_str(rule, \"match\")); g_assert_cmpstr(\"allow\", ==, qdict_get_str(rule, \"policy\")); QDECREF(rule); rule = qobject_to_qdict(qlist_pop(rules)); g_assert_cmpint(qdict_size(rule), ==, 2); g_assert_cmpstr(\"bob\", ==, qdict_get_str(rule, \"match\")); g_assert_cmpstr(\"deny\", ==, qdict_get_str(rule, \"policy\")); QDECREF(rule); /* With recursive crumpling, we should see all names unescaped */ g_assert_cmpstr(\"acl0\", ==, qdict_get_str(vnc, \"acl.name\")); g_assert_cmpstr(\"acl0\", ==, qdict_get_str(acl, \"rule.name\")); QDECREF(src); QDECREF(dst); }"} {"target": 1, "idx": 17448, "func": "void vga_ioport_write(void *opaque, uint32_t addr, uint32_t val) { VGACommonState *s = opaque; int index; /* check port range access depending on color/monochrome mode */ if 
(vga_ioport_invalid(s, addr)) { return; } #ifdef DEBUG_VGA printf(\"VGA: write addr=0x%04x data=0x%02x\\n\", addr, val); #endif switch(addr) { case VGA_ATT_W: if (s->ar_flip_flop == 0) { val &= 0x3f; s->ar_index = val; } else { index = s->ar_index & 0x1f; switch(index) { case VGA_ATC_PALETTE0 ... VGA_ATC_PALETTEF: s->ar[index] = val & 0x3f; break; case VGA_ATC_MODE: s->ar[index] = val & ~0x10; break; case VGA_ATC_OVERSCAN: s->ar[index] = val; break; case VGA_ATC_PLANE_ENABLE: s->ar[index] = val & ~0xc0; break; case VGA_ATC_PEL: s->ar[index] = val & ~0xf0; break; case VGA_ATC_COLOR_PAGE: s->ar[index] = val & ~0xf0; break; default: break; } } s->ar_flip_flop ^= 1; break; case VGA_MIS_W: s->msr = val & ~0x10; s->update_retrace_info(s); break; case VGA_SEQ_I: s->sr_index = val & 7; break; case VGA_SEQ_D: #ifdef DEBUG_VGA_REG printf(\"vga: write SR%x = 0x%02x\\n\", s->sr_index, val); #endif s->sr[s->sr_index] = val & sr_mask[s->sr_index]; vbe_update_vgaregs(s); if (s->sr_index == VGA_SEQ_CLOCK_MODE) { s->update_retrace_info(s); } vga_update_memory_access(s); break; case VGA_PEL_IR: s->dac_read_index = val; s->dac_sub_index = 0; s->dac_state = 3; break; case VGA_PEL_IW: s->dac_write_index = val; s->dac_sub_index = 0; s->dac_state = 0; break; case VGA_PEL_D: s->dac_cache[s->dac_sub_index] = val; if (++s->dac_sub_index == 3) { memcpy(&s->palette[s->dac_write_index * 3], s->dac_cache, 3); s->dac_sub_index = 0; s->dac_write_index++; } break; case VGA_GFX_I: s->gr_index = val & 0x0f; break; case VGA_GFX_D: #ifdef DEBUG_VGA_REG printf(\"vga: write GR%x = 0x%02x\\n\", s->gr_index, val); #endif s->gr[s->gr_index] = val & gr_mask[s->gr_index]; vbe_update_vgaregs(s); vga_update_memory_access(s); break; case VGA_CRT_IM: case VGA_CRT_IC: s->cr_index = val; break; case VGA_CRT_DM: case VGA_CRT_DC: #ifdef DEBUG_VGA_REG printf(\"vga: write CR%x = 0x%02x\\n\", s->cr_index, val); #endif /* handle CR0-7 protection */ if ((s->cr[VGA_CRTC_V_SYNC_END] & VGA_CR11_LOCK_CR0_CR7) && s->cr_index <= VGA_CRTC_OVERFLOW) { /* can always write bit 4 of CR7 */ if (s->cr_index == VGA_CRTC_OVERFLOW) { s->cr[VGA_CRTC_OVERFLOW] = (s->cr[VGA_CRTC_OVERFLOW] & ~0x10) | (val & 0x10); vbe_update_vgaregs(s); } return; } s->cr[s->cr_index] = val; vbe_update_vgaregs(s); switch(s->cr_index) { case VGA_CRTC_H_TOTAL: case VGA_CRTC_H_SYNC_START: case VGA_CRTC_H_SYNC_END: case VGA_CRTC_V_TOTAL: case VGA_CRTC_OVERFLOW: case VGA_CRTC_V_SYNC_END: case VGA_CRTC_MODE: s->update_retrace_info(s); break; } break; case VGA_IS1_RM: case VGA_IS1_RC: s->fcr = val & 0x10; break; } }"} {"target": 0, "idx": 17454, "func": "static unsigned int celt_decode_band(CeltContext *s, OpusRangeCoder *rc, const int band, float *X, float *Y, int N, int b, unsigned int blocks, float *lowband, int duration, float *lowband_out, int level, float gain, float *lowband_scratch, int fill) { const uint8_t *cache; int dualstereo, split; int imid = 0, iside = 0; unsigned int N0 = N; int N_B; int N_B0; int B0 = blocks; int time_divide = 0; int recombine = 0; int inv = 0; float mid = 0, side = 0; int longblocks = (B0 == 1); unsigned int cm = 0; N_B0 = N_B = N / blocks; split = dualstereo = (Y != NULL); if (N == 1) { /* special case for one sample */ int i; float *x = X; for (i = 0; i <= dualstereo; i++) { int sign = 0; if (s->remaining2 >= 1<<3) { sign = opus_getrawbits(rc, 1); s->remaining2 -= 1 << 3; b -= 1 << 3; } x[0] = sign ? 
-1.0f : 1.0f; x = Y; } if (lowband_out) lowband_out[0] = X[0]; return 1; } if (!dualstereo && level == 0) { int tf_change = s->tf_change[band]; int k; if (tf_change > 0) recombine = tf_change; /* Band recombining to increase frequency resolution */ if (lowband && (recombine || ((N_B & 1) == 0 && tf_change < 0) || B0 > 1)) { int j; for (j = 0; j < N; j++) lowband_scratch[j] = lowband[j]; lowband = lowband_scratch; } for (k = 0; k < recombine; k++) { if (lowband) celt_haar1(lowband, N >> k, 1 << k); fill = celt_bit_interleave[fill & 0xF] | celt_bit_interleave[fill >> 4] << 2; } blocks >>= recombine; N_B <<= recombine; /* Increasing the time resolution */ while ((N_B & 1) == 0 && tf_change < 0) { if (lowband) celt_haar1(lowband, N_B, blocks); fill |= fill << blocks; blocks <<= 1; N_B >>= 1; time_divide++; tf_change++; } B0 = blocks; N_B0 = N_B; /* Reorganize the samples in time order instead of frequency order */ if (B0 > 1 && lowband) celt_deinterleave_hadamard(s->scratch, lowband, N_B >> recombine, B0 << recombine, longblocks); } /* If we need 1.5 more bit than we can produce, split the band in two. */ cache = celt_cache_bits + celt_cache_index[(duration + 1) * CELT_MAX_BANDS + band]; if (!dualstereo && duration >= 0 && b > cache[cache[0]] + 12 && N > 2) { N >>= 1; Y = X + N; split = 1; duration -= 1; if (blocks == 1) fill = (fill & 1) | (fill << 1); blocks = (blocks + 1) >> 1; } if (split) { int qn; int itheta = 0; int mbits, sbits, delta; int qalloc; int pulse_cap; int offset; int orig_fill; int tell; /* Decide on the resolution to give to the split parameter theta */ pulse_cap = celt_log_freq_range[band] + duration * 8; offset = (pulse_cap >> 1) - (dualstereo && N == 2 ? CELT_QTHETA_OFFSET_TWOPHASE : CELT_QTHETA_OFFSET); qn = (dualstereo && band >= s->intensitystereo) ? 1 : celt_compute_qn(N, b, offset, pulse_cap, dualstereo); tell = opus_rc_tell_frac(rc); if (qn != 1) { /* Entropy coding of the angle. We use a uniform pdf for the time split, a step for stereo, and a triangular one for the rest. */ if (dualstereo && N > 2) itheta = opus_rc_stepmodel(rc, qn/2); else if (dualstereo || B0 > 1) itheta = opus_rc_unimodel(rc, qn+1); else itheta = opus_rc_trimodel(rc, qn); itheta = itheta * 16384 / qn; /* NOTE: Renormalising X and Y *may* help fixed-point a bit at very high rate. Let's do that at higher complexity */ } else if (dualstereo) { inv = (b > 2 << 3 && s->remaining2 > 2 << 3) ? opus_rc_p2model(rc, 2) : 0; itheta = 0; } qalloc = opus_rc_tell_frac(rc) - tell; b -= qalloc; orig_fill = fill; if (itheta == 0) { imid = 32767; iside = 0; fill &= (1 << blocks) - 1; delta = -16384; } else if (itheta == 16384) { imid = 0; iside = 32767; fill &= ((1 << blocks) - 1) << blocks; delta = 16384; } else { imid = celt_cos(itheta); iside = celt_cos(16384-itheta); /* This is the mid vs side allocation that minimizes squared error in that band. */ delta = ROUND_MUL16((N - 1) << 7, celt_log2tan(iside, imid)); } mid = imid / 32768.0f; side = iside / 32768.0f; /* This is a special case for N=2 that only works for stereo and takes advantage of the fact that mid and side are orthogonal to encode the side with just one bit. */ if (N == 2 && dualstereo) { int c; int sign = 0; float tmp; float *x2, *y2; mbits = b; /* Only need one bit for the side */ sbits = (itheta != 0 && itheta != 16384) ? 1 << 3 : 0; mbits -= sbits; c = (itheta > 8192); s->remaining2 -= qalloc+sbits; x2 = c ? Y : X; y2 = c ? 
X : Y; if (sbits) sign = opus_getrawbits(rc, 1); sign = 1 - 2 * sign; /* We use orig_fill here because we want to fold the side, but if itheta==16384, we'll have cleared the low bits of fill. */ cm = celt_decode_band(s, rc, band, x2, NULL, N, mbits, blocks, lowband, duration, lowband_out, level, gain, lowband_scratch, orig_fill); /* We don't split N=2 bands, so cm is either 1 or 0 (for a fold-collapse), and there's no need to worry about mixing with the other channel. */ y2[0] = -sign * x2[1]; y2[1] = sign * x2[0]; X[0] *= mid; X[1] *= mid; Y[0] *= side; Y[1] *= side; tmp = X[0]; X[0] = tmp - Y[0]; Y[0] = tmp + Y[0]; tmp = X[1]; X[1] = tmp - Y[1]; Y[1] = tmp + Y[1]; } else { /* \"Normal\" split code */ float *next_lowband2 = NULL; float *next_lowband_out1 = NULL; int next_level = 0; int rebalance; /* Give more bits to low-energy MDCTs than they would * otherwise deserve */ if (B0 > 1 && !dualstereo && (itheta & 0x3fff)) { if (itheta > 8192) /* Rough approximation for pre-echo masking */ delta -= delta >> (4 - duration); else /* Corresponds to a forward-masking slope of * 1.5 dB per 10 ms */ delta = FFMIN(0, delta + (N << 3 >> (5 - duration))); } mbits = av_clip((b - delta) / 2, 0, b); sbits = b - mbits; s->remaining2 -= qalloc; if (lowband && !dualstereo) next_lowband2 = lowband + N; /* >32-bit split case */ /* Only stereo needs to pass on lowband_out. * Otherwise, it's handled at the end */ if (dualstereo) next_lowband_out1 = lowband_out; else next_level = level + 1; rebalance = s->remaining2; if (mbits >= sbits) { /* In stereo mode, we do not apply a scaling to the mid * because we need the normalized mid for folding later */ cm = celt_decode_band(s, rc, band, X, NULL, N, mbits, blocks, lowband, duration, next_lowband_out1, next_level, dualstereo ? 1.0f : (gain * mid), lowband_scratch, fill); rebalance = mbits - (rebalance - s->remaining2); if (rebalance > 3 << 3 && itheta != 0) sbits += rebalance - (3 << 3); /* For a stereo split, the high bits of fill are always zero, * so no folding will be done to the side. */ cm |= celt_decode_band(s, rc, band, Y, NULL, N, sbits, blocks, next_lowband2, duration, NULL, next_level, gain * side, NULL, fill >> blocks) << ((B0 >> 1) & (dualstereo - 1)); } else { /* For a stereo split, the high bits of fill are always zero, * so no folding will be done to the side. */ cm = celt_decode_band(s, rc, band, Y, NULL, N, sbits, blocks, next_lowband2, duration, NULL, next_level, gain * side, NULL, fill >> blocks) << ((B0 >> 1) & (dualstereo - 1)); rebalance = sbits - (rebalance - s->remaining2); if (rebalance > 3 << 3 && itheta != 16384) mbits += rebalance - (3 << 3); /* In stereo mode, we do not apply a scaling to the mid because * we need the normalized mid for folding later */ cm |= celt_decode_band(s, rc, band, X, NULL, N, mbits, blocks, lowband, duration, next_lowband_out1, next_level, dualstereo ? 1.0f : (gain * mid), lowband_scratch, fill); } } } else { /* This is the basic no-split case */ unsigned int q = celt_bits2pulses(cache, b); unsigned int curr_bits = celt_pulses2bits(cache, q); s->remaining2 -= curr_bits; /* Ensures we can never bust the budget */ while (s->remaining2 < 0 && q > 0) { s->remaining2 += curr_bits; curr_bits = celt_pulses2bits(cache, --q); s->remaining2 -= curr_bits; } if (q != 0) { /* Finally do the actual quantization */ cm = celt_alg_unquant(rc, X, N, (q < 8) ? 
q : (8 + (q & 7)) << ((q >> 3) - 1), s->spread, blocks, gain); } else { /* If there's no pulse, fill the band anyway */ int j; unsigned int cm_mask = (1 << blocks) - 1; fill &= cm_mask; if (!fill) { for (j = 0; j < N; j++) X[j] = 0.0f; } else { if (lowband == NULL) { /* Noise */ for (j = 0; j < N; j++) X[j] = (((int32_t)celt_rng(s)) >> 20); cm = cm_mask; } else { /* Folded spectrum */ for (j = 0; j < N; j++) { /* About 48 dB below the \"normal\" folding level */ X[j] = lowband[j] + (((celt_rng(s)) & 0x8000) ? 1.0f / 256 : -1.0f / 256); } cm = fill; } celt_renormalize_vector(X, N, gain); } } } /* This code is used by the decoder and by the resynthesis-enabled encoder */ if (dualstereo) { int j; if (N != 2) celt_stereo_merge(X, Y, mid, N); if (inv) { for (j = 0; j < N; j++) Y[j] *= -1; } } else if (level == 0) { int k; /* Undo the sample reorganization going from time order to frequency order */ if (B0 > 1) celt_interleave_hadamard(s->scratch, X, N_B>>recombine, B0<>= 1; N_B <<= 1; cm |= cm >> blocks; celt_haar1(X, N_B, blocks); } for (k = 0; k < recombine; k++) { cm = celt_bit_deinterleave[cm]; celt_haar1(X, N0>>k, 1<width * s->bpp) + 7) >> 3; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format); int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2; if (s->planar) width /= s->bppcount; if (size <= 0) return AVERROR_INVALIDDATA; if (is_yuv) { int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp * s->subsampling[0] * s->subsampling[1] + 7) >> 3; av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row); if (s->yuv_line == NULL) { av_log(s->avctx, AV_LOG_ERROR, \"Not enough memory\\n\"); return AVERROR(ENOMEM); } dst = s->yuv_line; stride = 0; width = s->width * s->subsampling[1] + 2*(s->width / s->subsampling[0]); av_assert0(width <= bytes_per_row); av_assert0(s->bpp == 24); } if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) { #if CONFIG_ZLIB return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines, strip_start, is_yuv); #else av_log(s->avctx, AV_LOG_ERROR, \"zlib support not enabled, \" \"deflate compression not supported\\n\"); return AVERROR(ENOSYS); #endif } if (s->compr == TIFF_LZMA) { #if CONFIG_LZMA return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines, strip_start, is_yuv); #else av_log(s->avctx, AV_LOG_ERROR, \"LZMA support not enabled\\n\"); return AVERROR(ENOSYS); #endif } if (s->compr == TIFF_LZW) { if (s->fill_order) { if ((ret = deinvert_buffer(s, src, size)) < 0) return ret; ssrc = src = s->deinvert_buf; } if (size > 1 && !src[0] && (src[1]&1)) { av_log(s->avctx, AV_LOG_ERROR, \"Old style LZW is unsupported\\n\"); } if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) { av_log(s->avctx, AV_LOG_ERROR, \"Error initializing LZW decoder\\n\"); return ret; } for (line = 0; line < lines; line++) { pixels = ff_lzw_decode(s->lzw, dst, width); if (pixels < width) { av_log(s->avctx, AV_LOG_ERROR, \"Decoded only %i bytes of %i\\n\", pixels, width); return AVERROR_INVALIDDATA; } if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0); if (is_yuv) { unpack_yuv(s, p, dst, strip_start + line); line += s->subsampling[1] - 1; } dst += stride; } return 0; } if (s->compr == TIFF_CCITT_RLE || s->compr == TIFF_G3 || s->compr == TIFF_G4) { if (is_yuv) return AVERROR_INVALIDDATA; return tiff_unpack_fax(s, dst, stride, src, size, width, lines); } bytestream2_init(&s->gb, src, size); bytestream2_init_writer(&pb, dst, is_yuv ? 
s->yuv_line_size : (stride * lines)); for (line = 0; line < lines; line++) { if (src - ssrc > size) { av_log(s->avctx, AV_LOG_ERROR, \"Source data overread\\n\"); return AVERROR_INVALIDDATA; } if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb)) break; bytestream2_seek_p(&pb, stride * line, SEEK_SET); switch (s->compr) { case TIFF_RAW: if (ssrc + size - src < width) return AVERROR_INVALIDDATA; if (!s->fill_order) { horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8), dst, 1, src, 0, width, 0); } else { int i; for (i = 0; i < width; i++) dst[i] = ff_reverse[src[i]]; } src += width; break; case TIFF_PACKBITS: for (pixels = 0; pixels < width;) { if (ssrc + size - src < 2) { av_log(s->avctx, AV_LOG_ERROR, \"Read went out of bounds\\n\"); return AVERROR_INVALIDDATA; } code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++; if (code >= 0) { code++; if (pixels + code > width || ssrc + size - src < code) { av_log(s->avctx, AV_LOG_ERROR, \"Copy went out of bounds\\n\"); return AVERROR_INVALIDDATA; } horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8), dst, 1, src, 0, code, pixels); src += code; pixels += code; } else if (code != -128) { // -127..-1 code = (-code) + 1; if (pixels + code > width) { av_log(s->avctx, AV_LOG_ERROR, \"Run went out of bounds\\n\"); return AVERROR_INVALIDDATA; } c = *src++; horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8), dst, 0, NULL, c, code, pixels); pixels += code; } } if (s->fill_order) { int i; for (i = 0; i < width; i++) dst[i] = ff_reverse[dst[i]]; } break; } if (is_yuv) { unpack_yuv(s, p, dst, strip_start + line); line += s->subsampling[1] - 1; } dst += stride; } return 0; }"} {"target": 1, "idx": 17458, "func": "static void mkv_free(MatroskaMuxContext *mkv) { if (mkv->main_seekhead) { av_freep(&mkv->main_seekhead->entries); av_freep(&mkv->main_seekhead); if (mkv->cues) { av_freep(&mkv->cues->entries); av_freep(&mkv->cues); if (mkv->attachments) { av_freep(&mkv->attachments->entries); av_freep(&mkv->attachments); av_freep(&mkv->tracks); av_freep(&mkv->stream_durations); av_freep(&mkv->stream_duration_offsets);"} {"target": 1, "idx": 17463, "func": "static void raven_pcihost_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->realize = raven_pcihost_realizefn; dc->fw_name = \"pci\"; dc->no_user = 1; }"} {"target": 1, "idx": 17478, "func": "static int chr_can_read(void *opaque) { int can_read; SCLPConsole *scon = opaque; can_read = SIZE_BUFFER_VT220 - scon->iov_data_len; return can_read; }"} {"target": 0, "idx": 17492, "func": "int show_filters(void *optctx, const char *opt, const char *arg) { AVFilter av_unused(**filter) = NULL; printf(\"Filters:\\n\"); #if CONFIG_AVFILTER while ((filter = av_filter_next(filter)) && *filter) printf(\"%-16s %s\\n\", (*filter)->name, (*filter)->description); #endif return 0; }"} {"target": 0, "idx": 17505, "func": "static int cavs_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) { int pic_found, i; uint32_t state; pic_found= pc->frame_start_found; state= pc->state; i=0; if(!pic_found){ for(i=0; i SLICE_MAX_START_CODE){ pc->frame_start_found=0; pc->state=-1; return i-3; } } } } pc->frame_start_found= pic_found; pc->state= state; return END_NOT_FOUND; }"} {"target": 1, "idx": 17525, "func": "int net_client_init(Monitor *mon, QemuOpts *opts, int is_netdev) { const char *name; const char *type; int i; type = qemu_opt_get(opts, \"type\"); if (!type) { 
qerror_report(QERR_MISSING_PARAMETER, \"type\"); return -1; } if (is_netdev) { if (strcmp(type, \"tap\") != 0 && #ifdef CONFIG_SLIRP strcmp(type, \"user\") != 0 && #endif #ifdef CONFIG_VDE strcmp(type, \"vde\") != 0 && #endif strcmp(type, \"socket\") != 0) { qerror_report(QERR_INVALID_PARAMETER_VALUE, \"type\", \"a netdev backend type\"); return -1; } if (qemu_opt_get(opts, \"vlan\")) { qerror_report(QERR_INVALID_PARAMETER, \"vlan\"); return -1; } if (qemu_opt_get(opts, \"name\")) { qerror_report(QERR_INVALID_PARAMETER, \"name\"); return -1; } if (!qemu_opts_id(opts)) { qerror_report(QERR_MISSING_PARAMETER, \"id\"); return -1; } } name = qemu_opts_id(opts); if (!name) { name = qemu_opt_get(opts, \"name\"); } for (i = 0; i < NET_CLIENT_TYPE_MAX; i++) { if (net_client_types[i].type != NULL && !strcmp(net_client_types[i].type, type)) { VLANState *vlan = NULL; int ret; if (qemu_opts_validate(opts, &net_client_types[i].desc[0]) == -1) { return -1; } /* Do not add to a vlan if it's a -netdev or a nic with a * netdev= parameter. */ if (!(is_netdev || (strcmp(type, \"nic\") == 0 && qemu_opt_get(opts, \"netdev\")))) { vlan = qemu_find_vlan(qemu_opt_get_number(opts, \"vlan\", 0), 1); } ret = 0; if (net_client_types[i].init) { ret = net_client_types[i].init(opts, mon, name, vlan); if (ret < 0) { /* TODO push error reporting into init() methods */ qerror_report(QERR_DEVICE_INIT_FAILED, type); return -1; } } return ret; } } qerror_report(QERR_INVALID_PARAMETER_VALUE, \"type\", \"a network client type\"); return -1; }"} {"target": 0, "idx": 17539, "func": "static void bwf_write_bext_chunk(AVFormatContext *s) { AVDictionaryEntry *tmp_tag; uint64_t time_reference = 0; int64_t bext = ff_start_tag(s->pb, \"bext\"); bwf_write_bext_string(s, \"description\", 256); bwf_write_bext_string(s, \"originator\", 32); bwf_write_bext_string(s, \"originator_reference\", 32); bwf_write_bext_string(s, \"origination_date\", 10); bwf_write_bext_string(s, \"origination_time\", 8); if (tmp_tag = av_dict_get(s->metadata, \"time_reference\", NULL, 0)) time_reference = strtoll(tmp_tag->value, NULL, 10); avio_wl64(s->pb, time_reference); avio_wl16(s->pb, 1); // set version to 1 if (tmp_tag = av_dict_get(s->metadata, \"umid\", NULL, 0)) { unsigned char umidpart_str[17] = {0}; int64_t i; uint64_t umidpart; size_t len = strlen(tmp_tag->value+2); for (i = 0; i < len/16; i++) { memcpy(umidpart_str, tmp_tag->value + 2 + (i*16), 16); umidpart = strtoll(umidpart_str, NULL, 16); avio_wb64(s->pb, umidpart); } ffio_fill(s->pb, 0, 64 - i*8); } else ffio_fill(s->pb, 0, 64); // zero UMID ffio_fill(s->pb, 0, 190); // Reserved if (tmp_tag = av_dict_get(s->metadata, \"coding_history\", NULL, 0)) avio_put_str(s->pb, tmp_tag->value); ff_end_tag(s->pb, bext); }"} {"target": 1, "idx": 17544, "func": "static void free_schro_frame(SchroFrame *frame, void *priv) { AVFrame *p_pic = priv; av_frame_free(&p_pic); }"} {"target": 1, "idx": 17558, "func": "static void qdict_do_flatten(QDict *qdict, QDict *target, const char *prefix) { QObject *value; const QDictEntry *entry, *next; const char *new_key; bool delete; entry = qdict_first(qdict); while (entry != NULL) { next = qdict_next(qdict, entry); value = qdict_entry_value(entry); new_key = NULL; delete = false; if (prefix) { qobject_incref(value); new_key = g_strdup_printf(\"%s.%s\", prefix, entry->key); qdict_put_obj(target, new_key, value); delete = true; } if (qobject_type(value) == QTYPE_QDICT) { qdict_do_flatten(qobject_to_qdict(value), target, new_key ? 
new_key : entry->key); delete = true; } if (delete) { qdict_del(qdict, entry->key); /* Restart loop after modifying the iterated QDict */ entry = qdict_first(qdict); continue; } entry = next; } }"} {"target": 1, "idx": 17562, "func": "static av_cold int flashsv2_encode_init(AVCodecContext * avctx) { FlashSV2Context *s = avctx->priv_data; s->avctx = avctx; s->comp = avctx->compression_level; if (s->comp == -1) s->comp = 9; if (s->comp < 0 || s->comp > 9) { \"Compression level should be 0-9, not %d\\n\", s->comp); if ((avctx->width > 4095) || (avctx->height > 4095)) { \"Input dimensions too large, input must be max 4096x4096 !\\n\"); if (av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0) s->last_key_frame = 0; s->image_width = avctx->width; s->image_height = avctx->height; s->block_width = (s->image_width / 12) & ~15; s->block_height = (s->image_height / 12) & ~15; if(!s->block_width) s->block_width = 1; if(!s->block_height) s->block_height = 1; s->rows = (s->image_height + s->block_height - 1) / s->block_height; s->cols = (s->image_width + s->block_width - 1) / s->block_width; s->frame_size = s->image_width * s->image_height * 3; s->blocks_size = s->rows * s->cols * sizeof(Block); s->encbuffer = av_mallocz(s->frame_size); s->keybuffer = av_mallocz(s->frame_size); s->databuffer = av_mallocz(s->frame_size * 6); s->current_frame = av_mallocz(s->frame_size); s->key_frame = av_mallocz(s->frame_size); s->frame_blocks = av_mallocz(s->blocks_size); s->key_blocks = av_mallocz(s->blocks_size); init_blocks(s, s->frame_blocks, s->encbuffer, s->databuffer); init_blocks(s, s->key_blocks, s->keybuffer, 0); reset_stats(s); #ifndef FLASHSV2_DUMB s->total_bits = 1; #endif s->use_custom_palette = 0; s->palette_type = -1; // so that the palette will be generated in reconfigure_at_keyframe if (!s->encbuffer || !s->keybuffer || !s->databuffer || !s->current_frame || !s->key_frame || !s->key_blocks || !s->frame_blocks) { av_log(avctx, AV_LOG_ERROR, \"Memory allocation failed.\\n\"); cleanup(s); return 0;"} {"target": 0, "idx": 17571, "func": "float64 HELPER(ucf64_divd)(float64 a, float64 b, CPUUniCore32State *env) { return float64_div(a, b, &env->ucf64.fp_status); }"} {"target": 0, "idx": 17585, "func": "static void decode_pulses(Pulse * pulse, GetBitContext * gb, const uint16_t * swb_offset) { int i; pulse->num_pulse = get_bits(gb, 2) + 1; pulse->pos[0] = swb_offset[get_bits(gb, 6)]; pulse->pos[0] += get_bits(gb, 5); pulse->amp[0] = get_bits(gb, 4); for (i = 1; i < pulse->num_pulse; i++) { pulse->pos[i] = get_bits(gb, 5) + pulse->pos[i-1]; pulse->amp[i] = get_bits(gb, 4); } }"} {"target": 0, "idx": 17588, "func": "static inline void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset) { gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset); tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1); tcg_gen_and_tl(dst, dst, cpu_tmp0); tcg_gen_xori_tl(dst, dst, 0x1); }"} {"target": 0, "idx": 17594, "func": "int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp) { char backing_filename[PATH_MAX]; int back_flags, ret; BlockDriver *back_drv = NULL; Error *local_err = NULL; if (bs->backing_hd != NULL) { QDECREF(options); return 0; } /* NULL means an empty set of options */ if (options == NULL) { options = qdict_new(); } bs->open_flags &= ~BDRV_O_NO_BACKING; if (qdict_haskey(options, \"file.filename\")) { backing_filename[0] = '\\0'; } else if (bs->backing_file[0] == '\\0' && qdict_size(options) == 0) { QDECREF(options); return 0; } bs->backing_hd = 
bdrv_new(\"\"); bdrv_get_full_backing_filename(bs, backing_filename, sizeof(backing_filename)); if (bs->backing_format[0] != '\\0') { back_drv = bdrv_find_format(bs->backing_format); } /* backing files always opened read-only */ back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT); ret = bdrv_open(bs->backing_hd, *backing_filename ? backing_filename : NULL, options, back_flags, back_drv, &local_err); if (ret < 0) { bdrv_unref(bs->backing_hd); bs->backing_hd = NULL; bs->open_flags |= BDRV_O_NO_BACKING; error_propagate(errp, local_err); return ret; } return 0; }"} {"target": 0, "idx": 17600, "func": "static OSStatus audioDeviceIOProc( AudioDeviceID inDevice, const AudioTimeStamp* inNow, const AudioBufferList* inInputData, const AudioTimeStamp* inInputTime, AudioBufferList* outOutputData, const AudioTimeStamp* inOutputTime, void* hwptr) { UInt32 frame, frameCount; float *out = outOutputData->mBuffers[0].mData; HWVoiceOut *hw = hwptr; coreaudioVoiceOut *core = (coreaudioVoiceOut *) hwptr; int rpos, live; st_sample_t *src; #ifndef FLOAT_MIXENG #ifdef RECIPROCAL const float scale = 1.f / UINT_MAX; #else const float scale = UINT_MAX; #endif #endif if (coreaudio_lock (core, \"audioDeviceIOProc\")) { inInputTime = 0; return 0; } frameCount = core->audioDevicePropertyBufferFrameSize; live = core->live; /* if there are not enough samples, set signal and return */ if (live < frameCount) { inInputTime = 0; coreaudio_unlock (core, \"audioDeviceIOProc(empty)\"); return 0; } rpos = core->rpos; src = hw->mix_buf + rpos; /* fill buffer */ for (frame = 0; frame < frameCount; frame++) { #ifdef FLOAT_MIXENG *out++ = src[frame].l; /* left channel */ *out++ = src[frame].r; /* right channel */ #else #ifdef RECIPROCAL *out++ = src[frame].l * scale; /* left channel */ *out++ = src[frame].r * scale; /* right channel */ #else *out++ = src[frame].l / scale; /* left channel */ *out++ = src[frame].r / scale; /* right channel */ #endif #endif } rpos = (rpos + frameCount) % hw->samples; core->decr += frameCount; core->rpos = rpos; coreaudio_unlock (core, \"audioDeviceIOProc\"); return 0; }"} {"target": 1, "idx": 17604, "func": "static void m68k_cpu_initfn(Object *obj) { CPUState *cs = CPU(obj); M68kCPU *cpu = M68K_CPU(obj); CPUM68KState *env = &cpu->env; static bool inited; cs->env_ptr = env; cpu_exec_init(cs, &error_abort); if (tcg_enabled() && !inited) { inited = true; m68k_tcg_init(); } }"} {"target": 0, "idx": 17621, "func": "int ff_start_frame(AVFilterLink *link, AVFilterBufferRef *picref) { int (*start_frame)(AVFilterLink *, AVFilterBufferRef *); AVFilterPad *src = link->srcpad; AVFilterPad *dst = link->dstpad; int ret, perms; AVFilterCommand *cmd= link->dst->command_queue; int64_t pts; FF_TPRINTF_START(NULL, start_frame); ff_tlog_link(NULL, link, 0); ff_tlog(NULL, \" \"); ff_tlog_ref(NULL, picref, 1); av_assert1(picref->format == link->format); av_assert1(picref->video->w == link->w); av_assert1(picref->video->h == link->h); if (link->closed) { avfilter_unref_buffer(picref); return AVERROR_EOF; } if (!(start_frame = dst->start_frame)) start_frame = default_start_frame; av_assert1((picref->perms & src->min_perms) == src->min_perms); picref->perms &= ~ src->rej_perms; perms = picref->perms; if (picref->linesize[0] < 0) perms |= AV_PERM_NEG_LINESIZES; /* prepare to copy the picture if it has insufficient permissions */ if ((dst->min_perms & perms) != dst->min_perms || dst->rej_perms & perms) { av_log(link->dst, AV_LOG_DEBUG, \"frame copy needed (have perms %x, need %x, reject %x)\\n\", picref->perms, 
link->dstpad->min_perms, link->dstpad->rej_perms); link->cur_buf = ff_get_video_buffer(link, dst->min_perms, link->w, link->h); if (!link->cur_buf) { avfilter_unref_bufferp(&picref); return AVERROR(ENOMEM); } link->src_buf = picref; avfilter_copy_buffer_ref_props(link->cur_buf, link->src_buf); /* copy palette if required */ if (av_pix_fmt_descriptors[link->format].flags & PIX_FMT_PAL) memcpy(link->cur_buf->data[1], link->src_buf-> data[1], AVPALETTE_SIZE); } else link->cur_buf = picref; link->cur_buf_copy = link->cur_buf; while(cmd && cmd->time <= picref->pts * av_q2d(link->time_base)){ av_log(link->dst, AV_LOG_DEBUG, \"Processing command time:%f command:%s arg:%s\\n\", cmd->time, cmd->command, cmd->arg); avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags); ff_command_queue_pop(link->dst); cmd= link->dst->command_queue; } pts = link->cur_buf->pts; ret = start_frame(link, link->cur_buf); ff_update_link_current_pts(link, pts); if (ret < 0) clear_link(link); else /* incoming buffers must not be freed in start frame, because they can still be in use by the automatic copy mechanism */ av_assert1(link->cur_buf_copy->buf->refcount > 0); return ret; }"} {"target": 0, "idx": 17622, "func": "static int decode_wave_header(AVCodecContext *avctx, const uint8_t *header, int header_size) { int len; short wave_format; if (bytestream_get_le32(&header) != MKTAG('R','I','F','F')) { av_log(avctx, AV_LOG_ERROR, \"missing RIFF tag\\n\"); return -1; } header += 4; /* chunk size */; if (bytestream_get_le32(&header) != MKTAG('W','A','V','E')) { av_log(avctx, AV_LOG_ERROR, \"missing WAVE tag\\n\"); return -1; } while (bytestream_get_le32(&header) != MKTAG('f','m','t',' ')) { len = bytestream_get_le32(&header); header += len; } len = bytestream_get_le32(&header); if (len < 16) { av_log(avctx, AV_LOG_ERROR, \"fmt chunk was too short\\n\"); return -1; } wave_format = bytestream_get_le16(&header); switch (wave_format) { case WAVE_FORMAT_PCM: break; default: av_log(avctx, AV_LOG_ERROR, \"unsupported wave format\\n\"); return -1; } header += 2; // skip channels (already got from shorten header) avctx->sample_rate = bytestream_get_le32(&header); header += 4; // skip bit rate (represents original uncompressed bit rate) header += 2; // skip block align (not needed) avctx->bits_per_coded_sample = bytestream_get_le16(&header); if (avctx->bits_per_coded_sample != 16) { av_log(avctx, AV_LOG_ERROR, \"unsupported number of bits per sample\\n\"); return -1; } len -= 16; if (len > 0) av_log(avctx, AV_LOG_INFO, \"%d header bytes unparsed\\n\", len); return 0; }"} {"target": 0, "idx": 17623, "func": "static bool bdrv_drain_one(BlockDriverState *bs) { bool bs_busy; bdrv_flush_io_queue(bs); bdrv_start_throttled_reqs(bs); bs_busy = bdrv_requests_pending(bs); bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy); return bs_busy; }"} {"target": 0, "idx": 17636, "func": "static BlockAIOCB *raw_aio_readv(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque) { return raw_aio_submit(bs, sector_num, qiov, nb_sectors, cb, opaque, QEMU_AIO_READ); }"} {"target": 0, "idx": 17637, "func": "static int authenticate(BDRVSSHState *s, const char *user) { int r, ret; const char *userauthlist; LIBSSH2_AGENT *agent = NULL; struct libssh2_agent_publickey *identity; struct libssh2_agent_publickey *prev_identity = NULL; userauthlist = libssh2_userauth_list(s->session, user, strlen(user)); if (strstr(userauthlist, \"publickey\") == NULL) { ret = -EPERM; 
error_report(\"remote server does not support \\\"publickey\\\" authentication\"); goto out; } /* Connect to ssh-agent and try each identity in turn. */ agent = libssh2_agent_init(s->session); if (!agent) { ret = -EINVAL; session_error_report(s, \"failed to initialize ssh-agent support\"); goto out; } if (libssh2_agent_connect(agent)) { ret = -ECONNREFUSED; session_error_report(s, \"failed to connect to ssh-agent\"); goto out; } if (libssh2_agent_list_identities(agent)) { ret = -EINVAL; session_error_report(s, \"failed requesting identities from ssh-agent\"); goto out; } for(;;) { r = libssh2_agent_get_identity(agent, &identity, prev_identity); if (r == 1) { /* end of list */ break; } if (r < 0) { ret = -EINVAL; session_error_report(s, \"failed to obtain identity from ssh-agent\"); goto out; } r = libssh2_agent_userauth(agent, user, identity); if (r == 0) { /* Authenticated! */ ret = 0; goto out; } /* Failed to authenticate with this identity, try the next one. */ prev_identity = identity; } ret = -EPERM; error_report(\"failed to authenticate using publickey authentication \" \"and the identities held by your ssh-agent\"); out: if (agent != NULL) { /* Note: libssh2 implementation implicitly calls * libssh2_agent_disconnect if necessary. */ libssh2_agent_free(agent); } return ret; }"} {"target": 0, "idx": 17639, "func": "static bool bdrv_is_valid_name(const char *name) { return qemu_opts_id_wellformed(name); }"} {"target": 0, "idx": 17645, "func": "static void cirrus_vga_mem_write(void *opaque, target_phys_addr_t addr, uint64_t mem_value, uint32_t size) { CirrusVGAState *s = opaque; unsigned bank_index; unsigned bank_offset; unsigned mode; if ((s->vga.sr[0x07] & 0x01) == 0) { vga_mem_writeb(&s->vga, addr, mem_value); return; } if (addr < 0x10000) { if (s->cirrus_srcptr != s->cirrus_srcptr_end) { /* bitblt */ *s->cirrus_srcptr++ = (uint8_t) mem_value; if (s->cirrus_srcptr >= s->cirrus_srcptr_end) { cirrus_bitblt_cputovideo_next(s); } } else { /* video memory */ bank_index = addr >> 15; bank_offset = addr & 0x7fff; if (bank_offset < s->cirrus_bank_limit[bank_index]) { bank_offset += s->cirrus_bank_base[bank_index]; if ((s->vga.gr[0x0B] & 0x14) == 0x14) { bank_offset <<= 4; } else if (s->vga.gr[0x0B] & 0x02) { bank_offset <<= 3; } bank_offset &= s->cirrus_addr_mask; mode = s->vga.gr[0x05] & 0x7; if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) { *(s->vga.vram_ptr + bank_offset) = mem_value; memory_region_set_dirty(&s->vga.vram, bank_offset, sizeof(mem_value)); } else { if ((s->vga.gr[0x0B] & 0x14) != 0x14) { cirrus_mem_writeb_mode4and5_8bpp(s, mode, bank_offset, mem_value); } else { cirrus_mem_writeb_mode4and5_16bpp(s, mode, bank_offset, mem_value); } } } } } else if (addr >= 0x18000 && addr < 0x18100) { /* memory-mapped I/O */ if ((s->vga.sr[0x17] & 0x44) == 0x04) { cirrus_mmio_blt_write(s, addr & 0xff, mem_value); } } else { #ifdef DEBUG_CIRRUS printf(\"cirrus: mem_writeb \" TARGET_FMT_plx \" value %02x\\n\", addr, mem_value); #endif } }"} {"target": 0, "idx": 17647, "func": "static void vnc_tls_handshake_io(void *opaque) { struct VncState *vs = (struct VncState *)opaque; VNC_DEBUG(\"Handshake IO continue\\n\"); vnc_start_vencrypt_handshake(vs); }"} {"target": 0, "idx": 17654, "func": "int sclp_service_call(uint32_t sccb, uint64_t code) { int r = 0; SCCB work_sccb; hwaddr sccb_len = sizeof(SCCB); /* first some basic checks on program checks */ if (cpu_physical_memory_is_io(sccb)) { r = -PGM_ADDRESSING; goto out; } if (sccb & ~0x7ffffff8ul) { r = -PGM_SPECIFICATION; goto out; } /* 
* we want to work on a private copy of the sccb, to prevent guests * from playing dirty tricks by modifying the memory content after * the host has checked the values */ cpu_physical_memory_read(sccb, &work_sccb, sccb_len); /* Valid sccb sizes */ if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) || be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) { r = -PGM_SPECIFICATION; goto out; } sclp_execute((SCCB *)&work_sccb, code); cpu_physical_memory_write(sccb, &work_sccb, be16_to_cpu(work_sccb.h.length)); sclp_service_interrupt(sccb); out: return r; }"} {"target": 0, "idx": 17663, "func": "static void taihu_cpld_writeb (void *opaque, hwaddr addr, uint32_t value) { taihu_cpld_t *cpld; cpld = opaque; switch (addr) { case 0x0: /* Read only */ break; case 0x1: cpld->reg1 = value; break; default: break; } }"} {"target": 0, "idx": 17665, "func": "static int usb_bt_handle_control(USBDevice *dev, int request, int value, int index, int length, uint8_t *data) { struct USBBtState *s = (struct USBBtState *) dev->opaque; int ret; ret = usb_desc_handle_control(dev, request, value, index, length, data); if (ret >= 0) { return ret; } ret = 0; switch (request) { case DeviceRequest | USB_REQ_GET_STATUS: case InterfaceRequest | USB_REQ_GET_STATUS: case EndpointRequest | USB_REQ_GET_STATUS: data[0] = (1 << USB_DEVICE_SELF_POWERED) | (dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP); data[1] = 0x00; ret = 2; break; case DeviceOutRequest | USB_REQ_CLEAR_FEATURE: case InterfaceOutRequest | USB_REQ_CLEAR_FEATURE: case EndpointOutRequest | USB_REQ_CLEAR_FEATURE: if (value == USB_DEVICE_REMOTE_WAKEUP) { dev->remote_wakeup = 0; } else { goto fail; } ret = 0; break; case DeviceOutRequest | USB_REQ_SET_FEATURE: case InterfaceOutRequest | USB_REQ_SET_FEATURE: case EndpointOutRequest | USB_REQ_SET_FEATURE: if (value == USB_DEVICE_REMOTE_WAKEUP) { dev->remote_wakeup = 1; } else { goto fail; } ret = 0; break; case DeviceRequest | USB_REQ_GET_CONFIGURATION: data[0] = 1; ret = 1; s->config = 0; break; case DeviceOutRequest | USB_REQ_SET_CONFIGURATION: ret = 0; if (value != 1 && value != 0) { printf(\"%s: Wrong SET_CONFIGURATION request (%i)\\n\", __FUNCTION__, value); goto fail; } s->config = 1; usb_bt_fifo_reset(&s->evt); usb_bt_fifo_reset(&s->acl); usb_bt_fifo_reset(&s->sco); break; case InterfaceRequest | USB_REQ_GET_INTERFACE: if (value != 0 || (index & ~1) || length != 1) goto fail; if (index == 1) data[0] = s->altsetting; else data[0] = 0; ret = 1; break; case InterfaceOutRequest | USB_REQ_SET_INTERFACE: if ((index & ~1) || length != 0 || (index == 1 && (value < 0 || value > 4)) || (index == 0 && value != 0)) { printf(\"%s: Wrong SET_INTERFACE request (%i, %i)\\n\", __FUNCTION__, index, value); goto fail; } s->altsetting = value; ret = 0; break; case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_DEVICE) << 8): if (s->config) usb_bt_fifo_out_enqueue(s, &s->outcmd, s->hci->cmd_send, usb_bt_hci_cmd_complete, data, length); break; default: fail: ret = USB_RET_STALL; break; } return ret; }"} {"target": 0, "idx": 17677, "func": "static av_cold int svq1_encode_init(AVCodecContext *avctx) { SVQ1EncContext *const s = avctx->priv_data; int ret; ff_hpeldsp_init(&s->hdsp, avctx->flags); ff_me_cmp_init(&s->mecc, avctx); ff_mpegvideoencdsp_init(&s->m.mpvencdsp, avctx); avctx->coded_frame = av_frame_alloc(); s->current_picture = av_frame_alloc(); s->last_picture = av_frame_alloc(); if (!avctx->coded_frame || !s->current_picture || !s->last_picture) { svq1_encode_end(avctx); return AVERROR(ENOMEM); } s->frame_width = avctx->width; 
s->frame_height = avctx->height; s->y_block_width = (s->frame_width + 15) / 16; s->y_block_height = (s->frame_height + 15) / 16; s->c_block_width = (s->frame_width / 4 + 15) / 16; s->c_block_height = (s->frame_height / 4 + 15) / 16; s->avctx = avctx; s->m.avctx = avctx; if ((ret = ff_mpv_common_init(&s->m)) < 0) { svq1_encode_end(avctx); return ret; } s->m.picture_structure = PICT_FRAME; s->m.me.temp = s->m.me.scratchpad = av_mallocz((avctx->width + 64) * 2 * 16 * 2 * sizeof(uint8_t)); s->m.me.map = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t)); s->m.me.score_map = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t)); s->mb_type = av_mallocz((s->y_block_width + 1) * s->y_block_height * sizeof(int16_t)); s->dummy = av_mallocz((s->y_block_width + 1) * s->y_block_height * sizeof(int32_t)); s->ssd_int8_vs_int16 = ssd_int8_vs_int16_c; if (!s->m.me.temp || !s->m.me.scratchpad || !s->m.me.map || !s->m.me.score_map || !s->mb_type || !s->dummy) { svq1_encode_end(avctx); return AVERROR(ENOMEM); } if (ARCH_PPC) ff_svq1enc_init_ppc(s); if (ARCH_X86) ff_svq1enc_init_x86(s); ff_h263_encode_init(&s->m); // mv_penalty return 0; }"} {"target": 0, "idx": 17688, "func": "static void dp8393x_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { uint16_t old_val = dp8393x_readw(opaque, addr & ~0x1); switch (addr & 3) { case 0: val = val | (old_val & 0xff00); break; case 1: val = (val << 8) | (old_val & 0x00ff); break; } dp8393x_writew(opaque, addr & ~0x1, val); }"} {"target": 0, "idx": 17692, "func": "static ssize_t nbd_co_receive_request(NBDRequestData *req, NBDRequest *request) { NBDClient *client = req->client; ssize_t rc; g_assert(qemu_in_coroutine()); assert(client->recv_coroutine == qemu_coroutine_self()); rc = nbd_receive_request(client->ioc, request); if (rc < 0) { if (rc != -EAGAIN) { rc = -EIO; } goto out; } TRACE(\"Decoding type\"); if (request->type != NBD_CMD_WRITE) { /* No payload, we are ready to read the next request. */ req->complete = true; } if (request->type == NBD_CMD_DISC) { /* Special case: we're going to disconnect without a reply, * whether or not flags, from, or len are bogus */ TRACE(\"Request type is DISCONNECT\"); rc = -EIO; goto out; } /* Check for sanity in the parameters, part 1. Defer as many * checks as possible until after reading any NBD_CMD_WRITE * payload, so we can try and keep the connection alive. */ if ((request->from + request->len) < request->from) { LOG(\"integer overflow detected, you're probably being attacked\"); rc = -EINVAL; goto out; } if (request->type == NBD_CMD_READ || request->type == NBD_CMD_WRITE) { if (request->len > NBD_MAX_BUFFER_SIZE) { LOG(\"len (%\" PRIu32\" ) is larger than max len (%u)\", request->len, NBD_MAX_BUFFER_SIZE); rc = -EINVAL; goto out; } req->data = blk_try_blockalign(client->exp->blk, request->len); if (req->data == NULL) { rc = -ENOMEM; goto out; } } if (request->type == NBD_CMD_WRITE) { TRACE(\"Reading %\" PRIu32 \" byte(s)\", request->len); if (read_sync(client->ioc, req->data, request->len, NULL) < 0) { LOG(\"reading from socket failed\"); rc = -EIO; goto out; } req->complete = true; } /* Sanity checks, part 2. */ if (request->from + request->len > client->exp->size) { LOG(\"operation past EOF; From: %\" PRIu64 \", Len: %\" PRIu32 \", Size: %\" PRIu64, request->from, request->len, (uint64_t)client->exp->size); rc = request->type == NBD_CMD_WRITE ? 
-ENOSPC : -EINVAL; goto out; } if (request->flags & ~(NBD_CMD_FLAG_FUA | NBD_CMD_FLAG_NO_HOLE)) { LOG(\"unsupported flags (got 0x%x)\", request->flags); rc = -EINVAL; goto out; } if (request->type != NBD_CMD_WRITE_ZEROES && (request->flags & NBD_CMD_FLAG_NO_HOLE)) { LOG(\"unexpected flags (got 0x%x)\", request->flags); rc = -EINVAL; goto out; } rc = 0; out: client->recv_coroutine = NULL; nbd_client_receive_next_request(client); return rc; }"} {"target": 0, "idx": 17693, "func": "static int pfpu_decode_insn(MilkymistPFPUState *s) { uint32_t pc = s->regs[R_PC]; uint32_t insn = s->microcode[pc]; uint32_t reg_a = (insn >> 18) & 0x7f; uint32_t reg_b = (insn >> 11) & 0x7f; uint32_t op = (insn >> 7) & 0xf; uint32_t reg_d = insn & 0x7f; uint32_t r = 0; int latency = 0; switch (op) { case OP_NOP: break; case OP_FADD: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = a + b; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_FADD; D_EXEC(qemu_log(\"ADD a=%f b=%f t=%f, r=%08x\\n\", a, b, t, r)); } break; case OP_FSUB: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = a - b; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_FSUB; D_EXEC(qemu_log(\"SUB a=%f b=%f t=%f, r=%08x\\n\", a, b, t, r)); } break; case OP_FMUL: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = a * b; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_FMUL; D_EXEC(qemu_log(\"MUL a=%f b=%f t=%f, r=%08x\\n\", a, b, t, r)); } break; case OP_FABS: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float t = fabsf(a); r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_FABS; D_EXEC(qemu_log(\"ABS a=%f t=%f, r=%08x\\n\", a, t, r)); } break; case OP_F2I: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); int32_t t = a; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_F2I; D_EXEC(qemu_log(\"F2I a=%f t=%d, r=%08x\\n\", a, t, r)); } break; case OP_I2F: { int32_t a = REINTERPRET_CAST(int32_t, s->gp_regs[reg_a]); float t = a; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_I2F; D_EXEC(qemu_log(\"I2F a=%08x t=%f, r=%08x\\n\", a, t, r)); } break; case OP_VECTOUT: { uint32_t a = cpu_to_be32(s->gp_regs[reg_a]); uint32_t b = cpu_to_be32(s->gp_regs[reg_b]); target_phys_addr_t dma_ptr = get_dma_address(s->regs[R_MESHBASE], s->gp_regs[GPR_X], s->gp_regs[GPR_Y]); cpu_physical_memory_write(dma_ptr, (uint8_t *)&a, 4); cpu_physical_memory_write(dma_ptr + 4, (uint8_t *)&b, 4); s->regs[R_LASTDMA] = dma_ptr + 4; D_EXEC(qemu_log(\"VECTOUT a=%08x b=%08x dma=%08x\\n\", a, b, dma_ptr)); trace_milkymist_pfpu_vectout(a, b, dma_ptr); } break; case OP_SIN: { int32_t a = REINTERPRET_CAST(int32_t, s->gp_regs[reg_a]); float t = sinf(a * (1.0f / (M_PI * 4096.0f))); r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_SIN; D_EXEC(qemu_log(\"SIN a=%d t=%f, r=%08x\\n\", a, t, r)); } break; case OP_COS: { int32_t a = REINTERPRET_CAST(int32_t, s->gp_regs[reg_a]); float t = cosf(a * (1.0f / (M_PI * 4096.0f))); r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_COS; D_EXEC(qemu_log(\"COS a=%d t=%f, r=%08x\\n\", a, t, r)); } break; case OP_ABOVE: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = (a > b) ? 
1.0f : 0.0f; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_ABOVE; D_EXEC(qemu_log(\"ABOVE a=%f b=%f t=%f, r=%08x\\n\", a, b, t, r)); } break; case OP_EQUAL: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = (a == b) ? 1.0f : 0.0f; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_EQUAL; D_EXEC(qemu_log(\"EQUAL a=%f b=%f t=%f, r=%08x\\n\", a, b, t, r)); } break; case OP_COPY: { r = s->gp_regs[reg_a]; latency = LATENCY_COPY; D_EXEC(qemu_log(\"COPY\")); } break; case OP_IF: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); uint32_t f = s->gp_regs[GPR_FLAGS]; float t = (f != 0) ? a : b; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_IF; D_EXEC(qemu_log(\"IF f=%u a=%f b=%f t=%f, r=%08x\\n\", f, a, b, t, r)); } break; case OP_TSIGN: { float a = REINTERPRET_CAST(float, s->gp_regs[reg_a]); float b = REINTERPRET_CAST(float, s->gp_regs[reg_b]); float t = (b < 0) ? -a : a; r = REINTERPRET_CAST(uint32_t, t); latency = LATENCY_TSIGN; D_EXEC(qemu_log(\"TSIGN a=%f b=%f t=%f, r=%08x\\n\", a, b, t, r)); } break; case OP_QUAKE: { uint32_t a = s->gp_regs[reg_a]; r = 0x5f3759df - (a >> 1); latency = LATENCY_QUAKE; D_EXEC(qemu_log(\"QUAKE a=%d r=%08x\\n\", a, r)); } break; default: error_report(\"milkymist_pfpu: unknown opcode %d\", op); break; } if (!reg_d) { D_EXEC(qemu_log(\"%04d %8s R%03d, R%03d \\n\", s->regs[R_PC], opcode_to_str[op], reg_a, reg_b, latency, s->regs[R_PC] + latency)); } else { D_EXEC(qemu_log(\"%04d %8s R%03d, R%03d -> R%03d\\n\", s->regs[R_PC], opcode_to_str[op], reg_a, reg_b, latency, s->regs[R_PC] + latency, reg_d)); } if (op == OP_VECTOUT) { return 0; } /* store output for this cycle */ if (reg_d) { uint32_t val = output_queue_remove(s); D_EXEC(qemu_log(\"R%03d <- 0x%08x\\n\", reg_d, val)); s->gp_regs[reg_d] = val; } output_queue_advance(s); /* store op output */ if (op != OP_NOP) { output_queue_insert(s, r, latency-1); } /* advance PC */ s->regs[R_PC]++; return 1; };"} {"target": 0, "idx": 17698, "func": "static void eepro100_write(void *opaque, target_phys_addr_t addr, uint64_t data, unsigned size) { EEPRO100State *s = opaque; switch (size) { case 1: eepro100_write1(s, addr, data); break; case 2: eepro100_write2(s, addr, data); break; case 4: eepro100_write4(s, addr, data); break; default: abort(); } }"} {"target": 0, "idx": 17713, "func": "static uint8_t lsi_reg_readb(LSIState *s, int offset) { uint8_t tmp; #define CASE_GET_REG32(name, addr) \\ case addr: return s->name & 0xff; \\ case addr + 1: return (s->name >> 8) & 0xff; \\ case addr + 2: return (s->name >> 16) & 0xff; \\ case addr + 3: return (s->name >> 24) & 0xff; #ifdef DEBUG_LSI_REG DPRINTF(\"Read reg %x\\n\", offset); #endif switch (offset) { case 0x00: /* SCNTL0 */ return s->scntl0; case 0x01: /* SCNTL1 */ return s->scntl1; case 0x02: /* SCNTL2 */ return s->scntl2; case 0x03: /* SCNTL3 */ return s->scntl3; case 0x04: /* SCID */ return s->scid; case 0x05: /* SXFER */ return s->sxfer; case 0x06: /* SDID */ return s->sdid; case 0x07: /* GPREG0 */ return 0x7f; case 0x08: /* Revision ID */ return 0x00; case 0xa: /* SSID */ return s->ssid; case 0xb: /* SBCL */ /* ??? This is not correct. However it's (hopefully) only used for diagnostics, so should be ok. 
*/ return 0; case 0xc: /* DSTAT */ tmp = s->dstat | 0x80; if ((s->istat0 & LSI_ISTAT0_INTF) == 0) s->dstat = 0; lsi_update_irq(s); return tmp; case 0x0d: /* SSTAT0 */ return s->sstat0; case 0x0e: /* SSTAT1 */ return s->sstat1; case 0x0f: /* SSTAT2 */ return s->scntl1 & LSI_SCNTL1_CON ? 0 : 2; CASE_GET_REG32(dsa, 0x10) case 0x14: /* ISTAT0 */ return s->istat0; case 0x16: /* MBOX0 */ return s->mbox0; case 0x17: /* MBOX1 */ return s->mbox1; case 0x18: /* CTEST0 */ return 0xff; case 0x19: /* CTEST1 */ return 0; case 0x1a: /* CTEST2 */ tmp = LSI_CTEST2_DACK | LSI_CTEST2_CM; if (s->istat0 & LSI_ISTAT0_SIGP) { s->istat0 &= ~LSI_ISTAT0_SIGP; tmp |= LSI_CTEST2_SIGP; } return tmp; case 0x1b: /* CTEST3 */ return s->ctest3; CASE_GET_REG32(temp, 0x1c) case 0x20: /* DFIFO */ return 0; case 0x21: /* CTEST4 */ return s->ctest4; case 0x22: /* CTEST5 */ return s->ctest5; case 0x23: /* CTEST6 */ return 0; case 0x24: /* DBC[0:7] */ return s->dbc & 0xff; case 0x25: /* DBC[8:15] */ return (s->dbc >> 8) & 0xff; case 0x26: /* DBC[16->23] */ return (s->dbc >> 16) & 0xff; case 0x27: /* DCMD */ return s->dcmd; CASE_GET_REG32(dsp, 0x2c) CASE_GET_REG32(dsps, 0x30) CASE_GET_REG32(scratch[0], 0x34) case 0x38: /* DMODE */ return s->dmode; case 0x39: /* DIEN */ return s->dien; case 0x3b: /* DCNTL */ return s->dcntl; case 0x40: /* SIEN0 */ return s->sien0; case 0x41: /* SIEN1 */ return s->sien1; case 0x42: /* SIST0 */ tmp = s->sist0; s->sist0 = 0; lsi_update_irq(s); return tmp; case 0x43: /* SIST1 */ tmp = s->sist1; s->sist1 = 0; lsi_update_irq(s); return tmp; case 0x47: /* GPCNTL0 */ return 0x0f; case 0x48: /* STIME0 */ return s->stime0; case 0x4a: /* RESPID0 */ return s->respid0; case 0x4b: /* RESPID1 */ return s->respid1; case 0x4d: /* STEST1 */ return s->stest1; case 0x4e: /* STEST2 */ return s->stest2; case 0x4f: /* STEST3 */ return s->stest3; case 0x50: /* SIDL */ /* This is needed by the linux drivers. We currently only update it during the MSG IN phase. */ return s->sidl; case 0x52: /* STEST4 */ return 0xe0; case 0x56: /* CCNTL0 */ return s->ccntl0; case 0x57: /* CCNTL1 */ return s->ccntl1; case 0x58: /* SBDL */ /* Some drivers peek at the data bus during the MSG IN phase. 
*/ if ((s->sstat1 & PHASE_MASK) == PHASE_MI) return s->msg[0]; return 0; case 0x59: /* SBDL high */ return 0; CASE_GET_REG32(mmrs, 0xa0) CASE_GET_REG32(mmws, 0xa4) CASE_GET_REG32(sfs, 0xa8) CASE_GET_REG32(drs, 0xac) CASE_GET_REG32(sbms, 0xb0) CASE_GET_REG32(dmbs, 0xb4) CASE_GET_REG32(dnad64, 0xb8) CASE_GET_REG32(pmjad1, 0xc0) CASE_GET_REG32(pmjad2, 0xc4) CASE_GET_REG32(rbc, 0xc8) CASE_GET_REG32(ua, 0xcc) CASE_GET_REG32(ia, 0xd4) CASE_GET_REG32(sbc, 0xd8) CASE_GET_REG32(csbc, 0xdc) } if (offset >= 0x5c && offset < 0xa0) { int n; int shift; n = (offset - 0x58) >> 2; shift = (offset & 3) * 8; return (s->scratch[n] >> shift) & 0xff; } BADF(\"readb 0x%x\\n\", offset); exit(1); #undef CASE_GET_REG32 }"} {"target": 0, "idx": 17721, "func": "void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong ra, target_ulong rb, target_ulong offset) { #ifndef CONFIG_USER_ONLY int spr = (ra | offset); int idx; OpenRISCCPU *cpu = openrisc_env_get_cpu(env); CPUState *cs = CPU(cpu); switch (spr) { case TO_SPR(0, 0): /* VR */ env->vr = rb; break; case TO_SPR(0, 16): /* NPC */ env->npc = rb; break; case TO_SPR(0, 17): /* SR */ if ((env->sr & (SR_IME | SR_DME | SR_SM)) ^ (rb & (SR_IME | SR_DME | SR_SM))) { tlb_flush(env, 1); } env->sr = rb; env->sr |= SR_FO; /* FO is const equal to 1 */ if (env->sr & SR_DME) { env->tlb->cpu_openrisc_map_address_data = &cpu_openrisc_get_phys_data; } else { env->tlb->cpu_openrisc_map_address_data = &cpu_openrisc_get_phys_nommu; } if (env->sr & SR_IME) { env->tlb->cpu_openrisc_map_address_code = &cpu_openrisc_get_phys_code; } else { env->tlb->cpu_openrisc_map_address_code = &cpu_openrisc_get_phys_nommu; } break; case TO_SPR(0, 18): /* PPC */ env->ppc = rb; break; case TO_SPR(0, 32): /* EPCR */ env->epcr = rb; break; case TO_SPR(0, 48): /* EEAR */ env->eear = rb; break; case TO_SPR(0, 64): /* ESR */ env->esr = rb; break; case TO_SPR(1, 512) ... TO_SPR(1, 639): /* DTLBW0MR 0-127 */ idx = spr - TO_SPR(1, 512); if (!(rb & 1)) { tlb_flush_page(env, env->tlb->dtlb[0][idx].mr & TARGET_PAGE_MASK); } env->tlb->dtlb[0][idx].mr = rb; break; case TO_SPR(1, 640) ... TO_SPR(1, 767): /* DTLBW0TR 0-127 */ idx = spr - TO_SPR(1, 640); env->tlb->dtlb[0][idx].tr = rb; break; case TO_SPR(1, 768) ... TO_SPR(1, 895): /* DTLBW1MR 0-127 */ case TO_SPR(1, 896) ... TO_SPR(1, 1023): /* DTLBW1TR 0-127 */ case TO_SPR(1, 1024) ... TO_SPR(1, 1151): /* DTLBW2MR 0-127 */ case TO_SPR(1, 1152) ... TO_SPR(1, 1279): /* DTLBW2TR 0-127 */ case TO_SPR(1, 1280) ... TO_SPR(1, 1407): /* DTLBW3MR 0-127 */ case TO_SPR(1, 1408) ... TO_SPR(1, 1535): /* DTLBW3TR 0-127 */ break; case TO_SPR(2, 512) ... TO_SPR(2, 639): /* ITLBW0MR 0-127 */ idx = spr - TO_SPR(2, 512); if (!(rb & 1)) { tlb_flush_page(env, env->tlb->itlb[0][idx].mr & TARGET_PAGE_MASK); } env->tlb->itlb[0][idx].mr = rb; break; case TO_SPR(2, 640) ... TO_SPR(2, 767): /* ITLBW0TR 0-127 */ idx = spr - TO_SPR(2, 640); env->tlb->itlb[0][idx].tr = rb; break; case TO_SPR(2, 768) ... TO_SPR(2, 895): /* ITLBW1MR 0-127 */ case TO_SPR(2, 896) ... TO_SPR(2, 1023): /* ITLBW1TR 0-127 */ case TO_SPR(2, 1024) ... TO_SPR(2, 1151): /* ITLBW2MR 0-127 */ case TO_SPR(2, 1152) ... TO_SPR(2, 1279): /* ITLBW2TR 0-127 */ case TO_SPR(2, 1280) ... TO_SPR(2, 1407): /* ITLBW3MR 0-127 */ case TO_SPR(2, 1408) ... 
TO_SPR(2, 1535): /* ITLBW3TR 0-127 */ break; case TO_SPR(9, 0): /* PICMR */ env->picmr |= rb; break; case TO_SPR(9, 2): /* PICSR */ env->picsr &= ~rb; break; case TO_SPR(10, 0): /* TTMR */ { if ((env->ttmr & TTMR_M) ^ (rb & TTMR_M)) { switch (rb & TTMR_M) { case TIMER_NONE: cpu_openrisc_count_stop(cpu); break; case TIMER_INTR: case TIMER_SHOT: case TIMER_CONT: cpu_openrisc_count_start(cpu); break; default: break; } } int ip = env->ttmr & TTMR_IP; if (rb & TTMR_IP) { /* Keep IP bit. */ env->ttmr = (rb & ~TTMR_IP) | ip; } else { /* Clear IP bit. */ env->ttmr = rb & ~TTMR_IP; cs->interrupt_request &= ~CPU_INTERRUPT_TIMER; } cpu_openrisc_timer_update(cpu); } break; case TO_SPR(10, 1): /* TTCR */ env->ttcr = rb; if (env->ttmr & TIMER_NONE) { return; } cpu_openrisc_timer_update(cpu); break; default: break; } #endif }"} {"target": 0, "idx": 17734, "func": "static int asf_probe(AVProbeData *pd) { /* check file header */ if (pd->buf_size <= 32) return 0; if (!memcmp(pd->buf, &asf_header, sizeof(GUID))) return AVPROBE_SCORE_MAX; else return 0; }"} {"target": 1, "idx": 17739, "func": "static inline TCGv gen_ld8s(TCGv addr, int index) { TCGv tmp = new_tmp(); tcg_gen_qemu_ld8s(tmp, addr, index); return tmp; }"} {"target": 1, "idx": 17749, "func": "static int write_adaptation_set(AVFormatContext *s, int as_index) { WebMDashMuxContext *w = s->priv_data; AdaptationSet *as = &w->as[as_index]; AVCodecContext *codec = s->streams[as->streams[0]]->codec; AVDictionaryEntry *lang; int i; static const char boolean[2][6] = { \"false\", \"true\" }; int subsegmentStartsWithSAP = 1; // Width, Height and Sample Rate will go in the AdaptationSet tag if they // are the same for all contained Representations. otherwise, they will go // on their respective Representation tag. For live streams, they always go // in the Representation tag. int width_in_as = 1, height_in_as = 1, sample_rate_in_as = 1; if (codec->codec_type == AVMEDIA_TYPE_VIDEO) { width_in_as = !w->is_live && check_matching_width(s, as); height_in_as = !w->is_live && check_matching_height(s, as); } else { sample_rate_in_as = !w->is_live && check_matching_sample_rate(s, as); } avio_printf(s->pb, \"id); avio_printf(s->pb, \" mimeType=\\\"%s/webm\\\"\", codec->codec_type == AVMEDIA_TYPE_VIDEO ? 
\"video\" : \"audio\"); avio_printf(s->pb, \" codecs=\\\"%s\\\"\", get_codec_name(codec->codec_id)); lang = av_dict_get(s->streams[as->streams[0]]->metadata, \"language\", NULL, 0); if (lang) avio_printf(s->pb, \" lang=\\\"%s\\\"\", lang->value); if (codec->codec_type == AVMEDIA_TYPE_VIDEO && width_in_as) avio_printf(s->pb, \" width=\\\"%d\\\"\", codec->width); if (codec->codec_type == AVMEDIA_TYPE_VIDEO && height_in_as) avio_printf(s->pb, \" height=\\\"%d\\\"\", codec->height); if (codec->codec_type == AVMEDIA_TYPE_AUDIO && sample_rate_in_as) avio_printf(s->pb, \" audioSamplingRate=\\\"%d\\\"\", codec->sample_rate); avio_printf(s->pb, \" bitstreamSwitching=\\\"%s\\\"\", boolean[bitstream_switching(s, as)]); avio_printf(s->pb, \" subsegmentAlignment=\\\"%s\\\"\", boolean[w->is_live || subsegment_alignment(s, as)]); for (i = 0; i < as->nb_streams; i++) { AVDictionaryEntry *kf = av_dict_get(s->streams[as->streams[i]]->metadata, CLUSTER_KEYFRAME, NULL, 0); if (!w->is_live && (!kf || !strncmp(kf->value, \"0\", 1))) subsegmentStartsWithSAP = 0; } avio_printf(s->pb, \" subsegmentStartsWithSAP=\\\"%d\\\"\", subsegmentStartsWithSAP); avio_printf(s->pb, \">\\n\"); if (w->is_live) { AVDictionaryEntry *filename = av_dict_get(s->streams[as->streams[0]]->metadata, FILENAME, NULL, 0); char *initialization_pattern = NULL; char *media_pattern = NULL; int ret = parse_filename(filename->value, NULL, &initialization_pattern, &media_pattern); if (ret) return ret; avio_printf(s->pb, \"\\n\", codec->codec_type == AVMEDIA_TYPE_VIDEO ? \"video\" : \"audio\"); avio_printf(s->pb, \"pb, \" timescale=\\\"1000\\\"\"); avio_printf(s->pb, \" duration=\\\"%d\\\"\", w->chunk_duration); avio_printf(s->pb, \" media=\\\"%s\\\"\", media_pattern); avio_printf(s->pb, \" startNumber=\\\"%d\\\"\", w->chunk_start_index); avio_printf(s->pb, \" initialization=\\\"%s\\\"\", initialization_pattern); avio_printf(s->pb, \"/>\\n\"); av_free(initialization_pattern); av_free(media_pattern); } for (i = 0; i < as->nb_streams; i++) { char *representation_id = NULL; int ret; if (w->is_live) { AVDictionaryEntry *filename = av_dict_get(s->streams[as->streams[i]]->metadata, FILENAME, NULL, 0); if (!filename || (ret = parse_filename(filename->value, &representation_id, NULL, NULL))) { return ret; } } else { representation_id = av_asprintf(\"%d\", w->representation_id++); if (!representation_id) return AVERROR(ENOMEM); } ret = write_representation(s, s->streams[as->streams[i]], representation_id, !width_in_as, !height_in_as, !sample_rate_in_as); if (ret) return ret; av_free(representation_id); } avio_printf(s->pb, \"\\n\"); return 0; }"} {"target": 1, "idx": 17751, "func": "static void usbredir_handle_bulk_data(USBRedirDevice *dev, USBPacket *p, uint8_t ep) { struct usb_redir_bulk_packet_header bulk_packet; size_t size = (p->combined) ? 
p->combined->iov.size : p->iov.size; DPRINTF(\"bulk-out ep %02X len %zd id %\"PRIu64\"\\n\", ep, size, p->id); if (usbredir_already_in_flight(dev, p->id)) { p->status = USB_RET_ASYNC; return; } bulk_packet.endpoint = ep; bulk_packet.length = size; bulk_packet.stream_id = 0; bulk_packet.length_high = size >> 16; assert(bulk_packet.length_high == 0 || usbredirparser_peer_has_cap(dev->parser, usb_redir_cap_32bits_bulk_length)); if (ep & USB_DIR_IN) { usbredirparser_send_bulk_packet(dev->parser, p->id, &bulk_packet, NULL, 0); } else { uint8_t buf[size]; if (p->combined) { iov_to_buf(p->combined->iov.iov, p->combined->iov.niov, 0, buf, size); } else { usb_packet_copy(p, buf, size); } usbredir_log_data(dev, \"bulk data out:\", buf, size); usbredirparser_send_bulk_packet(dev->parser, p->id, &bulk_packet, buf, size); } usbredirparser_do_write(dev->parser); p->status = USB_RET_ASYNC; }"} {"target": 1, "idx": 17767, "func": "static ExitStatus trans_fop_dedd(DisasContext *ctx, uint32_t insn, const DisasInsn *di) { unsigned rt = extract32(insn, 0, 5); unsigned rb = extract32(insn, 16, 5); unsigned ra = extract32(insn, 21, 5); return do_fop_dedd(ctx, rt, ra, rb, di->f_dedd); }"} {"target": 1, "idx": 17768, "func": "static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id) { AVCodecContext *c; AVStream *st; /* find the encoder */ *codec = avcodec_find_encoder(codec_id); if (!(*codec)) { fprintf(stderr, \"Could not find encoder for '%s'\\n\", avcodec_get_name(codec_id)); exit(1); } st = avformat_new_stream(oc, *codec); if (!st) { fprintf(stderr, \"Could not allocate stream\\n\"); exit(1); } st->id = oc->nb_streams-1; c = st->codec; switch ((*codec)->type) { case AVMEDIA_TYPE_AUDIO: st->id = 1; c->sample_fmt = AV_SAMPLE_FMT_S16; c->bit_rate = 64000; c->sample_rate = 44100; c->channels = 2; break; case AVMEDIA_TYPE_VIDEO: avcodec_get_context_defaults3(c, *codec); c->codec_id = codec_id; c->bit_rate = 400000; /* Resolution must be a multiple of two. */ c->width = 352; c->height = 288; /* timebase: This is the fundamental unit of time (in seconds) in terms * of which frame timestamps are represented. For fixed-fps content, * timebase should be 1/framerate and timestamp increments should be * identical to 1. */ c->time_base.den = STREAM_FRAME_RATE; c->time_base.num = 1; c->gop_size = 12; /* emit one intra frame every twelve frames at most */ c->pix_fmt = STREAM_PIX_FMT; if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) { /* just for testing, we also add B frames */ c->max_b_frames = 2; } if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) { /* Needed to avoid using macroblocks in which some coeffs overflow. * This does not happen with normal video, it just happens here as * the motion of the chroma plane does not match the luma plane. */ c->mb_decision = 2; } break; default: break; } /* Some formats want stream headers to be separate. 
*/ if (oc->oformat->flags & AVFMT_GLOBALHEADER) c->flags |= CODEC_FLAG_GLOBAL_HEADER; return st; }"} {"target": 1, "idx": 17779, "func": "static int kvm_get_sregs(CPUState *env) { struct kvm_sregs sregs; uint32_t hflags; int ret; ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs); if (ret < 0) return ret; memcpy(env->interrupt_bitmap, sregs.interrupt_bitmap, sizeof(sregs.interrupt_bitmap)); get_seg(&env->segs[R_CS], &sregs.cs); get_seg(&env->segs[R_DS], &sregs.ds); get_seg(&env->segs[R_ES], &sregs.es); get_seg(&env->segs[R_FS], &sregs.fs); get_seg(&env->segs[R_GS], &sregs.gs); get_seg(&env->segs[R_SS], &sregs.ss); get_seg(&env->tr, &sregs.tr); get_seg(&env->ldt, &sregs.ldt); env->idt.limit = sregs.idt.limit; env->idt.base = sregs.idt.base; env->gdt.limit = sregs.gdt.limit; env->gdt.base = sregs.gdt.base; env->cr[0] = sregs.cr0; env->cr[2] = sregs.cr2; env->cr[3] = sregs.cr3; env->cr[4] = sregs.cr4; cpu_set_apic_base(env, sregs.apic_base); env->efer = sregs.efer; //cpu_set_apic_tpr(env, sregs.cr8); #define HFLAG_COPY_MASK ~( \\ HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \\ HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \\ HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \\ HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); hflags |= (env->cr[4] & CR4_OSFXSR_MASK) << (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT); if (env->efer & MSR_EFER_LMA) { hflags |= HF_LMA_MASK; } if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; } else { hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> (DESC_B_SHIFT - HF_CS32_SHIFT); hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> (DESC_B_SHIFT - HF_SS32_SHIFT); if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || !(hflags & HF_CS32_MASK)) { hflags |= HF_ADDSEG_MASK; } else { hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; } } env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags; env->cc_src = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); env->df = 1 - (2 * ((env->eflags >> 10) & 1)); env->cc_op = CC_OP_EFLAGS; env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); return 0; }"} {"target": 1, "idx": 17783, "func": "int qemu_opt_set_bool(QemuOpts *opts, const char *name, bool val) { QemuOpt *opt; const QemuOptDesc *desc = opts->list->desc; int i; for (i = 0; desc[i].name != NULL; i++) { if (strcmp(desc[i].name, name) == 0) { break; } } if (desc[i].name == NULL) { if (i == 0) { /* empty list -> allow any */; } else { qerror_report(QERR_INVALID_PARAMETER, name); return -1; } } opt = g_malloc0(sizeof(*opt)); opt->name = g_strdup(name); opt->opts = opts; QTAILQ_INSERT_TAIL(&opts->head, opt, next); if (desc[i].name != NULL) { opt->desc = desc+i; } opt->value.boolean = !!val; return 0; }"} {"target": 0, "idx": 17795, "func": "static int vc1_decode_init(AVCodecContext *avctx) { VC1Context *v = avctx->priv_data; MpegEncContext *s = &v->s; GetBitContext gb; if (!avctx->extradata_size || !avctx->extradata) return -1; if (!(avctx->flags & CODEC_FLAG_GRAY)) avctx->pix_fmt = PIX_FMT_YUV420P; else avctx->pix_fmt = PIX_FMT_GRAY8; v->s.avctx = avctx; avctx->flags |= CODEC_FLAG_EMU_EDGE; v->s.flags |= CODEC_FLAG_EMU_EDGE; 
if(avctx->idct_algo==FF_IDCT_AUTO){ avctx->idct_algo=FF_IDCT_WMV2; } if(ff_h263_decode_init(avctx) < 0) return -1; if (vc1_init_common(v) < 0) return -1; avctx->coded_width = avctx->width; avctx->coded_height = avctx->height; if (avctx->codec_id == CODEC_ID_WMV3) { int count = 0; // looks like WMV3 has a sequence header stored in the extradata // advanced sequence header may be before the first frame // the last byte of the extradata is a version number, 1 for the // samples we can decode init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8); if (decode_sequence_header(avctx, &gb) < 0) return -1; count = avctx->extradata_size*8 - get_bits_count(&gb); if (count>0) { av_log(avctx, AV_LOG_INFO, \"Extra data: %i bits left, value: %X\\n\", count, get_bits(&gb, count)); } else if (count < 0) { av_log(avctx, AV_LOG_INFO, \"Read %i bits in overflow\\n\", -count); } } else { // VC1/WVC1 const uint8_t *start = avctx->extradata; uint8_t *end = avctx->extradata + avctx->extradata_size; const uint8_t *next; int size, buf2_size; uint8_t *buf2 = NULL; int seq_inited = 0, ep_inited = 0; if(avctx->extradata_size < 16) { av_log(avctx, AV_LOG_ERROR, \"Extradata size too small: %i\\n\", avctx->extradata_size); return -1; } buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); if(start[0]) start++; // in WVC1 extradata first byte is its size next = start; for(; next < end; start = next){ next = find_next_marker(start + 4, end); size = next - start - 4; if(size <= 0) continue; buf2_size = vc1_unescape_buffer(start + 4, size, buf2); init_get_bits(&gb, buf2, buf2_size * 8); switch(AV_RB32(start)){ case VC1_CODE_SEQHDR: if(decode_sequence_header(avctx, &gb) < 0){ av_free(buf2); return -1; } seq_inited = 1; break; case VC1_CODE_ENTRYPOINT: if(decode_entry_point(avctx, &gb) < 0){ av_free(buf2); return -1; } ep_inited = 1; break; } } av_free(buf2); if(!seq_inited || !ep_inited){ av_log(avctx, AV_LOG_ERROR, \"Incomplete extradata\\n\"); return -1; } } avctx->has_b_frames= !!(avctx->max_b_frames); s->low_delay = !avctx->has_b_frames; s->mb_width = (avctx->coded_width+15)>>4; s->mb_height = (avctx->coded_height+15)>>4; /* Allocate mb bitplanes */ v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height); v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height); v->acpred_plane = av_malloc(s->mb_stride * s->mb_height); v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height); /* allocate block type info in that way so it could be used with s->block_index[] */ v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2); v->mb_type[0] = v->mb_type_base + s->b8_stride + 1; v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1; v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1); /* Init coded blocks info */ if (v->profile == PROFILE_ADVANCED) { // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0) // return -1; // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0) // return -1; } ff_intrax8_common_init(&v->x8,s); return 0; }"} {"target": 1, "idx": 17818, "func": "static CharDriverState *qemu_chr_open_pp_fd(int fd) { CharDriverState *chr; ParallelCharDriver *drv; if (ioctl(fd, PPCLAIM) < 0) { close(fd); return NULL; } drv = g_malloc0(sizeof(ParallelCharDriver)); drv->fd = fd; drv->mode = IEEE1284_MODE_COMPAT; chr = qemu_chr_alloc(); chr->chr_write = null_chr_write; chr->chr_ioctl = pp_ioctl; chr->chr_close = pp_close; chr->opaque = drv; return chr; }"} 
{"target": 1, "idx": 17825, "func": "av_cold void ff_vp9dsp_init(VP9DSPContext *dsp) { vp9dsp_intrapred_init(dsp); vp9dsp_itxfm_init(dsp); vp9dsp_loopfilter_init(dsp); vp9dsp_mc_init(dsp); if (ARCH_X86) ff_vp9dsp_init_x86(dsp); }"} {"target": 0, "idx": 17835, "func": "static int flv_write_header(AVFormatContext *s) { AVIOContext *pb = s->pb; FLVContext *flv = s->priv_data; AVCodecContext *audio_enc = NULL, *video_enc = NULL; int i, metadata_count = 0; double framerate = 0.0; int64_t metadata_size_pos, data_size, metadata_count_pos; AVDictionaryEntry *tag = NULL; for(i=0; inb_streams; i++){ AVCodecContext *enc = s->streams[i]->codec; if (enc->codec_type == AVMEDIA_TYPE_VIDEO) { if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num) { framerate = av_q2d(s->streams[i]->r_frame_rate); } else { framerate = 1/av_q2d(s->streams[i]->codec->time_base); } video_enc = enc; if(enc->codec_tag == 0) { av_log(enc, AV_LOG_ERROR, \"video codec not compatible with flv\\n\"); return -1; } } else { audio_enc = enc; if(get_audio_flags(enc)<0) return -1; } av_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */ } avio_write(pb, \"FLV\", 3); avio_w8(pb,1); avio_w8(pb, FLV_HEADER_FLAG_HASAUDIO * !!audio_enc + FLV_HEADER_FLAG_HASVIDEO * !!video_enc); avio_wb32(pb,9); avio_wb32(pb,0); for(i=0; inb_streams; i++){ if(s->streams[i]->codec->codec_tag == 5){ avio_w8(pb,8); // message type avio_wb24(pb,0); // include flags avio_wb24(pb,0); // time stamp avio_wb32(pb,0); // reserved avio_wb32(pb,11); // size flv->reserved=5; } } flv->last_video_ts = -1; /* write meta_tag */ avio_w8(pb, 18); // tag type META metadata_size_pos= avio_tell(pb); avio_wb24(pb, 0); // size of data part (sum of all parts below) avio_wb24(pb, 0); // time stamp avio_wb32(pb, 0); // reserved /* now data of data_size size */ /* first event name as a string */ avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, \"onMetaData\"); // 12 bytes /* mixed array (hash) with size and string/type/data tuples */ avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY); metadata_count_pos = avio_tell(pb); metadata_count = 5*!!video_enc + 5*!!audio_enc + 2; // +2 for duration and file size avio_wb32(pb, metadata_count); put_amf_string(pb, \"duration\"); flv->duration_offset= avio_tell(pb); put_amf_double(pb, s->duration / AV_TIME_BASE); // fill in the guessed duration, it'll be corrected later if incorrect if(video_enc){ put_amf_string(pb, \"width\"); put_amf_double(pb, video_enc->width); put_amf_string(pb, \"height\"); put_amf_double(pb, video_enc->height); put_amf_string(pb, \"videodatarate\"); put_amf_double(pb, video_enc->bit_rate / 1024.0); put_amf_string(pb, \"framerate\"); put_amf_double(pb, framerate); put_amf_string(pb, \"videocodecid\"); put_amf_double(pb, video_enc->codec_tag); } if(audio_enc){ put_amf_string(pb, \"audiodatarate\"); put_amf_double(pb, audio_enc->bit_rate / 1024.0); put_amf_string(pb, \"audiosamplerate\"); put_amf_double(pb, audio_enc->sample_rate); put_amf_string(pb, \"audiosamplesize\"); put_amf_double(pb, audio_enc->codec_id == CODEC_ID_PCM_U8 ? 
8 : 16); put_amf_string(pb, \"stereo\"); put_amf_bool(pb, audio_enc->channels == 2); put_amf_string(pb, \"audiocodecid\"); put_amf_double(pb, audio_enc->codec_tag); } while ((tag = av_dict_get(s->metadata, \"\", tag, AV_DICT_IGNORE_SUFFIX))) { put_amf_string(pb, tag->key); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, tag->value); metadata_count++; } put_amf_string(pb, \"filesize\"); flv->filesize_offset= avio_tell(pb); put_amf_double(pb, 0); // delayed write put_amf_string(pb, \"\"); avio_w8(pb, AMF_END_OF_OBJECT); /* write total size of tag */ data_size= avio_tell(pb) - metadata_size_pos - 10; avio_seek(pb, metadata_count_pos, SEEK_SET); avio_wb32(pb, metadata_count); avio_seek(pb, metadata_size_pos, SEEK_SET); avio_wb24(pb, data_size); avio_skip(pb, data_size + 10 - 3); avio_wb32(pb, data_size + 11); for (i = 0; i < s->nb_streams; i++) { AVCodecContext *enc = s->streams[i]->codec; if (enc->codec_id == CODEC_ID_AAC || enc->codec_id == CODEC_ID_H264) { int64_t pos; avio_w8(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ? FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO); avio_wb24(pb, 0); // size patched later avio_wb24(pb, 0); // ts avio_w8(pb, 0); // ts ext avio_wb24(pb, 0); // streamid pos = avio_tell(pb); if (enc->codec_id == CODEC_ID_AAC) { avio_w8(pb, get_audio_flags(enc)); avio_w8(pb, 0); // AAC sequence header avio_write(pb, enc->extradata, enc->extradata_size); } else { avio_w8(pb, enc->codec_tag | FLV_FRAME_KEY); // flags avio_w8(pb, 0); // AVC sequence header avio_wb24(pb, 0); // composition time ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size); } data_size = avio_tell(pb) - pos; avio_seek(pb, -data_size - 10, SEEK_CUR); avio_wb24(pb, data_size); avio_skip(pb, data_size + 10 - 3); avio_wb32(pb, data_size + 11); // previous tag size } } return 0; }"} {"target": 1, "idx": 17842, "func": "void qdev_prop_set_globals(DeviceState *dev) { ObjectClass *class = object_get_class(OBJECT(dev)); do { qdev_prop_set_globals_for_type(dev, object_class_get_name(class)); class = object_class_get_parent(class); } while (class); }"} {"target": 1, "idx": 17858, "func": "BlockDriverState *bdrv_new(const char *device_name, Error **errp) { BlockDriverState *bs; int i; if (bdrv_find(device_name)) { error_setg(errp, \"Device with id '%s' already exists\", device_name); return NULL; } if (bdrv_find_node(device_name)) { error_setg(errp, \"Device with node-name '%s' already exists\", device_name); return NULL; } bs = g_malloc0(sizeof(BlockDriverState)); QLIST_INIT(&bs->dirty_bitmaps); pstrcpy(bs->device_name, sizeof(bs->device_name), device_name); if (device_name[0] != '\\0') { QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list); } for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) { QLIST_INIT(&bs->op_blockers[i]); } bdrv_iostatus_disable(bs); notifier_list_init(&bs->close_notifiers); notifier_with_return_list_init(&bs->before_write_notifiers); qemu_co_queue_init(&bs->throttled_reqs[0]); qemu_co_queue_init(&bs->throttled_reqs[1]); bs->refcnt = 1; bs->aio_context = qemu_get_aio_context(); return bs; }"} {"target": 1, "idx": 17866, "func": "static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1, int w, int prefs, int mrefs, int parity, int mode) { uint8_t *dst = dst1; uint8_t *prev = prev1; uint8_t *cur = cur1; uint8_t *next = next1; int x; uint8_t *prev2 = parity ? prev : cur ; uint8_t *next2 = parity ? cur : next; /* Only edge pixels need to be processed here. A constant value of false * for is_not_edge should let the compiler ignore the whole branch. 
*/ FILTER(0, 3, 0) dst = (uint8_t*)dst1 + w - 3; prev = (uint8_t*)prev1 + w - 3; cur = (uint8_t*)cur1 + w - 3; next = (uint8_t*)next1 + w - 3; prev2 = (uint8_t*)(parity ? prev : cur); next2 = (uint8_t*)(parity ? cur : next); FILTER(w - 3, w, 0) }"} {"target": 1, "idx": 17871, "func": "static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot) { struct kvm_userspace_memory_region mem; mem.slot = slot->slot; mem.guest_phys_addr = slot->start_addr; mem.userspace_addr = (unsigned long)slot->ram; mem.flags = slot->flags; if (s->migration_log) { mem.flags |= KVM_MEM_LOG_DIRTY_PAGES; } if (mem.flags & KVM_MEM_READONLY) { /* Set the slot size to 0 before setting the slot to the desired * value. This is needed based on KVM commit 75d61fbc. */ mem.memory_size = 0; kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); } mem.memory_size = slot->memory_size; return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem); }"} {"target": 1, "idx": 17878, "func": "static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { V210DecContext *s = avctx->priv_data; int h, w, stride, aligned_input; AVFrame *pic = avctx->coded_frame; const uint8_t *psrc = avpkt->data; uint16_t *y, *u, *v; if (s->custom_stride ) stride = s->custom_stride; else { int aligned_width = ((avctx->width + 47) / 48) * 48; stride = aligned_width * 8 / 3; } aligned_input = !((uintptr_t)psrc & 0xf) && !(stride & 0xf); if (aligned_input != s->aligned_input) { s->aligned_input = aligned_input; if (HAVE_MMX) v210_x86_init(s); } if (pic->data[0]) avctx->release_buffer(avctx, pic); if (avpkt->size < stride * avctx->height) { av_log(avctx, AV_LOG_ERROR, \"packet too small\\n\"); return -1; } pic->reference = 0; if (avctx->get_buffer(avctx, pic) < 0) return -1; y = (uint16_t*)pic->data[0]; u = (uint16_t*)pic->data[1]; v = (uint16_t*)pic->data[2]; pic->pict_type = AV_PICTURE_TYPE_I; pic->key_frame = 1; for (h = 0; h < avctx->height; h++) { const uint32_t *src = (const uint32_t*)psrc; uint32_t val; w = (avctx->width / 6) * 6; s->unpack_frame(src, y, u, v, w); y += w; u += w >> 1; v += w >> 1; src += (w << 1) / 3; if (w < avctx->width - 1) { READ_PIXELS(u, y, v); val = av_le2ne32(*src++); *y++ = val & 0x3FF; } if (w < avctx->width - 3) { *u++ = (val >> 10) & 0x3FF; *y++ = (val >> 20) & 0x3FF; val = av_le2ne32(*src++); *v++ = val & 0x3FF; *y++ = (val >> 10) & 0x3FF; } psrc += stride; y += pic->linesize[0] / 2 - avctx->width; u += pic->linesize[1] / 2 - avctx->width / 2; v += pic->linesize[2] / 2 - avctx->width / 2; } *data_size = sizeof(AVFrame); *(AVFrame*)data = *avctx->coded_frame; return avpkt->size; }"} {"target": 1, "idx": 17889, "func": "void cpu_exec_init(CPUState *cpu, Error **errp) { CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu); cpu_list_add(cpu); #ifndef CONFIG_USER_ONLY if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu); } if (cc->vmsd != NULL) { vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu); } #endif }"} {"target": 1, "idx": 17892, "func": "static int read_shape_from_file(int *cols, int *rows, int **values, const char *filename, void *log_ctx) { uint8_t *buf, *p, *pend; size_t size; int ret, i, j, w; if ((ret = av_file_map(filename, &buf, &size, 0, log_ctx)) < 0) return ret; /* prescan file to get the number of lines and the maximum width */ w = 0; for (i = 0; i < size; i++) { if (buf[i] == '\\n') { if (*rows == INT_MAX) { av_log(log_ctx, AV_LOG_ERROR, \"Overflow on the number of rows in the file\\n\"); return AVERROR_INVALIDDATA; } 
++(*rows); *cols = FFMAX(*cols, w); w = 0; } else if (w == INT_MAX) { av_log(log_ctx, AV_LOG_ERROR, \"Overflow on the number of columns in the file\\n\"); return AVERROR_INVALIDDATA; } w++; } if (*rows > (SIZE_MAX / sizeof(int) / *cols)) { av_log(log_ctx, AV_LOG_ERROR, \"File with size %dx%d is too big\\n\", *rows, *cols); return AVERROR_INVALIDDATA; } if (!(*values = av_mallocz_array(sizeof(int) * *rows, *cols))) return AVERROR(ENOMEM); /* fill *values */ p = buf; pend = buf + size-1; for (i = 0; i < *rows; i++) { for (j = 0;; j++) { if (p > pend || *p == '\\n') { p++; break; } else (*values)[*cols*i + j] = !!av_isgraph(*(p++)); } } av_file_unmap(buf, size); #ifdef DEBUG { char *line; if (!(line = av_malloc(*cols + 1))) return AVERROR(ENOMEM); for (i = 0; i < *rows; i++) { for (j = 0; j < *cols; j++) line[j] = (*values)[i * *cols + j] ? '@' : ' '; line[j] = 0; av_log(log_ctx, AV_LOG_DEBUG, \"%3d: %s\\n\", i, line); } av_free(line); } #endif return 0; }"} {"target": 1, "idx": 17894, "func": "static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size) { const uint16_t *end; #ifdef HAVE_MMX const uint16_t *mm_end; #endif uint8_t *d = (uint8_t *)dst; const uint16_t *s = (uint16_t *)src; end = s + src_size/2; #ifdef HAVE_MMX __asm __volatile(PREFETCH\" %0\"::\"m\"(*s):\"memory\"); __asm __volatile(\"pxor %%mm7,%%mm7\\n\\t\":::\"memory\"); mm_end = end - 3; while(s < mm_end) { __asm __volatile( PREFETCH\" 32%1\\n\\t\" \"movq %1, %%mm0\\n\\t\" \"movq %1, %%mm1\\n\\t\" \"movq %1, %%mm2\\n\\t\" \"pand %2, %%mm0\\n\\t\" \"pand %3, %%mm1\\n\\t\" \"pand %4, %%mm2\\n\\t\" \"psllq $3, %%mm0\\n\\t\" \"psrlq $3, %%mm1\\n\\t\" \"psrlq $8, %%mm2\\n\\t\" \"movq %%mm0, %%mm3\\n\\t\" \"movq %%mm1, %%mm4\\n\\t\" \"movq %%mm2, %%mm5\\n\\t\" \"punpcklwd %%mm7, %%mm0\\n\\t\" \"punpcklwd %%mm7, %%mm1\\n\\t\" \"punpcklwd %%mm7, %%mm2\\n\\t\" \"punpckhwd %%mm7, %%mm3\\n\\t\" \"punpckhwd %%mm7, %%mm4\\n\\t\" \"punpckhwd %%mm7, %%mm5\\n\\t\" \"psllq $8, %%mm1\\n\\t\" \"psllq $16, %%mm2\\n\\t\" \"por %%mm1, %%mm0\\n\\t\" \"por %%mm2, %%mm0\\n\\t\" \"psllq $8, %%mm4\\n\\t\" \"psllq $16, %%mm5\\n\\t\" \"por %%mm4, %%mm3\\n\\t\" \"por %%mm5, %%mm3\\n\\t\" MOVNTQ\" %%mm0, %0\\n\\t\" MOVNTQ\" %%mm3, 8%0\\n\\t\" :\"=m\"(*d) :\"m\"(*s),\"m\"(mask16b),\"m\"(mask16g),\"m\"(mask16r) :\"memory\"); d += 16; s += 4; } __asm __volatile(SFENCE:::\"memory\"); __asm __volatile(EMMS:::\"memory\"); #endif while(s < end) { register uint16_t bgr; bgr = *s++; #ifdef WORDS_BIGENDIAN *d++ = 0; *d++ = (bgr&0xF800)>>8; *d++ = (bgr&0x7E0)>>3; *d++ = (bgr&0x1F)<<3; #else *d++ = (bgr&0x1F)<<3; *d++ = (bgr&0x7E0)>>3; *d++ = (bgr&0xF800)>>8; *d++ = 0; #endif } }"} {"target": 1, "idx": 17925, "func": "int ff_packet_split_and_drop_side_data(AVPacket *pkt){ if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){ int i; unsigned int size; uint8_t *p; p = pkt->data + pkt->size - 8 - 5; for (i=1; ; i++){ size = AV_RB32(p); if (size>INT_MAX - 5 || p - pkt->data < size) if (p[4]&128) break; if (p - pkt->data < size + 5) p-= size+5; } pkt->size = p - pkt->data - size; av_assert0(pkt->size >= 0); return 1; } }"} {"target": 1, "idx": 17933, "func": "static bool aio_dispatch(AioContext *ctx) { AioHandler *node; bool progress = false; /* * We have to walk very carefully in case qemu_aio_set_fd_handler is * called while we're walking. 
*/ node = QLIST_FIRST(&ctx->aio_handlers); while (node) { AioHandler *tmp; int revents; ctx->walking_handlers++; revents = node->pfd.revents & node->pfd.events; node->pfd.revents = 0; if (!node->deleted && (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) && node->io_read) { node->io_read(node->opaque); progress = true; } if (!node->deleted && (revents & (G_IO_OUT | G_IO_ERR)) && node->io_write) { node->io_write(node->opaque); progress = true; } tmp = node; node = QLIST_NEXT(node, node); ctx->walking_handlers--; if (!ctx->walking_handlers && tmp->deleted) { QLIST_REMOVE(tmp, node); g_free(tmp); } } return progress; }"} {"target": 1, "idx": 17938, "func": "static void piix4_ide_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->no_hotplug = 1; k->init = pci_piix_ide_initfn; k->exit = pci_piix_ide_exitfn; k->vendor_id = PCI_VENDOR_ID_INTEL; k->device_id = PCI_DEVICE_ID_INTEL_82371AB; k->class_id = PCI_CLASS_STORAGE_IDE; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); dc->no_user = 1; }"} {"target": 1, "idx": 17943, "func": "static float get_band_cost_SPAIR_mips(struct AACEncContext *s, PutBitContext *pb, const float *in, const float *scaled, int size, int scale_idx, int cb, const float lambda, const float uplim, int *bits) { const float Q34 = ff_aac_pow34sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512]; const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; int i; float cost = 0; int qc1, qc2, qc3, qc4; int curbits = 0; uint8_t *p_bits = (uint8_t *)ff_aac_spectral_bits[cb-1]; float *p_codes = (float *)ff_aac_codebook_vectors[cb-1]; for (i = 0; i < size; i += 4) { const float *vec, *vec2; int curidx, curidx2; int *in_int = (int *)&in[i]; float *in_pos = (float *)&in[i]; float di0, di1, di2, di3; int t0, t1, t2, t3, t4, t5, t6, t7; qc1 = scaled[i ] * Q34 + ROUND_STANDARD; qc2 = scaled[i+1] * Q34 + ROUND_STANDARD; qc3 = scaled[i+2] * Q34 + ROUND_STANDARD; qc4 = scaled[i+3] * Q34 + ROUND_STANDARD; __asm__ volatile ( \".set push \\n\\t\" \".set noreorder \\n\\t\" \"ori %[t4], $zero, 4 \\n\\t\" \"slt %[t0], %[t4], %[qc1] \\n\\t\" \"slt %[t1], %[t4], %[qc2] \\n\\t\" \"slt %[t2], %[t4], %[qc3] \\n\\t\" \"slt %[t3], %[t4], %[qc4] \\n\\t\" \"movn %[qc1], %[t4], %[t0] \\n\\t\" \"movn %[qc2], %[t4], %[t1] \\n\\t\" \"movn %[qc3], %[t4], %[t2] \\n\\t\" \"movn %[qc4], %[t4], %[t3] \\n\\t\" \"lw %[t0], 0(%[in_int]) \\n\\t\" \"lw %[t1], 4(%[in_int]) \\n\\t\" \"lw %[t2], 8(%[in_int]) \\n\\t\" \"lw %[t3], 12(%[in_int]) \\n\\t\" \"srl %[t0], %[t0], 31 \\n\\t\" \"srl %[t1], %[t1], 31 \\n\\t\" \"srl %[t2], %[t2], 31 \\n\\t\" \"srl %[t3], %[t3], 31 \\n\\t\" \"subu %[t4], $zero, %[qc1] \\n\\t\" \"subu %[t5], $zero, %[qc2] \\n\\t\" \"subu %[t6], $zero, %[qc3] \\n\\t\" \"subu %[t7], $zero, %[qc4] \\n\\t\" \"movn %[qc1], %[t4], %[t0] \\n\\t\" \"movn %[qc2], %[t5], %[t1] \\n\\t\" \"movn %[qc3], %[t6], %[t2] \\n\\t\" \"movn %[qc4], %[t7], %[t3] \\n\\t\" \".set pop \\n\\t\" : [qc1]\"+r\"(qc1), [qc2]\"+r\"(qc2), [qc3]\"+r\"(qc3), [qc4]\"+r\"(qc4), [t0]\"=&r\"(t0), [t1]\"=&r\"(t1), [t2]\"=&r\"(t2), [t3]\"=&r\"(t3), [t4]\"=&r\"(t4), [t5]\"=&r\"(t5), [t6]\"=&r\"(t6), [t7]\"=&r\"(t7) : [in_int]\"r\"(in_int) : \"memory\" ); curidx = 9 * qc1; curidx += qc2 + 40; curidx2 = 9 * qc3; curidx2 += qc4 + 40; curbits += p_bits[curidx]; curbits += p_bits[curidx2]; vec = &p_codes[curidx*2]; vec2 = &p_codes[curidx2*2]; __asm__ volatile ( \".set push \\n\\t\" \".set noreorder \\n\\t\" \"lwc1 $f0, 0(%[in_pos]) 
\\n\\t\" \"lwc1 $f1, 0(%[vec]) \\n\\t\" \"lwc1 $f2, 4(%[in_pos]) \\n\\t\" \"lwc1 $f3, 4(%[vec]) \\n\\t\" \"lwc1 $f4, 8(%[in_pos]) \\n\\t\" \"lwc1 $f5, 0(%[vec2]) \\n\\t\" \"lwc1 $f6, 12(%[in_pos]) \\n\\t\" \"lwc1 $f7, 4(%[vec2]) \\n\\t\" \"nmsub.s %[di0], $f0, $f1, %[IQ] \\n\\t\" \"nmsub.s %[di1], $f2, $f3, %[IQ] \\n\\t\" \"nmsub.s %[di2], $f4, $f5, %[IQ] \\n\\t\" \"nmsub.s %[di3], $f6, $f7, %[IQ] \\n\\t\" \".set pop \\n\\t\" : [di0]\"=&f\"(di0), [di1]\"=&f\"(di1), [di2]\"=&f\"(di2), [di3]\"=&f\"(di3) : [in_pos]\"r\"(in_pos), [vec]\"r\"(vec), [vec2]\"r\"(vec2), [IQ]\"f\"(IQ) : \"$f0\", \"$f1\", \"$f2\", \"$f3\", \"$f4\", \"$f5\", \"$f6\", \"$f7\", \"memory\" ); cost += di0 * di0 + di1 * di1 + di2 * di2 + di3 * di3; } if (bits) *bits = curbits; return cost * lambda + curbits; }"} {"target": 0, "idx": 17958, "func": "static void qdict_add_key(const char *key, QObject *obj, void *opaque) { GHashTable *h = opaque; g_hash_table_insert(h, (gpointer) key, NULL); }"} {"target": 0, "idx": 17964, "func": "static void eject_device(BlockDriverState *bs, int force, Error **errp) { if (bdrv_in_use(bs)) { error_set(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs)); return; } if (!bdrv_dev_has_removable_media(bs)) { error_setg(errp, \"Device '%s' is not removable\", bdrv_get_device_name(bs)); return; } if (bdrv_dev_is_medium_locked(bs) && !bdrv_dev_is_tray_open(bs)) { bdrv_dev_eject_request(bs, force); if (!force) { error_setg(errp, \"Device '%s' is locked\", bdrv_get_device_name(bs)); return; } } bdrv_close(bs); }"} {"target": 1, "idx": 17984, "func": "void ff_rm_free_rmstream (RMStream *rms) { av_free(rms->videobuf); av_free(rms->audiobuf); }"} {"target": 1, "idx": 17986, "func": "void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin, int64_t begin, bool has_length, int64_t length, Error **errp) { const char *p; int fd = -1; DumpState *s; int ret; if (has_begin && !has_length) { error_set(errp, QERR_MISSING_PARAMETER, \"length\"); return; } if (!has_begin && has_length) { error_set(errp, QERR_MISSING_PARAMETER, \"begin\"); return; } #if !defined(WIN32) if (strstart(file, \"fd:\", &p)) { fd = monitor_get_fd(cur_mon, p, errp); if (fd == -1) { return; } } #endif if (strstart(file, \"file:\", &p)) { fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR); if (fd < 0) { error_setg_file_open(errp, errno, p); return; } } if (fd == -1) { error_set(errp, QERR_INVALID_PARAMETER, \"protocol\"); return; } s = g_malloc0(sizeof(DumpState)); ret = dump_init(s, fd, paging, has_begin, begin, length, errp); if (ret < 0) { g_free(s); return; } if (create_vmcore(s) < 0 && !error_is_set(s->errp)) { error_set(errp, QERR_IO_ERROR); } g_free(s); }"} {"target": 1, "idx": 17989, "func": "static bool spapr_drc_needed(void *opaque) { sPAPRDRConnector *drc = (sPAPRDRConnector *)opaque; sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); /* If no dev is plugged in there is no need to migrate the DRC state */ if (!drc->dev) { return false; } /* * We need to migrate the state if it's not equal to the expected * long-term state, which is the same as the coldplugged initial * state */ return (drc->state != drck->ready_state); }"} {"target": 1, "idx": 17993, "func": "int qcow2_snapshot_delete(BlockDriverState *bs, const char *snapshot_id, const char *name, Error **errp) { BDRVQcowState *s = bs->opaque; QCowSnapshot sn; int snapshot_index, ret; /* Search the snapshot */ snapshot_index = find_snapshot_by_id_and_name(bs, snapshot_id, name); if (snapshot_index < 0) { error_setg(errp, \"Can't find 
the snapshot\"); return -ENOENT; } sn = s->snapshots[snapshot_index]; /* Remove it from the snapshot list */ memmove(s->snapshots + snapshot_index, s->snapshots + snapshot_index + 1, (s->nb_snapshots - snapshot_index - 1) * sizeof(sn)); s->nb_snapshots--; ret = qcow2_write_snapshots(bs); if (ret < 0) { error_setg(errp, \"Failed to remove snapshot from snapshot list\"); return ret; } /* * The snapshot is now unused, clean up. If we fail after this point, we * won't recover but just leak clusters. */ g_free(sn.id_str); g_free(sn.name); /* * Now decrease the refcounts of clusters referenced by the snapshot and * free the L1 table. */ ret = qcow2_update_snapshot_refcount(bs, sn.l1_table_offset, sn.l1_size, -1); if (ret < 0) { error_setg(errp, \"Failed to free the cluster and L1 table\"); return ret; } qcow2_free_clusters(bs, sn.l1_table_offset, sn.l1_size * sizeof(uint64_t), QCOW2_DISCARD_SNAPSHOT); /* must update the copied flag on the current cluster offsets */ ret = qcow2_update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 0); if (ret < 0) { error_setg(errp, \"Failed to update snapshot status in disk\"); return ret; } #ifdef DEBUG_ALLOC { BdrvCheckResult result = {0}; qcow2_check_refcounts(bs, &result, 0); } #endif return 0; }"} {"target": 0, "idx": 17998, "func": "int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap) { AVStream *st; enum CodecID id; st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); id = s->iformat->value; if (id == CODEC_ID_RAWVIDEO) { st->codec->codec_type = AVMEDIA_TYPE_VIDEO; } else { st->codec->codec_type = AVMEDIA_TYPE_AUDIO; } st->codec->codec_id = id; switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: { RawAudioDemuxerContext *s1 = s->priv_data; #if FF_API_FORMAT_PARAMETERS if (ap->sample_rate) st->codec->sample_rate = ap->sample_rate; if (ap->channels) st->codec->channels = ap->channels; else st->codec->channels = 1; #endif if (s1->sample_rate) st->codec->sample_rate = s1->sample_rate; if (s1->channels) st->codec->channels = s1->channels; st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id); assert(st->codec->bits_per_coded_sample > 0); st->codec->block_align = st->codec->bits_per_coded_sample*st->codec->channels/8; av_set_pts_info(st, 64, 1, st->codec->sample_rate); break; } case AVMEDIA_TYPE_VIDEO: { FFRawVideoDemuxerContext *s1 = s->priv_data; int width = 0, height = 0, ret; enum PixelFormat pix_fmt; if(ap->time_base.num) av_set_pts_info(st, 64, ap->time_base.num, ap->time_base.den); else av_set_pts_info(st, 64, 1, 25); if (s1->video_size && (ret = av_parse_video_size(&width, &height, s1->video_size)) < 0) { av_log(s, AV_LOG_ERROR, \"Couldn't parse video size.\\n\"); goto fail; } if ((pix_fmt = av_get_pix_fmt(s1->pixel_format)) == PIX_FMT_NONE) { av_log(s, AV_LOG_ERROR, \"No such pixel format: %s.\\n\", s1->pixel_format); ret = AVERROR(EINVAL); goto fail; } #if FF_API_FORMAT_PARAMETERS if (ap->width > 0) width = ap->width; if (ap->height > 0) height = ap->height; if (ap->pix_fmt) pix_fmt = ap->pix_fmt; #endif st->codec->width = width; st->codec->height = height; st->codec->pix_fmt = pix_fmt; fail: av_freep(&s1->video_size); av_freep(&s1->pixel_format); return ret; } default: return -1; } return 0; }"} {"target": 0, "idx": 18003, "func": "void mcf_uart_mm_init(MemoryRegion *sysmem, target_phys_addr_t base, qemu_irq irq, CharDriverState *chr) { mcf_uart_state *s; s = mcf_uart_init(irq, chr); memory_region_init_io(&s->iomem, &mcf_uart_ops, s, \"uart\", 0x40); memory_region_add_subregion(sysmem, base, 
&s->iomem); }"} {"target": 0, "idx": 18005, "func": "static void expr_error(Monitor *mon, const char *msg) { monitor_printf(mon, \"%s\\n\", msg); siglongjmp(expr_env, 1); }"} {"target": 1, "idx": 18028, "func": "static void usb_msd_send_status(MSDState *s, USBPacket *p) { struct usb_msd_csw csw; int len; csw.sig = cpu_to_le32(0x53425355); csw.tag = cpu_to_le32(s->tag); csw.residue = s->residue; csw.status = s->result; len = MIN(sizeof(csw), p->len); memcpy(p->data, &csw, len); }"} {"target": 1, "idx": 18032, "func": "static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MLPDecodeContext *m = avctx->priv_data; GetBitContext gb; unsigned int length, substr; unsigned int substream_start; unsigned int header_size = 4; unsigned int substr_header_size = 0; uint8_t substream_parity_present[MAX_SUBSTREAMS]; uint16_t substream_data_len[MAX_SUBSTREAMS]; uint8_t parity_bits; if (buf_size < 4) return 0; length = (AV_RB16(buf) & 0xfff) * 2; if (length > buf_size) return -1; init_get_bits(&gb, (buf + 4), (length - 4) * 8); m->is_major_sync_unit = 0; if (show_bits_long(&gb, 31) == (0xf8726fba >> 1)) { if (read_major_sync(m, &gb) < 0) goto error; m->is_major_sync_unit = 1; header_size += 28; } if (!m->params_valid) { av_log(m->avctx, AV_LOG_WARNING, \"Stream parameters not seen; skipping frame.\\n\"); *data_size = 0; return length; } substream_start = 0; for (substr = 0; substr < m->num_substreams; substr++) { int extraword_present, checkdata_present, end, nonrestart_substr; extraword_present = get_bits1(&gb); nonrestart_substr = get_bits1(&gb); checkdata_present = get_bits1(&gb); skip_bits1(&gb); end = get_bits(&gb, 12) * 2; substr_header_size += 2; if (extraword_present) { if (m->avctx->codec_id == CODEC_ID_MLP) { av_log(m->avctx, AV_LOG_ERROR, \"There must be no extraword for MLP.\\n\"); goto error; } skip_bits(&gb, 16); substr_header_size += 2; } if (!(nonrestart_substr ^ m->is_major_sync_unit)) { av_log(m->avctx, AV_LOG_ERROR, \"Invalid nonrestart_substr.\\n\"); goto error; } if (end + header_size + substr_header_size > length) { av_log(m->avctx, AV_LOG_ERROR, \"Indicated length of substream %d data goes off end of \" \"packet.\\n\", substr); end = length - header_size - substr_header_size; } if (end < substream_start) { av_log(avctx, AV_LOG_ERROR, \"Indicated end offset of substream %d data \" \"is smaller than calculated start offset.\\n\", substr); goto error; } if (substr > m->max_decoded_substream) continue; substream_parity_present[substr] = checkdata_present; substream_data_len[substr] = end - substream_start; substream_start = end; } parity_bits = ff_mlp_calculate_parity(buf, 4); parity_bits ^= ff_mlp_calculate_parity(buf + header_size, substr_header_size); if ((((parity_bits >> 4) ^ parity_bits) & 0xF) != 0xF) { av_log(avctx, AV_LOG_ERROR, \"Parity check failed.\\n\"); goto error; } buf += header_size + substr_header_size; for (substr = 0; substr <= m->max_decoded_substream; substr++) { SubStream *s = &m->substream[substr]; init_get_bits(&gb, buf, substream_data_len[substr] * 8); m->matrix_changed = 0; memset(m->filter_changed, 0, sizeof(m->filter_changed)); s->blockpos = 0; do { if (get_bits1(&gb)) { if (get_bits1(&gb)) { /* A restart header should be present. 
*/ if (read_restart_header(m, &gb, buf, substr) < 0) goto next_substr; s->restart_seen = 1; } if (!s->restart_seen) goto next_substr; if (read_decoding_params(m, &gb, substr) < 0) goto next_substr; } if (!s->restart_seen) goto next_substr; if (read_block_data(m, &gb, substr) < 0) return -1; if (get_bits_count(&gb) >= substream_data_len[substr] * 8) goto substream_length_mismatch; } while (!get_bits1(&gb)); skip_bits(&gb, (-get_bits_count(&gb)) & 15); if (substream_data_len[substr] * 8 - get_bits_count(&gb) >= 32) { int shorten_by; if (get_bits(&gb, 16) != 0xD234) return -1; shorten_by = get_bits(&gb, 16); if (m->avctx->codec_id == CODEC_ID_TRUEHD && shorten_by & 0x2000) s->blockpos -= FFMIN(shorten_by & 0x1FFF, s->blockpos); else if (m->avctx->codec_id == CODEC_ID_MLP && shorten_by != 0xD234) return -1; if (substr == m->max_decoded_substream) av_log(m->avctx, AV_LOG_INFO, \"End of stream indicated.\\n\"); } if (substream_parity_present[substr]) { uint8_t parity, checksum; if (substream_data_len[substr] * 8 - get_bits_count(&gb) != 16) goto substream_length_mismatch; parity = ff_mlp_calculate_parity(buf, substream_data_len[substr] - 2); checksum = ff_mlp_checksum8 (buf, substream_data_len[substr] - 2); if ((get_bits(&gb, 8) ^ parity) != 0xa9 ) av_log(m->avctx, AV_LOG_ERROR, \"Substream %d parity check failed.\\n\", substr); if ( get_bits(&gb, 8) != checksum) av_log(m->avctx, AV_LOG_ERROR, \"Substream %d checksum failed.\\n\" , substr); } if (substream_data_len[substr] * 8 != get_bits_count(&gb)) goto substream_length_mismatch; next_substr: if (!s->restart_seen) av_log(m->avctx, AV_LOG_ERROR, \"No restart header present in substream %d.\\n\", substr); buf += substream_data_len[substr]; } rematrix_channels(m, m->max_decoded_substream); if (output_data(m, m->max_decoded_substream, data, data_size) < 0) return -1; return length; substream_length_mismatch: av_log(m->avctx, AV_LOG_ERROR, \"substream %d length mismatch\\n\", substr); return -1; error: m->params_valid = 0; return -1; }"} {"target": 0, "idx": 18034, "func": "static void sunrast_image_write_image(AVCodecContext *avctx, const uint8_t *pixels, const uint32_t *palette_data, int linesize) { SUNRASTContext *s = avctx->priv_data; const uint8_t *ptr; int len, alen, x; if (s->maplength) { // palettized PutByteContext pb_r, pb_g; int len = s->maplength / 3; pb_r = s->p; bytestream2_skip_p(&s->p, len); pb_g = s->p; bytestream2_skip_p(&s->p, len); for (x = 0; x < len; x++) { uint32_t pixel = palette_data[x]; bytestream2_put_byteu(&pb_r, (pixel >> 16) & 0xFF); bytestream2_put_byteu(&pb_g, (pixel >> 8) & 0xFF); bytestream2_put_byteu(&s->p, pixel & 0xFF); } } len = (s->depth * avctx->width + 7) >> 3; alen = len + (len & 1); ptr = pixels; if (s->type == RT_BYTE_ENCODED) { uint8_t value, value2; int run; const uint8_t *end = pixels + avctx->height * linesize; ptr = pixels; #define GET_VALUE ptr >= end ? 0 : x >= len ? 
ptr[len-1] : ptr[x] x = 0; value2 = GET_VALUE; while (ptr < end) { run = 1; value = value2; x++; if (x >= alen) { x = 0; ptr += linesize; } value2 = GET_VALUE; while (value2 == value && run < 256 && ptr < end) { x++; run++; if (x >= alen) { x = 0; ptr += linesize; } value2 = GET_VALUE; } if (run > 2 || value == RLE_TRIGGER) { bytestream2_put_byteu(&s->p, RLE_TRIGGER); bytestream2_put_byteu(&s->p, run - 1); if (run > 1) bytestream2_put_byteu(&s->p, value); } else if (run == 1) { bytestream2_put_byteu(&s->p, value); } else bytestream2_put_be16u(&s->p, (value << 8) | value); } // update data length for header s->length = bytestream2_tell_p(&s->p) - 32 - s->maplength; } else { int y; for (y = 0; y < avctx->height; y++) { bytestream2_put_buffer(&s->p, ptr, len); if (len < alen) bytestream2_put_byteu(&s->p, 0); ptr += linesize; } } }"} {"target": 0, "idx": 18045, "func": "static NetSocketState *net_socket_fd_init(VLANState *vlan, const char *model, const char *name, int fd, int is_connected) { int so_type=-1, optlen=sizeof(so_type); if(getsockopt(fd, SOL_SOCKET, SO_TYPE, (char *)&so_type, (socklen_t *)&optlen)< 0) { fprintf(stderr, \"qemu: error: getsockopt(SO_TYPE) for fd=%d failed\\n\", fd); return NULL; } switch(so_type) { case SOCK_DGRAM: return net_socket_fd_init_dgram(vlan, model, name, fd, is_connected); case SOCK_STREAM: return net_socket_fd_init_stream(vlan, model, name, fd, is_connected); default: /* who knows ... this could be a eg. a pty, do warn and continue as stream */ fprintf(stderr, \"qemu: warning: socket type=%d for fd=%d is not SOCK_DGRAM or SOCK_STREAM\\n\", so_type, fd); return net_socket_fd_init_stream(vlan, model, name, fd, is_connected); } return NULL; }"} {"target": 0, "idx": 18061, "func": "static void ich_ahci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->init = pci_ich9_ahci_init; k->exit = pci_ich9_uninit; k->config_write = pci_ich9_write_config; k->vendor_id = PCI_VENDOR_ID_INTEL; k->device_id = PCI_DEVICE_ID_INTEL_82801IR; k->revision = 0x02; k->class_id = PCI_CLASS_STORAGE_SATA; dc->alias = \"ahci\"; dc->vmsd = &vmstate_ahci; }"} {"target": 0, "idx": 18064, "func": "static int sol_probe(AVProbeData *p) { /* check file header */ uint16_t magic; if (p->buf_size <= 14) return 0; magic=le2me_16(*((uint16_t*)p->buf)); if ((magic == 0x0B8D || magic == 0x0C0D || magic == 0x0C8D) && p->buf[2] == 'S' && p->buf[3] == 'O' && p->buf[4] == 'L' && p->buf[5] == 0) return AVPROBE_SCORE_MAX; else return 0; }"} {"target": 1, "idx": 18072, "func": "static int daala_header(AVFormatContext *s, int idx) { int i, err; uint8_t *cdp; GetByteContext gb; AVRational timebase; struct ogg *ogg = s->priv_data; struct ogg_stream *os = ogg->streams + idx; AVStream *st = s->streams[idx]; int cds = st->codec->extradata_size + os->psize + 2; DaalaInfoHeader *hdr = os->private; if (!(os->buf[os->pstart] & 0x80)) return 0; if (!hdr) { hdr = av_mallocz(sizeof(*hdr)); if (!hdr) return AVERROR(ENOMEM); os->private = hdr; switch (os->buf[os->pstart]) { case 0x80: bytestream2_init(&gb, os->buf + os->pstart, os->psize); bytestream2_skip(&gb, ff_daala_codec.magicsize); hdr->version_maj = bytestream2_get_byte(&gb); hdr->version_min = bytestream2_get_byte(&gb); hdr->version_sub = bytestream2_get_byte(&gb); st->codec->width = bytestream2_get_ne32(&gb); st->codec->height = bytestream2_get_ne32(&gb); st->sample_aspect_ratio.num = bytestream2_get_ne32(&gb); st->sample_aspect_ratio.den = bytestream2_get_ne32(&gb); timebase.num = 
bytestream2_get_ne32(&gb); timebase.den = bytestream2_get_ne32(&gb); if (timebase.num < 0 && timebase.den < 0) { av_log(s, AV_LOG_WARNING, \"Invalid timebase, assuming 30 FPS\\n\"); timebase.num = 1; timebase.den = 30; avpriv_set_pts_info(st, 64, timebase.den, timebase.num); hdr->frame_duration = bytestream2_get_ne32(&gb); hdr->gpshift = bytestream2_get_byte(&gb); hdr->gpmask = (1 << hdr->gpshift) - 1; hdr->format.depth = 8 + 2*(bytestream2_get_byte(&gb)-1); hdr->fpr = bytestream2_get_byte(&gb); hdr->format.planes = bytestream2_get_byte(&gb); for (i = 0; i < hdr->format.planes; i++) { hdr->format.xdec[i] = bytestream2_get_byte(&gb); hdr->format.ydec[i] = bytestream2_get_byte(&gb); if ((st->codec->pix_fmt = daala_match_pix_fmt(&hdr->format)) < 0) av_log(s, AV_LOG_ERROR, \"Unsupported pixel format - %i %i\\n\", hdr->format.depth, hdr->format.planes); st->codec->codec_id = AV_CODEC_ID_DAALA; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->need_parsing = AVSTREAM_PARSE_HEADERS; hdr->init_d = 1; break; case 0x81: if (!hdr->init_d) ff_vorbis_stream_comment(s, st, os->buf + os->pstart + ff_daala_codec.magicsize, os->psize - ff_daala_codec.magicsize); break; case 0x82: if (!hdr->init_d) break; default: av_log(s, AV_LOG_ERROR, \"Unknown header type %X\\n\", os->buf[os->pstart]); break; if ((err = av_reallocp(&st->codec->extradata, cds + AV_INPUT_BUFFER_PADDING_SIZE)) < 0) { st->codec->extradata_size = 0; return err; memset(st->codec->extradata + cds, 0, AV_INPUT_BUFFER_PADDING_SIZE); cdp = st->codec->extradata + st->codec->extradata_size; *cdp++ = os->psize >> 8; *cdp++ = os->psize & 0xff; memcpy(cdp, os->buf + os->pstart, os->psize); st->codec->extradata_size = cds; return 1;"} {"target": 1, "idx": 18074, "func": "static int vorbis_parse_setup_hdr_mappings(vorbis_context *vc) { GetBitContext *gb=&vc->gb; uint_fast8_t i, j; vc->mapping_count=get_bits(gb, 6)+1; vc->mappings=av_mallocz(vc->mapping_count * sizeof(vorbis_mapping)); AV_DEBUG(\" There are %d mappings. \\n\", vc->mapping_count); for(i=0;i<vc->mapping_count;++i) { vorbis_mapping *mapping_setup=&vc->mappings[i]; if (get_bits(gb, 16)) { av_log(vc->avccontext, AV_LOG_ERROR, \"Other mappings than type 0 are not compliant with the Vorbis I specification. \\n\"); return 1; } if (get_bits1(gb)) { mapping_setup->submaps=get_bits(gb, 4)+1; } else { mapping_setup->submaps=1; } if (get_bits1(gb)) { mapping_setup->coupling_steps=get_bits(gb, 8)+1; mapping_setup->magnitude=av_mallocz(mapping_setup->coupling_steps * sizeof(uint_fast8_t)); mapping_setup->angle =av_mallocz(mapping_setup->coupling_steps * sizeof(uint_fast8_t)); for(j=0;j<mapping_setup->coupling_steps;++j) { mapping_setup->magnitude[j]=get_bits(gb, ilog(vc->audio_channels-1)); mapping_setup->angle[j]=get_bits(gb, ilog(vc->audio_channels-1)); // FIXME: sanity checks } } else { mapping_setup->coupling_steps=0; } AV_DEBUG(\" %d mapping coupling steps: %d \\n\", i, mapping_setup->coupling_steps); if(get_bits(gb, 2)) { av_log(vc->avccontext, AV_LOG_ERROR, \"%d. mapping setup data invalid. \\n\", i); return 1; // following spec. } if (mapping_setup->submaps>1) { mapping_setup->mux=av_mallocz(vc->audio_channels * sizeof(uint_fast8_t)); for(j=0;j<vc->audio_channels;++j) { mapping_setup->mux[j]=get_bits(gb, 4); } } for(j=0;j<mapping_setup->submaps;++j) { skip_bits(gb, 8); // FIXME check?
mapping_setup->submap_floor[j]=get_bits(gb, 8); mapping_setup->submap_residue[j]=get_bits(gb, 8); AV_DEBUG(\" %d mapping %d submap : floor %d, residue %d \\n\", i, j, mapping_setup->submap_floor[j], mapping_setup->submap_residue[j]); } } return 0; }"} {"target": 1, "idx": 18078, "func": "static target_long monitor_get_psr (const struct MonitorDef *md, int val) { CPUState *env = mon_get_cpu(); if (!env) return 0; return GET_PSR(env); }"} {"target": 1, "idx": 18081, "func": "static int qemu_rdma_connect(RDMAContext *rdma, Error **errp) { RDMACapabilities cap = { .version = RDMA_CONTROL_VERSION_CURRENT, .flags = 0, }; struct rdma_conn_param conn_param = { .initiator_depth = 2, .retry_count = 5, .private_data = &cap, .private_data_len = sizeof(cap), }; struct rdma_cm_event *cm_event; int ret; /* * Only negotiate the capability with destination if the user * on the source first requested the capability. */ if (rdma->pin_all) { trace_qemu_rdma_connect_pin_all_requested(); cap.flags |= RDMA_CAPABILITY_PIN_ALL; } caps_to_network(&cap); ret = rdma_connect(rdma->cm_id, &conn_param); if (ret) { perror(\"rdma_connect\"); ERROR(errp, \"connecting to destination!\"); goto err_rdma_source_connect; } ret = rdma_get_cm_event(rdma->channel, &cm_event); if (ret) { perror(\"rdma_get_cm_event after rdma_connect\"); ERROR(errp, \"connecting to destination!\"); rdma_ack_cm_event(cm_event); goto err_rdma_source_connect; } if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) { perror(\"rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect\"); ERROR(errp, \"connecting to destination!\"); rdma_ack_cm_event(cm_event); goto err_rdma_source_connect; } rdma->connected = true; memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap)); network_to_caps(&cap); /* * Verify that the *requested* capabilities are supported by the destination * and disable them otherwise. */ if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) { ERROR(errp, \"Server cannot support pinning all memory. 
\" \"Will register memory dynamically.\"); rdma->pin_all = false; } trace_qemu_rdma_connect_pin_all_outcome(rdma->pin_all); rdma_ack_cm_event(cm_event); ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); if (ret) { ERROR(errp, \"posting second control recv!\"); goto err_rdma_source_connect; } rdma->control_ready_expected = 1; rdma->nb_sent = 0; return 0; err_rdma_source_connect: qemu_rdma_cleanup(rdma); return -1; }"} {"target": 0, "idx": 18102, "func": "opts_start_list(Visitor *v, const char *name, Error **errp) { OptsVisitor *ov = to_ov(v); /* we can't traverse a list in a list */ assert(ov->list_mode == LM_NONE); ov->repeated_opts = lookup_distinct(ov, name, errp); if (ov->repeated_opts != NULL) { ov->list_mode = LM_STARTED; } }"} {"target": 0, "idx": 18111, "func": "void commit_start(const char *job_id, BlockDriverState *bs, BlockDriverState *base, BlockDriverState *top, int64_t speed, BlockdevOnError on_error, BlockCompletionFunc *cb, void *opaque, const char *backing_file_str, Error **errp) { CommitBlockJob *s; BlockReopenQueue *reopen_queue = NULL; int orig_overlay_flags; int orig_base_flags; BlockDriverState *overlay_bs; Error *local_err = NULL; assert(top != bs); if (top == base) { error_setg(errp, \"Invalid files for merge: top and base are the same\"); return; } overlay_bs = bdrv_find_overlay(bs, top); if (overlay_bs == NULL) { error_setg(errp, \"Could not find overlay image for %s:\", top->filename); return; } s = block_job_create(job_id, &commit_job_driver, bs, speed, cb, opaque, errp); if (!s) { return; } orig_base_flags = bdrv_get_flags(base); orig_overlay_flags = bdrv_get_flags(overlay_bs); /* convert base & overlay_bs to r/w, if necessary */ if (!(orig_overlay_flags & BDRV_O_RDWR)) { reopen_queue = bdrv_reopen_queue(reopen_queue, overlay_bs, NULL, orig_overlay_flags | BDRV_O_RDWR); } if (!(orig_base_flags & BDRV_O_RDWR)) { reopen_queue = bdrv_reopen_queue(reopen_queue, base, NULL, orig_base_flags | BDRV_O_RDWR); } if (reopen_queue) { bdrv_reopen_multiple(reopen_queue, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); block_job_unref(&s->common); return; } } s->base = blk_new(); blk_insert_bs(s->base, base); s->top = blk_new(); blk_insert_bs(s->top, top); s->active = bs; s->base_flags = orig_base_flags; s->orig_overlay_flags = orig_overlay_flags; s->backing_file_str = g_strdup(backing_file_str); s->on_error = on_error; s->common.co = qemu_coroutine_create(commit_run, s); trace_commit_start(bs, base, top, s, s->common.co, opaque); qemu_coroutine_enter(s->common.co); }"} {"target": 0, "idx": 18114, "func": "static bool bdrv_drain_recurse(BlockDriverState *bs) { BdrvChild *child; bool waited; waited = bdrv_drain_poll(bs); if (bs->drv && bs->drv->bdrv_drain) { bs->drv->bdrv_drain(bs); } QLIST_FOREACH(child, &bs->children, next) { waited |= bdrv_drain_recurse(child->bs); } return waited; }"} {"target": 0, "idx": 18127, "func": "static void pxa2xx_i2c_slave_class_init(ObjectClass *klass, void *data) { I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); k->init = pxa2xx_i2c_slave_init; k->event = pxa2xx_i2c_event; k->recv = pxa2xx_i2c_rx; k->send = pxa2xx_i2c_tx; }"} {"target": 0, "idx": 18129, "func": "static inline int32_t efsctsi(uint32_t val) { CPU_FloatU u; u.l = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float32_is_nan(u.f))) return 0; return float32_to_int32(u.f, &env->vec_status); }"} {"target": 0, "idx": 18130, "func": "static inline int handle_cpu_signal(unsigned long pc, unsigned long address, int is_write, sigset_t 
*old_set) { #if defined(DEBUG_SIGNAL) printf(\"qemu: SIGSEGV pc=0x%08lx address=%08lx wr=%d oldset=0x%08lx\\n\", pc, address, is_write, *(unsigned long *)old_set); #endif /* XXX: locking issue */ if (is_write && page_unprotect(address)) { sigprocmask(SIG_SETMASK, old_set, NULL); return 1; } if (pc >= (unsigned long)code_gen_buffer && pc < (unsigned long)code_gen_buffer + CODE_GEN_BUFFER_SIZE) { /* the PC is inside the translated code. It means that we have a virtual CPU fault */ /* we restore the process signal mask as the sigreturn should do it */ sigprocmask(SIG_SETMASK, old_set, NULL); /* XXX: need to compute virtual pc position by retranslating code. The rest of the CPU state should be correct. */ env->cr2 = address; raise_exception_err(EXCP0E_PAGE, 4 | (is_write << 1)); /* never comes here */ return 1; } else { return 0; } }"} {"target": 0, "idx": 18133, "func": "void DBDMA_schedule(void) { CPUState *env = cpu_single_env; if (env) cpu_interrupt(env, CPU_INTERRUPT_EXIT); }"} {"target": 0, "idx": 18146, "func": "void helper_fdmulq(CPUSPARCState *env, float64 src1, float64 src2) { clear_float_exceptions(env); QT0 = float128_mul(float64_to_float128(src1, &env->fp_status), float64_to_float128(src2, &env->fp_status), &env->fp_status); check_ieee_exceptions(env); }"} {"target": 0, "idx": 18149, "func": "void helper_iret_real(int shift) { uint32_t sp, new_cs, new_eip, new_eflags, new_esp; uint8_t *ssp; int eflags_mask; sp = env->regs[R_ESP] & 0xffff; ssp = env->segs[R_SS].base + sp; if (shift == 1) { /* 32 bits */ new_eflags = ldl(ssp + 8); new_cs = ldl(ssp + 4) & 0xffff; new_eip = ldl(ssp) & 0xffff; } else { /* 16 bits */ new_eflags = lduw(ssp + 4); new_cs = lduw(ssp + 2); new_eip = lduw(ssp); } new_esp = sp + (6 << shift); env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) | (new_esp & 0xffff); load_seg_vm(R_CS, new_cs); env->eip = new_eip; eflags_mask = FL_UPDATE_CPL0_MASK; if (shift == 0) eflags_mask &= 0xffff; load_eflags(new_eflags, eflags_mask); }"} {"target": 0, "idx": 18154, "func": "static void residue_encode(venc_context_t * venc, residue_t * rc, PutBitContext * pb, float * coeffs, int samples, int real_ch) { int pass, i, j, p, k; int psize = rc->partition_size; int partitions = (rc->end - rc->begin) / psize; int channels = (rc->type == 2) ? 
1 : real_ch; int classes[channels][partitions]; int classwords = venc->codebooks[rc->classbook].ndimentions; assert(rc->type == 2); assert(real_ch == 2); for (p = 0; p < partitions; p++) { float max1 = 0., max2 = 0.; int s = rc->begin + p * psize; for (k = s; k < s + psize; k += 2) { max1 = FFMAX(max1, fabs(coeffs[ k / real_ch])); max2 = FFMAX(max2, fabs(coeffs[samples + k / real_ch])); } for (i = 0; i < rc->classifications - 1; i++) { if (max1 < rc->maxes[i][0] && max2 < rc->maxes[i][1]) break; } classes[0][p] = i; } for (pass = 0; pass < 8; pass++) { p = 0; while (p < partitions) { if (pass == 0) for (j = 0; j < channels; j++) { codebook_t * book = &venc->codebooks[rc->classbook]; int entry = 0; for (i = 0; i < classwords; i++) { entry *= rc->classifications; entry += classes[j][p + i]; } put_codeword(pb, book, entry); } for (i = 0; i < classwords && p < partitions; i++, p++) { for (j = 0; j < channels; j++) { int nbook = rc->books[classes[j][p]][pass]; codebook_t * book = &venc->codebooks[nbook]; float * buf = coeffs + samples*j + rc->begin + p*psize; if (nbook == -1) continue; assert(rc->type == 0 || rc->type == 2); assert(!(psize % book->ndimentions)); if (rc->type == 0) { for (k = 0; k < psize; k += book->ndimentions) { float * a = put_vector(book, pb, &buf[k]); int l; for (l = 0; l < book->ndimentions; l++) buf[k + l] -= a[l]; } } else { for (k = 0; k < psize; k += book->ndimentions) { int dim = book->ndimentions, s = rc->begin + p * psize + k, l; float vec[dim], * a = vec; for (l = s; l < s + dim; l++) *a++ = coeffs[(l % real_ch) * samples + l / real_ch]; a = put_vector(book, pb, vec); for (l = s; l < s + dim; l++) coeffs[(l % real_ch) * samples + l / real_ch] -= *a++; } } } } } } }"} {"target": 1, "idx": 18172, "func": "static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis, uint8_t slot) { AHCIDevice *ad = &s->dev[port]; IDEState *ide_state = &ad->port.ifs[0]; NCQFrame *ncq_fis = (NCQFrame*)cmd_fis; uint8_t tag = ncq_fis->tag >> 3; NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag]; size_t size; g_assert(is_ncq(ncq_fis->command)); if (ncq_tfs->used) { /* error - already in use */ fprintf(stderr, \"%s: tag %d already used\\n\", __FUNCTION__, tag); return; } ncq_tfs->used = 1; ncq_tfs->drive = ad; ncq_tfs->slot = slot; ncq_tfs->cmdh = &((AHCICmdHdr *)ad->lst)[slot]; ncq_tfs->cmd = ncq_fis->command; ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) | ((uint64_t)ncq_fis->lba4 << 32) | ((uint64_t)ncq_fis->lba3 << 24) | ((uint64_t)ncq_fis->lba2 << 16) | ((uint64_t)ncq_fis->lba1 << 8) | (uint64_t)ncq_fis->lba0; ncq_tfs->tag = tag; /* Sanity-check the NCQ packet */ if (tag != slot) { DPRINTF(port, \"Warn: NCQ slot (%d) did not match the given tag (%d)\\n\", slot, tag); } if (ncq_fis->aux0 || ncq_fis->aux1 || ncq_fis->aux2 || ncq_fis->aux3) { DPRINTF(port, \"Warn: Attempt to use NCQ auxiliary fields.\\n\"); } if (ncq_fis->prio || ncq_fis->icc) { DPRINTF(port, \"Warn: Unsupported attempt to use PRIO/ICC fields\\n\"); } if (ncq_fis->fua & NCQ_FIS_FUA_MASK) { DPRINTF(port, \"Warn: Unsupported attempt to use Force Unit Access\\n\"); } if (ncq_fis->tag & NCQ_FIS_RARC_MASK) { DPRINTF(port, \"Warn: Unsupported attempt to use Rebuild Assist\\n\"); } ncq_tfs->sector_count = ((ncq_fis->sector_count_high << 8) | ncq_fis->sector_count_low); if (!ncq_tfs->sector_count) { ncq_tfs->sector_count = 0x10000; } size = ncq_tfs->sector_count * 512; ahci_populate_sglist(ad, &ncq_tfs->sglist, ncq_tfs->cmdh, size, 0); if (ncq_tfs->sglist.size < size) { error_report(\"ahci: PRDT length for NCQ command 
(0x%zx) \" \"is smaller than the requested size (0x%zx)\", ncq_tfs->sglist.size, size); qemu_sglist_destroy(&ncq_tfs->sglist); ncq_err(ncq_tfs); ahci_trigger_irq(ad->hba, ad, PORT_IRQ_OVERFLOW); return; } else if (ncq_tfs->sglist.size != size) { DPRINTF(port, \"Warn: PRDTL (0x%zx)\" \" does not match requested size (0x%zx)\", ncq_tfs->sglist.size, size); } DPRINTF(port, \"NCQ transfer LBA from %\"PRId64\" to %\"PRId64\", \" \"drive max %\"PRId64\"\\n\", ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 1, ide_state->nb_sectors - 1); execute_ncq_command(ncq_tfs); }"} {"target": 1, "idx": 18179, "func": "USBDevice *usb_msd_init(const char *filename) { static int nr=0; char id[8]; QemuOpts *opts; DriveInfo *dinfo; USBDevice *dev; int fatal_error; const char *p1; char fmt[32]; /* parse -usbdevice disk: syntax into drive opts */ snprintf(id, sizeof(id), \"usb%d\", nr++); opts = qemu_opts_create(&qemu_drive_opts, id, 0); p1 = strchr(filename, ':'); if (p1++) { const char *p2; if (strstart(filename, \"format=\", &p2)) { int len = MIN(p1 - p2, sizeof(fmt)); pstrcpy(fmt, len, p2); qemu_opt_set(opts, \"format\", fmt); } else if (*filename != ':') { printf(\"unrecognized USB mass-storage option %s\\n\", filename); return NULL; } filename = p1; } if (!*filename) { printf(\"block device specification needed\\n\"); return NULL; } qemu_opt_set(opts, \"file\", filename); qemu_opt_set(opts, \"if\", \"none\"); /* create host drive */ dinfo = drive_init(opts, NULL, &fatal_error); if (!dinfo) { qemu_opts_del(opts); return NULL; } /* create guest device */ dev = usb_create(NULL /* FIXME */, \"QEMU USB MSD\"); qdev_prop_set_drive(&dev->qdev, \"drive\", dinfo); qdev_init(&dev->qdev); return dev; }"} {"target": 1, "idx": 18183, "func": "sowrite(so) struct socket *so; { int n,nn; struct sbuf *sb = &so->so_rcv; int len = sb->sb_cc; struct iovec iov[2]; DEBUG_CALL(\"sowrite\"); DEBUG_ARG(\"so = %lx\", (long)so); if (so->so_urgc) { sosendoob(so); if (sb->sb_cc == 0) return 0; } /* * No need to check if there's something to write, * sowrite wouldn't have been called otherwise */ len = sb->sb_cc; iov[0].iov_base = sb->sb_rptr; if (sb->sb_rptr < sb->sb_wptr) { iov[0].iov_len = sb->sb_wptr - sb->sb_rptr; /* Should never succeed, but... */ if (iov[0].iov_len > len) iov[0].iov_len = len; n = 1; } else { iov[0].iov_len = (sb->sb_data + sb->sb_datalen) - sb->sb_rptr; if (iov[0].iov_len > len) iov[0].iov_len = len; len -= iov[0].iov_len; if (len) { iov[1].iov_base = sb->sb_data; iov[1].iov_len = sb->sb_wptr - sb->sb_data; if (iov[1].iov_len > len) iov[1].iov_len = len; n = 2; } else n = 1; } /* Check if there's urgent data to send, and if so, send it */ #ifdef HAVE_READV nn = writev(so->s, (const struct iovec *)iov, n); DEBUG_MISC((dfd, \" ... wrote nn = %d bytes\\n\", nn)); #else nn = send(so->s, iov[0].iov_base, iov[0].iov_len,0); #endif /* This should never happen, but people tell me it does *shrug* */ if (nn < 0 && (errno == EAGAIN || errno == EINTR)) return 0; if (nn <= 0) { DEBUG_MISC((dfd, \" --- sowrite disconnected, so->so_state = %x, errno = %d\\n\", so->so_state, errno)); sofcantsendmore(so); tcp_sockclosed(sototcpcb(so)); return -1; } #ifndef HAVE_READV if (n == 2 && nn == iov[0].iov_len) { int ret; ret = send(so->s, iov[1].iov_base, iov[1].iov_len,0); if (ret > 0) nn += ret; } DEBUG_MISC((dfd, \" ... 
wrote nn = %d bytes\\n\", nn)); #endif /* Update sbuf */ sb->sb_cc -= nn; sb->sb_rptr += nn; if (sb->sb_rptr >= (sb->sb_data + sb->sb_datalen)) sb->sb_rptr -= sb->sb_datalen; /* * If in DRAIN mode, and there's no more data, set * it CANTSENDMORE */ if ((so->so_state & SS_FWDRAIN) && sb->sb_cc == 0) sofcantsendmore(so); return nn; }"} {"target": 1, "idx": 18191, "func": "static void rtc_get_date(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { Error *err = NULL; RTCState *s = MC146818_RTC(obj); struct tm current_tm; rtc_update_time(s); rtc_get_time(s, ¤t_tm); visit_start_struct(v, NULL, \"struct tm\", name, 0, &err); if (err) { goto out; } visit_type_int32(v, ¤t_tm.tm_year, \"tm_year\", &err); visit_type_int32(v, ¤t_tm.tm_mon, \"tm_mon\", &err); visit_type_int32(v, ¤t_tm.tm_mday, \"tm_mday\", &err); visit_type_int32(v, ¤t_tm.tm_hour, \"tm_hour\", &err); visit_type_int32(v, ¤t_tm.tm_min, \"tm_min\", &err); visit_type_int32(v, ¤t_tm.tm_sec, \"tm_sec\", &err); visit_end_struct(v, &err); out: error_propagate(errp, err); }"} {"target": 1, "idx": 18217, "func": "static void ff_compute_band_indexes(MPADecodeContext *s, GranuleDef *g) { if (g->block_type == 2) { if (g->switch_point) { /* if switched mode, we handle the 36 first samples as long blocks. For 8000Hz, we handle the 72 first exponents as long blocks */ if (s->sample_rate_index <= 2) g->long_end = 8; else g->long_end = 6; g->short_start = 2 + (s->sample_rate_index != 8); } else { g->long_end = 0; g->short_start = 0; } } else { g->short_start = 13; g->long_end = 22; } }"} {"target": 0, "idx": 18221, "func": "static uint16_t phys_section_add(MemoryRegionSection *section) { /* The physical section number is ORed with a page-aligned * pointer to produce the iotlb entries. Thus it should * never overflow into the page-aligned value. */ assert(next_map.sections_nb < TARGET_PAGE_SIZE); if (next_map.sections_nb == next_map.sections_nb_alloc) { next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2, 16); next_map.sections = g_renew(MemoryRegionSection, next_map.sections, next_map.sections_nb_alloc); } next_map.sections[next_map.sections_nb] = *section; memory_region_ref(section->mr); return next_map.sections_nb++; }"} {"target": 0, "idx": 18232, "func": "static uint32_t intel_hda_mmio_readb(void *opaque, target_phys_addr_t addr) { IntelHDAState *d = opaque; const IntelHDAReg *reg = intel_hda_reg_find(d, addr); return intel_hda_reg_read(d, reg, 0xff); }"} {"target": 0, "idx": 18234, "func": "static av_cold int vaapi_encode_h264_init_fixed_qp(AVCodecContext *avctx) { VAAPIEncodeContext *ctx = avctx->priv_data; VAAPIEncodeH264Context *priv = ctx->priv_data; VAAPIEncodeH264Options *opt = ctx->codec_options; priv->fixed_qp_p = opt->qp; if (avctx->i_quant_factor > 0.0) priv->fixed_qp_idr = (int)((priv->fixed_qp_p * avctx->i_quant_factor + avctx->i_quant_offset) + 0.5); else priv->fixed_qp_idr = priv->fixed_qp_p; if (avctx->b_quant_factor > 0.0) priv->fixed_qp_b = (int)((priv->fixed_qp_p * avctx->b_quant_factor + avctx->b_quant_offset) + 0.5); else priv->fixed_qp_b = priv->fixed_qp_p; av_log(avctx, AV_LOG_DEBUG, \"Using fixed QP = \" \"%d / %d / %d for IDR- / P- / B-frames.\\n\", priv->fixed_qp_idr, priv->fixed_qp_p, priv->fixed_qp_b); return 0; }"} {"target": 0, "idx": 18238, "func": "static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes) { int nr; int64_t res; int64_t start; /* TODO: Widening to sector boundaries should only be needed as * long as we can't query finer granularity. 
*/ start = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE); bytes = QEMU_ALIGN_UP(offset + bytes, BDRV_SECTOR_SIZE) - start; /* Clamp to image length, before checking status of underlying sectors */ if (start + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { bytes = bs->total_sectors * BDRV_SECTOR_SIZE - start; } if (!bytes) { return true; } res = bdrv_get_block_status_above(bs, NULL, start >> BDRV_SECTOR_BITS, bytes >> BDRV_SECTOR_BITS, &nr, NULL); return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr * BDRV_SECTOR_SIZE == bytes; }"} {"target": 0, "idx": 18257, "func": "int ff_bgmc_init(AVCodecContext *avctx, uint8_t **cf_lut, int **cf_lut_status) { *cf_lut = av_malloc(sizeof(**cf_lut) * LUT_BUFF * 16 * LUT_SIZE); *cf_lut_status = av_malloc(sizeof(**cf_lut_status) * LUT_BUFF); if (!cf_lut || !cf_lut_status) { ff_bgmc_end(cf_lut, cf_lut_status); av_log(avctx, AV_LOG_ERROR, \"Allocating buffer memory failed.\\n\"); return AVERROR(ENOMEM); } else { // initialize lut_status buffer to a value never used to compare against memset(*cf_lut_status, -1, sizeof(**cf_lut_status) * LUT_BUFF); } return 0; }"} {"target": 0, "idx": 18278, "func": "void ram_handle_compressed(void *host, uint8_t ch, uint64_t size) { if (ch != 0 || !is_zero_range(host, size)) { memset(host, ch, size); #ifndef _WIN32 if (ch == 0 && (!kvm_enabled() || kvm_has_sync_mmu())) { size = size & ~(getpagesize() - 1); if (size > 0) { qemu_madvise(host, size, QEMU_MADV_DONTNEED); } } #endif } }"} {"target": 1, "idx": 18283, "func": "static QEMUMachine *machine_parse(const char *name) { QEMUMachine *m, *machine = NULL; if (name) { machine = find_machine(name); } if (machine) { return machine; } printf(\"Supported machines are:\\n\"); for (m = first_machine; m != NULL; m = m->next) { if (m->alias) { printf(\"%-20s %s (alias of %s)\\n\", m->alias, m->desc, m->name); } printf(\"%-20s %s%s\\n\", m->name, m->desc, m->is_default ? 
\" (default)\" : \"\"); } exit(!name || *name != '?'); }"} {"target": 1, "idx": 18290, "func": "static int do_getfd(Monitor *mon, const QDict *qdict, QObject **ret_data) { const char *fdname = qdict_get_str(qdict, \"fdname\"); mon_fd_t *monfd; int fd; fd = qemu_chr_get_msgfd(mon->chr); if (fd == -1) { qerror_report(QERR_FD_NOT_SUPPLIED); return -1; } if (qemu_isdigit(fdname[0])) { qerror_report(QERR_INVALID_PARAMETER_VALUE, \"fdname\", \"a name not starting with a digit\"); return -1; } fd = dup(fd); if (fd == -1) { if (errno == EMFILE) qerror_report(QERR_TOO_MANY_FILES); else qerror_report(QERR_UNDEFINED_ERROR); return -1; } QLIST_FOREACH(monfd, &mon->fds, next) { if (strcmp(monfd->name, fdname) != 0) { continue; } close(monfd->fd); monfd->fd = fd; return 0; } monfd = qemu_mallocz(sizeof(mon_fd_t)); monfd->name = qemu_strdup(fdname); monfd->fd = fd; QLIST_INSERT_HEAD(&mon->fds, monfd, next); return 0; }"} {"target": 1, "idx": 18298, "func": "static int virtio_net_load(QEMUFile *f, void *opaque, int version_id) { VirtIONet *n = opaque; if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION) return -EINVAL; virtio_load(&n->vdev, f); qemu_get_buffer(f, n->mac, ETH_ALEN); n->tx_timer_active = qemu_get_be32(f); n->mergeable_rx_bufs = qemu_get_be32(f); if (version_id >= 3) n->status = qemu_get_be16(f); if (version_id >= 4) { if (version_id < 8) { n->promisc = qemu_get_be32(f); n->allmulti = qemu_get_be32(f); } else { n->promisc = qemu_get_byte(f); n->allmulti = qemu_get_byte(f); } } if (version_id >= 5) { n->mac_table.in_use = qemu_get_be32(f); /* MAC_TABLE_ENTRIES may be different from the saved image */ if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) { qemu_get_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN); } else if (n->mac_table.in_use) { qemu_fseek(f, n->mac_table.in_use * ETH_ALEN, SEEK_CUR); n->promisc = 1; n->mac_table.in_use = 0; } } if (version_id >= 6) qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3); if (version_id >= 7 && qemu_get_be32(f)) { fprintf(stderr, \"virtio-net: saved image requires vnet header support\\n\"); exit(1); } if (n->tx_timer_active) { qemu_mod_timer(n->tx_timer, qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL); } return 0; }"} {"target": 1, "idx": 18300, "func": "uint32_t lm4549_write_samples(lm4549_state *s, uint32_t left, uint32_t right) { /* The left and right samples are in 20-bit resolution. The LM4549 has 18-bit resolution and only uses the bits [19:2]. This model supports 16-bit playback. */ if (s->buffer_level >= LM4549_BUFFER_SIZE) { DPRINTF(\"write_sample Buffer full\\n\"); return 0; } /* Store 16-bit samples in the buffer */ s->buffer[s->buffer_level++] = (left >> 4); s->buffer[s->buffer_level++] = (right >> 4); if (s->buffer_level == LM4549_BUFFER_SIZE) { /* Trigger the transfer of the buffer to the audio host */ lm4549_audio_transfer(s); } return 1; }"} {"target": 1, "idx": 18320, "func": "static inline void code_gen_alloc(size_t tb_size) { tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size); tcg_ctx.code_gen_buffer = alloc_code_gen_buffer(); if (tcg_ctx.code_gen_buffer == NULL) { fprintf(stderr, \"Could not allocate dynamic translator buffer\\n\"); exit(1); } qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size, QEMU_MADV_HUGEPAGE); /* Estimate a good size for the number of TBs we can support. We still haven't deducted the prologue from the buffer size here, but that's minimal and won't affect the estimate much. 
*/ tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE; tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks); qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock); }"} {"target": 1, "idx": 18328, "func": "int net_init_socket(QemuOpts *opts, Monitor *mon, const char *name, VLANState *vlan) { if (qemu_opt_get(opts, \"fd\")) { int fd; if (qemu_opt_get(opts, \"listen\") || qemu_opt_get(opts, \"connect\") || qemu_opt_get(opts, \"mcast\") || qemu_opt_get(opts, \"localaddr\")) { error_report(\"listen=, connect=, mcast= and localaddr= is invalid with fd=\"); return -1; } fd = net_handle_fd_param(mon, qemu_opt_get(opts, \"fd\")); if (fd == -1) { return -1; } if (!net_socket_fd_init(vlan, \"socket\", name, fd, 1)) { close(fd); return -1; } } else if (qemu_opt_get(opts, \"listen\")) { const char *listen; if (qemu_opt_get(opts, \"fd\") || qemu_opt_get(opts, \"connect\") || qemu_opt_get(opts, \"mcast\") || qemu_opt_get(opts, \"localaddr\")) { error_report(\"fd=, connect=, mcast= and localaddr= is invalid with listen=\"); return -1; } listen = qemu_opt_get(opts, \"listen\"); if (net_socket_listen_init(vlan, \"socket\", name, listen) == -1) { return -1; } } else if (qemu_opt_get(opts, \"connect\")) { const char *connect; if (qemu_opt_get(opts, \"fd\") || qemu_opt_get(opts, \"listen\") || qemu_opt_get(opts, \"mcast\") || qemu_opt_get(opts, \"localaddr\")) { error_report(\"fd=, listen=, mcast= and localaddr= is invalid with connect=\"); return -1; } connect = qemu_opt_get(opts, \"connect\"); if (net_socket_connect_init(vlan, \"socket\", name, connect) == -1) { return -1; } } else if (qemu_opt_get(opts, \"mcast\")) { const char *mcast, *localaddr; if (qemu_opt_get(opts, \"fd\") || qemu_opt_get(opts, \"connect\") || qemu_opt_get(opts, \"listen\")) { error_report(\"fd=, connect= and listen= is invalid with mcast=\"); return -1; } mcast = qemu_opt_get(opts, \"mcast\"); localaddr = qemu_opt_get(opts, \"localaddr\"); if (net_socket_mcast_init(vlan, \"socket\", name, mcast, localaddr) == -1) { return -1; } } else { error_report(\"-socket requires fd=, listen=, connect= or mcast=\"); return -1; } return 0; }"} {"target": 0, "idx": 18342, "func": "QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type) { return main_loop_tlg.tl[type]; }"} {"target": 0, "idx": 18347, "func": "static void openpic_update_irq(openpic_t *opp, int n_IRQ) { IRQ_src_t *src; int i; src = &opp->src[n_IRQ]; if (!src->pending) { /* no irq pending */ DPRINTF(\"%s: IRQ %d is not pending\\n\", __func__, n_IRQ); return; } if (test_bit(&src->ipvp, IPVP_MASK)) { /* Interrupt source is disabled */ DPRINTF(\"%s: IRQ %d is disabled\\n\", __func__, n_IRQ); return; } if (IPVP_PRIORITY(src->ipvp) == 0) { /* Priority set to zero */ DPRINTF(\"%s: IRQ %d has 0 priority\\n\", __func__, n_IRQ); return; } if (test_bit(&src->ipvp, IPVP_ACTIVITY)) { /* IRQ already active */ DPRINTF(\"%s: IRQ %d is already active\\n\", __func__, n_IRQ); return; } if (src->ide == 0x00000000) { /* No target */ DPRINTF(\"%s: IRQ %d has no target\\n\", __func__, n_IRQ); return; } if (src->ide == (1 << src->last_cpu)) { /* Only one CPU is allowed to receive this IRQ */ IRQ_local_pipe(opp, src->last_cpu, n_IRQ); } else if (!test_bit(&src->ipvp, IPVP_MODE)) { /* Directed delivery mode */ for (i = 0; i < opp->nb_cpus; i++) { if (test_bit(&src->ide, i)) IRQ_local_pipe(opp, i, n_IRQ); } } else { /* Distributed delivery mode */ for (i = src->last_cpu + 1; i != src->last_cpu; i++) { if (i == opp->nb_cpus) i = 0; if (test_bit(&src->ide, 
i)) { IRQ_local_pipe(opp, i, n_IRQ); src->last_cpu = i; break; } } } }"} {"target": 0, "idx": 18349, "func": "static void kqemu_reset_modified_ram_pages(void) { int i; unsigned long page_index; for(i = 0; i < nb_modified_ram_pages; i++) { page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS; modified_ram_pages_table[page_index] = 0; } nb_modified_ram_pages = 0; }"} {"target": 0, "idx": 18353, "func": "static void cirrus_mmio_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { CirrusVGAState *s = opaque; if (addr >= 0x100) { cirrus_mmio_blt_write(s, addr - 0x100, val); } else { cirrus_vga_ioport_write(s, addr + 0x3c0, val); } }"} {"target": 0, "idx": 18356, "func": "void register_cp_regs_for_features(ARMCPU *cpu) { /* Register all the coprocessor registers based on feature bits */ CPUARMState *env = &cpu->env; if (arm_feature(env, ARM_FEATURE_M)) { /* M profile has no coprocessor registers */ return; } define_arm_cp_regs(cpu, cp_reginfo); if (!arm_feature(env, ARM_FEATURE_V8)) { /* Must go early as it is full of wildcards that may be * overridden by later definitions. */ define_arm_cp_regs(cpu, not_v8_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V6)) { /* The ID registers all have impdef reset values */ ARMCPRegInfo v6_idregs[] = { { .name = \"ID_PFR0\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_pfr0 }, { .name = \"ID_PFR1\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_pfr1 }, { .name = \"ID_DFR0\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_dfr0 }, { .name = \"ID_AFR0\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_afr0 }, { .name = \"ID_MMFR0\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_mmfr0 }, { .name = \"ID_MMFR1\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_mmfr1 }, { .name = \"ID_MMFR2\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_mmfr2 }, { .name = \"ID_MMFR3\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_mmfr3 }, { .name = \"ID_ISAR0\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_isar0 }, { .name = \"ID_ISAR1\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_isar1 }, { .name = \"ID_ISAR2\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_isar2 }, { .name = \"ID_ISAR3\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_isar3 }, { .name = \"ID_ISAR4\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_isar4 }, { .name 
= \"ID_ISAR5\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_isar5 }, /* 6..7 are as yet unallocated and must RAZ */ { .name = \"ID_ISAR6\", .cp = 15, .crn = 0, .crm = 2, .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = \"ID_ISAR7\", .cp = 15, .crn = 0, .crm = 2, .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, v6_idregs); define_arm_cp_regs(cpu, v6_cp_reginfo); } else { define_arm_cp_regs(cpu, not_v6_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V6K)) { define_arm_cp_regs(cpu, v6k_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V7)) { /* v7 performance monitor control register: same implementor * field as main ID register, and we implement only the cycle * count register. */ #ifndef CONFIG_USER_ONLY ARMCPRegInfo pmcr = { .name = \"PMCR\", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000, .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), .accessfn = pmreg_access, .writefn = pmcr_write, .raw_writefn = raw_write, }; define_one_arm_cp_reg(cpu, &pmcr); #endif ARMCPRegInfo clidr = { .name = \"CLIDR\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr }; define_one_arm_cp_reg(cpu, &clidr); define_arm_cp_regs(cpu, v7_cp_reginfo); } else { define_arm_cp_regs(cpu, not_v7_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V8)) { /* AArch64 ID registers, which all have impdef reset values */ ARMCPRegInfo v8_idregs[] = { { .name = \"ID_AA64PFR0_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64pfr0 }, { .name = \"ID_AA64PFR1_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64pfr1}, { .name = \"ID_AA64DFR0_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, /* We mask out the PMUVer field, because we don't currently * implement the PMU. Not advertising it prevents the guest * from trying to use it and getting UNDEFs on registers we * don't implement. 
*/ .resetvalue = cpu->id_aa64dfr0 & ~0xf00 }, { .name = \"ID_AA64DFR1_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64dfr1 }, { .name = \"ID_AA64AFR0_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64afr0 }, { .name = \"ID_AA64AFR1_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64afr1 }, { .name = \"ID_AA64ISAR0_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64isar0 }, { .name = \"ID_AA64ISAR1_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64isar1 }, { .name = \"ID_AA64MMFR0_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64mmfr0 }, { .name = \"ID_AA64MMFR1_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->id_aa64mmfr1 }, { .name = \"MVFR0_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->mvfr0 }, { .name = \"MVFR1_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->mvfr1 }, { .name = \"MVFR2_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->mvfr2 }, REGINFO_SENTINEL }; ARMCPRegInfo rvbar = { .name = \"RVBAR_EL1\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2, .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar }; define_one_arm_cp_reg(cpu, &rvbar); define_arm_cp_regs(cpu, v8_idregs); define_arm_cp_regs(cpu, v8_cp_reginfo); define_aarch64_debug_regs(cpu); } if (arm_feature(env, ARM_FEATURE_EL2)) { define_arm_cp_regs(cpu, v8_el2_cp_reginfo); } else { /* If EL2 is missing but higher ELs are enabled, we need to * register the no_el2 reginfos. */ if (arm_feature(env, ARM_FEATURE_EL3)) { define_arm_cp_regs(cpu, v8_el3_no_el2_cp_reginfo); } } if (arm_feature(env, ARM_FEATURE_EL3)) { define_arm_cp_regs(cpu, v8_el3_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_MPU)) { /* These are the MPU registers prior to PMSAv6. Any new * PMSA core later than the ARM946 will require that we * implement the PMSAv6 or PMSAv7 registers, which are * completely different. 
*/ assert(!arm_feature(env, ARM_FEATURE_V6)); define_arm_cp_regs(cpu, pmsav5_cp_reginfo); } else { define_arm_cp_regs(cpu, vmsa_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { define_arm_cp_regs(cpu, t2ee_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { define_arm_cp_regs(cpu, generic_timer_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_VAPA)) { define_arm_cp_regs(cpu, vapa_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_OMAPCP)) { define_arm_cp_regs(cpu, omap_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_STRONGARM)) { define_arm_cp_regs(cpu, strongarm_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_XSCALE)) { define_arm_cp_regs(cpu, xscale_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_LPAE)) { define_arm_cp_regs(cpu, lpae_cp_reginfo); } /* Slightly awkwardly, the OMAP and StrongARM cores need all of * cp15 crn=0 to be writes-ignored, whereas for other cores they should * be read-only (ie write causes UNDEF exception). */ { ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { /* Pre-v8 MIDR space. * Note that the MIDR isn't a simple constant register because * of the TI925 behaviour where writes to another register can * cause the MIDR value to change. * * Unimplemented registers in the c15 0 0 0 space default to * MIDR. Define MIDR first as this entire space, then CTR, TCMTR * and friends override accordingly. */ { .name = \"MIDR\", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .resetvalue = cpu->midr, .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), .type = ARM_CP_OVERRIDE }, /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */ { .name = \"DUMMY\", .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = \"DUMMY\", .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = \"DUMMY\", .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = \"DUMMY\", .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = \"DUMMY\", .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; ARMCPRegInfo id_v8_midr_cp_reginfo[] = { /* v8 MIDR -- the wildcard isn't necessary, and nor is the * variable-MIDR TI925 behaviour. Instead we have a single * (strictly speaking IMPDEF) alias of the MIDR, REVIDR. 
*/ { .name = \"MIDR_EL1\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->midr }, { .name = \"REVIDR_EL1\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->midr }, REGINFO_SENTINEL }; ARMCPRegInfo id_cp_reginfo[] = { /* These are common to v8 and pre-v8 */ { .name = \"CTR\", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, { .name = \"CTR_EL0\", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, .access = PL0_R, .accessfn = ctr_el0_access, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ { .name = \"TCMTR\", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = \"TLBTR\", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, REGINFO_SENTINEL }; ARMCPRegInfo crn0_wi_reginfo = { .name = \"CRN0_WI\", .cp = 15, .crn = 0, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_OVERRIDE }; if (arm_feature(env, ARM_FEATURE_OMAPCP) || arm_feature(env, ARM_FEATURE_STRONGARM)) { ARMCPRegInfo *r; /* Register the blanket \"writes ignored\" value first to cover the * whole space. Then update the specific ID registers to allow write * access, so that they ignore writes rather than causing them to * UNDEF. */ define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); for (r = id_pre_v8_midr_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { r->access = PL1_RW; } for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { r->access = PL1_RW; } } if (arm_feature(env, ARM_FEATURE_V8)) { define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); } else { define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); } define_arm_cp_regs(cpu, id_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_MPIDR)) { define_arm_cp_regs(cpu, mpidr_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_AUXCR)) { ARMCPRegInfo auxcr = { .name = \"ACTLR_EL1\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }; define_one_arm_cp_reg(cpu, &auxcr); } if (arm_feature(env, ARM_FEATURE_CBAR)) { if (arm_feature(env, ARM_FEATURE_AARCH64)) { /* 32 bit view is [31:18] 0...0 [43:32]. 
*/ uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) | extract64(cpu->reset_cbar, 32, 12); ARMCPRegInfo cbar_reginfo[] = { { .name = \"CBAR\", .type = ARM_CP_CONST, .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, .access = PL1_R, .resetvalue = cpu->reset_cbar }, { .name = \"CBAR_EL1\", .state = ARM_CP_STATE_AA64, .type = ARM_CP_CONST, .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, .access = PL1_R, .resetvalue = cbar32 }, REGINFO_SENTINEL }; /* We don't implement a r/w 64 bit CBAR currently */ assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); define_arm_cp_regs(cpu, cbar_reginfo); } else { ARMCPRegInfo cbar = { .name = \"CBAR\", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address) }; if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { cbar.access = PL1_R; cbar.fieldoffset = 0; cbar.type = ARM_CP_CONST; } define_one_arm_cp_reg(cpu, &cbar); } } /* Generic registers whose values depend on the implementation */ { ARMCPRegInfo sctlr = { .name = \"SCTLR\", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys), .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, .raw_writefn = raw_write, }; if (arm_feature(env, ARM_FEATURE_XSCALE)) { /* Normally we would always end the TB on an SCTLR write, but Linux * arch/arm/mach-pxa/sleep.S expects two instructions following * an MMU enable to execute from cache. Imitate this behaviour. */ sctlr.type |= ARM_CP_SUPPRESS_TB_END; } define_one_arm_cp_reg(cpu, &sctlr); } }"} {"target": 0, "idx": 18361, "func": "static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun, MegasasCmd *cmd) { struct mfi_pd_info *info = cmd->iov_buf; size_t dcmd_size = sizeof(struct mfi_pd_info); BlockConf *conf = &sdev->conf; uint64_t pd_size; uint16_t sdev_id = ((sdev->id & 0xFF) >> 8) | (lun & 0xFF); uint8_t cmdbuf[6]; SCSIRequest *req; size_t len, resid; if (!cmd->iov_buf) { cmd->iov_buf = g_malloc(dcmd_size); memset(cmd->iov_buf, 0, dcmd_size); info = cmd->iov_buf; info->inquiry_data[0] = 0x7f; /* Force PQual 0x3, PType 0x1f */ info->vpd_page83[0] = 0x7f; megasas_setup_inquiry(cmdbuf, 0, sizeof(info->inquiry_data)); req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd); if (!req) { trace_megasas_dcmd_req_alloc_failed(cmd->index, \"PD get info std inquiry\"); g_free(cmd->iov_buf); cmd->iov_buf = NULL; return MFI_STAT_FLASH_ALLOC_FAIL; } trace_megasas_dcmd_internal_submit(cmd->index, \"PD get info std inquiry\", lun); len = scsi_req_enqueue(req); if (len > 0) { cmd->iov_size = len; scsi_req_continue(req); } return MFI_STAT_INVALID_STATUS; } else if (info->inquiry_data[0] != 0x7f && info->vpd_page83[0] == 0x7f) { megasas_setup_inquiry(cmdbuf, 0x83, sizeof(info->vpd_page83)); req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd); if (!req) { trace_megasas_dcmd_req_alloc_failed(cmd->index, \"PD get info vpd inquiry\"); return MFI_STAT_FLASH_ALLOC_FAIL; } trace_megasas_dcmd_internal_submit(cmd->index, \"PD get info vpd inquiry\", lun); len = scsi_req_enqueue(req); if (len > 0) { cmd->iov_size = len; scsi_req_continue(req); } return MFI_STAT_INVALID_STATUS; } /* Finished, set FW state */ if ((info->inquiry_data[0] >> 5) == 0) { if (megasas_is_jbod(cmd->state)) { info->fw_state = cpu_to_le16(MFI_PD_STATE_SYSTEM); } else { info->fw_state = cpu_to_le16(MFI_PD_STATE_ONLINE); } } else { info->fw_state = cpu_to_le16(MFI_PD_STATE_OFFLINE); } 
info->ref.v.device_id = cpu_to_le16(sdev_id); info->state.ddf.pd_type = cpu_to_le16(MFI_PD_DDF_TYPE_IN_VD| MFI_PD_DDF_TYPE_INTF_SAS); bdrv_get_geometry(conf->bs, &pd_size); info->raw_size = cpu_to_le64(pd_size); info->non_coerced_size = cpu_to_le64(pd_size); info->coerced_size = cpu_to_le64(pd_size); info->encl_device_id = 0xFFFF; info->slot_number = (sdev->id & 0xFF); info->path_info.count = 1; info->path_info.sas_addr[0] = cpu_to_le64(megasas_get_sata_addr(sdev_id)); info->connected_port_bitmap = 0x1; info->device_speed = 1; info->link_speed = 1; resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg); g_free(cmd->iov_buf); cmd->iov_size = dcmd_size - resid; cmd->iov_buf = NULL; return MFI_STAT_OK; }"} {"target": 0, "idx": 18366, "func": "static void yuv_from_cqt(ColorFloat *c, const FFTComplex *v, float gamma, int len) { int x; for (x = 0; x < len; x++) { float r, g, b; r = calculate_gamma(FFMIN(1.0f, v[x].re), gamma); g = calculate_gamma(FFMIN(1.0f, 0.5f * (v[x].re + v[x].im)), gamma); b = calculate_gamma(FFMIN(1.0f, v[x].im), gamma); c[x].yuv.y = 16.0f + 65.481f * r + 128.553f * g + 24.966f * b; c[x].yuv.u = 128.0f - 37.797f * r - 74.203f * g + 112.0f * b; c[x].yuv.v = 128.0f + 112.0f * r - 93.786f * g - 18.214 * b; } }"} {"target": 1, "idx": 18390, "func": "static void tcp_chr_close(CharDriverState *chr) { TCPCharDriver *s = chr->opaque; if (s->fd >= 0) { if (s->tag) { g_source_remove(s->tag); s->tag = 0; } if (s->chan) { g_io_channel_unref(s->chan); } closesocket(s->fd); } if (s->listen_fd >= 0) { if (s->listen_tag) { g_source_remove(s->listen_tag); s->listen_tag = 0; } if (s->listen_chan) { g_io_channel_unref(s->listen_chan); } closesocket(s->listen_fd); } g_free(s); qemu_chr_be_event(chr, CHR_EVENT_CLOSED); }"} {"target": 1, "idx": 18402, "func": "static int kvm_put_vcpu_events(CPUState *env, int level) { struct kvm_vcpu_events events; if (!kvm_has_vcpu_events()) { return 0; } events.exception.injected = (env->exception_injected >= 0); events.exception.nr = env->exception_injected; events.exception.has_error_code = env->has_error_code; events.exception.error_code = env->error_code; events.interrupt.injected = (env->interrupt_injected >= 0); events.interrupt.nr = env->interrupt_injected; events.interrupt.soft = env->soft_interrupt; events.nmi.injected = env->nmi_injected; events.nmi.pending = env->nmi_pending; events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK); events.nmi.pad = 0; events.sipi_vector = env->sipi_vector; events.flags = 0; if (level >= KVM_PUT_RESET_STATE) { events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR; } return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events); }"} {"target": 1, "idx": 18405, "func": "static int rtsp_read_packet(AVFormatContext *s, AVPacket *pkt) { RTSPState *rt = s->priv_data; int ret; RTSPMessageHeader reply1, *reply = &reply1; char cmd[1024]; if (rt->server_type == RTSP_SERVER_REAL) { int i; enum AVDiscard cache[MAX_STREAMS]; for (i = 0; i < s->nb_streams; i++) cache[i] = s->streams[i]->discard; if (!rt->need_subscription) { if (memcmp (cache, rt->real_setup_cache, sizeof(enum AVDiscard) * s->nb_streams)) { av_strlcatf(cmd, sizeof(cmd), \"SET_PARAMETER %s RTSP/1.0\\r\\n\" \"Unsubscribe: %s\\r\\n\", s->filename, rt->last_subscription); rtsp_send_cmd(s, cmd, reply, NULL); if (reply->status_code != RTSP_STATUS_OK) return AVERROR_INVALIDDATA; rt->need_subscription = 1; } } if (rt->need_subscription) { int r, rule_nr, first = 1; memcpy(rt->real_setup_cache, cache, sizeof(enum AVDiscard) * s->nb_streams); 
rt->last_subscription[0] = 0; snprintf(cmd, sizeof(cmd), \"SET_PARAMETER %s RTSP/1.0\\r\\n\" \"Subscribe: \", s->filename); for (i = 0; i < rt->nb_rtsp_streams; i++) { rule_nr = 0; for (r = 0; r < s->nb_streams; r++) { if (s->streams[r]->priv_data == rt->rtsp_streams[i]) { if (s->streams[r]->discard != AVDISCARD_ALL) { if (!first) av_strlcat(rt->last_subscription, \",\", sizeof(rt->last_subscription)); ff_rdt_subscribe_rule( rt->last_subscription, sizeof(rt->last_subscription), i, rule_nr); first = 0; } rule_nr++; } } } av_strlcatf(cmd, sizeof(cmd), \"%s\\r\\n\", rt->last_subscription); rtsp_send_cmd(s, cmd, reply, NULL); if (reply->status_code != RTSP_STATUS_OK) return AVERROR_INVALIDDATA; rt->need_subscription = 0; if (rt->state == RTSP_STATE_PLAYING) rtsp_read_play (s); } } ret = rtsp_fetch_packet(s, pkt); if (ret < 0) return ret; /* send dummy request to keep TCP connection alive */ if ((rt->server_type == RTSP_SERVER_WMS || rt->server_type == RTSP_SERVER_REAL) && (av_gettime() - rt->last_cmd_time) / 1000000 >= rt->timeout / 2) { if (rt->server_type == RTSP_SERVER_WMS) { snprintf(cmd, sizeof(cmd) - 1, \"GET_PARAMETER %s RTSP/1.0\\r\\n\", s->filename); rtsp_send_cmd_async(s, cmd, reply, NULL); } else { rtsp_send_cmd_async(s, \"OPTIONS * RTSP/1.0\\r\\n\", reply, NULL); } } return 0; }"} {"target": 1, "idx": 18411, "func": "static target_ulong h_resize_hpt_prepare(PowerPCCPU *cpu, sPAPRMachineState *spapr, target_ulong opcode, target_ulong *args) { target_ulong flags = args[0]; int shift = args[1]; sPAPRPendingHPT *pending = spapr->pending_hpt; uint64_t current_ram_size = MACHINE(spapr)->ram_size; int rc; if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { return H_AUTHORITY; } if (!spapr->htab_shift) { /* Radix guest, no HPT */ return H_NOT_AVAILABLE; } trace_spapr_h_resize_hpt_prepare(flags, shift); if (flags != 0) { return H_PARAMETER; } if (shift && ((shift < 18) || (shift > 46))) { return H_PARAMETER; } current_ram_size = pc_existing_dimms_capacity(&error_fatal); /* We only allow the guest to allocate an HPT one order above what * we'd normally give them (to stop a small guest claiming a huge * chunk of resources in the HPT */ if (shift > (spapr_hpt_shift_for_ramsize(current_ram_size) + 1)) { return H_RESOURCE; } rc = kvmppc_resize_hpt_prepare(cpu, flags, shift); if (rc != -ENOSYS) { return resize_hpt_convert_rc(rc); } if (pending) { /* something already in progress */ if (pending->shift == shift) { /* and it's suitable */ if (pending->complete) { return pending->ret; } else { return H_LONG_BUSY_ORDER_100_MSEC; } } /* not suitable, cancel and replace */ cancel_hpt_prepare(spapr); } if (!shift) { /* nothing to do */ return H_SUCCESS; } /* start new prepare */ pending = g_new0(sPAPRPendingHPT, 1); pending->shift = shift; pending->ret = H_HARDWARE; qemu_thread_create(&pending->thread, \"sPAPR HPT prepare\", hpt_prepare_thread, pending, QEMU_THREAD_DETACHED); spapr->pending_hpt = pending; /* In theory we could estimate the time more accurately based on * the new size, but there's not much point */ return H_LONG_BUSY_ORDER_100_MSEC; }"} {"target": 1, "idx": 18413, "func": "void qemu_cond_signal(QemuCond *cond) { DWORD result; /* * Signal only when there are waiters. cond->waiters is * incremented by pthread_cond_wait under the external lock, * so we are safe about that. */ if (cond->waiters == 0) { return; } /* * Waiting threads decrement it outside the external lock, but * only if another thread is executing pthread_cond_broadcast and * has the mutex. 
So, it also cannot be decremented concurrently * with this particular access. */ cond->target = cond->waiters - 1; result = SignalObjectAndWait(cond->sema, cond->continue_event, INFINITE, FALSE); if (result == WAIT_ABANDONED || result == WAIT_FAILED) { error_exit(GetLastError(), __func__); } }"} {"target": 0, "idx": 18419, "func": "static int split_field_half_ref_list(Picture *dest, int dest_len, Picture *src, int src_len, int parity){ int same_parity = 1; int same_i = 0; int opp_i = 0; int out_i; int field_output; for (out_i = 0; out_i < dest_len; out_i += field_output) { if (same_parity && same_i < src_len) { field_output = split_field_copy(dest + out_i, src + same_i, parity, 1); same_parity = !field_output; same_i++; } else if (opp_i < src_len) { field_output = split_field_copy(dest + out_i, src + opp_i, PICT_FRAME - parity, 0); same_parity = field_output; opp_i++; } else { break; } } return out_i; }"} {"target": 1, "idx": 18425, "func": "int64_t xbzrle_cache_resize(int64_t new_size) { if (new_size < TARGET_PAGE_SIZE) { return -1; } if (XBZRLE.cache != NULL) { return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) * TARGET_PAGE_SIZE; } return pow2floor(new_size); }"} {"target": 1, "idx": 18427, "func": "static void count_usage(uint8_t *src, int width, int height, uint32_t *counts) { int i, j; for (j = 0; j < height; j++) { for (i = 0; i < width; i++) { counts[src[i]]++; } src += width; } }"} {"target": 1, "idx": 18451, "func": "void ff_rtp_send_hevc(AVFormatContext *ctx, const uint8_t *frame_buf, int frame_size) { const uint8_t *next_NAL_unit; const uint8_t *buf_ptr, *buf_end = frame_buf + frame_size; RTPMuxContext *rtp_ctx = ctx->priv_data; /* use the default 90 KHz time stamp */ rtp_ctx->timestamp = rtp_ctx->cur_timestamp; rtp_ctx->buf_ptr = rtp_ctx->buf; if (rtp_ctx->nal_length_size) buf_ptr = ff_avc_mp4_find_startcode(frame_buf, buf_end, rtp_ctx->nal_length_size) ? 
frame_buf : buf_end; else buf_ptr = ff_avc_find_startcode(frame_buf, buf_end); /* find all NAL units and send them as separate packets */ while (buf_ptr < buf_end) { if (rtp_ctx->nal_length_size) { next_NAL_unit = ff_avc_mp4_find_startcode(buf_ptr, buf_end, rtp_ctx->nal_length_size); if (!next_NAL_unit) next_NAL_unit = buf_end; buf_ptr += rtp_ctx->nal_length_size; } else { while (!*(buf_ptr++)) ; next_NAL_unit = ff_avc_find_startcode(buf_ptr, buf_end); } /* send the next NAL unit */ nal_send(ctx, buf_ptr, next_NAL_unit - buf_ptr, next_NAL_unit == buf_end); /* jump to the next NAL unit */ buf_ptr = next_NAL_unit; } flush_buffered(ctx, 1); }"} {"target": 0, "idx": 18464, "func": "static inline int dc1394_read_common(AVFormatContext *c, AVFormatParameters *ap, struct dc1394_frame_format **select_fmt, struct dc1394_frame_rate **select_fps) { dc1394_data* dc1394 = c->priv_data; AVStream* vst; struct dc1394_frame_format *fmt; struct dc1394_frame_rate *fps; enum PixelFormat pix_fmt; int width, height; AVRational framerate; int ret = 0; if ((pix_fmt = av_get_pix_fmt(dc1394->pixel_format)) == PIX_FMT_NONE) { av_log(c, AV_LOG_ERROR, \"No such pixel format: %s.\\n\", dc1394->pixel_format); ret = AVERROR(EINVAL); goto out; } if ((ret = av_parse_video_size(&width, &height, dc1394->video_size)) < 0) { av_log(c, AV_LOG_ERROR, \"Couldn't parse video size.\\n\"); goto out; } if ((ret = av_parse_video_rate(&framerate, dc1394->framerate)) < 0) { av_log(c, AV_LOG_ERROR, \"Couldn't parse framerate.\\n\"); goto out; } #if FF_API_FORMAT_PARAMETERS if (ap->width > 0) width = ap->width; if (ap->height > 0) height = ap->height; if (ap->pix_fmt) pix_fmt = ap->pix_fmt; if (ap->time_base.num) framerate = (AVRational){ap->time_base.den, ap->time_base.num}; #endif dc1394->frame_rate = av_rescale(1000, framerate.num, framerate.den); for (fmt = dc1394_frame_formats; fmt->width; fmt++) if (fmt->pix_fmt == pix_fmt && fmt->width == width && fmt->height == height) break; for (fps = dc1394_frame_rates; fps->frame_rate; fps++) if (fps->frame_rate == dc1394->frame_rate) break; if (!fps->frame_rate || !fmt->width) { av_log(c, AV_LOG_ERROR, \"Can't find matching camera format for %s, %dx%d@%d:1000fps\\n\", av_get_pix_fmt_name(pix_fmt), width, height, dc1394->frame_rate); ret = AVERROR(EINVAL); goto out; } /* create a video stream */ vst = av_new_stream(c, 0); if (!vst) { ret = AVERROR(ENOMEM); goto out; } av_set_pts_info(vst, 64, 1, 1000); vst->codec->codec_type = AVMEDIA_TYPE_VIDEO; vst->codec->codec_id = CODEC_ID_RAWVIDEO; vst->codec->time_base.den = framerate.num; vst->codec->time_base.num = framerate.den; vst->codec->width = fmt->width; vst->codec->height = fmt->height; vst->codec->pix_fmt = fmt->pix_fmt; /* packet init */ av_init_packet(&dc1394->packet); dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height); dc1394->packet.stream_index = vst->index; dc1394->packet.flags |= AV_PKT_FLAG_KEY; dc1394->current_frame = 0; vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000); *select_fps = fps; *select_fmt = fmt; out: return ret; }"} {"target": 1, "idx": 18472, "func": "int av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx) { FileLogContext file_log_ctx = { &file_log_ctx_class, log_offset, log_ctx }; int fd=-1; #if !HAVE_MKSTEMP void *ptr= tempnam(NULL, prefix); if(!ptr) ptr= tempnam(\".\", prefix); *filename = av_strdup(ptr); #undef free free(ptr); #else size_t len = strlen(prefix) + 12; /* room for \"/tmp/\" and \"XXXXXX\\0\" */ *filename = 
av_malloc(len); #endif /* -----common section-----*/ if (*filename == NULL) { av_log(&file_log_ctx, AV_LOG_ERROR, \"ff_tempfile: Cannot allocate file name\\n\"); return AVERROR(ENOMEM); } #if !HAVE_MKSTEMP # ifndef O_BINARY # define O_BINARY 0 # endif # ifndef O_EXCL # define O_EXCL 0 # endif fd = open(*filename, O_RDWR | O_BINARY | O_CREAT | O_EXCL, 0600); #else snprintf(*filename, len, \"/tmp/%sXXXXXX\", prefix); fd = mkstemp(*filename); #ifdef _WIN32 if (fd < 0) { snprintf(*filename, len, \"./%sXXXXXX\", prefix); fd = mkstemp(*filename); } #endif #endif /* -----common section-----*/ if (fd < 0) { int err = AVERROR(errno); av_log(&file_log_ctx, AV_LOG_ERROR, \"ff_tempfile: Cannot open temporary file %s\\n\", *filename); return err; } return fd; /* success */ }"} {"target": 1, "idx": 18481, "func": "static void i82374_isa_realize(DeviceState *dev, Error **errp) { ISAi82374State *isa = I82374(dev); I82374State *s = &isa->state; PortioList *port_list = g_new(PortioList, 1); portio_list_init(port_list, OBJECT(isa), i82374_portio_list, s, \"i82374\"); portio_list_add(port_list, isa_address_space_io(&isa->parent_obj), isa->iobase); i82374_realize(s, errp); qdev_init_gpio_out(dev, &s->out, 1); }"} {"target": 1, "idx": 18488, "func": "uint64_t pc_dimm_get_free_addr(uint64_t address_space_start, uint64_t address_space_size, uint64_t *hint, uint64_t align, uint64_t size, Error **errp) { GSList *list = NULL, *item; uint64_t new_addr, ret = 0; uint64_t address_space_end = address_space_start + address_space_size; g_assert(QEMU_ALIGN_UP(address_space_start, align) == address_space_start); g_assert(QEMU_ALIGN_UP(address_space_size, align) == address_space_size); if (!address_space_size) { error_setg(errp, \"memory hotplug is not enabled, \" \"please add maxmem option\"); goto out; } if (hint && QEMU_ALIGN_UP(*hint, align) != *hint) { error_setg(errp, \"address must be aligned to 0x%\" PRIx64 \" bytes\", align); goto out; } if (QEMU_ALIGN_UP(size, align) != size) { error_setg(errp, \"backend memory size must be multiple of 0x%\" PRIx64, align); goto out; } assert(address_space_end > address_space_start); object_child_foreach(qdev_get_machine(), pc_dimm_built_list, &list); if (hint) { new_addr = *hint; } else { new_addr = address_space_start; } /* find address range that will fit new DIMM */ for (item = list; item; item = g_slist_next(item)) { PCDIMMDevice *dimm = item->data; uint64_t dimm_size = object_property_get_int(OBJECT(dimm), PC_DIMM_SIZE_PROP, errp); if (errp && *errp) { goto out; } if (ranges_overlap(dimm->addr, dimm_size, new_addr, size)) { if (hint) { DeviceState *d = DEVICE(dimm); error_setg(errp, \"address range conflicts with '%s'\", d->id); goto out; } new_addr = QEMU_ALIGN_UP(dimm->addr + dimm_size, align); } } ret = new_addr; if (new_addr < address_space_start) { error_setg(errp, \"can't add memory [0x%\" PRIx64 \":0x%\" PRIx64 \"] at 0x%\" PRIx64, new_addr, size, address_space_start); } else if ((new_addr + size) > address_space_end) { error_setg(errp, \"can't add memory [0x%\" PRIx64 \":0x%\" PRIx64 \"] beyond 0x%\" PRIx64, new_addr, size, address_space_end); } out: g_slist_free(list); return ret; }"} {"target": 1, "idx": 18497, "func": "floatx80 floatx80_scalbn( floatx80 a, int n STATUS_PARAM ) { flag aSign; int16 aExp; uint64_t aSig; aSig = extractFloatx80Frac( a ); aExp = extractFloatx80Exp( a ); aSign = extractFloatx80Sign( a ); if ( aExp == 0x7FF ) { return a; } if (aExp == 0 && aSig == 0) return a; aExp += n; return normalizeRoundAndPackFloatx80( 
STATUS(floatx80_rounding_precision), aSign, aExp, aSig, 0 STATUS_VAR ); }"} {"target": 0, "idx": 18501, "func": "static int decode_frame_header(NUTContext *nut, int *flags_ret, int64_t *pts, int *stream_id, int frame_code){ AVFormatContext *s= nut->avf; ByteIOContext *bc = &s->pb; StreamContext *stc; int size, flags, size_mul, pts_delta, i, reserved_count; uint64_t tmp; if(url_ftell(bc) > nut->last_syncpoint_pos + nut->max_distance){ av_log(s, AV_LOG_ERROR, \"last frame must have been damaged %Ld > %Ld + %d\\n\", url_ftell(bc), nut->last_syncpoint_pos, nut->max_distance); return -1; } flags = nut->frame_code[frame_code].flags; size_mul = nut->frame_code[frame_code].size_mul; size = nut->frame_code[frame_code].size_lsb; *stream_id = nut->frame_code[frame_code].stream_id; pts_delta = nut->frame_code[frame_code].pts_delta; reserved_count = nut->frame_code[frame_code].reserved_count; if(flags & FLAG_INVALID) return -1; if(flags & FLAG_CODED) flags ^= get_v(bc); if(flags & FLAG_STREAM_ID){ GET_V(*stream_id, tmp < s->nb_streams) } stc= &nut->stream[*stream_id]; if(flags&FLAG_CODED_PTS){ int coded_pts= get_v(bc); //FIXME check last_pts validity? if(coded_pts < (1<<stc->msb_pts_shift)){ *pts=lsb2full(stc, coded_pts); }else *pts=coded_pts - (1<<stc->msb_pts_shift); }else *pts= stc->last_pts + pts_delta; if(flags&FLAG_SIZE_MSB){ size += size_mul*get_v(bc); } if(flags&FLAG_RESERVED) reserved_count= get_v(bc); for(i=0; i<reserved_count; i++) get_v(bc); if(flags&FLAG_CHECKSUM){ get_be32(bc); //FIXME check this }else if(size > 2*nut->max_distance){ av_log(s, AV_LOG_ERROR, \"frame size > 2max_distance and no checksum\\n\"); return -1; } *flags_ret= flags; stc->last_pts= *pts; stc->last_key_frame= flags&FLAG_KEY; //FIXME change to last flags return size; }"} {"target": 0, "idx": 18503, "func": "static av_cold int alac_encode_init(AVCodecContext *avctx) { AlacEncodeContext *s = avctx->priv_data; int ret; uint8_t *alac_extradata; avctx->frame_size = s->frame_size = DEFAULT_FRAME_SIZE; if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) { av_log(avctx, AV_LOG_ERROR, \"only pcm_s16 input samples are supported\\n\"); return -1; } /* TODO: Correctly implement multi-channel ALAC. It is similar to multi-channel AAC, in that it has a series of single-channel (SCE), channel-pair (CPE), and LFE elements.
*/ if (avctx->channels > 2) { av_log(avctx, AV_LOG_ERROR, \"only mono or stereo input is currently supported\\n\"); return AVERROR_PATCHWELCOME; } // Set default compression level if (avctx->compression_level == FF_COMPRESSION_DEFAULT) s->compression_level = 2; else s->compression_level = av_clip(avctx->compression_level, 0, 2); // Initialize default Rice parameters s->rc.history_mult = 40; s->rc.initial_history = 10; s->rc.k_modifier = 14; s->rc.rice_modifier = 4; s->max_coded_frame_size = get_max_frame_size(avctx->frame_size, avctx->channels, DEFAULT_SAMPLE_SIZE); // FIXME: consider wasted_bytes s->write_sample_size = DEFAULT_SAMPLE_SIZE + avctx->channels - 1; avctx->extradata = av_mallocz(ALAC_EXTRADATA_SIZE + FF_INPUT_BUFFER_PADDING_SIZE); if (!avctx->extradata) { ret = AVERROR(ENOMEM); goto error; } avctx->extradata_size = ALAC_EXTRADATA_SIZE; alac_extradata = avctx->extradata; AV_WB32(alac_extradata, ALAC_EXTRADATA_SIZE); AV_WB32(alac_extradata+4, MKBETAG('a','l','a','c')); AV_WB32(alac_extradata+12, avctx->frame_size); AV_WB8 (alac_extradata+17, DEFAULT_SAMPLE_SIZE); AV_WB8 (alac_extradata+21, avctx->channels); AV_WB32(alac_extradata+24, s->max_coded_frame_size); AV_WB32(alac_extradata+28, avctx->sample_rate * avctx->channels * DEFAULT_SAMPLE_SIZE); // average bitrate AV_WB32(alac_extradata+32, avctx->sample_rate); // Set relevant extradata fields if (s->compression_level > 0) { AV_WB8(alac_extradata+18, s->rc.history_mult); AV_WB8(alac_extradata+19, s->rc.initial_history); AV_WB8(alac_extradata+20, s->rc.k_modifier); } s->min_prediction_order = DEFAULT_MIN_PRED_ORDER; if (avctx->min_prediction_order >= 0) { if (avctx->min_prediction_order < MIN_LPC_ORDER || avctx->min_prediction_order > ALAC_MAX_LPC_ORDER) { av_log(avctx, AV_LOG_ERROR, \"invalid min prediction order: %d\\n\", avctx->min_prediction_order); ret = AVERROR(EINVAL); goto error; } s->min_prediction_order = avctx->min_prediction_order; } s->max_prediction_order = DEFAULT_MAX_PRED_ORDER; if (avctx->max_prediction_order >= 0) { if (avctx->max_prediction_order < MIN_LPC_ORDER || avctx->max_prediction_order > ALAC_MAX_LPC_ORDER) { av_log(avctx, AV_LOG_ERROR, \"invalid max prediction order: %d\\n\", avctx->max_prediction_order); ret = AVERROR(EINVAL); goto error; } s->max_prediction_order = avctx->max_prediction_order; } if (s->max_prediction_order < s->min_prediction_order) { av_log(avctx, AV_LOG_ERROR, \"invalid prediction orders: min=%d max=%d\\n\", s->min_prediction_order, s->max_prediction_order); ret = AVERROR(EINVAL); goto error; } avctx->coded_frame = avcodec_alloc_frame(); if (!avctx->coded_frame) { ret = AVERROR(ENOMEM); goto error; } s->avctx = avctx; if ((ret = ff_lpc_init(&s->lpc_ctx, avctx->frame_size, s->max_prediction_order, FF_LPC_TYPE_LEVINSON)) < 0) { goto error; } return 0; error: alac_encode_close(avctx); return ret; }"} {"target": 1, "idx": 18524, "func": "static int rv34_decode_mv(RV34DecContext *r, int block_type) { MpegEncContext *s = &r->s; GetBitContext *gb = &s->gb; int i, j, k, l; int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride; int next_bt; memset(r->dmv, 0, sizeof(r->dmv)); for(i = 0; i < num_mvs[block_type]; i++){ r->dmv[i][0] = get_interleaved_se_golomb(gb); r->dmv[i][1] = get_interleaved_se_golomb(gb); switch(block_type){ case RV34_MB_TYPE_INTRA: case RV34_MB_TYPE_INTRA16x16: ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); return 0; case RV34_MB_SKIP: if(s->pict_type == AV_PICTURE_TYPE_P){ 
ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0); break; case RV34_MB_B_DIRECT: //surprisingly, it uses motion scheme from next reference frame /* wait for the current mb row to be finished */ if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) ff_thread_await_progress(&s->next_picture_ptr->tf, FFMAX(0, s->mb_y-1), 0); next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride]; if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){ ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); }else for(j = 0; j < 2; j++) for(i = 0; i < 2; i++) for(k = 0; k < 2; k++) for(l = 0; l < 2; l++) s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]); if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC rv34_mc_2mv(r, block_type); else rv34_mc_2mv_skip(r); ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); break; case RV34_MB_P_16x16: case RV34_MB_P_MIX16x16: rv34_pred_mv(r, block_type, 0, 0); rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0); break; case RV34_MB_B_FORWARD: case RV34_MB_B_BACKWARD: r->dmv[1][0] = r->dmv[0][0]; r->dmv[1][1] = r->dmv[0][1]; if(r->rv30) rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD); else rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD); rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD); break; case RV34_MB_P_16x8: case RV34_MB_P_8x16: rv34_pred_mv(r, block_type, 0, 0); rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1); if(block_type == RV34_MB_P_16x8){ rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0); rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0); if(block_type == RV34_MB_P_8x16){ rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0); rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0); break; case RV34_MB_B_BIDIR: rv34_pred_mv_b (r, block_type, 0); rv34_pred_mv_b (r, block_type, 1); rv34_mc_2mv (r, block_type); break; case RV34_MB_P_8x8: for(i=0;i< 4;i++){ rv34_pred_mv(r, block_type, i, i); rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0); break; return 0;"} {"target": 1, "idx": 18528, "func": "static void pl181_send_command(pl181_state *s) { SDRequest request; uint8_t response[16]; int rlen; request.cmd = s->cmd & PL181_CMD_INDEX; request.arg = s->cmdarg; DPRINTF(\"Command %d %08x\\n\", request.cmd, request.arg); rlen = sd_do_command(s->card, &request, response); if (rlen < 0) goto error; if (s->cmd & PL181_CMD_RESPONSE) { #define RWORD(n) ((response[n] << 24) | (response[n + 1] << 16) \\ | (response[n + 2] << 8) | response[n + 3]) if (rlen == 0 || (rlen == 4 && (s->cmd & PL181_CMD_LONGRESP))) goto error; if (rlen != 4 && rlen != 16) goto error; s->response[0] = RWORD(0); if (rlen == 4) { s->response[1] = s->response[2] = s->response[3] = 0; } else { s->response[1] = RWORD(4); s->response[2] = RWORD(8); s->response[3] = RWORD(12) & ~1; } DPRINTF(\"Response received\\n\"); s->status |= PL181_STATUS_CMDRESPEND; #undef RWORD } else { DPRINTF(\"Command sent\\n\"); s->status |= PL181_STATUS_CMDSENT; } return; error: DPRINTF(\"Timeout\\n\"); s->status |= PL181_STATUS_CMDTIMEOUT; }"} {"target": 0, "idx": 18541, 
"func": "static void print_format_entry(const char *tag, const char *val) { if (!fmt_entries_to_show) { if (tag) { printf(\"%s=%s\\n\", tag, val); } else { printf(\"%s\\n\", val); } } else if (tag && av_dict_get(fmt_entries_to_show, tag, NULL, 0)) { if (nb_fmt_entries_to_show > 1) printf(\"%s=\", tag); printf(\"%s\\n\", val); } }"} {"target": 0, "idx": 18543, "func": "static int dnxhd_decode_dct_block_8(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 4, 32, 6); }"} {"target": 1, "idx": 18557, "func": "void ram_control_load_hook(QEMUFile *f, uint64_t flags) { int ret = -EINVAL; if (f->ops->hook_ram_load) { ret = f->ops->hook_ram_load(f, f->opaque, flags); if (ret < 0) { qemu_file_set_error(f, ret); } } else { qemu_file_set_error(f, ret); } }"} {"target": 1, "idx": 18560, "func": "static int ogg_packet(AVFormatContext *s, int *str, int *dstart, int *dsize, int64_t *fpos) { struct ogg *ogg = s->priv_data; int idx, i, ret; struct ogg_stream *os; int complete = 0; int segp = 0, psize = 0; av_dlog(s, \"ogg_packet: curidx=%i\\n\", ogg->curidx); do{ idx = ogg->curidx; while (idx < 0){ ret = ogg_read_page(s, &idx); if (ret < 0) return ret; } os = ogg->streams + idx; av_dlog(s, \"ogg_packet: idx=%d pstart=%d psize=%d segp=%d nsegs=%d\\n\", idx, os->pstart, os->psize, os->segp, os->nsegs); if (!os->codec){ if (os->header < 0){ os->codec = ogg_find_codec (os->buf, os->bufpos); if (!os->codec){ av_log(s, AV_LOG_WARNING, \"Codec not found\\n\"); os->header = 0; return 0; } }else{ return 0; } } segp = os->segp; psize = os->psize; while (os->segp < os->nsegs){ int ss = os->segments[os->segp++]; os->psize += ss; if (ss < 255){ complete = 1; break; } } if (!complete && os->segp == os->nsegs){ ogg->curidx = -1; os->incomplete = 1; } }while (!complete); if (os->granule == -1) av_log(s, AV_LOG_WARNING, \"Page at %\"PRId64\" is missing granule\\n\", os->page_pos); ogg->curidx = idx; os->incomplete = 0; if (os->header) { os->header = os->codec->header (s, idx); if (!os->header){ os->segp = segp; os->psize = psize; // We have reached the first non-header packet in this stream. // Unfortunately more header packets may still follow for others, // but if we continue with header parsing we may lose data packets. ogg->headers = 1; // Update the header state for all streams and // compute the data_offset. 
if (!s->data_offset) s->data_offset = os->sync_pos; for (i = 0; i < ogg->nstreams; i++) { struct ogg_stream *cur_os = ogg->streams + i; // if we have a partial non-header packet, its start is // obviously at or after the data start if (cur_os->incomplete) s->data_offset = FFMIN(s->data_offset, cur_os->sync_pos); } }else{ os->pstart += os->psize; os->psize = 0; } } else { os->pflags = 0; os->pduration = 0; if (os->codec && os->codec->packet) os->codec->packet (s, idx); if (str) *str = idx; if (dstart) *dstart = os->pstart; if (dsize) *dsize = os->psize; if (fpos) *fpos = os->sync_pos; os->pstart += os->psize; os->psize = 0; if(os->pstart == os->bufpos) os->bufpos = os->pstart = 0; os->sync_pos = os->page_pos; } // determine whether there are more complete packets in this page // if not, the page's granule will apply to this packet os->page_end = 1; for (i = os->segp; i < os->nsegs; i++) if (os->segments[i] < 255) { os->page_end = 0; break; } if (os->segp == os->nsegs) ogg->curidx = -1; return 0; }"} {"target": 1, "idx": 18561, "func": "static void migration_bitmap_sync(void) { RAMBlock *block; uint64_t num_dirty_pages_init = migration_dirty_pages; MigrationState *s = migrate_get_current(); int64_t end_time; int64_t bytes_xfer_now; bitmap_sync_count++; if (!bytes_xfer_prev) { bytes_xfer_prev = ram_bytes_transferred(); } if (!start_time) { start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); } trace_migration_bitmap_sync_start(); address_space_sync_dirty_bitmap(&address_space_memory); rcu_read_lock(); QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { migration_bitmap_sync_range(block->mr->ram_addr, block->used_length); } rcu_read_unlock(); trace_migration_bitmap_sync_end(migration_dirty_pages - num_dirty_pages_init); num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init; end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); /* more than 1 second = 1000 millisecons */ if (end_time > start_time + 1000) { if (migrate_auto_converge()) { /* The following detection logic can be refined later. For now: Check to see if the dirtied bytes is 50% more than the approx. amount of bytes that just got transferred since the last time we were in this routine. If that happens >N times (for now N==4) we turn on the throttle down logic */ bytes_xfer_now = ram_bytes_transferred(); if (s->dirty_pages_rate && (num_dirty_pages_period * TARGET_PAGE_SIZE > (bytes_xfer_now - bytes_xfer_prev)/2) && (dirty_rate_high_cnt++ > 4)) { trace_migration_throttle(); mig_throttle_on = true; dirty_rate_high_cnt = 0; } bytes_xfer_prev = bytes_xfer_now; } else { mig_throttle_on = false; } if (migrate_use_xbzrle()) { if (iterations_prev != acct_info.iterations) { acct_info.xbzrle_cache_miss_rate = (double)(acct_info.xbzrle_cache_miss - xbzrle_cache_miss_prev) / (acct_info.iterations - iterations_prev); } iterations_prev = acct_info.iterations; xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss; } s->dirty_pages_rate = num_dirty_pages_period * 1000 / (end_time - start_time); s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE; start_time = end_time; num_dirty_pages_period = 0; } s->dirty_sync_count = bitmap_sync_count; }"} {"target": 1, "idx": 18571, "func": "static void gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2, unsigned r3) { gen_window_check2(dc, r1, r2 > r3 ? 
r2 : r3); }"} {"target": 0, "idx": 18580, "func": "void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl) { static int print_prefix = 1; static int count; static char prev[LINE_SZ]; AVBPrint part[4]; char line[LINE_SZ]; static int is_atty; int type[2]; unsigned tint = 0; if (level >= 0) { tint = level & 0xff00; level &= 0xff; } if (level > av_log_level) return; #if HAVE_PTHREADS pthread_mutex_lock(&mutex); #endif format_line(ptr, level, fmt, vl, part, &print_prefix, type); snprintf(line, sizeof(line), \"%s%s%s%s\", part[0].str, part[1].str, part[2].str, part[3].str); #if HAVE_ISATTY if (!is_atty) is_atty = isatty(2) ? 1 : -1; #endif if (print_prefix && (flags & AV_LOG_SKIP_REPEATED) && !strcmp(line, prev) && *line && line[strlen(line) - 1] != '\\r'){ count++; if (is_atty == 1) fprintf(stderr, \" Last message repeated %d times\\r\", count); goto end; } if (count > 0) { fprintf(stderr, \" Last message repeated %d times\\n\", count); count = 0; } strcpy(prev, line); sanitize(part[0].str); colored_fputs(type[0], 0, part[0].str); sanitize(part[1].str); colored_fputs(type[1], 0, part[1].str); sanitize(part[2].str); colored_fputs(av_clip(level >> 3, 0, NB_LEVELS - 1), tint >> 8, part[2].str); sanitize(part[3].str); colored_fputs(av_clip(level >> 3, 0, NB_LEVELS - 1), tint >> 8, part[3].str); #if CONFIG_VALGRIND_BACKTRACE if (level <= BACKTRACE_LOGLEVEL) VALGRIND_PRINTF_BACKTRACE(\"%s\", \"\"); #endif end: av_bprint_finalize(part+3, NULL); #if HAVE_PTHREADS pthread_mutex_unlock(&mutex); #endif }"} {"target": 0, "idx": 18603, "func": "int usb_desc_handle_control(USBDevice *dev, int request, int value, int index, int length, uint8_t *data) { const USBDesc *desc = dev->info->usb_desc; int ret = -1; assert(desc != NULL); switch(request) { case DeviceOutRequest | USB_REQ_SET_ADDRESS: dev->addr = value; trace_usb_set_addr(dev->addr); ret = 0; break; case DeviceRequest | USB_REQ_GET_DESCRIPTOR: ret = usb_desc_get_descriptor(dev, value, data, length); break; } return ret; }"} {"target": 0, "idx": 18613, "func": "static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr, uint64_t mode) { CPUState *cs = CPU(s390_env_get_cpu(env)); int ilen = ILEN_LATER_INC; int bits = trans_bits(env, mode) | 4; DPRINTF(\"%s: vaddr=%016\" PRIx64 \" bits=%d\\n\", __func__, vaddr, bits); stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), vaddr | bits); trigger_pgm_exception(env, PGM_PROTECTION, ilen); }"} {"target": 0, "idx": 18615, "func": "int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type, const uint8_t **pp, const uint8_t *desc_list_end, Mp4Descr *mp4_descr, int mp4_descr_count, int pid, MpegTSContext *ts) { const uint8_t *desc_end; int desc_len, desc_tag, desc_es_id; char language[252]; int i; desc_tag = get8(pp, desc_list_end); if (desc_tag < 0) return AVERROR_INVALIDDATA; desc_len = get8(pp, desc_list_end); if (desc_len < 0) return AVERROR_INVALIDDATA; desc_end = *pp + desc_len; if (desc_end > desc_list_end) return AVERROR_INVALIDDATA; av_dlog(fc, \"tag: 0x%02x len=%d\\n\", desc_tag, desc_len); if (st->codec->codec_id == AV_CODEC_ID_NONE && stream_type == STREAM_TYPE_PRIVATE_DATA) mpegts_find_stream_type(st, desc_tag, DESC_types); switch (desc_tag) { case 0x1E: /* SL descriptor */ desc_es_id = get16(pp, desc_end); if (ts && ts->pids[pid]) ts->pids[pid]->es_id = desc_es_id; for (i = 0; i < mp4_descr_count; i++) if (mp4_descr[i].dec_config_descr_len && mp4_descr[i].es_id == desc_es_id) { AVIOContext pb; ffio_init_context(&pb, 
mp4_descr[i].dec_config_descr, mp4_descr[i].dec_config_descr_len, 0, NULL, NULL, NULL, NULL); ff_mp4_read_dec_config_descr(fc, st, &pb); if (st->codec->codec_id == AV_CODEC_ID_AAC && st->codec->extradata_size > 0) st->need_parsing = 0; if (st->codec->codec_id == AV_CODEC_ID_MPEG4SYSTEMS) mpegts_open_section_filter(ts, pid, m4sl_cb, ts, 1); } break; case 0x1F: /* FMC descriptor */ get16(pp, desc_end); if (mp4_descr_count > 0 && st->codec->codec_id == AV_CODEC_ID_AAC_LATM && mp4_descr->dec_config_descr_len && mp4_descr->es_id == pid) { AVIOContext pb; ffio_init_context(&pb, mp4_descr->dec_config_descr, mp4_descr->dec_config_descr_len, 0, NULL, NULL, NULL, NULL); ff_mp4_read_dec_config_descr(fc, st, &pb); if (st->codec->codec_id == AV_CODEC_ID_AAC && st->codec->extradata_size > 0) st->need_parsing = 0; } break; case 0x56: /* DVB teletext descriptor */ language[0] = get8(pp, desc_end); language[1] = get8(pp, desc_end); language[2] = get8(pp, desc_end); language[3] = 0; av_dict_set(&st->metadata, \"language\", language, 0); break; case 0x59: /* subtitling descriptor */ language[0] = get8(pp, desc_end); language[1] = get8(pp, desc_end); language[2] = get8(pp, desc_end); language[3] = 0; /* hearing impaired subtitles detection */ switch (get8(pp, desc_end)) { case 0x20: /* DVB subtitles (for the hard of hearing) with no monitor aspect ratio criticality */ case 0x21: /* DVB subtitles (for the hard of hearing) for display on 4:3 aspect ratio monitor */ case 0x22: /* DVB subtitles (for the hard of hearing) for display on 16:9 aspect ratio monitor */ case 0x23: /* DVB subtitles (for the hard of hearing) for display on 2.21:1 aspect ratio monitor */ case 0x24: /* DVB subtitles (for the hard of hearing) for display on a high definition monitor */ case 0x25: /* DVB subtitles (for the hard of hearing) with plano-stereoscopic disparity for display on a high definition monitor */ st->disposition |= AV_DISPOSITION_HEARING_IMPAIRED; break; } if (st->codec->extradata) { if (st->codec->extradata_size == 4 && memcmp(st->codec->extradata, *pp, 4)) avpriv_request_sample(fc, \"DVB sub with multiple IDs\"); } else { st->codec->extradata = av_malloc(4 + FF_INPUT_BUFFER_PADDING_SIZE); if (st->codec->extradata) { st->codec->extradata_size = 4; memcpy(st->codec->extradata, *pp, 4); } } *pp += 4; av_dict_set(&st->metadata, \"language\", language, 0); break; case 0x0a: /* ISO 639 language descriptor */ for (i = 0; i + 4 <= desc_len; i += 4) { language[i + 0] = get8(pp, desc_end); language[i + 1] = get8(pp, desc_end); language[i + 2] = get8(pp, desc_end); language[i + 3] = ','; switch (get8(pp, desc_end)) { case 0x01: st->disposition |= AV_DISPOSITION_CLEAN_EFFECTS; break; case 0x02: st->disposition |= AV_DISPOSITION_HEARING_IMPAIRED; break; case 0x03: st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED; break; } } if (i && language[0]) { language[i - 1] = 0; av_dict_set(&st->metadata, \"language\", language, 0); } break; case 0x05: /* registration descriptor */ st->codec->codec_tag = bytestream_get_le32(pp); av_dlog(fc, \"reg_desc=%.4s\\n\", (char *)&st->codec->codec_tag); if (st->codec->codec_id == AV_CODEC_ID_NONE) mpegts_find_stream_type(st, st->codec->codec_tag, REGD_types); break; default: break; } *pp = desc_end; return 0; }"} {"target": 0, "idx": 18621, "func": "void string_output_visitor_cleanup(StringOutputVisitor *sov) { if (sov->string) { g_string_free(sov->string, true); } g_list_foreach(sov->ranges, free_range, NULL); g_list_free(sov->ranges); g_free(sov); }"} {"target": 1, "idx": 18627, "func": "void 
memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner, const char *name, uint64_t ram_size) { uint64_t addr = 0; int i; if (nb_numa_nodes == 0 || !have_memdevs) { allocate_system_memory_nonnuma(mr, owner, name, ram_size); return; memory_region_init(mr, owner, name, ram_size); for (i = 0; i < MAX_NODES; i++) { Error *local_err = NULL; uint64_t size = numa_info[i].node_mem; HostMemoryBackend *backend = numa_info[i].node_memdev; if (!backend) { continue; MemoryRegion *seg = host_memory_backend_get_memory(backend, &local_err); if (local_err) { qerror_report_err(local_err); memory_region_add_subregion(mr, addr, seg); vmstate_register_ram_global(seg); addr += size;"} {"target": 1, "idx": 18643, "func": "void av_register_output_format(AVOutputFormat *format) { AVOutputFormat **p = last_oformat; format->next = NULL; while(*p || avpriv_atomic_ptr_cas((void * volatile *)p, NULL, format)) p = &(*p)->next; last_oformat = &format->next; }"} {"target": 0, "idx": 18660, "func": "static inline void gen_jcc(DisasContext *s, int b, target_ulong val, target_ulong next_eip) { int l1, l2; gen_update_cc_op(s); if (s->jmp_opt) { l1 = gen_new_label(); gen_jcc1(s, b, l1); set_cc_op(s, CC_OP_DYNAMIC); gen_goto_tb(s, 0, next_eip); gen_set_label(l1); gen_goto_tb(s, 1, val); s->is_jmp = DISAS_TB_JUMP; } else { l1 = gen_new_label(); l2 = gen_new_label(); gen_jcc1(s, b, l1); set_cc_op(s, CC_OP_DYNAMIC); gen_jmp_im(next_eip); tcg_gen_br(l2); gen_set_label(l1); gen_jmp_im(val); gen_set_label(l2); gen_eob(s); } }"} {"target": 1, "idx": 18702, "func": "static int check_shm_size(IVShmemState *s, int fd) { /* check that the guest isn't going to try and map more memory than the * the object has allocated return -1 to indicate error */ struct stat buf; fstat(fd, &buf); if (s->ivshmem_size > buf.st_size) { fprintf(stderr, \"IVSHMEM ERROR: Requested memory size greater\" \" than shared object size (%\" PRIu64 \" > %\" PRIu64\")\\n\", s->ivshmem_size, (uint64_t)buf.st_size); return -1; } else { return 0; } }"} {"target": 1, "idx": 18706, "func": "e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt) { bool isip4, isip6, isudp, istcp; assert(e1000e_rss_enabled(core)); net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); if (isip4) { bool fragment = net_rx_pkt_get_ip4_info(pkt)->fragment; trace_e1000e_rx_rss_ip4(fragment, istcp, core->mac[MRQC], E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]), E1000_MRQC_EN_IPV4(core->mac[MRQC])); if (!fragment && istcp && E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) { return E1000_MRQ_RSS_TYPE_IPV4TCP; } if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) { return E1000_MRQ_RSS_TYPE_IPV4; } } else if (isip6) { eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt); bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS; bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS; trace_e1000e_rx_rss_ip6(core->mac[RFCTL], ex_dis, new_ex_dis, istcp, ip6info->has_ext_hdrs, ip6info->rss_ex_dst_valid, ip6info->rss_ex_src_valid, core->mac[MRQC], E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]), E1000_MRQC_EN_IPV6EX(core->mac[MRQC]), E1000_MRQC_EN_IPV6(core->mac[MRQC])); if ((!ex_dis || !ip6info->has_ext_hdrs) && (!new_ex_dis || !(ip6info->rss_ex_dst_valid || ip6info->rss_ex_src_valid))) { if (istcp && !ip6info->fragment && E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) { return E1000_MRQ_RSS_TYPE_IPV6TCP; } if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) { return E1000_MRQ_RSS_TYPE_IPV6EX; } } if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) { return E1000_MRQ_RSS_TYPE_IPV6; } } return E1000_MRQ_RSS_TYPE_NONE; 
}"} {"target": 0, "idx": 18723, "func": "static void select_input_picture(MpegEncContext *s){ int i; for(i=1; ireordered_input_picture[i-1]= s->reordered_input_picture[i]; s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL; /* set next picture type & ordering */ if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){ if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){ s->reordered_input_picture[0]= s->input_picture[0]; s->reordered_input_picture[0]->pict_type= I_TYPE; s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; }else{ int b_frames; if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){ if(skip_check(s, s->input_picture[0], s->next_picture_ptr)){ //av_log(NULL, AV_LOG_DEBUG, \"skip %p %Ld\\n\", s->input_picture[0]->data[0], s->input_picture[0]->pts); if(s->input_picture[0]->type == FF_BUFFER_TYPE_SHARED){ for(i=0; i<4; i++) s->input_picture[0]->data[i]= NULL; s->input_picture[0]->type= 0; }else{ assert( s->input_picture[0]->type==FF_BUFFER_TYPE_USER || s->input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL); s->avctx->release_buffer(s->avctx, (AVFrame*)s->input_picture[0]); } goto no_output_pic; } } if(s->flags&CODEC_FLAG_PASS2){ for(i=0; imax_b_frames+1; i++){ int pict_num= s->input_picture[0]->display_picture_number + i; if(pict_num >= s->rc_context.num_entries) break; if(!s->input_picture[i]){ s->rc_context.entry[pict_num-1].new_pict_type = P_TYPE; break; } s->input_picture[i]->pict_type= s->rc_context.entry[pict_num].new_pict_type; } } if(s->avctx->b_frame_strategy==0){ b_frames= s->max_b_frames; while(b_frames && !s->input_picture[b_frames]) b_frames--; }else if(s->avctx->b_frame_strategy==1){ for(i=1; imax_b_frames+1; i++){ if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){ s->input_picture[i]->b_frame_score= get_intra_count(s, s->input_picture[i ]->data[0], s->input_picture[i-1]->data[0], s->linesize) + 1; } } for(i=0; imax_b_frames+1; i++){ if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break; } b_frames= FFMAX(0, i-1); /* reset scores */ for(i=0; iinput_picture[i]->b_frame_score=0; } }else{ av_log(s->avctx, AV_LOG_ERROR, \"illegal b frame strategy\\n\"); b_frames=0; } emms_c(); //static int b_count=0; //b_count+= b_frames; //av_log(s->avctx, AV_LOG_DEBUG, \"b_frames: %d\\n\", b_count); for(i= b_frames - 1; i>=0; i--){ int type= s->input_picture[i]->pict_type; if(type && type != B_TYPE) b_frames= i; } if(s->input_picture[b_frames]->pict_type == B_TYPE && b_frames == s->max_b_frames){ av_log(s->avctx, AV_LOG_ERROR, \"warning, too many b frames in a row\\n\"); } if(s->picture_in_gop_number + b_frames >= s->gop_size){ if((s->flags2 & CODEC_FLAG2_STRICT_GOP) && s->gop_size > s->picture_in_gop_number){ b_frames= s->gop_size - s->picture_in_gop_number - 1; }else{ if(s->flags & CODEC_FLAG_CLOSED_GOP) b_frames=0; s->input_picture[b_frames]->pict_type= I_TYPE; } } if( (s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames && s->input_picture[b_frames]->pict_type== I_TYPE) b_frames--; s->reordered_input_picture[0]= s->input_picture[b_frames]; if(s->reordered_input_picture[0]->pict_type != I_TYPE) s->reordered_input_picture[0]->pict_type= P_TYPE; s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; for(i=0; ireordered_input_picture[i+1]= s->input_picture[i]; s->reordered_input_picture[i+1]->pict_type= B_TYPE; s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++; } } } no_output_pic: 
if(s->reordered_input_picture[0]){ s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0; copy_picture(&s->new_picture, s->reordered_input_picture[0]); if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){ // input is a shared pix, so we can't modifiy it -> alloc a new one & ensure that the shared one is reuseable int i= ff_find_unused_picture(s, 0); Picture *pic= &s->picture[i]; /* mark us unused / free shared pic */ for(i=0; i<4; i++) s->reordered_input_picture[0]->data[i]= NULL; s->reordered_input_picture[0]->type= 0; pic->reference = s->reordered_input_picture[0]->reference; alloc_picture(s, pic, 0); copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]); s->current_picture_ptr= pic; }else{ // input is not a shared pix -> reuse buffer for current_pix assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL); s->current_picture_ptr= s->reordered_input_picture[0]; for(i=0; i<4; i++){ s->new_picture.data[i]+=16; } } copy_picture(&s->current_picture, s->current_picture_ptr); s->picture_number= s->new_picture.display_picture_number; //printf(\"dpn:%d\\n\", s->picture_number); }else{ memset(&s->new_picture, 0, sizeof(Picture)); } }"} {"target": 0, "idx": 18736, "func": "static inline uint32_t lduw_phys_internal(target_phys_addr_t addr, enum device_endian endian) { uint8_t *ptr; uint64_t val; MemoryRegionSection *section; section = phys_page_find(addr >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { /* I/O case */ addr = memory_region_section_addr(section, addr); val = io_mem_read(section->mr, addr, 2); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap16(val); } #else if (endian == DEVICE_BIG_ENDIAN) { val = bswap16(val); } #endif } else { /* RAM case */ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) + memory_region_section_addr(section, addr)); switch (endian) { case DEVICE_LITTLE_ENDIAN: val = lduw_le_p(ptr); break; case DEVICE_BIG_ENDIAN: val = lduw_be_p(ptr); break; default: val = lduw_p(ptr); break; } } return val; }"} {"target": 0, "idx": 18749, "func": "static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int simple){ MpegEncContext * const s = &h->s; int temp8, i; uint64_t temp64; int deblock_left; int deblock_top; int mb_xy; int step = 1; int offset = 1; int uvoffset= 1; int top_idx = 1; if(!simple && FRAME_MBAFF){ if(s->mb_y&1){ offset = MB_MBAFF ? 1 : 17; uvoffset= MB_MBAFF ? 1 : 9; }else{ offset = uvoffset= top_idx = MB_MBAFF ? 0 : 1; } step= MB_MBAFF ? 
2 : 1; } if(h->deblocking_filter == 2) { mb_xy = h->mb_xy; deblock_left = h->slice_table[mb_xy] == h->slice_table[mb_xy - 1]; deblock_top = h->slice_table[mb_xy] == h->slice_table[h->top_mb_xy]; } else { deblock_left = (s->mb_x > 0); deblock_top = (s->mb_y > !!MB_FIELD); } src_y -= linesize + 1; src_cb -= uvlinesize + 1; src_cr -= uvlinesize + 1; #define XCHG(a,b,t,xchg)\\ t= a;\\ if(xchg)\\ a= b;\\ b= t; if(deblock_left){ for(i = !deblock_top; i<16; i++){ XCHG(h->left_border[offset+i*step], src_y [i* linesize], temp8, xchg); } XCHG(h->left_border[offset+i*step], src_y [i* linesize], temp8, 1); } if(deblock_top){ XCHG(*(uint64_t*)(h->top_borders[top_idx][s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg); XCHG(*(uint64_t*)(h->top_borders[top_idx][s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1); if(s->mb_x+1 < s->mb_width){ XCHG(*(uint64_t*)(h->top_borders[top_idx][s->mb_x+1]), *(uint64_t*)(src_y +17), temp64, 1); } } if(simple || !CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ if(deblock_left){ for(i = !deblock_top; i<8; i++){ XCHG(h->left_border[uvoffset+34 +i*step], src_cb[i*uvlinesize], temp8, xchg); XCHG(h->left_border[uvoffset+34+18+i*step], src_cr[i*uvlinesize], temp8, xchg); } XCHG(h->left_border[uvoffset+34 +i*step], src_cb[i*uvlinesize], temp8, 1); XCHG(h->left_border[uvoffset+34+18+i*step], src_cr[i*uvlinesize], temp8, 1); } if(deblock_top){ XCHG(*(uint64_t*)(h->top_borders[top_idx][s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1); XCHG(*(uint64_t*)(h->top_borders[top_idx][s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1); } } }"} {"target": 1, "idx": 18753, "func": "static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right) { uint8_t *prev = dst - stride + 4; uint32_t topleft; if(!up && !left) itype = DC_128_PRED; else if(!up){ if(itype == VERT_PRED) itype = HOR_PRED; if(itype == DC_PRED) itype = LEFT_DC_PRED; }else if(!left){ if(itype == HOR_PRED) itype = VERT_PRED; if(itype == DC_PRED) itype = TOP_DC_PRED; if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN; } if(!down){ if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN; if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN; if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN; } if(!right && up){ topleft = dst[-stride + 3] * 0x01010101; prev = (uint8_t*)&topleft; } r->h.pred4x4[itype](dst, prev, stride); }"} {"target": 1, "idx": 18756, "func": "static av_cold int MP3lame_encode_init(AVCodecContext *avctx) { Mp3AudioContext *s = avctx->priv_data; if (avctx->channels > 2) return -1; s->stereo = avctx->channels > 1 ? 1 : 0; if ((s->gfp = lame_init()) == NULL) goto err; lame_set_in_samplerate(s->gfp, avctx->sample_rate); lame_set_out_samplerate(s->gfp, avctx->sample_rate); lame_set_num_channels(s->gfp, avctx->channels); if(avctx->compression_level == FF_COMPRESSION_DEFAULT) { lame_set_quality(s->gfp, 5); } else { lame_set_quality(s->gfp, avctx->compression_level); } lame_set_mode(s->gfp, s->stereo ? JOINT_STEREO : MONO); lame_set_brate(s->gfp, avctx->bit_rate/1000); if(avctx->flags & CODEC_FLAG_QSCALE) { lame_set_brate(s->gfp, 0); lame_set_VBR(s->gfp, vbr_default); lame_set_VBR_quality(s->gfp, avctx->global_quality/(float)FF_QP2LAMBDA); } lame_set_bWriteVbrTag(s->gfp,0); lame_set_disable_reservoir(s->gfp, avctx->flags2 & CODEC_FLAG2_BIT_RESERVOIR ? 
0 : 1); if (lame_init_params(s->gfp) < 0) goto err_close; avctx->frame_size = lame_get_framesize(s->gfp); avctx->coded_frame= avcodec_alloc_frame(); avctx->coded_frame->key_frame= 1; return 0; err_close: lame_close(s->gfp); err: return -1; }"} {"target": 0, "idx": 18766, "func": "int av_strerror(int errnum, char *errbuf, size_t errbuf_size) { int ret = 0; const char *errstr = NULL; switch (errnum) { case AVERROR_EOF: errstr = \"End of file\"; break; case AVERROR_INVALIDDATA: errstr = \"Invalid data found when processing input\"; break; case AVERROR_NUMEXPECTED: errstr = \"Number syntax expected in filename\"; break; case AVERROR_PATCHWELCOME: errstr = \"Not yet implemented in FFmpeg, patches welcome\"; break; } if (errstr) { av_strlcpy(errbuf, errstr, errbuf_size); } else { #if HAVE_STRERROR_R ret = strerror_r(AVUNERROR(errnum), errbuf, errbuf_size); #endif if (!HAVE_STRERROR_R || ret < 0) snprintf(errbuf, errbuf_size, \"Error number %d occurred\", errnum); } return ret; }"} {"target": 1, "idx": 18776, "func": "void *postcopy_get_tmp_page(MigrationIncomingState *mis) { if (!mis->postcopy_tmp_page) { mis->postcopy_tmp_page = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (!mis->postcopy_tmp_page) { error_report(\"%s: %s\", __func__, strerror(errno)); return NULL; } } return mis->postcopy_tmp_page; }"} {"target": 0, "idx": 18795, "func": "static int pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num) { int slot_addend; slot_addend = (pci_dev->devfn >> 3) - 1; return (irq_num + slot_addend) & 3; }"} {"target": 0, "idx": 18806, "func": "static int h263_decode_gob_header(MpegEncContext *s) { unsigned int val, gob_number; int left; /* Check for GOB Start Code */ val = show_bits(&s->gb, 16); if(val) return -1; /* We have a GBSC probably with GSTUFF */ skip_bits(&s->gb, 16); /* Drop the zeros */ left= get_bits_left(&s->gb); //MN: we must check the bits left or we might end in a infinite loop (or segfault) for(;left>13; left--){ if(get_bits1(&s->gb)) break; /* Seek the '1' bit */ } if(left<=13) return -1; if(s->h263_slice_structured){ if(get_bits1(&s->gb)==0) return -1; ff_h263_decode_mba(s); if(s->mb_num > 1583) if(get_bits1(&s->gb)==0) return -1; s->qscale = get_bits(&s->gb, 5); /* SQUANT */ if(get_bits1(&s->gb)==0) return -1; skip_bits(&s->gb, 2); /* GFID */ }else{ gob_number = get_bits(&s->gb, 5); /* GN */ s->mb_x= 0; s->mb_y= s->gob_index* gob_number; skip_bits(&s->gb, 2); /* GFID */ s->qscale = get_bits(&s->gb, 5); /* GQUANT */ } if(s->mb_y >= s->mb_height) return -1; if(s->qscale==0) return -1; return 0; }"} {"target": 1, "idx": 18814, "func": "DevicePropertyInfoList *qmp_device_list_properties(const char *typename, Error **errp) { ObjectClass *klass; Object *obj; ObjectProperty *prop; DevicePropertyInfoList *prop_list = NULL; klass = object_class_by_name(typename); if (klass == NULL) { error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, \"Device '%s' not found\", typename); klass = object_class_dynamic_cast(klass, TYPE_DEVICE); if (klass == NULL) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, \"name\", TYPE_DEVICE); if (object_class_is_abstract(klass)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, \"name\", \"non-abstract device type\"); obj = object_new(typename); QTAILQ_FOREACH(prop, &obj->properties, node) { DevicePropertyInfo *info; DevicePropertyInfoList *entry; /* Skip Object and DeviceState properties */ if (strcmp(prop->name, \"type\") == 0 || strcmp(prop->name, \"realized\") == 0 || strcmp(prop->name, \"hotpluggable\") == 0 || 
strcmp(prop->name, \"hotplugged\") == 0 || strcmp(prop->name, \"parent_bus\") == 0) { continue; /* Skip legacy properties since they are just string versions of * properties that we already list. */ if (strstart(prop->name, \"legacy-\", NULL)) { continue; info = make_device_property_info(klass, prop->name, prop->type, prop->description); if (!info) { continue; entry = g_malloc0(sizeof(*entry)); entry->value = info; entry->next = prop_list; prop_list = entry; object_unref(obj); return prop_list;"} {"target": 1, "idx": 18828, "func": "static void FUNCC(pred4x4_dc)(uint8_t *_src, const uint8_t *topright, int _stride){ pixel *src = (pixel*)_src; int stride = _stride/sizeof(pixel); const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3; ((pixel4*)(src+0*stride))[0]= ((pixel4*)(src+1*stride))[0]= ((pixel4*)(src+2*stride))[0]= ((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4(dc); }"} {"target": 1, "idx": 18834, "func": "static void test_bmdma_setup(void) { ide_test_start( \"-vnc none \" \"-drive file=%s,if=ide,serial=%s,cache=writeback \" \"-global ide-hd.ver=%s\", tmp_path, \"testdisk\", \"version\"); }"} {"target": 0, "idx": 18852, "func": "static void unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb) { int i; int dc_y_table; int dc_c_table; int ac_y_table; int ac_c_table; int residual_eob_run = 0; /* fetch the DC table indices */ dc_y_table = get_bits(gb, 4); dc_c_table = get_bits(gb, 4); /* unpack the Y plane DC coefficients */ debug_vp3(\" vp3: unpacking Y plane DC coefficients using table %d\\n\", dc_y_table); residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0, s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); /* unpack the C plane DC coefficients */ debug_vp3(\" vp3: unpacking C plane DC coefficients using table %d\\n\", dc_c_table); residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0, s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); /* fetch the AC table indices */ ac_y_table = get_bits(gb, 4); ac_c_table = get_bits(gb, 4); /* unpack the group 1 AC coefficients (coeffs 1-5) */ for (i = 1; i <= 5; i++) { debug_vp3(\" vp3: unpacking level %d Y plane AC coefficients using table %d\\n\", i, ac_y_table); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_y_table], i, s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); debug_vp3(\" vp3: unpacking level %d C plane AC coefficients using table %d\\n\", i, ac_c_table); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_c_table], i, s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); } /* unpack the group 2 AC coefficients (coeffs 6-14) */ for (i = 6; i <= 14; i++) { debug_vp3(\" vp3: unpacking level %d Y plane AC coefficients using table %d\\n\", i, ac_y_table); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_y_table], i, s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); debug_vp3(\" vp3: unpacking level %d C plane AC coefficients using table %d\\n\", i, ac_c_table); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_c_table], i, s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); } /* unpack the group 3 AC coefficients (coeffs 15-27) */ for (i = 15; i <= 27; i++) { debug_vp3(\" vp3: unpacking level %d Y plane AC coefficients using table %d\\n\", i, ac_y_table); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_y_table], i, s->first_coded_y_fragment, 
s->last_coded_y_fragment, residual_eob_run); debug_vp3(\" vp3: unpacking level %d C plane AC coefficients using table %d\\n\", i, ac_c_table); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_c_table], i, s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); } /* unpack the group 4 AC coefficients (coeffs 28-63) */ for (i = 28; i <= 63; i++) { debug_vp3(\" vp3: unpacking level %d Y plane AC coefficients using table %d\\n\", i, ac_y_table); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_y_table], i, s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); debug_vp3(\" vp3: unpacking level %d C plane AC coefficients using table %d\\n\", i, ac_c_table); residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_c_table], i, s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); } }"} {"target": 0, "idx": 18872, "func": "int bdrv_has_zero_init(BlockDriverState *bs) { assert(bs->drv); if (bs->drv->no_zero_init) { return 0; } else if (bs->file) { return bdrv_has_zero_init(bs->file); } return 1; }"} {"target": 0, "idx": 18883, "func": "void ff_fix_long_mvs(MpegEncContext * s, uint8_t *field_select_table, int field_select, int16_t (*mv_table)[2], int f_code, int type, int truncate) { MotionEstContext * const c= &s->me; int y, h_range, v_range; // RAL: 8 in MPEG-1, 16 in MPEG-4 int range = (((s->out_format == FMT_MPEG1 || s->msmpeg4_version) ? 8 : 16) << f_code); if(c->avctx->me_range && range > c->avctx->me_range) range= c->avctx->me_range; h_range= range; v_range= field_select_table ? range>>1 : range; /* clip / convert to intra 16x16 type MVs */ for(y=0; y<s->mb_height; y++){ int x; int xy= y*s->mb_stride; for(x=0; x<s->mb_width; x++){ if (s->mb_type[xy] & type){ // RAL: \"type\" test added... if(field_select_table==NULL || field_select_table[xy] == field_select){ if( mv_table[xy][0] >=h_range || mv_table[xy][0] <-h_range || mv_table[xy][1] >=v_range || mv_table[xy][1] <-v_range){ if(truncate){ if (mv_table[xy][0] > h_range-1) mv_table[xy][0]= h_range-1; else if(mv_table[xy][0] < -h_range ) mv_table[xy][0]= -h_range; if (mv_table[xy][1] > v_range-1) mv_table[xy][1]= v_range-1; else if(mv_table[xy][1] < -v_range ) mv_table[xy][1]= -v_range; }else{ s->mb_type[xy] &= ~type; s->mb_type[xy] |= CANDIDATE_MB_TYPE_INTRA; mv_table[xy][0]= mv_table[xy][1]= 0; } } } } xy++; } } }"} {"target": 0, "idx": 18891, "func": "build_madt(GArray *table_data, GArray *linker, PCMachineState *pcms) { MachineClass *mc = MACHINE_GET_CLASS(pcms); CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(pcms)); int madt_start = table_data->len; AcpiMultipleApicTable *madt; AcpiMadtIoApic *io_apic; AcpiMadtIntsrcovr *intsrcovr; AcpiMadtLocalNmi *local_nmi; int i; madt = acpi_data_push(table_data, sizeof *madt); madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS); madt->flags = cpu_to_le32(1); for (i = 0; i < apic_ids->len; i++) { AcpiMadtProcessorApic *apic = acpi_data_push(table_data, sizeof *apic); int apic_id = apic_ids->cpus[i].arch_id; apic->type = ACPI_APIC_PROCESSOR; apic->length = sizeof(*apic); apic->processor_id = i; apic->local_apic_id = apic_id; if (apic_ids->cpus[i].cpu != NULL) { apic->flags = cpu_to_le32(1); } else { /* ACPI spec says that LAPIC entry for non present * CPU may be omitted from MADT or it must be marked * as disabled. However omitting non present CPU from * MADT breaks hotplug on linux. So possible CPUs * should be put in MADT but kept disabled.
*/ apic->flags = cpu_to_le32(0); } } g_free(apic_ids); io_apic = acpi_data_push(table_data, sizeof *io_apic); io_apic->type = ACPI_APIC_IO; io_apic->length = sizeof(*io_apic); #define ACPI_BUILD_IOAPIC_ID 0x0 io_apic->io_apic_id = ACPI_BUILD_IOAPIC_ID; io_apic->address = cpu_to_le32(IO_APIC_DEFAULT_ADDRESS); io_apic->interrupt = cpu_to_le32(0); if (pcms->apic_xrupt_override) { intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr); intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE; intsrcovr->length = sizeof(*intsrcovr); intsrcovr->source = 0; intsrcovr->gsi = cpu_to_le32(2); intsrcovr->flags = cpu_to_le16(0); /* conforms to bus specifications */ } for (i = 1; i < 16; i++) { #define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11)) if (!(ACPI_BUILD_PCI_IRQS & (1 << i))) { /* No need for a INT source override structure. */ continue; } intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr); intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE; intsrcovr->length = sizeof(*intsrcovr); intsrcovr->source = i; intsrcovr->gsi = cpu_to_le32(i); intsrcovr->flags = cpu_to_le16(0xd); /* active high, level triggered */ } local_nmi = acpi_data_push(table_data, sizeof *local_nmi); local_nmi->type = ACPI_APIC_LOCAL_NMI; local_nmi->length = sizeof(*local_nmi); local_nmi->processor_id = 0xff; /* all processors */ local_nmi->flags = cpu_to_le16(0); local_nmi->lint = 1; /* ACPI_LINT1 */ build_header(linker, table_data, (void *)(table_data->data + madt_start), \"APIC\", table_data->len - madt_start, 1, NULL, NULL); }"} {"target": 0, "idx": 18898, "func": "static int ftp_write(URLContext *h, const unsigned char *buf, int size) { int err; FTPContext *s = h->priv_data; int written; av_dlog(h, \"ftp protocol write %d bytes\\n\", size); if (s->state == DISCONNECTED) { if ((err = ftp_connect_data_connection(h)) < 0) return err; } if (s->state == READY) { if ((err = ftp_store(s)) < 0) return err; } if (s->conn_data && s->state == UPLOADING) { written = ffurl_write(s->conn_data, buf, size); if (written > 0) { s->position += written; s->filesize = FFMAX(s->filesize, s->position); } return written; } av_log(h, AV_LOG_ERROR, \"FTP write failed\\n\"); return AVERROR(EIO); }"} {"target": 0, "idx": 18905, "func": "static void integratorcm_write(void *opaque, target_phys_addr_t offset, uint64_t value, unsigned size) { integratorcm_state *s = (integratorcm_state *)opaque; switch (offset >> 2) { case 2: /* CM_OSC */ if (s->cm_lock == 0xa05f) s->cm_osc = value; break; case 3: /* CM_CTRL */ integratorcm_set_ctrl(s, value); break; case 5: /* CM_LOCK */ s->cm_lock = value & 0xffff; break; case 7: /* CM_AUXOSC */ if (s->cm_lock == 0xa05f) s->cm_auxosc = value; break; case 8: /* CM_SDRAM */ s->cm_sdram = value; break; case 9: /* CM_INIT */ /* ??? This can change the memory bus frequency. 
*/ s->cm_init = value; break; case 12: /* CM_FLAGSS */ s->cm_flags |= value; break; case 13: /* CM_FLAGSC */ s->cm_flags &= ~value; break; case 14: /* CM_NVFLAGSS */ s->cm_nvflags |= value; break; case 15: /* CM_NVFLAGSS */ s->cm_nvflags &= ~value; break; case 18: /* CM_IRQ_ENSET */ s->irq_enabled |= value; integratorcm_update(s); break; case 19: /* CM_IRQ_ENCLR */ s->irq_enabled &= ~value; integratorcm_update(s); break; case 20: /* CM_SOFT_INTSET */ s->int_level |= (value & 1); integratorcm_update(s); break; case 21: /* CM_SOFT_INTCLR */ s->int_level &= ~(value & 1); integratorcm_update(s); break; case 26: /* CM_FIQ_ENSET */ s->fiq_enabled |= value; integratorcm_update(s); break; case 27: /* CM_FIQ_ENCLR */ s->fiq_enabled &= ~value; integratorcm_update(s); break; case 32: /* CM_VOLTAGE_CTL0 */ case 33: /* CM_VOLTAGE_CTL1 */ case 34: /* CM_VOLTAGE_CTL2 */ case 35: /* CM_VOLTAGE_CTL3 */ /* ??? Voltage control unimplemented. */ break; default: hw_error(\"integratorcm_write: Unimplemented offset 0x%x\\n\", (int)offset); break; } }"} {"target": 0, "idx": 18916, "func": "static int pci_vpb_map_irq(PCIDevice *d, int irq_num) { PCIVPBState *s = container_of(d->bus, PCIVPBState, pci_bus); if (s->irq_mapping == PCI_VPB_IRQMAP_BROKEN) { /* Legacy broken IRQ mapping for compatibility with old and * buggy Linux guests */ return irq_num; } /* Slot to IRQ mapping for RealView Platform Baseboard 926 backplane * name slot IntA IntB IntC IntD * A 31 IRQ28 IRQ29 IRQ30 IRQ27 * B 30 IRQ27 IRQ28 IRQ29 IRQ30 * C 29 IRQ30 IRQ27 IRQ28 IRQ29 * Slot C is for the host bridge; A and B the peripherals. * Our output irqs 0..3 correspond to the baseboard's 27..30. * * This mapping function takes account of an oddity in the PB926 * board wiring, where the FPGA's P_nINTA input is connected to * the INTB connection on the board PCI edge connector, P_nINTB * is connected to INTC, and so on, so everything is one number * further round from where you might expect. 
*/ return pci_swizzle_map_irq_fn(d, irq_num + 2); }"} {"target": 0, "idx": 18930, "func": "static uint32_t omap_l4_io_readh(void *opaque, target_phys_addr_t addr) { unsigned int i = (addr - OMAP2_L4_BASE) >> TARGET_PAGE_BITS; return omap_l4_io_readh_fn[i](omap_l4_io_opaque[i], addr); }"} {"target": 1, "idx": 18943, "func": "static void test_keyval_parse_list(void) { Error *err = NULL; QDict *qdict, *sub_qdict; /* Root can't be a list */ qdict = keyval_parse(\"0=1\", NULL, &err); error_free_or_abort(&err); g_assert(!qdict); /* List elements need not be in order */ qdict = keyval_parse(\"list.0=null,list.2=zwei,list.1=eins\", NULL, &error_abort); g_assert_cmpint(qdict_size(qdict), ==, 1); check_list012(qdict_get_qlist(qdict, \"list\")); QDECREF(qdict); /* Multiple indexes, last one wins */ qdict = keyval_parse(\"list.1=goner,list.0=null,list.1=eins,list.2=zwei\", NULL, &error_abort); g_assert_cmpint(qdict_size(qdict), ==, 1); check_list012(qdict_get_qlist(qdict, \"list\")); QDECREF(qdict); /* List at deeper nesting */ qdict = keyval_parse(\"a.list.1=eins,a.list.0=null,a.list.2=zwei\", NULL, &error_abort); g_assert_cmpint(qdict_size(qdict), ==, 1); sub_qdict = qdict_get_qdict(qdict, \"a\"); g_assert_cmpint(qdict_size(sub_qdict), ==, 1); check_list012(qdict_get_qlist(sub_qdict, \"list\")); QDECREF(qdict); /* Inconsistent dotted keys: both list and dictionary */ qdict = keyval_parse(\"a.b.c=1,a.b.0=2\", NULL, &err); error_free_or_abort(&err); g_assert(!qdict); qdict = keyval_parse(\"a.0.c=1,a.b.c=2\", NULL, &err); error_free_or_abort(&err); g_assert(!qdict); /* Missing list indexes */ qdict = keyval_parse(\"list.2=lonely\", NULL, &err); error_free_or_abort(&err); g_assert(!qdict); qdict = keyval_parse(\"list.0=null,list.2=eins,list.02=zwei\", NULL, &err); error_free_or_abort(&err); g_assert(!qdict); }"} {"target": 1, "idx": 18946, "func": "static int ds1338_recv(I2CSlave *i2c) { DS1338State *s = FROM_I2C_SLAVE(DS1338State, i2c); uint8_t res; res = s->nvram[s->ptr]; s->ptr = (s->ptr + 1) & 0xff; return res; }"} {"target": 1, "idx": 18955, "func": "size_t qsb_get_length(const QEMUSizedBuffer *qsb) { return qsb->used; }"} {"target": 1, "idx": 18959, "func": "static int udp_write(URLContext *h, const uint8_t *buf, int size) { UDPContext *s = h->priv_data; int ret; #if HAVE_PTHREAD_CANCEL if (s->fifo) { uint8_t tmp[4]; pthread_mutex_lock(&s->mutex); /* Return error if last tx failed. Here we can't know on which packet error was, but it needs to know that error exists. */ if (s->circular_buffer_error<0) { int err=s->circular_buffer_error; s->circular_buffer_error=0; pthread_mutex_unlock(&s->mutex); return err; } if(av_fifo_space(s->fifo) < size + 4) { /* What about a partial packet tx ? */ pthread_mutex_unlock(&s->mutex); return AVERROR(ENOMEM); } AV_WL32(tmp, size); av_fifo_generic_write(s->fifo, tmp, 4, NULL); /* size of packet */ av_fifo_generic_write(s->fifo, (uint8_t *)buf, size, NULL); /* the data */ pthread_cond_signal(&s->cond); pthread_mutex_unlock(&s->mutex); return size; } #endif if (!(h->flags & AVIO_FLAG_NONBLOCK)) { ret = ff_network_wait_fd(s->udp_fd, 1); if (ret < 0) return ret; } if (!s->is_connected) { ret = sendto (s->udp_fd, buf, size, 0, (struct sockaddr *) &s->dest_addr, s->dest_addr_len); } else ret = send(s->udp_fd, buf, size, 0); return ret < 0 ? 
ff_neterrno() : ret; }"} {"target": 1, "idx": 18965, "func": "static void compute_frame_duration(int *pnum, int *pden, AVStream *st, AVCodecParserContext *pc, AVPacket *pkt) { int frame_size; *pnum = 0; *pden = 0; switch(st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: if (st->r_frame_rate.num) { *pnum = st->r_frame_rate.den; *pden = st->r_frame_rate.num; } else if(st->time_base.num*1000LL > st->time_base.den) { *pnum = st->time_base.num; *pden = st->time_base.den; }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){ *pnum = st->codec->time_base.num; *pden = st->codec->time_base.den; if (pc && pc->repeat_pict) { *pnum = (*pnum) * (1 + pc->repeat_pict); } //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet //Thus if we have no parser in such case leave duration undefined. if(st->codec->ticks_per_frame>1 && !pc){ *pnum = *pden = 0; } } break; case AVMEDIA_TYPE_AUDIO: frame_size = get_audio_frame_size(st->codec, pkt->size, 0); if (frame_size <= 0 || st->codec->sample_rate <= 0) break; *pnum = frame_size; *pden = st->codec->sample_rate; break; default: break; } }"} {"target": 0, "idx": 18990, "func": "static int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque) { ram_addr_t addr; uint64_t bytes_transferred_last; double bwidth = 0; uint64_t expected_time = 0; if (stage < 0) { cpu_physical_memory_set_dirty_tracking(0); return 0; } if (cpu_physical_sync_dirty_bitmap(0, TARGET_PHYS_ADDR_MAX) != 0) { qemu_file_set_error(f); return 0; } if (stage == 1) { bytes_transferred = 0; /* Make sure all dirty bits are set */ for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) { if (!cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) cpu_physical_memory_set_dirty(addr); } /* Enable dirty memory tracking */ cpu_physical_memory_set_dirty_tracking(1); qemu_put_be64(f, last_ram_offset | RAM_SAVE_FLAG_MEM_SIZE); } bytes_transferred_last = bytes_transferred; bwidth = qemu_get_clock_ns(rt_clock); while (!qemu_file_rate_limit(f)) { int ret; ret = ram_save_block(f); bytes_transferred += ret * TARGET_PAGE_SIZE; if (ret == 0) /* no more blocks */ break; } bwidth = qemu_get_clock_ns(rt_clock) - bwidth; bwidth = (bytes_transferred - bytes_transferred_last) / bwidth; /* if we haven't transferred anything this round, force expected_time to a * a very high value, but without crashing */ if (bwidth == 0) bwidth = 0.000001; /* try transferring iterative blocks of memory */ if (stage == 3) { /* flush all remaining blocks regardless of rate limiting */ while (ram_save_block(f) != 0) { bytes_transferred += TARGET_PAGE_SIZE; } cpu_physical_memory_set_dirty_tracking(0); } qemu_put_be64(f, RAM_SAVE_FLAG_EOS); expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth; return (stage == 2) && (expected_time <= migrate_max_downtime()); }"} {"target": 0, "idx": 18994, "func": "static int zerocodec_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { ZeroCodecContext *zc = avctx->priv_data; AVFrame *pic = avctx->coded_frame; AVFrame *prev_pic = &zc->previous_frame; z_stream *zstream = &zc->zstream; uint8_t *prev, *dst; int i, j, zret; pic->reference = 3; if (avctx->get_buffer(avctx, pic) < 0) { av_log(avctx, AV_LOG_ERROR, \"Could not allocate buffer.\\n\"); return AVERROR(ENOMEM); } zret = inflateReset(zstream); if (zret != Z_OK) { av_log(avctx, AV_LOG_ERROR, \"Could not reset inflate: %d\\n\", zret); return AVERROR(EINVAL); } zstream->next_in = avpkt->data; zstream->avail_in = avpkt->size; prev = 
prev_pic->data[0]; dst = pic->data[0]; /** * ZeroCodec has very simple interframe compression. If a value * is the same as the previous frame, set it to 0. */ if (avpkt->flags & AV_PKT_FLAG_KEY) { pic->key_frame = 1; pic->pict_type = AV_PICTURE_TYPE_I; for (i = 0; i < avctx->height; i++) { zstream->next_out = dst; zstream->avail_out = avctx->width << 1; zret = inflate(zstream, Z_SYNC_FLUSH); if (zret != Z_OK && zret != Z_STREAM_END) { av_log(avctx, AV_LOG_ERROR, \"Inflate failed with return code: %d\\n\", zret); return AVERROR(EINVAL); } dst += pic->linesize[0]; } } else { pic->key_frame = 0; pic->pict_type = AV_PICTURE_TYPE_P; for (i = 0; i < avctx->height; i++) { zstream->next_out = dst; zstream->avail_out = avctx->width << 1; zret = inflate(zstream, Z_SYNC_FLUSH); if (zret != Z_OK && zret != Z_STREAM_END) { av_log(avctx, AV_LOG_ERROR, \"Inflate failed with return code: %d\\n\", zret); return AVERROR(EINVAL); } for (j = 0; j < avctx->width << 1; j++) dst[j] += prev[j] & -!dst[j]; prev += prev_pic->linesize[0]; dst += pic->linesize[0]; } } /* Release the previous buffer if need be */ if (prev_pic->data[0]) avctx->release_buffer(avctx, prev_pic); /* Store the previouse frame for use later */ *prev_pic = *pic; *data_size = sizeof(AVFrame); *(AVFrame *)data = *pic; return avpkt->size; }"} {"target": 0, "idx": 18997, "func": "static void g364fb_screen_dump(void *opaque, const char *filename, bool cswitch, Error **errp) { G364State *s = opaque; int ret, y, x; uint8_t index; uint8_t *data_buffer; FILE *f; qemu_flush_coalesced_mmio_buffer(); if (s->depth != 8) { error_setg(errp, \"g364: unknown guest depth %d\", s->depth); return; } f = fopen(filename, \"wb\"); if (!f) { error_setg(errp, \"failed to open file '%s': %s\", filename, strerror(errno)); return; } if (s->ctla & CTLA_FORCE_BLANK) { /* blank screen */ ret = fprintf(f, \"P4\\n%d %d\\n\", s->width, s->height); if (ret < 0) { goto write_err; } for (y = 0; y < s->height; y++) for (x = 0; x < s->width; x++) { ret = fputc(0, f); if (ret == EOF) { goto write_err; } } } else { data_buffer = s->vram + s->top_of_screen; ret = fprintf(f, \"P6\\n%d %d\\n%d\\n\", s->width, s->height, 255); if (ret < 0) { goto write_err; } for (y = 0; y < s->height; y++) for (x = 0; x < s->width; x++, data_buffer++) { index = *data_buffer; ret = fputc(s->color_palette[index][0], f); if (ret == EOF) { goto write_err; } ret = fputc(s->color_palette[index][1], f); if (ret == EOF) { goto write_err; } ret = fputc(s->color_palette[index][2], f); if (ret == EOF) { goto write_err; } } } out: fclose(f); return; write_err: error_setg(errp, \"failed to write to file '%s': %s\", filename, strerror(errno)); unlink(filename); goto out; }"} {"target": 0, "idx": 18999, "func": "float64 helper_fsmuld(CPUSPARCState *env, float32 src1, float32 src2) { float64 ret; clear_float_exceptions(env); ret = float64_mul(float32_to_float64(src1, &env->fp_status), float32_to_float64(src2, &env->fp_status), &env->fp_status); check_ieee_exceptions(env); return ret; }"} {"target": 0, "idx": 19002, "func": "target_phys_addr_t omap_l4_attach(struct omap_target_agent_s *ta, int region, int iotype) { target_phys_addr_t base; ssize_t size; #ifdef L4_MUX_HACK int i; #endif if (region < 0 || region >= ta->regions) { fprintf(stderr, \"%s: bad io region (%i)\\n\", __FUNCTION__, region); exit(-1); } base = ta->bus->base + ta->start[region].offset; size = ta->start[region].size; if (iotype) { #ifndef L4_MUX_HACK cpu_register_physical_memory(base, size, iotype); #else cpu_register_physical_memory(base, size, 
omap_cpu_io_entry); i = (base - ta->bus->base) / TARGET_PAGE_SIZE; for (; size > 0; size -= TARGET_PAGE_SIZE, i ++) { omap_l4_io_readb_fn[i] = omap_l4_io_entry[iotype].mem_read[0]; omap_l4_io_readh_fn[i] = omap_l4_io_entry[iotype].mem_read[1]; omap_l4_io_readw_fn[i] = omap_l4_io_entry[iotype].mem_read[2]; omap_l4_io_writeb_fn[i] = omap_l4_io_entry[iotype].mem_write[0]; omap_l4_io_writeh_fn[i] = omap_l4_io_entry[iotype].mem_write[1]; omap_l4_io_writew_fn[i] = omap_l4_io_entry[iotype].mem_write[2]; omap_l4_io_opaque[i] = omap_l4_io_entry[iotype].opaque; } #endif } return base; }"} {"target": 1, "idx": 19006, "func": "static void setup_frame(int sig, struct target_sigaction * ka, target_sigset_t *set, CPUMIPSState *regs) { struct sigframe *frame; abi_ulong frame_addr; int i; frame_addr = get_sigframe(ka, regs, sizeof(*frame)); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) goto give_sigsegv; install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); setup_sigcontext(regs, &frame->sf_sc); for(i = 0; i < TARGET_NSIG_WORDS; i++) { if(__put_user(set->sig[i], &frame->sf_mask.sig[i])) goto give_sigsegv; } /* * Arguments to signal handler: * * a0 = signal number * a1 = 0 (should be cause) * a2 = pointer to struct sigcontext * * $25 and PC point to the signal handler, $29 points to the * struct sigframe. */ regs->active_tc.gpr[ 4] = sig; regs->active_tc.gpr[ 5] = 0; regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); regs->active_tc.gpr[29] = frame_addr; regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); /* The original kernel code sets CP0_EPC to the handler * since it returns to userland using eret * we cannot do this here, and we must set PC directly */ regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler; mips_set_hflags_isa_mode_from_pc(regs); unlock_user_struct(frame, frame_addr, 1); return; give_sigsegv: unlock_user_struct(frame, frame_addr, 1); force_sig(TARGET_SIGSEGV/*, current*/); }"} {"target": 1, "idx": 19009, "func": "int update_dimensions(VP8Context *s, int width, int height, int is_vp7) { AVCodecContext *avctx = s->avctx; int i, ret; if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base || height != s->avctx->height) { vp8_decode_flush_impl(s->avctx, 1); ret = ff_set_dimensions(s->avctx, width, height); if (ret < 0) return ret; } s->mb_width = (s->avctx->coded_width + 15) / 16; s->mb_height = (s->avctx->coded_height + 15) / 16; s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE && FFMIN(s->num_coeff_partitions, avctx->thread_count) > 1; if (!s->mb_layout) { // Frame threading and one thread s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) * sizeof(*s->macroblocks)); s->intra4x4_pred_mode_top = av_mallocz(s->mb_width * 4); } else // Sliced threading s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) * sizeof(*s->macroblocks)); s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz)); s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border)); s->thread_data = av_mallocz(MAX_THREADS * sizeof(VP8ThreadData)); if (!s->macroblocks_base || !s->top_nnz || !s->top_border || !s->thread_data || (!s->intra4x4_pred_mode_top && !s->mb_layout)) { free_buffers(s); return AVERROR(ENOMEM); } for (i = 0; i < MAX_THREADS; i++) { s->thread_data[i].filter_strength = av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength)); if (!s->thread_data[i].filter_strength) { free_buffers(s); return 
AVERROR(ENOMEM); } #if HAVE_THREADS pthread_mutex_init(&s->thread_data[i].lock, NULL); pthread_cond_init(&s->thread_data[i].cond, NULL); #endif } s->macroblocks = s->macroblocks_base + 1; return 0; }"} {"target": 0, "idx": 19012, "func": "static int mpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { Mpeg1Context *s = avctx->priv_data; const uint8_t *buf_end; const uint8_t *buf_ptr; uint32_t start_code; int ret, input_size; AVFrame *picture = data; MpegEncContext *s2 = &s->mpeg_enc_ctx; dprintf(avctx, \"fill_buffer\\n\"); if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) { /* special case for last picture */ if (s2->low_delay==0 && s2->next_picture_ptr) { *picture= *(AVFrame*)s2->next_picture_ptr; s2->next_picture_ptr= NULL; *data_size = sizeof(AVFrame); } return 0; } if(s2->flags&CODEC_FLAG_TRUNCATED){ int next= ff_mpeg1_find_frame_end(&s2->parse_context, buf, buf_size); if( ff_combine_frame(&s2->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0 ) return buf_size; } buf_ptr = buf; buf_end = buf + buf_size; #if 0 if (s->repeat_field % 2 == 1) { s->repeat_field++; //fprintf(stderr,\"\\nRepeating last frame: %d -> %d! pict: %d %d\", avctx->frame_number-1, avctx->frame_number, // s2->picture_number, s->repeat_field); if (avctx->flags & CODEC_FLAG_REPEAT_FIELD) { *data_size = sizeof(AVPicture); goto the_end; } } #endif if(s->mpeg_enc_ctx_allocated==0 && avctx->codec_tag == ff_get_fourcc(\"VCR2\")) vcr2_init_sequence(avctx); s->slice_count= 0; for(;;) { /* find start next code */ start_code = -1; buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code); if (start_code > 0x1ff){ if(s2->pict_type != B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){ if(avctx->thread_count > 1){ int i; avctx->execute(avctx, slice_decode_thread, (void**)&(s2->thread_context[0]), NULL, s->slice_count); for(i=0; i<s->slice_count; i++) s2->error_count += s2->thread_context[i]->error_count; } if (slice_end(avctx, picture)) { if(s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice *data_size = sizeof(AVPicture); } } return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index); } input_size = buf_end - buf_ptr; if(avctx->debug & FF_DEBUG_STARTCODE){ av_log(avctx, AV_LOG_DEBUG, \"%3X at %zd left %d\\n\", start_code, buf_ptr-buf, input_size); } /* prepare data for next start code */ switch(start_code) { case SEQ_START_CODE: mpeg1_decode_sequence(avctx, buf_ptr, input_size); break; case PICTURE_START_CODE: /* we have a complete image : we try to decompress it */ mpeg1_decode_picture(avctx, buf_ptr, input_size); break; case EXT_START_CODE: mpeg_decode_extension(avctx, buf_ptr, input_size); break; case USER_START_CODE: mpeg_decode_user_data(avctx, buf_ptr, input_size); break; case GOP_START_CODE: s2->first_field=0; mpeg_decode_gop(avctx, buf_ptr, input_size); break; default: if (start_code >= SLICE_MIN_START_CODE && start_code <= SLICE_MAX_START_CODE) { int mb_y= start_code - SLICE_MIN_START_CODE; if(s2->last_picture_ptr==NULL){ /* Skip B-frames if we do not have reference frames. */ if(s2->pict_type==B_TYPE) break; /* Skip P-frames if we do not have reference frame no valid header. */ // if(s2->pict_type==P_TYPE && s2->first_field && !s2->first_slice) break; } /* Skip B-frames if we are in a hurry.
*/ if(avctx->hurry_up && s2->pict_type==B_TYPE) break; if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==B_TYPE) ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=I_TYPE) || avctx->skip_frame >= AVDISCARD_ALL) break; /* Skip everything if we are in a hurry>=5. */ if(avctx->hurry_up>=5) break; if (!s->mpeg_enc_ctx_allocated) break; if(s2->codec_id == CODEC_ID_MPEG2VIDEO){ if(mb_y < avctx->skip_top || mb_y >= s2->mb_height - avctx->skip_bottom) break; } if(s2->first_slice){ s2->first_slice=0; if(mpeg_field_start(s2) < 0) return -1; } if(!s2->current_picture_ptr){ av_log(avctx, AV_LOG_ERROR, \"current_picture not initialized\\n\"); return -1; } if(avctx->thread_count > 1){ int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count; if(threshold <= mb_y){ MpegEncContext *thread_context= s2->thread_context[s->slice_count]; thread_context->start_mb_y= mb_y; thread_context->end_mb_y = s2->mb_height; if(s->slice_count){ s2->thread_context[s->slice_count-1]->end_mb_y= mb_y; ff_update_duplicate_context(thread_context, s2); } init_get_bits(&thread_context->gb, buf_ptr, input_size*8); s->slice_count++; } buf_ptr += 2; //FIXME add minimum num of bytes per slice }else{ ret = mpeg_decode_slice(s, mb_y, &buf_ptr, input_size); emms_c(); if(ret < 0){ if(s2->resync_mb_x>=0 && s2->resync_mb_y>=0) ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, AC_ERROR|DC_ERROR|MV_ERROR); }else{ ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x-1, s2->mb_y, AC_END|DC_END|MV_END); } } } break; } } }"} {"target": 1, "idx": 19014, "func": "static void pci_msix_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { XenPCIPassthroughState *s = opaque; XenPTMSIX *msix = s->msix; XenPTMSIXEntry *entry; int entry_nr, offset; entry_nr = addr / PCI_MSIX_ENTRY_SIZE; if (entry_nr < 0 || entry_nr >= msix->total_entries) { XEN_PT_ERR(&s->dev, \"asked MSI-X entry '%i' invalid!\\n\", entry_nr); return; } entry = &msix->msix_entry[entry_nr]; offset = addr % PCI_MSIX_ENTRY_SIZE; if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) { const volatile uint32_t *vec_ctrl; if (get_entry_value(entry, offset) == val && entry->pirq != XEN_PT_UNASSIGNED_PIRQ) { return; } /* * If Xen intercepts the mask bit access, entry->vec_ctrl may not be * up-to-date. Read from hardware directly. 
*/ vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL; if (msix->enabled && !(*vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) { XEN_PT_ERR(&s->dev, \"Can't update msix entry %d since MSI-X is\" \" already enabled.\\n\", entry_nr); return; } entry->updated = true; } set_entry_value(entry, offset, val); if (offset == PCI_MSIX_ENTRY_VECTOR_CTRL) { if (msix->enabled && !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) { xen_pt_msix_update_one(s, entry_nr); } } }"} {"target": 0, "idx": 19031, "func": "static void seg_free_context(SegmentContext *seg) { avio_closep(&seg->pb); avformat_free_context(seg->avf); seg->avf = NULL; }"} {"target": 1, "idx": 19051, "func": "static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output) { AVSubtitle subtitle; int i, ret = avcodec_decode_subtitle2(ist->dec_ctx, &subtitle, got_output, pkt); check_decode_result(got_output, ret); if (ret < 0 || !*got_output) { if (!pkt->size) sub2video_flush(ist); return ret; } if (ist->fix_sub_duration) { int end = 1; if (ist->prev_sub.got_output) { end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts, 1000, AV_TIME_BASE); if (end < ist->prev_sub.subtitle.end_display_time) { av_log(ist->dec_ctx, AV_LOG_DEBUG, \"Subtitle duration reduced from %d to %d%s\\n\", ist->prev_sub.subtitle.end_display_time, end, end <= 0 ? \", dropping it\" : \"\"); ist->prev_sub.subtitle.end_display_time = end; } } FFSWAP(int, *got_output, ist->prev_sub.got_output); FFSWAP(int, ret, ist->prev_sub.ret); FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle); if (end <= 0) goto out; } if (!*got_output) return ret; sub2video_update(ist, &subtitle); if (!subtitle.num_rects) goto out; ist->frames_decoded++; for (i = 0; i < nb_output_streams; i++) { OutputStream *ost = output_streams[i]; if (!check_output_constraints(ist, ost) || !ost->encoding_needed || ost->enc->type != AVMEDIA_TYPE_SUBTITLE) continue; do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle); } out: avsubtitle_free(&subtitle); return ret; }"} {"target": 1, "idx": 19062, "func": "static void tap_set_sndbuf(TAPState *s, const char *sndbuf_str, Monitor *mon) { if (sndbuf_str) { config_error(mon, \"No '-net tap,sndbuf=' support available\\n\"); } }"} {"target": 1, "idx": 19065, "func": "static uint64_t error_mem_read(void *opaque, hwaddr addr, unsigned size) { abort(); }"} {"target": 0, "idx": 19087, "func": "static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap) { char header[MAX_YUV4_HEADER+10]; // Include headroom for the longest option char *tokstart,*tokend,*header_end; int i; ByteIOContext *pb = s->pb; int width=-1, height=-1, raten=0, rated=0, aspectn=0, aspectd=0; enum PixelFormat pix_fmt=PIX_FMT_NONE,alt_pix_fmt=PIX_FMT_NONE; enum AVChromaLocation chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED; AVStream *st; struct frame_attributes *s1 = s->priv_data; for (i=0; iinterlaced_frame = 0; s1->top_field_first = 0; header_end = &header[i+1]; // Include space for(tokstart = &header[strlen(Y4M_MAGIC) + 1]; tokstart < header_end; tokstart++) { if (*tokstart==0x20) continue; switch (*tokstart++) { case 'W': // Width. Required. width = strtol(tokstart, &tokend, 10); tokstart=tokend; break; case 'H': // Height. Required. 
height = strtol(tokstart, &tokend, 10); tokstart=tokend; break; case 'C': // Color space if (strncmp(\"420jpeg\",tokstart,7)==0) { pix_fmt = PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_CENTER; } else if (strncmp(\"420mpeg2\",tokstart,8)==0) { pix_fmt = PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_LEFT; } else if (strncmp(\"420paldv\", tokstart, 8)==0) { pix_fmt = PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_TOPLEFT; } else if (strncmp(\"411\", tokstart, 3)==0) pix_fmt = PIX_FMT_YUV411P; else if (strncmp(\"422\", tokstart, 3)==0) pix_fmt = PIX_FMT_YUV422P; else if (strncmp(\"444alpha\", tokstart, 8)==0) { av_log(s, AV_LOG_ERROR, \"Cannot handle 4:4:4:4 YUV4MPEG stream.\\n\"); return -1; } else if (strncmp(\"444\", tokstart, 3)==0) pix_fmt = PIX_FMT_YUV444P; else if (strncmp(\"mono\",tokstart, 4)==0) { pix_fmt = PIX_FMT_GRAY8; } else { av_log(s, AV_LOG_ERROR, \"YUV4MPEG stream contains an unknown pixel format.\\n\"); return -1; } while(tokstartinterlaced_frame=0; break; case 't': s1->interlaced_frame=1; s1->top_field_first=1; break; case 'b': s1->interlaced_frame=1; s1->top_field_first=0; break; case 'm': av_log(s, AV_LOG_ERROR, \"YUV4MPEG stream contains mixed interlaced and non-interlaced frames.\\n\"); return -1; default: av_log(s, AV_LOG_ERROR, \"YUV4MPEG has invalid header.\\n\"); return -1; } break; case 'F': // Frame rate sscanf(tokstart,\"%d:%d\",&raten,&rated); // 0:0 if unknown while(tokstartcodec->width = width; st->codec->height = height; av_reduce(&raten, &rated, raten, rated, (1UL<<31)-1); av_set_pts_info(st, 64, rated, raten); st->codec->pix_fmt = pix_fmt; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_RAWVIDEO; st->sample_aspect_ratio= (AVRational){aspectn, aspectd}; st->codec->chroma_sample_location = chroma_sample_location; return 0; }"} {"target": 0, "idx": 19099, "func": "static void timer_del_locked(QEMUTimerList *timer_list, QEMUTimer *ts) { QEMUTimer **pt, *t; ts->expire_time = -1; pt = &timer_list->active_timers; for(;;) { t = *pt; if (!t) break; if (t == ts) { *pt = t->next; break; } pt = &t->next; } }"} {"target": 0, "idx": 19120, "func": "static void handle_sys(DisasContext *s, uint32_t insn, unsigned int l, unsigned int op1, unsigned int op2, unsigned int crn, unsigned int crm, unsigned int rt) { unsupported_encoding(s, insn); }"} {"target": 0, "idx": 19122, "func": "void axisdev88_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; CRISCPU *cpu; CPUCRISState *env; DeviceState *dev; SysBusDevice *s; DriveInfo *nand; qemu_irq irq[30], nmi[2]; void *etraxfs_dmac; struct etraxfs_dma_client *dma_eth; int i; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *phys_ram = g_new(MemoryRegion, 1); MemoryRegion *phys_intmem = g_new(MemoryRegion, 1); /* init CPUs */ if (cpu_model == NULL) { cpu_model = \"crisv32\"; } cpu = cpu_cris_init(cpu_model); env = &cpu->env; /* allocate RAM */ memory_region_init_ram(phys_ram, NULL, \"axisdev88.ram\", ram_size, &error_abort); vmstate_register_ram_global(phys_ram); memory_region_add_subregion(address_space_mem, 0x40000000, phys_ram); /* The ETRAX-FS has 128Kb on chip ram, the docs refer to it as the internal memory. 
*/ memory_region_init_ram(phys_intmem, NULL, \"axisdev88.chipram\", INTMEM_SIZE, &error_abort); vmstate_register_ram_global(phys_intmem); memory_region_add_subregion(address_space_mem, 0x38000000, phys_intmem); /* Attach a NAND flash to CS1. */ nand = drive_get(IF_MTD, 0, 0); nand_state.nand = nand_init(nand ? blk_bs(blk_by_legacy_dinfo(nand)) : NULL, NAND_MFR_STMICRO, 0x39); memory_region_init_io(&nand_state.iomem, NULL, &nand_ops, &nand_state, \"nand\", 0x05000000); memory_region_add_subregion(address_space_mem, 0x10000000, &nand_state.iomem); gpio_state.nand = &nand_state; memory_region_init_io(&gpio_state.iomem, NULL, &gpio_ops, &gpio_state, \"gpio\", 0x5c); memory_region_add_subregion(address_space_mem, 0x3001a000, &gpio_state.iomem); dev = qdev_create(NULL, \"etraxfs,pic\"); /* FIXME: Is there a proper way to signal vectors to the CPU core? */ qdev_prop_set_ptr(dev, \"interrupt_vector\", &env->interrupt_vector); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); sysbus_mmio_map(s, 0, 0x3001c000); sysbus_connect_irq(s, 0, qdev_get_gpio_in(DEVICE(cpu), CRIS_CPU_IRQ)); sysbus_connect_irq(s, 1, qdev_get_gpio_in(DEVICE(cpu), CRIS_CPU_NMI)); for (i = 0; i < 30; i++) { irq[i] = qdev_get_gpio_in(dev, i); } nmi[0] = qdev_get_gpio_in(dev, 30); nmi[1] = qdev_get_gpio_in(dev, 31); etraxfs_dmac = etraxfs_dmac_init(0x30000000, 10); for (i = 0; i < 10; i++) { /* On ETRAX, odd numbered channels are inputs. */ etraxfs_dmac_connect(etraxfs_dmac, i, irq + 7 + i, i & 1); } /* Add the two ethernet blocks. */ dma_eth = g_malloc0(sizeof dma_eth[0] * 4); /* Allocate 4 channels. */ etraxfs_eth_init(&nd_table[0], 0x30034000, 1, &dma_eth[0], &dma_eth[1]); if (nb_nics > 1) { etraxfs_eth_init(&nd_table[1], 0x30036000, 2, &dma_eth[2], &dma_eth[3]); } /* The DMA Connector block is missing, hardwire things for now. */ etraxfs_dmac_connect_client(etraxfs_dmac, 0, &dma_eth[0]); etraxfs_dmac_connect_client(etraxfs_dmac, 1, &dma_eth[1]); if (nb_nics > 1) { etraxfs_dmac_connect_client(etraxfs_dmac, 6, &dma_eth[2]); etraxfs_dmac_connect_client(etraxfs_dmac, 7, &dma_eth[3]); } /* 2 timers. */ sysbus_create_varargs(\"etraxfs,timer\", 0x3001e000, irq[0x1b], nmi[1], NULL); sysbus_create_varargs(\"etraxfs,timer\", 0x3005e000, irq[0x1b], nmi[1], NULL); for (i = 0; i < 4; i++) { sysbus_create_simple(\"etraxfs,serial\", 0x30026000 + i * 0x2000, irq[0x14 + i]); } if (kernel_filename) { li.image_filename = kernel_filename; li.cmdline = kernel_cmdline; cris_load_image(cpu, &li); } else if (!qtest_enabled()) { fprintf(stderr, \"Kernel image must be specified\\n\"); exit(1); } }"} {"target": 0, "idx": 19130, "func": "static CharDriverState *qemu_chr_open_pp_fd(int fd) { CharDriverState *chr; ParallelCharDriver *drv; if (ioctl(fd, PPCLAIM) < 0) { close(fd); return NULL; } drv = g_malloc0(sizeof(ParallelCharDriver)); drv->fd = fd; drv->mode = IEEE1284_MODE_COMPAT; chr = g_malloc0(sizeof(CharDriverState)); chr->chr_write = null_chr_write; chr->chr_ioctl = pp_ioctl; chr->chr_close = pp_close; chr->opaque = drv; return chr; }"} {"target": 0, "idx": 19137, "func": "const char *bdrv_get_device_or_node_name(const BlockDriverState *bs) { return bs->blk ? 
blk_name(bs->blk) : bs->node_name; }"} {"target": 0, "idx": 19142, "func": "static QObject *qmp_output_pop(QmpOutputVisitor *qov, void *qapi) { QStackEntry *e = QSLIST_FIRST(&qov->stack); QObject *value; assert(e); assert(e->qapi == qapi); QSLIST_REMOVE_HEAD(&qov->stack, node); value = e->value; assert(value); g_free(e); return value; }"} {"target": 1, "idx": 19156, "func": "static int get_coc(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c, uint8_t *properties) { int compno; if (s->buf_end - s->buf < 2) return AVERROR_INVALIDDATA; compno = bytestream_get_byte(&s->buf); c += compno; c->csty = bytestream_get_byte(&s->buf); get_cox(s, c); properties[compno] |= HAD_COC; return 0; }"} {"target": 0, "idx": 19165, "func": "int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes, int64_t *pnum) { BlockDriverState *file; int64_t sector_num = offset >> BDRV_SECTOR_BITS; int nb_sectors = bytes >> BDRV_SECTOR_BITS; int64_t ret; int psectors; assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE) && bytes < INT_MAX); ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &psectors, &file); if (ret < 0) { return ret; } if (pnum) { *pnum = psectors * BDRV_SECTOR_SIZE; } return !!(ret & BDRV_BLOCK_ALLOCATED); }"} {"target": 0, "idx": 19177, "func": "static int rtl8139_cplus_transmit_one(RTL8139State *s) { if (!rtl8139_transmitter_enabled(s)) { DPRINTF(\"+++ C+ mode: transmitter disabled\\n\"); return 0; } if (!rtl8139_cp_transmitter_enabled(s)) { DPRINTF(\"+++ C+ mode: C+ transmitter disabled\\n\"); return 0 ; } int descriptor = s->currCPlusTxDesc; target_phys_addr_t cplus_tx_ring_desc = rtl8139_addr64(s->TxAddr[0], s->TxAddr[1]); /* Normal priority ring */ cplus_tx_ring_desc += 16 * descriptor; DPRINTF(\"+++ C+ mode reading TX descriptor %d from host memory at \" \"%08x0x%08x = 0x\"TARGET_FMT_plx\"\\n\", descriptor, s->TxAddr[1], s->TxAddr[0], cplus_tx_ring_desc); uint32_t val, txdw0,txdw1,txbufLO,txbufHI; cpu_physical_memory_read(cplus_tx_ring_desc, (uint8_t *)&val, 4); txdw0 = le32_to_cpu(val); cpu_physical_memory_read(cplus_tx_ring_desc+4, (uint8_t *)&val, 4); txdw1 = le32_to_cpu(val); cpu_physical_memory_read(cplus_tx_ring_desc+8, (uint8_t *)&val, 4); txbufLO = le32_to_cpu(val); cpu_physical_memory_read(cplus_tx_ring_desc+12, (uint8_t *)&val, 4); txbufHI = le32_to_cpu(val); DPRINTF(\"+++ C+ mode TX descriptor %d %08x %08x %08x %08x\\n\", descriptor, txdw0, txdw1, txbufLO, txbufHI); /* w0 ownership flag */ #define CP_TX_OWN (1<<31) /* w0 end of ring flag */ #define CP_TX_EOR (1<<30) /* first segment of received packet flag */ #define CP_TX_FS (1<<29) /* last segment of received packet flag */ #define CP_TX_LS (1<<28) /* large send packet flag */ #define CP_TX_LGSEN (1<<27) /* large send MSS mask, bits 16...25 */ #define CP_TC_LGSEN_MSS_MASK ((1 << 12) - 1) /* IP checksum offload flag */ #define CP_TX_IPCS (1<<18) /* UDP checksum offload flag */ #define CP_TX_UDPCS (1<<17) /* TCP checksum offload flag */ #define CP_TX_TCPCS (1<<16) /* w0 bits 0...15 : buffer size */ #define CP_TX_BUFFER_SIZE (1<<16) #define CP_TX_BUFFER_SIZE_MASK (CP_TX_BUFFER_SIZE - 1) /* w1 add tag flag */ #define CP_TX_TAGC (1<<17) /* w1 bits 0...15 : VLAN tag (big endian) */ #define CP_TX_VLAN_TAG_MASK ((1<<16) - 1) /* w2 low 32bit of Rx buffer ptr */ /* w3 high 32bit of Rx buffer ptr */ /* set after transmission */ /* FIFO underrun flag */ #define CP_TX_STATUS_UNF (1<<25) /* transmit error summary flag, valid if set any of three below */ #define 
CP_TX_STATUS_TES (1<<23) /* out-of-window collision flag */ #define CP_TX_STATUS_OWC (1<<22) /* link failure flag */ #define CP_TX_STATUS_LNKF (1<<21) /* excessive collisions flag */ #define CP_TX_STATUS_EXC (1<<20) if (!(txdw0 & CP_TX_OWN)) { DPRINTF(\"C+ Tx mode : descriptor %d is owned by host\\n\", descriptor); return 0 ; } DPRINTF(\"+++ C+ Tx mode : transmitting from descriptor %d\\n\", descriptor); if (txdw0 & CP_TX_FS) { DPRINTF(\"+++ C+ Tx mode : descriptor %d is first segment \" \"descriptor\\n\", descriptor); /* reset internal buffer offset */ s->cplus_txbuffer_offset = 0; } int txsize = txdw0 & CP_TX_BUFFER_SIZE_MASK; target_phys_addr_t tx_addr = rtl8139_addr64(txbufLO, txbufHI); /* make sure we have enough space to assemble the packet */ if (!s->cplus_txbuffer) { s->cplus_txbuffer_len = CP_TX_BUFFER_SIZE; s->cplus_txbuffer = qemu_malloc(s->cplus_txbuffer_len); s->cplus_txbuffer_offset = 0; DPRINTF(\"+++ C+ mode transmission buffer allocated space %d\\n\", s->cplus_txbuffer_len); } while (s->cplus_txbuffer && s->cplus_txbuffer_offset + txsize >= s->cplus_txbuffer_len) { s->cplus_txbuffer_len += CP_TX_BUFFER_SIZE; s->cplus_txbuffer = qemu_realloc(s->cplus_txbuffer, s->cplus_txbuffer_len); DPRINTF(\"+++ C+ mode transmission buffer space changed to %d\\n\", s->cplus_txbuffer_len); } if (!s->cplus_txbuffer) { /* out of memory */ DPRINTF(\"+++ C+ mode transmiter failed to reallocate %d bytes\\n\", s->cplus_txbuffer_len); /* update tally counter */ ++s->tally_counters.TxERR; ++s->tally_counters.TxAbt; return 0; } /* append more data to the packet */ DPRINTF(\"+++ C+ mode transmit reading %d bytes from host memory at \" TARGET_FMT_plx\" to offset %d\\n\", txsize, tx_addr, s->cplus_txbuffer_offset); cpu_physical_memory_read(tx_addr, s->cplus_txbuffer + s->cplus_txbuffer_offset, txsize); s->cplus_txbuffer_offset += txsize; /* seek to next Rx descriptor */ if (txdw0 & CP_TX_EOR) { s->currCPlusTxDesc = 0; } else { ++s->currCPlusTxDesc; if (s->currCPlusTxDesc >= 64) s->currCPlusTxDesc = 0; } /* transfer ownership to target */ txdw0 &= ~CP_RX_OWN; /* reset error indicator bits */ txdw0 &= ~CP_TX_STATUS_UNF; txdw0 &= ~CP_TX_STATUS_TES; txdw0 &= ~CP_TX_STATUS_OWC; txdw0 &= ~CP_TX_STATUS_LNKF; txdw0 &= ~CP_TX_STATUS_EXC; /* update ring data */ val = cpu_to_le32(txdw0); cpu_physical_memory_write(cplus_tx_ring_desc, (uint8_t *)&val, 4); /* Now decide if descriptor being processed is holding the last segment of packet */ if (txdw0 & CP_TX_LS) { uint8_t dot1q_buffer_space[VLAN_HLEN]; uint16_t *dot1q_buffer; DPRINTF(\"+++ C+ Tx mode : descriptor %d is last segment descriptor\\n\", descriptor); /* can transfer fully assembled packet */ uint8_t *saved_buffer = s->cplus_txbuffer; int saved_size = s->cplus_txbuffer_offset; int saved_buffer_len = s->cplus_txbuffer_len; /* create vlan tag */ if (txdw1 & CP_TX_TAGC) { /* the vlan tag is in BE byte order in the descriptor * BE + le_to_cpu() + ~swap()~ = cpu */ DPRINTF(\"+++ C+ Tx mode : inserting vlan tag with \"\"tci: %u\\n\", bswap16(txdw1 & CP_TX_VLAN_TAG_MASK)); dot1q_buffer = (uint16_t *) dot1q_buffer_space; dot1q_buffer[0] = cpu_to_be16(ETH_P_8021Q); /* BE + le_to_cpu() + ~cpu_to_le()~ = BE */ dot1q_buffer[1] = cpu_to_le16(txdw1 & CP_TX_VLAN_TAG_MASK); } else { dot1q_buffer = NULL; } /* reset the card space to protect from recursive call */ s->cplus_txbuffer = NULL; s->cplus_txbuffer_offset = 0; s->cplus_txbuffer_len = 0; if (txdw0 & (CP_TX_IPCS | CP_TX_UDPCS | CP_TX_TCPCS | CP_TX_LGSEN)) { DPRINTF(\"+++ C+ mode offloaded task checksum\\n\"); /* ip 
packet header */ ip_header *ip = NULL; int hlen = 0; uint8_t ip_protocol = 0; uint16_t ip_data_len = 0; uint8_t *eth_payload_data = NULL; size_t eth_payload_len = 0; int proto = be16_to_cpu(*(uint16_t *)(saved_buffer + 12)); if (proto == ETH_P_IP) { DPRINTF(\"+++ C+ mode has IP packet\\n\"); /* not aligned */ eth_payload_data = saved_buffer + ETH_HLEN; eth_payload_len = saved_size - ETH_HLEN; ip = (ip_header*)eth_payload_data; if (IP_HEADER_VERSION(ip) != IP_HEADER_VERSION_4) { DPRINTF(\"+++ C+ mode packet has bad IP version %d \" \"expected %d\\n\", IP_HEADER_VERSION(ip), IP_HEADER_VERSION_4); ip = NULL; } else { hlen = IP_HEADER_LENGTH(ip); ip_protocol = ip->ip_p; ip_data_len = be16_to_cpu(ip->ip_len) - hlen; } } if (ip) { if (txdw0 & CP_TX_IPCS) { DPRINTF(\"+++ C+ mode need IP checksum\\n\"); if (hleneth_payload_len) {/* min header length */ /* bad packet header len */ /* or packet too short */ } else { ip->ip_sum = 0; ip->ip_sum = ip_checksum(ip, hlen); DPRINTF(\"+++ C+ mode IP header len=%d checksum=%04x\\n\", hlen, ip->ip_sum); } } if ((txdw0 & CP_TX_LGSEN) && ip_protocol == IP_PROTO_TCP) { #if defined (DEBUG_RTL8139) int large_send_mss = (txdw0 >> 16) & CP_TC_LGSEN_MSS_MASK; #endif DPRINTF(\"+++ C+ mode offloaded task TSO MTU=%d IP data %d \" \"frame data %d specified MSS=%d\\n\", ETH_MTU, ip_data_len, saved_size - ETH_HLEN, large_send_mss); int tcp_send_offset = 0; int send_count = 0; /* maximum IP header length is 60 bytes */ uint8_t saved_ip_header[60]; /* save IP header template; data area is used in tcp checksum calculation */ memcpy(saved_ip_header, eth_payload_data, hlen); /* a placeholder for checksum calculation routine in tcp case */ uint8_t *data_to_checksum = eth_payload_data + hlen - 12; // size_t data_to_checksum_len = eth_payload_len - hlen + 12; /* pointer to TCP header */ tcp_header *p_tcp_hdr = (tcp_header*)(eth_payload_data + hlen); int tcp_hlen = TCP_HEADER_DATA_OFFSET(p_tcp_hdr); /* ETH_MTU = ip header len + tcp header len + payload */ int tcp_data_len = ip_data_len - tcp_hlen; int tcp_chunk_size = ETH_MTU - hlen - tcp_hlen; DPRINTF(\"+++ C+ mode TSO IP data len %d TCP hlen %d TCP \" \"data len %d TCP chunk size %d\\n\", ip_data_len, tcp_hlen, tcp_data_len, tcp_chunk_size); /* note the cycle below overwrites IP header data, but restores it from saved_ip_header before sending packet */ int is_last_frame = 0; for (tcp_send_offset = 0; tcp_send_offset < tcp_data_len; tcp_send_offset += tcp_chunk_size) { uint16_t chunk_size = tcp_chunk_size; /* check if this is the last frame */ if (tcp_send_offset + tcp_chunk_size >= tcp_data_len) { is_last_frame = 1; chunk_size = tcp_data_len - tcp_send_offset; } DPRINTF(\"+++ C+ mode TSO TCP seqno %08x\\n\", be32_to_cpu(p_tcp_hdr->th_seq)); /* add 4 TCP pseudoheader fields */ /* copy IP source and destination fields */ memcpy(data_to_checksum, saved_ip_header + 12, 8); DPRINTF(\"+++ C+ mode TSO calculating TCP checksum for \" \"packet with %d bytes data\\n\", tcp_hlen + chunk_size); if (tcp_send_offset) { memcpy((uint8_t*)p_tcp_hdr + tcp_hlen, (uint8_t*)p_tcp_hdr + tcp_hlen + tcp_send_offset, chunk_size); } /* keep PUSH and FIN flags only for the last frame */ if (!is_last_frame) { TCP_HEADER_CLEAR_FLAGS(p_tcp_hdr, TCP_FLAG_PUSH|TCP_FLAG_FIN); } /* recalculate TCP checksum */ ip_pseudo_header *p_tcpip_hdr = (ip_pseudo_header *)data_to_checksum; p_tcpip_hdr->zeros = 0; p_tcpip_hdr->ip_proto = IP_PROTO_TCP; p_tcpip_hdr->ip_payload = cpu_to_be16(tcp_hlen + chunk_size); p_tcp_hdr->th_sum = 0; int tcp_checksum = 
ip_checksum(data_to_checksum, tcp_hlen + chunk_size + 12); DPRINTF(\"+++ C+ mode TSO TCP checksum %04x\\n\", tcp_checksum); p_tcp_hdr->th_sum = tcp_checksum; /* restore IP header */ memcpy(eth_payload_data, saved_ip_header, hlen); /* set IP data length and recalculate IP checksum */ ip->ip_len = cpu_to_be16(hlen + tcp_hlen + chunk_size); /* increment IP id for subsequent frames */ ip->ip_id = cpu_to_be16(tcp_send_offset/tcp_chunk_size + be16_to_cpu(ip->ip_id)); ip->ip_sum = 0; ip->ip_sum = ip_checksum(eth_payload_data, hlen); DPRINTF(\"+++ C+ mode TSO IP header len=%d \" \"checksum=%04x\\n\", hlen, ip->ip_sum); int tso_send_size = ETH_HLEN + hlen + tcp_hlen + chunk_size; DPRINTF(\"+++ C+ mode TSO transferring packet size \" \"%d\\n\", tso_send_size); rtl8139_transfer_frame(s, saved_buffer, tso_send_size, 0, (uint8_t *) dot1q_buffer); /* add transferred count to TCP sequence number */ p_tcp_hdr->th_seq = cpu_to_be32(chunk_size + be32_to_cpu(p_tcp_hdr->th_seq)); ++send_count; } /* Stop sending this frame */ saved_size = 0; } else if (txdw0 & (CP_TX_TCPCS|CP_TX_UDPCS)) { DPRINTF(\"+++ C+ mode need TCP or UDP checksum\\n\"); /* maximum IP header length is 60 bytes */ uint8_t saved_ip_header[60]; memcpy(saved_ip_header, eth_payload_data, hlen); uint8_t *data_to_checksum = eth_payload_data + hlen - 12; // size_t data_to_checksum_len = eth_payload_len - hlen + 12; /* add 4 TCP pseudoheader fields */ /* copy IP source and destination fields */ memcpy(data_to_checksum, saved_ip_header + 12, 8); if ((txdw0 & CP_TX_TCPCS) && ip_protocol == IP_PROTO_TCP) { DPRINTF(\"+++ C+ mode calculating TCP checksum for \" \"packet with %d bytes data\\n\", ip_data_len); ip_pseudo_header *p_tcpip_hdr = (ip_pseudo_header *)data_to_checksum; p_tcpip_hdr->zeros = 0; p_tcpip_hdr->ip_proto = IP_PROTO_TCP; p_tcpip_hdr->ip_payload = cpu_to_be16(ip_data_len); tcp_header* p_tcp_hdr = (tcp_header *) (data_to_checksum+12); p_tcp_hdr->th_sum = 0; int tcp_checksum = ip_checksum(data_to_checksum, ip_data_len + 12); DPRINTF(\"+++ C+ mode TCP checksum %04x\\n\", tcp_checksum); p_tcp_hdr->th_sum = tcp_checksum; } else if ((txdw0 & CP_TX_UDPCS) && ip_protocol == IP_PROTO_UDP) { DPRINTF(\"+++ C+ mode calculating UDP checksum for \" \"packet with %d bytes data\\n\", ip_data_len); ip_pseudo_header *p_udpip_hdr = (ip_pseudo_header *)data_to_checksum; p_udpip_hdr->zeros = 0; p_udpip_hdr->ip_proto = IP_PROTO_UDP; p_udpip_hdr->ip_payload = cpu_to_be16(ip_data_len); udp_header *p_udp_hdr = (udp_header *) (data_to_checksum+12); p_udp_hdr->uh_sum = 0; int udp_checksum = ip_checksum(data_to_checksum, ip_data_len + 12); DPRINTF(\"+++ C+ mode UDP checksum %04x\\n\", udp_checksum); p_udp_hdr->uh_sum = udp_checksum; } /* restore IP header */ memcpy(eth_payload_data, saved_ip_header, hlen); } } } /* update tally counter */ ++s->tally_counters.TxOk; DPRINTF(\"+++ C+ mode transmitting %d bytes packet\\n\", saved_size); rtl8139_transfer_frame(s, saved_buffer, saved_size, 1, (uint8_t *) dot1q_buffer); /* restore card space if there was no recursion and reset offset */ if (!s->cplus_txbuffer) { s->cplus_txbuffer = saved_buffer; s->cplus_txbuffer_len = saved_buffer_len; s->cplus_txbuffer_offset = 0; } else { qemu_free(saved_buffer); } } else { DPRINTF(\"+++ C+ mode transmission continue to next descriptor\\n\"); } return 1; }"} {"target": 0, "idx": 19181, "func": "static void object_set_link_property(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { Object **child = opaque; bool ambiguous = false; const char *type; char *path; type 
= object_property_get_type(obj, name, NULL); visit_type_str(v, &path, name, errp); if (*child) { object_unref(*child); } if (strcmp(path, \"\") != 0) { Object *target; target = object_resolve_path(path, &ambiguous); if (target) { gchar *target_type; target_type = g_strdup_printf(\"link<%s>\", object_get_typename(OBJECT(target))); if (strcmp(target_type, type) == 0) { *child = target; object_ref(target); } else { error_set(errp, QERR_INVALID_PARAMETER_TYPE, name, type); } g_free(target_type); } else { error_set(errp, QERR_DEVICE_NOT_FOUND, path); } } else { *child = NULL; } g_free(path); }"} {"target": 0, "idx": 19201, "func": "static int decode_frame_packing_arrangement(H264Context *h) { h->sei_fpa.frame_packing_arrangement_id = get_ue_golomb(&h->gb); h->sei_fpa.frame_packing_arrangement_cancel_flag = get_bits1(&h->gb); h->sei_frame_packing_present = !h->sei_fpa.frame_packing_arrangement_cancel_flag; if (h->sei_frame_packing_present) { h->sei_fpa.frame_packing_arrangement_type = h->frame_packing_arrangement_type = get_bits(&h->gb, 7); h->sei_fpa.quincunx_sampling_flag = h->quincunx_subsampling = get_bits1(&h->gb); h->sei_fpa.content_interpretation_type = h->content_interpretation_type = get_bits(&h->gb, 6); // the following skips: spatial_flipping_flag, frame0_flipped_flag, // field_views_flag, current_frame_is_frame0_flag, // frame0_self_contained_flag, frame1_self_contained_flag skip_bits(&h->gb, 6); if (!h->quincunx_subsampling && h->frame_packing_arrangement_type != 5) skip_bits(&h->gb, 16); // frame[01]_grid_position_[xy] skip_bits(&h->gb, 8); // frame_packing_arrangement_reserved_byte h->sei_fpa.frame_packing_arrangement_repetition_period = get_ue_golomb(&h->gb) /* frame_packing_arrangement_repetition_period */; } skip_bits1(&h->gb); // frame_packing_arrangement_extension_flag if (h->avctx->debug & FF_DEBUG_PICT_INFO) av_log(h->avctx, AV_LOG_DEBUG, \"SEI FPA %d %d %d %d %d %d\\n\", h->sei_fpa.frame_packing_arrangement_id, h->sei_fpa.frame_packing_arrangement_cancel_flag, h->sei_fpa.frame_packing_arrangement_type, h->sei_fpa.quincunx_sampling_flag, h->sei_fpa.content_interpretation_type, h->sei_fpa.frame_packing_arrangement_repetition_period); return 0; }"} {"target": 1, "idx": 19213, "func": "static void do_info_trace(Monitor *mon) { st_print_trace((FILE *)mon, &monitor_fprintf); }"} {"target": 1, "idx": 19226, "func": "static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data, ram_addr_t current_addr, RAMBlock *block, ram_addr_t offset, int cont, bool last_stage) { int encoded_len = 0, bytes_sent = -1; uint8_t *prev_cached_page; if (!cache_is_cached(XBZRLE.cache, current_addr)) { acct_info.xbzrle_cache_miss++; if (!last_stage) { if (cache_insert(XBZRLE.cache, current_addr, *current_data) == -1) { return -1; } else { /* update *current_data when the page has been inserted into cache */ *current_data = get_cached_data(XBZRLE.cache, current_addr); } } return -1; } prev_cached_page = get_cached_data(XBZRLE.cache, current_addr); /* save current buffer into memory */ memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE); /* XBZRLE encoding (if there is no overflow) */ encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE, XBZRLE.encoded_buf, TARGET_PAGE_SIZE); if (encoded_len == 0) { DPRINTF(\"Skipping unmodified page\\n\"); return 0; } else if (encoded_len == -1) { DPRINTF(\"Overflow\\n\"); acct_info.xbzrle_overflows++; /* update data in the cache */ if (!last_stage) { memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE); *current_data 
= prev_cached_page; } return -1; } /* we need to update the data in the cache, in order to get the same data */ if (!last_stage) { memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE); } /* Send XBZRLE based compressed page */ bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE); qemu_put_byte(f, ENCODING_FLAG_XBZRLE); qemu_put_be16(f, encoded_len); qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len); bytes_sent += encoded_len + 1 + 2; acct_info.xbzrle_pages++; acct_info.xbzrle_bytes += bytes_sent; return bytes_sent; }"} {"target": 1, "idx": 19227, "func": "static void scsi_unmap_complete(void *opaque, int ret) { UnmapCBData *data = opaque; SCSIDiskReq *r = data->r; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint64_t sector_num; uint32_t nb_sectors; r->req.aiocb = NULL; if (ret < 0) { if (scsi_handle_rw_error(r, -ret)) { goto done; } } if (data->count > 0 && !r->req.io_canceled) { sector_num = ldq_be_p(&data->inbuf[0]); nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; if (sector_num > sector_num + nb_sectors || sector_num + nb_sectors - 1 > s->qdev.max_lba) { scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); goto done; } r->req.aiocb = bdrv_aio_discard(s->qdev.conf.bs, sector_num * (s->qdev.blocksize / 512), nb_sectors * (s->qdev.blocksize / 512), scsi_unmap_complete, data); data->count--; data->inbuf += 16; return; } done: if (data->count == 0) { scsi_req_complete(&r->req, GOOD); } if (!r->req.io_canceled) { scsi_req_unref(&r->req); } g_free(data); }"} {"target": 1, "idx": 19233, "func": "int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg, const struct iovec *iov, size_t niov, uint8_t **result, size_t *resultlen, Error **errp) { #ifdef CONFIG_AF_ALG int ret; ret = qcrypto_hash_afalg_driver.hash_bytesv(alg, iov, niov, result, resultlen, errp); if (ret == 0) { return ret; } /* * TODO: * Maybe we should treat some afalg errors as fatal */ error_free(*errp); #endif return qcrypto_hash_lib_driver.hash_bytesv(alg, iov, niov, result, resultlen, errp); }"} {"target": 1, "idx": 19248, "func": "int net_init_slirp(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { /* FIXME error_setg(errp, ...) on failure */ struct slirp_config_str *config; char *vnet; int ret; const NetdevUserOptions *user; const char **dnssearch; bool ipv4 = true, ipv6 = true; assert(netdev->type == NET_CLIENT_DRIVER_USER); user = &netdev->u.user; if ((user->has_ipv6 && user->ipv6 && !user->has_ipv4) || (user->has_ipv4 && !user->ipv4)) { ipv4 = 0; } if ((user->has_ipv4 && user->ipv4 && !user->has_ipv6) || (user->has_ipv6 && !user->ipv6)) { ipv6 = 0; } vnet = user->has_net ? g_strdup(user->net) : user->has_ip ? 
g_strdup_printf(\"%s/24\", user->ip) : NULL; dnssearch = slirp_dnssearch(user->dnssearch); /* all optional fields are initialized to \"all bits zero\" */ net_init_slirp_configs(user->hostfwd, SLIRP_CFG_HOSTFWD); net_init_slirp_configs(user->guestfwd, 0); ret = net_slirp_init(peer, \"user\", name, user->q_restrict, ipv4, vnet, user->host, ipv6, user->ipv6_prefix, user->ipv6_prefixlen, user->ipv6_host, user->hostname, user->tftp, user->bootfile, user->dhcpstart, user->dns, user->ipv6_dns, user->smb, user->smbserver, dnssearch); while (slirp_configs) { config = slirp_configs; slirp_configs = config->next; g_free(config); } g_free(vnet); g_free(dnssearch); return ret; }"} {"target": 0, "idx": 19250, "func": "av_cold int ff_MPV_encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; int i, ret; MPV_encode_defaults(s); switch (avctx->codec_id) { case AV_CODEC_ID_MPEG2VIDEO: if (avctx->pix_fmt != AV_PIX_FMT_YUV420P && avctx->pix_fmt != AV_PIX_FMT_YUV422P) { av_log(avctx, AV_LOG_ERROR, \"only YUV420 and YUV422 are supported\\n\"); return -1; } break; case AV_CODEC_ID_MJPEG: case AV_CODEC_ID_AMV: if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P && avctx->pix_fmt != AV_PIX_FMT_YUVJ422P && avctx->pix_fmt != AV_PIX_FMT_YUVJ444P && ((avctx->pix_fmt != AV_PIX_FMT_YUV420P && avctx->pix_fmt != AV_PIX_FMT_YUV422P && avctx->pix_fmt != AV_PIX_FMT_YUV444P) || avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) { av_log(avctx, AV_LOG_ERROR, \"colorspace not supported in jpeg\\n\"); return -1; } break; default: if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) { av_log(avctx, AV_LOG_ERROR, \"only YUV420 is supported\\n\"); return -1; } } switch (avctx->pix_fmt) { case AV_PIX_FMT_YUVJ444P: case AV_PIX_FMT_YUV444P: s->chroma_format = CHROMA_444; break; case AV_PIX_FMT_YUVJ422P: case AV_PIX_FMT_YUV422P: s->chroma_format = CHROMA_422; break; case AV_PIX_FMT_YUVJ420P: case AV_PIX_FMT_YUV420P: default: s->chroma_format = CHROMA_420; break; } s->bit_rate = avctx->bit_rate; s->width = avctx->width; s->height = avctx->height; if (avctx->gop_size > 600 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { av_log(avctx, AV_LOG_WARNING, \"keyframe interval too large!, reducing it from %d to %d\\n\", avctx->gop_size, 600); avctx->gop_size = 600; } s->gop_size = avctx->gop_size; s->avctx = avctx; s->flags = avctx->flags; s->flags2 = avctx->flags2; if (avctx->max_b_frames > MAX_B_FRAMES) { av_log(avctx, AV_LOG_ERROR, \"Too many B-frames requested, maximum \" \"is %d.\\n\", MAX_B_FRAMES); avctx->max_b_frames = MAX_B_FRAMES; } s->max_b_frames = avctx->max_b_frames; s->codec_id = avctx->codec->id; s->strict_std_compliance = avctx->strict_std_compliance; s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0; s->mpeg_quant = avctx->mpeg_quant; s->rtp_mode = !!avctx->rtp_payload_size; s->intra_dc_precision = avctx->intra_dc_precision; s->user_specified_pts = AV_NOPTS_VALUE; if (s->gop_size <= 1) { s->intra_only = 1; s->gop_size = 12; } else { s->intra_only = 0; } s->me_method = avctx->me_method; /* Fixed QSCALE */ s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE); s->adaptive_quant = (s->avctx->lumi_masking || s->avctx->dark_masking || s->avctx->temporal_cplx_masking || s->avctx->spatial_cplx_masking || s->avctx->p_masking || s->avctx->border_masking || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) && !s->fixed_qscale; s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER); if (avctx->rc_max_rate && !avctx->rc_buffer_size) { switch(avctx->codec_id) { case AV_CODEC_ID_MPEG1VIDEO: case AV_CODEC_ID_MPEG2VIDEO: 
avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384; break; case AV_CODEC_ID_MPEG4: case AV_CODEC_ID_MSMPEG4V1: case AV_CODEC_ID_MSMPEG4V2: case AV_CODEC_ID_MSMPEG4V3: if (avctx->rc_max_rate >= 15000000) { avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000); } else if(avctx->rc_max_rate >= 2000000) { avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000); } else if(avctx->rc_max_rate >= 384000) { avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000); } else avctx->rc_buffer_size = 40; avctx->rc_buffer_size *= 16384; break; } if (avctx->rc_buffer_size) { av_log(avctx, AV_LOG_INFO, \"Automatically choosing VBV buffer size of %d kbyte\\n\", avctx->rc_buffer_size/8192); } } if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) { av_log(avctx, AV_LOG_ERROR, \"Either both buffer size and max rate or neither must be specified\\n\"); if (avctx->rc_max_rate && !avctx->rc_buffer_size) return -1; } if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) { av_log(avctx, AV_LOG_INFO, \"Warning min_rate > 0 but min_rate != max_rate isn't recommended!\\n\"); } if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) { av_log(avctx, AV_LOG_ERROR, \"bitrate below min bitrate\\n\"); return -1; } if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) { av_log(avctx, AV_LOG_ERROR, \"bitrate above max bitrate\\n\"); return -1; } if (avctx->rc_max_rate && avctx->rc_max_rate == avctx->bit_rate && avctx->rc_max_rate != avctx->rc_min_rate) { av_log(avctx, AV_LOG_INFO, \"impossible bitrate constraints, this will fail\\n\"); } if (avctx->rc_buffer_size && avctx->bit_rate * (int64_t)avctx->time_base.num > avctx->rc_buffer_size * (int64_t)avctx->time_base.den) { av_log(avctx, AV_LOG_ERROR, \"VBV buffer too small for bitrate\\n\"); return -1; } if (!s->fixed_qscale && avctx->bit_rate * av_q2d(avctx->time_base) > avctx->bit_rate_tolerance) { av_log(avctx, AV_LOG_ERROR, \"bitrate tolerance %d too small for bitrate %d\\n\", avctx->bit_rate_tolerance, avctx->bit_rate); return -1; } if (s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) && 90000LL * (avctx->rc_buffer_size - 1) > s->avctx->rc_max_rate * 0xFFFFLL) { av_log(avctx, AV_LOG_INFO, \"Warning vbv_delay will be set to 0xFFFF (=VBR) as the \" \"specified vbv buffer is too large for the given bitrate!\\n\"); } if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P && s->codec_id != AV_CODEC_ID_FLV1) { av_log(avctx, AV_LOG_ERROR, \"4MV not supported by codec\\n\"); return -1; } if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) { av_log(avctx, AV_LOG_ERROR, \"OBMC is only supported with simple mb decision\\n\"); return -1; } if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) { av_log(avctx, AV_LOG_ERROR, \"qpel not supported by codec\\n\"); return -1; } if (s->max_b_frames && s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) { av_log(avctx, AV_LOG_ERROR, \"b frames not supported by codec\\n\"); return -1; } if (s->max_b_frames < 0) { av_log(avctx, AV_LOG_ERROR, \"max b frames must be 0 or positive for mpegvideo based encoders\\n\"); return -1; } if ((s->codec_id == AV_CODEC_ID_MPEG4 || s->codec_id == 
AV_CODEC_ID_H263 || s->codec_id == AV_CODEC_ID_H263P) && (avctx->sample_aspect_ratio.num > 255 || avctx->sample_aspect_ratio.den > 255)) { av_log(avctx, AV_LOG_WARNING, \"Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\\n\", avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den); av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den, avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255); } if ((s->codec_id == AV_CODEC_ID_H263 || s->codec_id == AV_CODEC_ID_H263P) && (avctx->width > 2048 || avctx->height > 1152 )) { av_log(avctx, AV_LOG_ERROR, \"H.263 does not support resolutions above 2048x1152\\n\"); return -1; } if ((s->codec_id == AV_CODEC_ID_H263 || s->codec_id == AV_CODEC_ID_H263P) && ((avctx->width &3) || (avctx->height&3) )) { av_log(avctx, AV_LOG_ERROR, \"w/h must be a multiple of 4\\n\"); return -1; } if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO && (avctx->width > 4095 || avctx->height > 4095 )) { av_log(avctx, AV_LOG_ERROR, \"MPEG-1 does not support resolutions above 4095x4095\\n\"); return -1; } if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && (avctx->width > 16383 || avctx->height > 16383 )) { av_log(avctx, AV_LOG_ERROR, \"MPEG-2 does not support resolutions above 16383x16383\\n\"); return -1; } if (s->codec_id == AV_CODEC_ID_RV10 && (avctx->width &15 || avctx->height&15 )) { av_log(avctx, AV_LOG_ERROR, \"width and height must be a multiple of 16\\n\"); return AVERROR(EINVAL); } if (s->codec_id == AV_CODEC_ID_RV20 && (avctx->width &3 || avctx->height&3 )) { av_log(avctx, AV_LOG_ERROR, \"width and height must be a multiple of 4\\n\"); return AVERROR(EINVAL); } if ((s->codec_id == AV_CODEC_ID_WMV1 || s->codec_id == AV_CODEC_ID_WMV2) && avctx->width & 1) { av_log(avctx, AV_LOG_ERROR, \"width must be multiple of 2\\n\"); return -1; } if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) && s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) { av_log(avctx, AV_LOG_ERROR, \"interlacing not supported by codec\\n\"); return -1; } // FIXME mpeg2 uses that too if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) { av_log(avctx, AV_LOG_ERROR, \"mpeg2 style quantization not supported by codec\\n\"); return -1; } if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) { av_log(avctx, AV_LOG_ERROR, \"CBP RD needs trellis quant\\n\"); return -1; } if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD) { av_log(avctx, AV_LOG_ERROR, \"QP RD needs mbd=2\\n\"); return -1; } if (s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)) { av_log(avctx, AV_LOG_ERROR, \"closed gop with scene change detection are not supported yet, \" \"set threshold to 1000000000\\n\"); return -1; } if (s->flags & CODEC_FLAG_LOW_DELAY) { if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) { av_log(avctx, AV_LOG_ERROR, \"low delay forcing is only available for mpeg2\\n\"); return -1; } if (s->max_b_frames != 0) { av_log(avctx, AV_LOG_ERROR, \"b frames cannot be used with low delay\\n\"); return -1; } } if (s->q_scale_type == 1) { if (avctx->qmax > 12) { av_log(avctx, AV_LOG_ERROR, \"non linear quant only supports qmax <= 12 currently\\n\"); return -1; } } if (s->avctx->thread_count > 1 && s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->codec_id != AV_CODEC_ID_MPEG2VIDEO && s->codec_id != AV_CODEC_ID_MJPEG && (s->codec_id != AV_CODEC_ID_H263P)) { av_log(avctx, AV_LOG_ERROR, \"multi threaded encoding not 
supported by codec\\n\"); return -1; } if (s->avctx->thread_count < 1) { av_log(avctx, AV_LOG_ERROR, \"automatic thread number detection not supported by codec, \" \"patch welcome\\n\"); return -1; } if (s->avctx->slices > 1 || s->avctx->thread_count > 1) s->rtp_mode = 1; if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P) s->h263_slice_structured = 1; if (!avctx->time_base.den || !avctx->time_base.num) { av_log(avctx, AV_LOG_ERROR, \"framerate not set\\n\"); return -1; } i = (INT_MAX / 2 + 128) >> 8; if (avctx->mb_threshold >= i) { av_log(avctx, AV_LOG_ERROR, \"mb_threshold too large, max is %d\\n\", i - 1); return -1; } if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) { av_log(avctx, AV_LOG_INFO, \"notice: b_frame_strategy only affects the first pass\\n\"); avctx->b_frame_strategy = 0; } i = av_gcd(avctx->time_base.den, avctx->time_base.num); if (i > 1) { av_log(avctx, AV_LOG_INFO, \"removing common factors from framerate\\n\"); avctx->time_base.den /= i; avctx->time_base.num /= i; //return -1; } if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) { // (a + x * 3 / 8) / x s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3); s->inter_quant_bias = 0; } else { s->intra_quant_bias = 0; // (a - x / 4) / x s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2)); } if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) { av_log(avctx, AV_LOG_ERROR, \"qmin and or qmax are invalid, they must be 0 < min <= max\\n\"); return AVERROR(EINVAL); } if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS) s->intra_quant_bias = avctx->intra_quant_bias; if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS) s->inter_quant_bias = avctx->inter_quant_bias; av_log(avctx, AV_LOG_DEBUG, \"intra_quant_bias = %d inter_quant_bias = %d\\n\",s->intra_quant_bias,s->inter_quant_bias); if (avctx->codec_id == AV_CODEC_ID_MPEG4 && s->avctx->time_base.den > (1 << 16) - 1) { av_log(avctx, AV_LOG_ERROR, \"timebase %d/%d not supported by MPEG 4 standard, \" \"the maximum admitted value for the timebase denominator \" \"is %d\\n\", s->avctx->time_base.num, s->avctx->time_base.den, (1 << 16) - 1); return -1; } s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1; switch (avctx->codec->id) { case AV_CODEC_ID_MPEG1VIDEO: s->out_format = FMT_MPEG1; s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY); avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1); break; case AV_CODEC_ID_MPEG2VIDEO: s->out_format = FMT_MPEG1; s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY); avctx->delay = s->low_delay ? 
0 : (s->max_b_frames + 1); s->rtp_mode = 1; break; case AV_CODEC_ID_MJPEG: case AV_CODEC_ID_AMV: s->out_format = FMT_MJPEG; s->intra_only = 1; /* force intra only for jpeg */ if (!CONFIG_MJPEG_ENCODER || ff_mjpeg_encode_init(s) < 0) return -1; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_H261: if (!CONFIG_H261_ENCODER) return -1; if (ff_h261_get_picture_format(s->width, s->height) < 0) { av_log(avctx, AV_LOG_ERROR, \"The specified picture size of %dx%d is not valid for the \" \"H.261 codec.\\nValid sizes are 176x144, 352x288\\n\", s->width, s->height); return -1; } s->out_format = FMT_H261; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_H263: if (!CONFIG_H263_ENCODER) return -1; if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height) == 8) { av_log(avctx, AV_LOG_ERROR, \"The specified picture size of %dx%d is not valid for \" \"the H.263 codec.\\nValid sizes are 128x96, 176x144, \" \"352x288, 704x576, and 1408x1152. \" \"Try H.263+.\\n\", s->width, s->height); return -1; } s->out_format = FMT_H263; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_H263P: s->out_format = FMT_H263; s->h263_plus = 1; /* Fx */ s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0; s->modified_quant = s->h263_aic; s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0; s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus; /* /Fx */ /* These are just to be sure */ avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_FLV1: s->out_format = FMT_H263; s->h263_flv = 2; /* format = 1; 11-bit codes */ s->unrestricted_mv = 1; s->rtp_mode = 0; /* don't allow GOB */ avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_RV10: s->out_format = FMT_H263; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_RV20: s->out_format = FMT_H263; avctx->delay = 0; s->low_delay = 1; s->modified_quant = 1; s->h263_aic = 1; s->h263_plus = 1; s->loop_filter = 1; s->unrestricted_mv = 0; break; case AV_CODEC_ID_MPEG4: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; s->low_delay = s->max_b_frames ? 0 : 1; avctx->delay = s->low_delay ? 
0 : (s->max_b_frames + 1); break; case AV_CODEC_ID_MSMPEG4V2: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; s->msmpeg4_version = 2; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_MSMPEG4V3: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; s->msmpeg4_version = 3; s->flipflop_rounding = 1; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_WMV1: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; s->msmpeg4_version = 4; s->flipflop_rounding = 1; avctx->delay = 0; s->low_delay = 1; break; case AV_CODEC_ID_WMV2: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; s->msmpeg4_version = 5; s->flipflop_rounding = 1; avctx->delay = 0; s->low_delay = 1; break; default: return -1; } avctx->has_b_frames = !s->low_delay; s->encoding = 1; s->progressive_frame = s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME) || s->alternate_scan); /* init */ if (ff_MPV_common_init(s) < 0) return -1; s->avctx->coded_frame = s->current_picture.f; if (s->msmpeg4_version) { FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int), fail); } FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail); FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail); if (s->avctx->noise_reduction) { FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail); } ff_dct_encode_init(s); if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant) s->chroma_qscale_table = ff_h263_chroma_qscale_table; s->quant_precision = 5; ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp); ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp); if (CONFIG_H261_ENCODER && s->out_format == FMT_H261) ff_h261_encode_init(s); if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) ff_h263_encode_init(s); if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version) ff_msmpeg4_encode_init(s); if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) && s->out_format == FMT_MPEG1) ff_mpeg1_encode_init(s); /* init q matrix */ for (i = 0; i < 64; i++) { int j = s->dsp.idct_permutation[i]; if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 && s->mpeg_quant) { s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i]; s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i]; } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) { s->intra_matrix[j] = s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; } else { /* mpeg1/2 */ s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i]; s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; } if (s->avctx->intra_matrix) s->intra_matrix[j] = s->avctx->intra_matrix[i]; if (s->avctx->inter_matrix) s->inter_matrix[j] = s->avctx->inter_matrix[i]; } /* precompute matrix */ /* for mjpeg, we do include qscale in the matrix */ if (s->out_format != 
FMT_MJPEG) { ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1); ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16, s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0); } if (ff_rate_control_init(s) < 0) return -1; #if FF_API_ERROR_RATE FF_DISABLE_DEPRECATION_WARNINGS if (avctx->error_rate) s->error_rate = avctx->error_rate; FF_ENABLE_DEPRECATION_WARNINGS; #endif if (avctx->b_frame_strategy == 2) { for (i = 0; i < s->max_b_frames + 2; i++) { s->tmp_frames[i] = av_frame_alloc(); if (!s->tmp_frames[i]) return AVERROR(ENOMEM); s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P; s->tmp_frames[i]->width = s->width >> avctx->brd_scale; s->tmp_frames[i]->height = s->height >> avctx->brd_scale; ret = av_frame_get_buffer(s->tmp_frames[i], 32); if (ret < 0) return ret; } } return 0; fail: ff_MPV_encode_end(avctx); return AVERROR_UNKNOWN; }"} {"target": 0, "idx": 19252, "func": "int pcnet_can_receive(NetClientState *nc) { PCNetState *s = qemu_get_nic_opaque(nc); if (CSR_STOP(s) || CSR_SPND(s)) return 0; return sizeof(s->buffer)-16; }"} {"target": 0, "idx": 19264, "func": "void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd, bool vfio_accel) { struct kvm_create_spapr_tce args = { .liobn = liobn, .window_size = window_size, }; long len; int fd; void *table; /* Must set fd to -1 so we don't try to munmap when called for * destroying the table, which the upper layers -will- do */ *pfd = -1; if (!cap_spapr_tce || (vfio_accel && !cap_spapr_vfio)) { return NULL; } fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args); if (fd < 0) { fprintf(stderr, \"KVM: Failed to create TCE table for liobn 0x%x\\n\", liobn); return NULL; } len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(uint64_t); /* FIXME: round this up to page size */ table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (table == MAP_FAILED) { fprintf(stderr, \"KVM: Failed to map TCE table for liobn 0x%x\\n\", liobn); close(fd); return NULL; } *pfd = fd; return table; }"} {"target": 0, "idx": 19278, "func": "static void exynos4210_pmu_write(void *opaque, target_phys_addr_t offset, uint64_t val, unsigned size) { Exynos4210PmuState *s = (Exynos4210PmuState *)opaque; unsigned i; const Exynos4210PmuReg *reg_p = exynos4210_pmu_regs; for (i = 0; i < PMU_NUM_OF_REGISTERS; i++) { if (reg_p->offset == offset) { PRINT_DEBUG_EXTEND(\"%s <0x%04x> <- 0x%04x\\n\", reg_p->name, (uint32_t)offset, (uint32_t)val); s->reg[i] = val; return; } reg_p++; } PRINT_DEBUG(\"QEMU PMU ERROR: bad write offset 0x%04x\\n\", (uint32_t)offset); }"} {"target": 0, "idx": 19281, "func": "static int omap_validate_emiff_addr(struct omap_mpu_state_s *s, target_phys_addr_t addr) { return addr >= OMAP_EMIFF_BASE && addr < OMAP_EMIFF_BASE + s->sdram_size; }"} {"target": 0, "idx": 19282, "func": "static int au_write_trailer(AVFormatContext *s) { AVIOContext *pb = s->pb; int64_t file_size; if (s->pb->seekable) { /* update file size */ file_size = avio_tell(pb); avio_seek(pb, 8, SEEK_SET); avio_wb32(pb, (uint32_t)(file_size - 24)); avio_seek(pb, file_size, SEEK_SET); avio_flush(pb); } return 0; }"} {"target": 0, "idx": 19294, "func": "static const CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *machine) { assert(machine->possible_cpus); return machine->possible_cpus; }"} {"target": 0, "idx": 19325, "func": "static void term_update(void) { int i, delta; if (term_cmd_buf_size != term_last_cmd_buf_size || memcmp(term_cmd_buf, term_last_cmd_buf, 
term_cmd_buf_size) != 0) { for(i = 0; i < term_last_cmd_buf_index; i++) { term_printf(\"\\033[D\"); } term_cmd_buf[term_cmd_buf_size] = '\\0'; term_printf(\"%s\", term_cmd_buf); term_printf(\"\\033[K\"); memcpy(term_last_cmd_buf, term_cmd_buf, term_cmd_buf_size); term_last_cmd_buf_size = term_cmd_buf_size; term_last_cmd_buf_index = term_cmd_buf_size; } if (term_cmd_buf_index != term_last_cmd_buf_index) { delta = term_cmd_buf_index - term_last_cmd_buf_index; if (delta > 0) { for(i = 0;i < delta; i++) { term_printf(\"\\033[C\"); } } else { delta = -delta; for(i = 0;i < delta; i++) { term_printf(\"\\033[D\"); } } term_last_cmd_buf_index = term_cmd_buf_index; } term_flush(); }"} {"target": 1, "idx": 19357, "func": "static uint32_t qvirtio_pci_config_readl(QVirtioDevice *d, uint64_t off) { QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d; uint32_t value; value = qpci_io_readl(dev->pdev, CONFIG_BASE(dev) + off); if (qvirtio_is_big_endian(d)) { value = bswap32(value); } return value; }"} {"target": 1, "idx": 19361, "func": "static int xa_read_packet(AVFormatContext *s, AVPacket *pkt) { MaxisXADemuxContext *xa = s->priv_data; AVStream *st = s->streams[0]; ByteIOContext *pb = s->pb; unsigned int packet_size; int ret; if(xa->sent_bytes > xa->out_size) return AVERROR(EIO); /* 1 byte header and 14 bytes worth of samples * number channels per block */ packet_size = 15*st->codec->channels; ret = av_get_packet(pb, pkt, packet_size); if(ret != packet_size) return AVERROR(EIO); pkt->stream_index = st->index; xa->sent_bytes += packet_size; pkt->pts = xa->audio_frame_counter; /* 14 bytes Samples per channel with 2 samples per byte */ xa->audio_frame_counter += 28 * st->codec->channels; return ret; }"} {"target": 1, "idx": 19366, "func": "int blk_mig_active(void) { return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list); }"} {"target": 0, "idx": 19371, "func": "static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) { int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start }; int bit = 0; int current_superblock = 0; int current_run = 0; int num_partial_superblocks = 0; int i, j; int current_fragment; int plane; if (s->keyframe) { memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count); } else { /* unpack the list of partially-coded superblocks */ bit = get_bits1(gb) ^ 1; current_run = 0; while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) { if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) bit = get_bits1(gb); else bit ^= 1; current_run = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1; if (current_run == 34) current_run += get_bits(gb, 12); if (current_superblock + current_run > s->superblock_count) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid partially coded superblock run length\\n\"); return -1; } memset(s->superblock_coding + current_superblock, bit, current_run); current_superblock += current_run; if (bit) num_partial_superblocks += current_run; } /* unpack the list of fully coded superblocks if any of the blocks were * not marked as partially coded in the previous step */ if (num_partial_superblocks < s->superblock_count) { int superblocks_decoded = 0; current_superblock = 0; bit = get_bits1(gb) ^ 1; current_run = 0; while (superblocks_decoded < s->superblock_count - num_partial_superblocks && get_bits_left(gb) > 0) { if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) bit = get_bits1(gb); else bit ^= 1; current_run = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1; if (current_run == 34) current_run += get_bits(gb, 
12); for (j = 0; j < current_run; current_superblock++) { if (current_superblock >= s->superblock_count) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid fully coded superblock run length\\n\"); return -1; } /* skip any superblocks already marked as partially coded */ if (s->superblock_coding[current_superblock] == SB_NOT_CODED) { s->superblock_coding[current_superblock] = 2 * bit; j++; } } superblocks_decoded += current_run; } } /* if there were partial blocks, initialize bitstream for * unpacking fragment codings */ if (num_partial_superblocks) { current_run = 0; bit = get_bits1(gb); /* toggle the bit because as soon as the first run length is * fetched the bit will be toggled again */ bit ^= 1; } } /* figure out which fragments are coded; iterate through each * superblock (all planes) */ s->total_num_coded_frags = 0; memset(s->macroblock_coding, MODE_COPY, s->macroblock_count); for (plane = 0; plane < 3; plane++) { int sb_start = superblock_starts[plane]; int sb_end = sb_start + (plane ? s->c_superblock_count : s->y_superblock_count); int num_coded_frags = 0; for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) { /* iterate through all 16 fragments in a superblock */ for (j = 0; j < 16; j++) { /* if the fragment is in bounds, check its coding status */ current_fragment = s->superblock_fragments[i * 16 + j]; if (current_fragment != -1) { int coded = s->superblock_coding[i]; if (s->superblock_coding[i] == SB_PARTIALLY_CODED) { /* fragment may or may not be coded; this is the case * that cares about the fragment coding runs */ if (current_run-- == 0) { bit ^= 1; current_run = get_vlc2(gb, s->fragment_run_length_vlc.table, 5, 2); } coded = bit; } if (coded) { /* default mode; actual mode will be decoded in * the next phase */ s->all_fragments[current_fragment].coding_method = MODE_INTER_NO_MV; s->coded_fragment_list[plane][num_coded_frags++] = current_fragment; } else { /* not coded; copy this fragment from the prior frame */ s->all_fragments[current_fragment].coding_method = MODE_COPY; } } } } s->total_num_coded_frags += num_coded_frags; for (i = 0; i < 64; i++) s->num_coded_frags[plane][i] = num_coded_frags; if (plane < 2) s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] + num_coded_frags; } return 0; }"} {"target": 0, "idx": 19372, "func": "static void isapc_machine_options(MachineClass *m) { pc_common_machine_options(m); m->desc = \"ISA-only PC\"; m->max_cpus = 1; }"} {"target": 0, "idx": 19406, "func": "static int init_video_param(AVCodecContext *avctx, QSVEncContext *q) { float quant; int ret; ret = ff_qsv_codec_id_to_mfx(avctx->codec_id); if (ret < 0) return AVERROR_BUG; q->param.mfx.CodecId = ret; q->width_align = avctx->codec_id == AV_CODEC_ID_HEVC ? 32 : 16; if (avctx->level > 0) q->param.mfx.CodecLevel = avctx->level; q->param.mfx.CodecProfile = q->profile; q->param.mfx.TargetUsage = q->preset; q->param.mfx.GopPicSize = FFMAX(0, avctx->gop_size); q->param.mfx.GopRefDist = FFMAX(-1, avctx->max_b_frames) + 1; q->param.mfx.GopOptFlag = avctx->flags & AV_CODEC_FLAG_CLOSED_GOP ? 
MFX_GOP_CLOSED : 0; q->param.mfx.IdrInterval = q->idr_interval; q->param.mfx.NumSlice = avctx->slices; q->param.mfx.NumRefFrame = FFMAX(0, avctx->refs); q->param.mfx.EncodedOrder = 0; q->param.mfx.BufferSizeInKB = 0; q->param.mfx.FrameInfo.FourCC = MFX_FOURCC_NV12; q->param.mfx.FrameInfo.Width = FFALIGN(avctx->width, q->width_align); q->param.mfx.FrameInfo.Height = FFALIGN(avctx->height, 32); q->param.mfx.FrameInfo.CropX = 0; q->param.mfx.FrameInfo.CropY = 0; q->param.mfx.FrameInfo.CropW = avctx->width; q->param.mfx.FrameInfo.CropH = avctx->height; q->param.mfx.FrameInfo.AspectRatioW = avctx->sample_aspect_ratio.num; q->param.mfx.FrameInfo.AspectRatioH = avctx->sample_aspect_ratio.den; q->param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE; q->param.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420; q->param.mfx.FrameInfo.BitDepthLuma = 8; q->param.mfx.FrameInfo.BitDepthChroma = 8; if (avctx->framerate.den > 0 && avctx->framerate.num > 0) { q->param.mfx.FrameInfo.FrameRateExtN = avctx->framerate.num; q->param.mfx.FrameInfo.FrameRateExtD = avctx->framerate.den; } else { q->param.mfx.FrameInfo.FrameRateExtN = avctx->time_base.den; q->param.mfx.FrameInfo.FrameRateExtD = avctx->time_base.num; } ret = select_rc_mode(avctx, q); if (ret < 0) return ret; switch (q->param.mfx.RateControlMethod) { case MFX_RATECONTROL_CBR: case MFX_RATECONTROL_VBR: #if QSV_HAVE_VCM case MFX_RATECONTROL_VCM: #endif q->param.mfx.InitialDelayInKB = avctx->rc_initial_buffer_occupancy / 1000; q->param.mfx.TargetKbps = avctx->bit_rate / 1000; q->param.mfx.MaxKbps = avctx->rc_max_rate / 1000; break; case MFX_RATECONTROL_CQP: quant = avctx->global_quality / FF_QP2LAMBDA; q->param.mfx.QPI = av_clip(quant * fabs(avctx->i_quant_factor) + avctx->i_quant_offset, 0, 51); q->param.mfx.QPP = av_clip(quant, 0, 51); q->param.mfx.QPB = av_clip(quant * fabs(avctx->b_quant_factor) + avctx->b_quant_offset, 0, 51); break; case MFX_RATECONTROL_AVBR: q->param.mfx.TargetKbps = avctx->bit_rate / 1000; q->param.mfx.Convergence = q->avbr_convergence; q->param.mfx.Accuracy = q->avbr_accuracy; break; #if QSV_HAVE_LA case MFX_RATECONTROL_LA: q->param.mfx.TargetKbps = avctx->bit_rate / 1000; q->extco2.LookAheadDepth = q->la_depth; break; #if QSV_HAVE_ICQ case MFX_RATECONTROL_LA_ICQ: q->extco2.LookAheadDepth = q->la_depth; case MFX_RATECONTROL_ICQ: q->param.mfx.ICQQuality = avctx->global_quality; break; #endif #endif } // the HEVC encoder plugin currently fails if coding options // are provided if (avctx->codec_id != AV_CODEC_ID_HEVC) { q->extco.Header.BufferId = MFX_EXTBUFF_CODING_OPTION; q->extco.Header.BufferSz = sizeof(q->extco); q->extco.CAVLC = avctx->coder_type == FF_CODER_TYPE_VLC ? MFX_CODINGOPTION_ON : MFX_CODINGOPTION_UNKNOWN; if (q->rdo >= 0) q->extco.RateDistortionOpt = q->rdo > 0 ? MFX_CODINGOPTION_ON : MFX_CODINGOPTION_OFF; if (avctx->codec_id == AV_CODEC_ID_H264) { if (avctx->strict_std_compliance != FF_COMPLIANCE_NORMAL) q->extco.NalHrdConformance = avctx->strict_std_compliance > FF_COMPLIANCE_NORMAL ? MFX_CODINGOPTION_ON : MFX_CODINGOPTION_OFF; if (q->single_sei_nal_unit >= 0) q->extco.SingleSeiNalUnit = q->single_sei_nal_unit ? MFX_CODINGOPTION_ON : MFX_CODINGOPTION_OFF; if (q->recovery_point_sei >= 0) q->extco.RecoveryPointSEI = q->recovery_point_sei ? 
MFX_CODINGOPTION_ON : MFX_CODINGOPTION_OFF; q->extco.MaxDecFrameBuffering = q->max_dec_frame_buffering; } q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->extco; #if QSV_HAVE_CO2 if (avctx->codec_id == AV_CODEC_ID_H264) { q->extco2.Header.BufferId = MFX_EXTBUFF_CODING_OPTION2; q->extco2.Header.BufferSz = sizeof(q->extco2); if (q->int_ref_type >= 0) q->extco2.IntRefType = q->int_ref_type; if (q->int_ref_cycle_size >= 0) q->extco2.IntRefCycleSize = q->int_ref_cycle_size; if (q->int_ref_qp_delta != INT16_MIN) q->extco2.IntRefQPDelta = q->int_ref_qp_delta; if (q->bitrate_limit >= 0) q->extco2.BitrateLimit = q->bitrate_limit ? MFX_CODINGOPTION_ON : MFX_CODINGOPTION_OFF; if (q->mbbrc >= 0) q->extco2.MBBRC = q->mbbrc ? MFX_CODINGOPTION_ON : MFX_CODINGOPTION_OFF; if (q->extbrc >= 0) q->extco2.ExtBRC = q->extbrc ? MFX_CODINGOPTION_ON : MFX_CODINGOPTION_OFF; if (q->max_frame_size >= 0) q->extco2.MaxFrameSize = q->max_frame_size; #if QSV_HAVE_MAX_SLICE_SIZE if (q->max_slice_size >= 0) q->extco2.MaxSliceSize = q->max_slice_size; #endif #if QSV_HAVE_TRELLIS q->extco2.Trellis = q->trellis; #endif #if QSV_HAVE_BREF_TYPE if (avctx->b_frame_strategy >= 0) q->extco2.BRefType = avctx->b_frame_strategy ? MFX_B_REF_PYRAMID : MFX_B_REF_OFF; if (q->adaptive_i >= 0) q->extco2.AdaptiveI = q->adaptive_i ? MFX_CODINGOPTION_ON : MFX_CODINGOPTION_OFF; if (q->adaptive_b >= 0) q->extco2.AdaptiveB = q->adaptive_b ? MFX_CODINGOPTION_ON : MFX_CODINGOPTION_OFF; #endif q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->extco2; } #endif } if (!rc_supported(q)) { av_log(avctx, AV_LOG_ERROR, \"Selected ratecontrol mode is not supported by the QSV \" \"runtime. Choose a different mode.\\n\"); return AVERROR(ENOSYS); } return 0; }"} {"target": 1, "idx": 19414, "func": "static void lag_pred_line(LagarithContext *l, uint8_t *buf, int width, int stride, int line) { int L, TL; /* Left pixel is actually prev_row[width] */ L = buf[width - stride - 1]; if (!line) { /* Left prediction only for first line */ L = l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1, width - 1, buf[0]); return; } else if (line == 1) { /* Second line, left predict first pixel, the rest of the line is median predicted * NOTE: In the case of RGB this pixel is top predicted */ TL = l->avctx->pix_fmt == PIX_FMT_YUV420P ? 
buf[-stride] : L; } else { /* Top left is 2 rows back, last pixel */ TL = buf[width - (2 * stride) - 1]; } add_lag_median_prediction(buf, buf - stride, buf, width, &L, &TL); }"} {"target": 1, "idx": 19423, "func": "static inline AIOReq *alloc_aio_req(BDRVSheepdogState *s, SheepdogAIOCB *acb, uint64_t oid, unsigned int data_len, uint64_t offset, uint8_t flags, uint64_t base_oid, unsigned int iov_offset) { AIOReq *aio_req; aio_req = g_malloc(sizeof(*aio_req)); aio_req->aiocb = acb; aio_req->iov_offset = iov_offset; aio_req->oid = oid; aio_req->base_oid = base_oid; aio_req->offset = offset; aio_req->data_len = data_len; aio_req->flags = flags; aio_req->id = s->aioreq_seq_num++; acb->nr_pending++; return aio_req; }"} {"target": 1, "idx": 19435, "func": "static void ff_acelp_interpolatef_mips(float *out, const float *in, const float *filter_coeffs, int precision, int frac_pos, int filter_length, int length) { int n, i; int prec = precision * 4; int fc_offset = precision - frac_pos; float in_val_p, in_val_m, fc_val_p, fc_val_m; for (n = 0; n < length; n++) { /** * four pointers are defined in order to minimize number of * computations done in inner loop */ const float *p_in_p = &in[n]; const float *p_in_m = &in[n-1]; const float *p_filter_coeffs_p = &filter_coeffs[frac_pos]; const float *p_filter_coeffs_m = filter_coeffs + fc_offset; float v = 0; for (i = 0; i < filter_length;i++) { __asm__ volatile ( \"lwc1 %[in_val_p], 0(%[p_in_p]) \\n\\t\" \"lwc1 %[fc_val_p], 0(%[p_filter_coeffs_p]) \\n\\t\" \"lwc1 %[in_val_m], 0(%[p_in_m]) \\n\\t\" \"lwc1 %[fc_val_m], 0(%[p_filter_coeffs_m]) \\n\\t\" \"addiu %[p_in_p], %[p_in_p], 4 \\n\\t\" \"madd.s %[v],%[v], %[in_val_p],%[fc_val_p] \\n\\t\" \"addiu %[p_in_m], %[p_in_m], -4 \\n\\t\" \"addu %[p_filter_coeffs_p], %[p_filter_coeffs_p], %[prec] \\n\\t\" \"addu %[p_filter_coeffs_m], %[p_filter_coeffs_m], %[prec] \\n\\t\" \"madd.s %[v],%[v],%[in_val_m], %[fc_val_m] \\n\\t\" : [v] \"=&f\" (v),[p_in_p] \"+r\" (p_in_p), [p_in_m] \"+r\" (p_in_m), [p_filter_coeffs_p] \"+r\" (p_filter_coeffs_p), [in_val_p] \"=&f\" (in_val_p), [in_val_m] \"=&f\" (in_val_m), [fc_val_p] \"=&f\" (fc_val_p), [fc_val_m] \"=&f\" (fc_val_m), [p_filter_coeffs_m] \"+r\" (p_filter_coeffs_m) : [prec] \"r\" (prec) : \"memory\" ); } out[n] = v; } }"} {"target": 1, "idx": 19462, "func": "static int mpegts_push_data(MpegTSFilter *filter, const uint8_t *buf, int buf_size, int is_start, int64_t pos) { PESContext *pes = filter->u.pes_filter.opaque; MpegTSContext *ts = pes->ts; const uint8_t *p; int len, code; if(!ts->pkt) return 0; if (is_start) { if (pes->state == MPEGTS_PAYLOAD && pes->data_index > 0) { new_pes_packet(pes, ts->pkt); ts->stop_parse = 1; } pes->state = MPEGTS_HEADER; pes->data_index = 0; pes->ts_packet_pos = pos; } p = buf; while (buf_size > 0) { switch(pes->state) { case MPEGTS_HEADER: len = PES_START_SIZE - pes->data_index; if (len > buf_size) len = buf_size; memcpy(pes->header + pes->data_index, p, len); pes->data_index += len; p += len; buf_size -= len; if (pes->data_index == PES_START_SIZE) { /* we got all the PES or section header. 
We can now decide */ if (pes->header[0] == 0x00 && pes->header[1] == 0x00 && pes->header[2] == 0x01) { /* it must be an mpeg2 PES stream */ code = pes->header[3] | 0x100; av_dlog(pes->stream, \"pid=%x pes_code=%#x\\n\", pes->pid, code); if ((pes->st && pes->st->discard == AVDISCARD_ALL) || code == 0x1be) /* padding_stream */ goto skip; /* stream not present in PMT */ if (!pes->st) { pes->st = av_new_stream(ts->stream, pes->pid); if (!pes->st) return AVERROR(ENOMEM); mpegts_set_stream_info(pes->st, pes, 0, 0); } pes->total_size = AV_RB16(pes->header + 4); /* NOTE: a zero total size means the PES size is unbounded */ if (!pes->total_size) pes->total_size = MAX_PES_PAYLOAD; /* allocate pes buffer */ pes->buffer = av_malloc(pes->total_size+FF_INPUT_BUFFER_PADDING_SIZE); if (!pes->buffer) return AVERROR(ENOMEM); if (code != 0x1bc && code != 0x1bf && /* program_stream_map, private_stream_2 */ code != 0x1f0 && code != 0x1f1 && /* ECM, EMM */ code != 0x1ff && code != 0x1f2 && /* program_stream_directory, DSMCC_stream */ code != 0x1f8) { /* ITU-T Rec. H.222.1 type E stream */ pes->state = MPEGTS_PESHEADER; if (pes->st->codec->codec_id == CODEC_ID_NONE) { av_dlog(pes->stream, \"pid=%x stream_type=%x probing\\n\", pes->pid, pes->stream_type); pes->st->codec->codec_id = CODEC_ID_PROBE; } } else { pes->state = MPEGTS_PAYLOAD; pes->data_index = 0; } } else { /* otherwise, it should be a table */ /* skip packet */ skip: pes->state = MPEGTS_SKIP; continue; } } break; /**********************************************/ /* PES packing parsing */ case MPEGTS_PESHEADER: len = PES_HEADER_SIZE - pes->data_index; if (len < 0) return -1; if (len > buf_size) len = buf_size; memcpy(pes->header + pes->data_index, p, len); pes->data_index += len; p += len; buf_size -= len; if (pes->data_index == PES_HEADER_SIZE) { pes->pes_header_size = pes->header[8] + 9; pes->state = MPEGTS_PESHEADER_FILL; } break; case MPEGTS_PESHEADER_FILL: len = pes->pes_header_size - pes->data_index; if (len < 0) return -1; if (len > buf_size) len = buf_size; memcpy(pes->header + pes->data_index, p, len); pes->data_index += len; p += len; buf_size -= len; if (pes->data_index == pes->pes_header_size) { const uint8_t *r; unsigned int flags, pes_ext, skip; flags = pes->header[7]; r = pes->header + 9; pes->pts = AV_NOPTS_VALUE; pes->dts = AV_NOPTS_VALUE; if ((flags & 0xc0) == 0x80) { pes->dts = pes->pts = ff_parse_pes_pts(r); r += 5; } else if ((flags & 0xc0) == 0xc0) { pes->pts = ff_parse_pes_pts(r); r += 5; pes->dts = ff_parse_pes_pts(r); r += 5; } pes->extended_stream_id = -1; if (flags & 0x01) { /* PES extension */ pes_ext = *r++; /* Skip PES private data, program packet sequence counter and P-STD buffer */ skip = (pes_ext >> 4) & 0xb; skip += skip & 0x9; r += skip; if ((pes_ext & 0x41) == 0x01 && (r + 2) <= (pes->header + pes->pes_header_size)) { /* PES extension 2 */ if ((r[0] & 0x7f) > 0 && (r[1] & 0x80) == 0) pes->extended_stream_id = r[1]; } } /* we got the full header. 
We parse it and get the payload */ pes->state = MPEGTS_PAYLOAD; pes->data_index = 0; } break; case MPEGTS_PAYLOAD: if (buf_size > 0 && pes->buffer) { if (pes->data_index > 0 && pes->data_index+buf_size > pes->total_size) { new_pes_packet(pes, ts->pkt); pes->total_size = MAX_PES_PAYLOAD; pes->buffer = av_malloc(pes->total_size+FF_INPUT_BUFFER_PADDING_SIZE); if (!pes->buffer) return AVERROR(ENOMEM); ts->stop_parse = 1; } else if (pes->data_index == 0 && buf_size > pes->total_size) { // pes packet size is < ts size packet and pes data is padded with 0xff // not sure if this is legal in ts but see issue #2392 buf_size = pes->total_size; pes->flags |= AV_PKT_FLAG_CORRUPT; } memcpy(pes->buffer+pes->data_index, p, buf_size); pes->data_index += buf_size; } buf_size = 0; /* emit complete packets with known packet size * decreases demuxer delay for infrequent packets like subtitles from * a couple of seconds to milliseconds for properly muxed files. * total_size is the number of bytes following pes_packet_length * in the pes header, i.e. not counting the first 6 bytes */ if (!ts->stop_parse && pes->total_size < MAX_PES_PAYLOAD && pes->pes_header_size + pes->data_index == pes->total_size + 6) { ts->stop_parse = 1; new_pes_packet(pes, ts->pkt); } break; case MPEGTS_SKIP: buf_size = 0; break; } } return 0; }"} {"target": 0, "idx": 19472, "func": "static void mov_build_index(MOVContext *mov, AVStream *st) { MOVStreamContext *sc = st->priv_data; int64_t current_offset; int64_t current_dts = 0; unsigned int stts_index = 0; unsigned int stsc_index = 0; unsigned int stss_index = 0; unsigned int stps_index = 0; unsigned int i, j; uint64_t stream_size = 0; /* adjust first dts according to edit list */ if (sc->time_offset && mov->time_scale > 0) { if (sc->time_offset < 0) sc->time_offset = av_rescale(sc->time_offset, sc->time_scale, mov->time_scale); current_dts = -sc->time_offset; if (sc->ctts_data && sc->stts_data && sc->stts_data[0].duration && sc->ctts_data[0].duration / sc->stts_data[0].duration > 16) { /* more than 16 frames delay, dts are likely wrong this happens with files created by iMovie */ sc->wrong_dts = 1; st->internal->avctx->has_b_frames = 1; } } /* only use old uncompressed audio chunk demuxing when stts specifies it */ if (!(st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && sc->stts_count == 1 && sc->stts_data[0].duration == 1)) { unsigned int current_sample = 0; unsigned int stts_sample = 0; unsigned int sample_size; unsigned int distance = 0; unsigned int rap_group_index = 0; unsigned int rap_group_sample = 0; int rap_group_present = sc->rap_group_count && sc->rap_group; int key_off = (sc->keyframes && sc->keyframes[0] > 0) || (sc->stps_data && sc->stps_data[0] > 0); current_dts -= sc->dts_shift; if (!sc->sample_count) return; if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries) return; if (av_reallocp_array(&st->index_entries, st->nb_index_entries + sc->sample_count, sizeof(*st->index_entries)) < 0) { st->nb_index_entries = 0; return; } st->index_entries_allocated_size = (st->nb_index_entries + sc->sample_count) * sizeof(*st->index_entries); for (i = 0; i < sc->chunk_count; i++) { current_offset = sc->chunk_offsets[i]; while (mov_stsc_index_valid(stsc_index, sc->stsc_count) && i + 1 == sc->stsc_data[stsc_index + 1].first) stsc_index++; for (j = 0; j < sc->stsc_data[stsc_index].count; j++) { int keyframe = 0; if (current_sample >= sc->sample_count) { av_log(mov->fc, AV_LOG_ERROR, \"wrong sample count\\n\"); return; } if (!sc->keyframe_absent && 
(!sc->keyframe_count || current_sample+key_off == sc->keyframes[stss_index])) { keyframe = 1; if (stss_index + 1 < sc->keyframe_count) stss_index++; } else if (sc->stps_count && current_sample+key_off == sc->stps_data[stps_index]) { keyframe = 1; if (stps_index + 1 < sc->stps_count) stps_index++; } if (rap_group_present && rap_group_index < sc->rap_group_count) { if (sc->rap_group[rap_group_index].index > 0) keyframe = 1; if (++rap_group_sample == sc->rap_group[rap_group_index].count) { rap_group_sample = 0; rap_group_index++; } } if (keyframe) distance = 0; sample_size = sc->sample_size > 0 ? sc->sample_size : sc->sample_sizes[current_sample]; if (sc->pseudo_stream_id == -1 || sc->stsc_data[stsc_index].id - 1 == sc->pseudo_stream_id) { AVIndexEntry *e = &st->index_entries[st->nb_index_entries++]; e->pos = current_offset; e->timestamp = current_dts; e->size = sample_size; e->min_distance = distance; e->flags = keyframe ? AVINDEX_KEYFRAME : 0; av_log(mov->fc, AV_LOG_TRACE, \"AVIndex stream %d, sample %d, offset %\"PRIx64\", dts %\"PRId64\", \" \"size %d, distance %d, keyframe %d\\n\", st->index, current_sample, current_offset, current_dts, sample_size, distance, keyframe); } current_offset += sample_size; stream_size += sample_size; current_dts += sc->stts_data[stts_index].duration; distance++; stts_sample++; current_sample++; if (stts_index + 1 < sc->stts_count && stts_sample == sc->stts_data[stts_index].count) { stts_sample = 0; stts_index++; } } } if (st->duration > 0) st->codecpar->bit_rate = stream_size*8*sc->time_scale/st->duration; } else { unsigned chunk_samples, total = 0; // compute total chunk count for (i = 0; i < sc->stsc_count; i++) { unsigned count, chunk_count; chunk_samples = sc->stsc_data[i].count; if (i != sc->stsc_count - 1 && sc->samples_per_frame && chunk_samples % sc->samples_per_frame) { av_log(mov->fc, AV_LOG_ERROR, \"error unaligned chunk\\n\"); return; } if (sc->samples_per_frame >= 160) { // gsm count = chunk_samples / sc->samples_per_frame; } else if (sc->samples_per_frame > 1) { unsigned samples = (1024/sc->samples_per_frame)*sc->samples_per_frame; count = (chunk_samples+samples-1) / samples; } else { count = (chunk_samples+1023) / 1024; } if (mov_stsc_index_valid(i, sc->stsc_count)) chunk_count = sc->stsc_data[i+1].first - sc->stsc_data[i].first; else chunk_count = sc->chunk_count - (sc->stsc_data[i].first - 1); total += chunk_count * count; } av_log(mov->fc, AV_LOG_TRACE, \"chunk count %d\\n\", total); if (total >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries) return; if (av_reallocp_array(&st->index_entries, st->nb_index_entries + total, sizeof(*st->index_entries)) < 0) { st->nb_index_entries = 0; return; } st->index_entries_allocated_size = (st->nb_index_entries + total) * sizeof(*st->index_entries); // populate index for (i = 0; i < sc->chunk_count; i++) { current_offset = sc->chunk_offsets[i]; if (mov_stsc_index_valid(stsc_index, sc->stsc_count) && i + 1 == sc->stsc_data[stsc_index + 1].first) stsc_index++; chunk_samples = sc->stsc_data[stsc_index].count; while (chunk_samples > 0) { AVIndexEntry *e; unsigned size, samples; if (sc->samples_per_frame > 1 && !sc->bytes_per_frame) { avpriv_request_sample(mov->fc, \"Zero bytes per frame, but %d samples per frame\", sc->samples_per_frame); return; } if (sc->samples_per_frame >= 160) { // gsm samples = sc->samples_per_frame; size = sc->bytes_per_frame; } else { if (sc->samples_per_frame > 1) { samples = FFMIN((1024 / sc->samples_per_frame)* sc->samples_per_frame, chunk_samples); size = (samples / 
sc->samples_per_frame) * sc->bytes_per_frame; } else { samples = FFMIN(1024, chunk_samples); size = samples * sc->sample_size; } } if (st->nb_index_entries >= total) { av_log(mov->fc, AV_LOG_ERROR, \"wrong chunk count %d\\n\", total); return; } e = &st->index_entries[st->nb_index_entries++]; e->pos = current_offset; e->timestamp = current_dts; e->size = size; e->min_distance = 0; e->flags = AVINDEX_KEYFRAME; av_log(mov->fc, AV_LOG_TRACE, \"AVIndex stream %d, chunk %d, offset %\"PRIx64\", dts %\"PRId64\", \" \"size %d, duration %d\\n\", st->index, i, current_offset, current_dts, size, samples); current_offset += size; current_dts += samples; chunk_samples -= samples; } } } }"} {"target": 0, "idx": 19476, "func": "static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf) { SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req); switch (buf[0]) { case REPORT_LUNS: if (!scsi_target_emulate_report_luns(r)) { goto illegal_request; } break; case INQUIRY: if (!scsi_target_emulate_inquiry(r)) { goto illegal_request; } break; case REQUEST_SENSE: scsi_target_alloc_buf(&r->req, scsi_sense_len(req)); r->len = scsi_device_get_sense(r->req.dev, r->buf, MIN(req->cmd.xfer, r->buf_len), (req->cmd.buf[1] & 1) == 0); if (r->req.dev->sense_is_ua) { scsi_device_unit_attention_reported(req->dev); r->req.dev->sense_len = 0; r->req.dev->sense_is_ua = false; } break; case TEST_UNIT_READY: break; default: scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED)); scsi_req_complete(req, CHECK_CONDITION); return 0; illegal_request: scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD)); scsi_req_complete(req, CHECK_CONDITION); return 0; } if (!r->len) { scsi_req_complete(req, GOOD); } return r->len; }"} {"target": 0, "idx": 19479, "func": "static inline int onenand_load_spare(OneNANDState *s, int sec, int secn, void *dest) { uint8_t buf[512]; if (s->bdrv_cur) { if (bdrv_read(s->bdrv_cur, s->secs_cur + (sec >> 5), buf, 1) < 0) return 1; memcpy(dest, buf + ((sec & 31) << 4), secn << 4); } else if (sec + secn > s->secs_cur) return 1; else memcpy(dest, s->current + (s->secs_cur << 9) + (sec << 4), secn << 4); return 0; }"} {"target": 0, "idx": 19480, "func": "static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) { switch (fccno) { case 0: gen_helper_fcmps(cpu_env, r_rs1, r_rs2); break; case 1: gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2); break; case 2: gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2); break; case 3: gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2); break; } }"} {"target": 0, "idx": 19489, "func": "static void tpm_tis_receive_cb(TPMState *s, uint8_t locty) { TPMTISEmuState *tis = &s->s.tis; assert(s->locty_number == locty); qemu_bh_schedule(tis->bh); }"} {"target": 0, "idx": 19504, "func": "void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len, int is_write, target_phys_addr_t access_len) { if (buffer != bounce.buffer) { if (is_write) { ram_addr_t addr1 = qemu_ram_addr_from_host(buffer); while (access_len) { unsigned l; l = TARGET_PAGE_SIZE; if (l > access_len) l = access_len; if (!cpu_physical_memory_is_dirty(addr1)) { /* invalidate code */ tb_invalidate_phys_page_range(addr1, addr1 + l, 0); /* set dirty bit */ phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= (0xff & ~CODE_DIRTY_FLAG); } addr1 += l; access_len -= l; } } return; } if (is_write) { cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len); } qemu_vfree(bounce.buffer); bounce.buffer = NULL; cpu_notify_map_clients(); }"} {"target": 0, "idx": 19520, "func": "void qdev_prop_set_defaults(DeviceState *dev, 
Property *props) { if (!props) return; while (props->name) { if (props->defval) { qdev_prop_cpy(dev, props, props->defval); } props++; } }"} {"target": 0, "idx": 19523, "func": "static inline void gen_op_evabs(TCGv_i32 ret, TCGv_i32 arg1) { int l1 = gen_new_label(); int l2 = gen_new_label(); tcg_gen_brcondi_i32(TCG_COND_GE, arg1, 0, l1); tcg_gen_neg_i32(ret, arg1); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_mov_i32(ret, arg1); gen_set_label(l2); }"} {"target": 0, "idx": 19525, "func": "static void video_audio_display(VideoState *s) { int i, i_start, x, y1, y, ys, delay, n, nb_display_channels; int ch, channels, h, h2, bgcolor, fgcolor; int16_t time_diff; int rdft_bits, nb_freq; for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++) ; nb_freq = 1 << (rdft_bits - 1); /* compute display index : center on currently output samples */ channels = s->audio_st->codec->channels; nb_display_channels = channels; if (!s->paused) { int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq); n = 2 * channels; delay = audio_write_get_buf_size(s); delay /= n; /* to be more precise, we take into account the time spent since the last buffer computation */ if (audio_callback_time) { time_diff = av_gettime() - audio_callback_time; delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000; } delay += 2 * data_used; if (delay < data_used) delay = data_used; i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE); if (s->show_audio == 1) { h = INT_MIN; for (i = 0; i < 1000; i += channels) { int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE; int a = s->sample_array[idx]; int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE]; int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE]; int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE]; int score = a - d; if (h < score && (b ^ c) < 0) { h = score; i_start = idx; } } } s->last_i_start = i_start; } else { i_start = s->last_i_start; } bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00); if (s->show_audio == 1) { fill_rectangle(screen, s->xleft, s->ytop, s->width, s->height, bgcolor); fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff); /* total height for one channel */ h = s->height / nb_display_channels; /* graph height / 2 */ h2 = (h * 9) / 20; for (ch = 0; ch < nb_display_channels; ch++) { i = i_start + ch; y1 = s->ytop + ch * h + (h / 2); /* position of center line */ for (x = 0; x < s->width; x++) { y = (s->sample_array[i] * h2) >> 15; if (y < 0) { y = -y; ys = y1 - y; } else { ys = y1; } fill_rectangle(screen, s->xleft + x, ys, 1, y, fgcolor); i += channels; if (i >= SAMPLE_ARRAY_SIZE) i -= SAMPLE_ARRAY_SIZE; } } fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff); for (ch = 1; ch < nb_display_channels; ch++) { y = s->ytop + ch * h; fill_rectangle(screen, s->xleft, y, s->width, 1, fgcolor); } SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height); } else { nb_display_channels= FFMIN(nb_display_channels, 2); if (rdft_bits != s->rdft_bits) { av_rdft_end(s->rdft); av_free(s->rdft_data); s->rdft = av_rdft_init(rdft_bits, DFT_R2C); s->rdft_bits = rdft_bits; s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data)); } { FFTSample *data[2]; for (ch = 0; ch < nb_display_channels; ch++) { data[ch] = s->rdft_data + 2 * nb_freq * ch; i = i_start + ch; for (x = 0; x < 2 * nb_freq; x++) { double w = (x-nb_freq) * (1.0 / nb_freq); data[ch][x] = s->sample_array[i] * (1.0 - w * w); i += channels; if (i >= SAMPLE_ARRAY_SIZE) i -= SAMPLE_ARRAY_SIZE; } 
av_rdft_calc(s->rdft, data[ch]); } // least efficient way to do this, we should of course directly access it but its more than fast enough for (y = 0; y < s->height; y++) { double w = 1 / sqrt(nb_freq); int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1])); int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0] + data[1][2 * y + 1] * data[1][2 * y + 1])) : a; a = FFMIN(a, 255); b = FFMIN(b, 255); fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2); fill_rectangle(screen, s->xpos, s->height-y, 1, 1, fgcolor); } } SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height); s->xpos++; if (s->xpos >= s->width) s->xpos= s->xleft; } }"} {"target": 1, "idx": 19530, "func": "static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs) { int ret; GlusterAIOCB acb; BDRVGlusterState *s = bs->opaque; acb.size = 0; acb.ret = 0; acb.coroutine = qemu_coroutine_self(); acb.aio_context = bdrv_get_aio_context(bs); ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb); if (ret < 0) { return -errno; } qemu_coroutine_yield(); return acb.ret; }"} {"target": 1, "idx": 19539, "func": "static DriveInfo *blockdev_init(QemuOpts *all_opts, BlockInterfaceType block_default_type) { const char *buf; const char *file = NULL; const char *serial; const char *mediastr = \"\"; BlockInterfaceType type; enum { MEDIA_DISK, MEDIA_CDROM } media; int bus_id, unit_id; int cyls, heads, secs, translation; int max_devs; int index; int ro = 0; int bdrv_flags = 0; int on_read_error, on_write_error; const char *devaddr; DriveInfo *dinfo; ThrottleConfig cfg; int snapshot = 0; bool copy_on_read; int ret; Error *error = NULL; QemuOpts *opts; QDict *bs_opts; const char *id; bool has_driver_specific_opts; BlockDriver *drv = NULL; translation = BIOS_ATA_TRANSLATION_AUTO; media = MEDIA_DISK; /* Check common options by copying from all_opts to opts, all other options * are stored in bs_opts. 
*/ id = qemu_opts_id(all_opts); opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, &error); if (error_is_set(&error)) { qerror_report_err(error); error_free(error); return NULL; } bs_opts = qdict_new(); qemu_opts_to_qdict(all_opts, bs_opts); qemu_opts_absorb_qdict(opts, bs_opts, &error); if (error_is_set(&error)) { qerror_report_err(error); error_free(error); return NULL; } if (id) { qdict_del(bs_opts, \"id\"); } has_driver_specific_opts = !!qdict_size(bs_opts); /* extract parameters */ bus_id = qemu_opt_get_number(opts, \"bus\", 0); unit_id = qemu_opt_get_number(opts, \"unit\", -1); index = qemu_opt_get_number(opts, \"index\", -1); cyls = qemu_opt_get_number(opts, \"cyls\", 0); heads = qemu_opt_get_number(opts, \"heads\", 0); secs = qemu_opt_get_number(opts, \"secs\", 0); snapshot = qemu_opt_get_bool(opts, \"snapshot\", 0); ro = qemu_opt_get_bool(opts, \"read-only\", 0); copy_on_read = qemu_opt_get_bool(opts, \"copy-on-read\", false); file = qemu_opt_get(opts, \"file\"); serial = qemu_opt_get(opts, \"serial\"); if ((buf = qemu_opt_get(opts, \"if\")) != NULL) { for (type = 0; type < IF_COUNT && strcmp(buf, if_name[type]); type++) ; if (type == IF_COUNT) { error_report(\"unsupported bus type '%s'\", buf); return NULL; } } else { type = block_default_type; } max_devs = if_max_devs[type]; if (cyls || heads || secs) { if (cyls < 1) { error_report(\"invalid physical cyls number\"); return NULL; } if (heads < 1) { error_report(\"invalid physical heads number\"); return NULL; } if (secs < 1) { error_report(\"invalid physical secs number\"); return NULL; } } if ((buf = qemu_opt_get(opts, \"trans\")) != NULL) { if (!cyls) { error_report(\"'%s' trans must be used with cyls, heads and secs\", buf); return NULL; } if (!strcmp(buf, \"none\")) translation = BIOS_ATA_TRANSLATION_NONE; else if (!strcmp(buf, \"lba\")) translation = BIOS_ATA_TRANSLATION_LBA; else if (!strcmp(buf, \"auto\")) translation = BIOS_ATA_TRANSLATION_AUTO; else { error_report(\"'%s' invalid translation type\", buf); return NULL; } } if ((buf = qemu_opt_get(opts, \"media\")) != NULL) { if (!strcmp(buf, \"disk\")) { media = MEDIA_DISK; } else if (!strcmp(buf, \"cdrom\")) { if (cyls || secs || heads) { error_report(\"CHS can't be set with media=%s\", buf); return NULL; } media = MEDIA_CDROM; } else { error_report(\"'%s' invalid media\", buf); return NULL; } } if ((buf = qemu_opt_get(opts, \"discard\")) != NULL) { if (bdrv_parse_discard_flags(buf, &bdrv_flags) != 0) { error_report(\"invalid discard option\"); return NULL; } } if (qemu_opt_get_bool(opts, \"cache.writeback\", true)) { bdrv_flags |= BDRV_O_CACHE_WB; } if (qemu_opt_get_bool(opts, \"cache.direct\", false)) { bdrv_flags |= BDRV_O_NOCACHE; } if (qemu_opt_get_bool(opts, \"cache.no-flush\", false)) { bdrv_flags |= BDRV_O_NO_FLUSH; } #ifdef CONFIG_LINUX_AIO if ((buf = qemu_opt_get(opts, \"aio\")) != NULL) { if (!strcmp(buf, \"native\")) { bdrv_flags |= BDRV_O_NATIVE_AIO; } else if (!strcmp(buf, \"threads\")) { /* this is the default */ } else { error_report(\"invalid aio option\"); return NULL; } } #endif if ((buf = qemu_opt_get(opts, \"format\")) != NULL) { if (is_help_option(buf)) { error_printf(\"Supported formats:\"); bdrv_iterate_format(bdrv_format_print, NULL); error_printf(\"\\n\"); return NULL; } drv = bdrv_find_whitelisted_format(buf, ro); if (!drv) { if (!ro && bdrv_find_whitelisted_format(buf, !ro)) { error_report(\"'%s' can be only used as read-only device.\", buf); } else { error_report(\"'%s' invalid format\", buf); } return NULL; } } /* disk I/O throttling */ 
memset(&cfg, 0, sizeof(cfg)); cfg.buckets[THROTTLE_BPS_TOTAL].avg = qemu_opt_get_number(opts, \"throttling.bps-total\", 0); cfg.buckets[THROTTLE_BPS_READ].avg = qemu_opt_get_number(opts, \"throttling.bps-read\", 0); cfg.buckets[THROTTLE_BPS_WRITE].avg = qemu_opt_get_number(opts, \"throttling.bps-write\", 0); cfg.buckets[THROTTLE_OPS_TOTAL].avg = qemu_opt_get_number(opts, \"throttling.iops-total\", 0); cfg.buckets[THROTTLE_OPS_READ].avg = qemu_opt_get_number(opts, \"throttling.iops-read\", 0); cfg.buckets[THROTTLE_OPS_WRITE].avg = qemu_opt_get_number(opts, \"throttling.iops-write\", 0); cfg.buckets[THROTTLE_BPS_TOTAL].max = qemu_opt_get_number(opts, \"throttling.bps-total-max\", 0); cfg.buckets[THROTTLE_BPS_READ].max = qemu_opt_get_number(opts, \"throttling.bps-read-max\", 0); cfg.buckets[THROTTLE_BPS_WRITE].max = qemu_opt_get_number(opts, \"throttling.bps-write-max\", 0); cfg.buckets[THROTTLE_OPS_TOTAL].max = qemu_opt_get_number(opts, \"throttling.iops-total-max\", 0); cfg.buckets[THROTTLE_OPS_READ].max = qemu_opt_get_number(opts, \"throttling.iops-read-max\", 0); cfg.buckets[THROTTLE_OPS_WRITE].max = qemu_opt_get_number(opts, \"throttling.iops-write-max\", 0); cfg.op_size = qemu_opt_get_number(opts, \"throttling.iops-size\", 0); if (!check_throttle_config(&cfg, &error)) { error_report(\"%s\", error_get_pretty(error)); error_free(error); return NULL; } if (qemu_opt_get(opts, \"boot\") != NULL) { fprintf(stderr, \"qemu-kvm: boot=on|off is deprecated and will be \" \"ignored. Future versions will reject this parameter. Please \" \"update your scripts.\\n\"); } on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; if ((buf = qemu_opt_get(opts, \"werror\")) != NULL) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO && type != IF_NONE) { error_report(\"werror is not supported by this bus type\"); return NULL; } on_write_error = parse_block_error_action(buf, 0); if (on_write_error < 0) { return NULL; } } on_read_error = BLOCKDEV_ON_ERROR_REPORT; if ((buf = qemu_opt_get(opts, \"rerror\")) != NULL) { if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI && type != IF_NONE) { error_report(\"rerror is not supported by this bus type\"); return NULL; } on_read_error = parse_block_error_action(buf, 1); if (on_read_error < 0) { return NULL; } } if ((devaddr = qemu_opt_get(opts, \"addr\")) != NULL) { if (type != IF_VIRTIO) { error_report(\"addr is not supported by this bus type\"); return NULL; } } /* compute bus and unit according index */ if (index != -1) { if (bus_id != 0 || unit_id != -1) { error_report(\"index cannot be used with bus and unit\"); return NULL; } bus_id = drive_index_to_bus_id(type, index); unit_id = drive_index_to_unit_id(type, index); } /* if user doesn't specify a unit_id, * try to find the first free */ if (unit_id == -1) { unit_id = 0; while (drive_get(type, bus_id, unit_id) != NULL) { unit_id++; if (max_devs && unit_id >= max_devs) { unit_id -= max_devs; bus_id++; } } } /* check unit id */ if (max_devs && unit_id >= max_devs) { error_report(\"unit %d too big (max is %d)\", unit_id, max_devs - 1); return NULL; } /* * catch multiple definitions */ if (drive_get(type, bus_id, unit_id) != NULL) { error_report(\"drive with bus=%d, unit=%d (index=%d) exists\", bus_id, unit_id, index); return NULL; } /* init */ dinfo = g_malloc0(sizeof(*dinfo)); if ((buf = qemu_opts_id(opts)) != NULL) { dinfo->id = g_strdup(buf); } else { /* no id supplied -> create one */ dinfo->id = g_malloc0(32); if (type == IF_IDE || type == IF_SCSI) mediastr = (media == MEDIA_CDROM) ? 
\"-cd\" : \"-hd\"; if (max_devs) snprintf(dinfo->id, 32, \"%s%i%s%i\", if_name[type], bus_id, mediastr, unit_id); else snprintf(dinfo->id, 32, \"%s%s%i\", if_name[type], mediastr, unit_id); } dinfo->bdrv = bdrv_new(dinfo->id); dinfo->bdrv->open_flags = snapshot ? BDRV_O_SNAPSHOT : 0; dinfo->bdrv->read_only = ro; dinfo->devaddr = devaddr; dinfo->type = type; dinfo->bus = bus_id; dinfo->unit = unit_id; dinfo->cyls = cyls; dinfo->heads = heads; dinfo->secs = secs; dinfo->trans = translation; dinfo->opts = all_opts; dinfo->refcount = 1; if (serial != NULL) { dinfo->serial = g_strdup(serial); } QTAILQ_INSERT_TAIL(&drives, dinfo, next); bdrv_set_on_error(dinfo->bdrv, on_read_error, on_write_error); /* disk I/O throttling */ if (throttle_enabled(&cfg)) { bdrv_io_limits_enable(dinfo->bdrv); bdrv_set_io_limits(dinfo->bdrv, &cfg); } switch(type) { case IF_IDE: case IF_SCSI: case IF_XEN: case IF_NONE: dinfo->media_cd = media == MEDIA_CDROM; break; case IF_SD: case IF_FLOPPY: case IF_PFLASH: case IF_MTD: break; case IF_VIRTIO: { /* add virtio block device */ QemuOpts *devopts; devopts = qemu_opts_create_nofail(qemu_find_opts(\"device\")); if (arch_type == QEMU_ARCH_S390X) { qemu_opt_set(devopts, \"driver\", \"virtio-blk-s390\"); } else { qemu_opt_set(devopts, \"driver\", \"virtio-blk-pci\"); } qemu_opt_set(devopts, \"drive\", dinfo->id); if (devaddr) qemu_opt_set(devopts, \"addr\", devaddr); break; } default: abort(); } if (!file || !*file) { if (has_driver_specific_opts) { file = NULL; } else { return dinfo; } } if (snapshot) { /* always use cache=unsafe with snapshot */ bdrv_flags &= ~BDRV_O_CACHE_MASK; bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH); } if (copy_on_read) { bdrv_flags |= BDRV_O_COPY_ON_READ; } if (runstate_check(RUN_STATE_INMIGRATE)) { bdrv_flags |= BDRV_O_INCOMING; } if (media == MEDIA_CDROM) { /* CDROM is fine for any interface, don't check. */ ro = 1; } else if (ro == 1) { if (type != IF_SCSI && type != IF_VIRTIO && type != IF_FLOPPY && type != IF_NONE && type != IF_PFLASH) { error_report(\"read-only not supported by this bus type\"); goto err; } } bdrv_flags |= ro ? 0 : BDRV_O_RDWR; if (ro && copy_on_read) { error_report(\"warning: disabling copy_on_read on read-only drive\"); } QINCREF(bs_opts); ret = bdrv_open(dinfo->bdrv, file, bs_opts, bdrv_flags, drv, &error); if (ret < 0) { error_report(\"could not open disk image %s: %s\", file ?: dinfo->id, error_get_pretty(error)); goto err; } if (bdrv_key_required(dinfo->bdrv)) autostart = 0; QDECREF(bs_opts); qemu_opts_del(opts); return dinfo; err: qemu_opts_del(opts); QDECREF(bs_opts); bdrv_unref(dinfo->bdrv); g_free(dinfo->id); QTAILQ_REMOVE(&drives, dinfo, next); g_free(dinfo); return NULL; }"} {"target": 1, "idx": 19547, "func": "int ff_get_cpu_flags_arm(void) { int flags = CORE_CPU_FLAGS; uint32_t hwcap; if (get_hwcap(&hwcap) < 0) if (get_cpuinfo(&hwcap) < 0) return flags; #define check_cap(cap, flag) do { \\ if (hwcap & HWCAP_ ## cap) \\ flags |= AV_CPU_FLAG_ ## flag; \\ } while (0) /* No flags explicitly indicate v6 or v6T2 so check others which imply support. */ check_cap(EDSP, ARMV5TE); check_cap(TLS, ARMV6); check_cap(THUMBEE, ARMV6T2); check_cap(VFP, VFP); check_cap(VFPv3, VFPV3); check_cap(NEON, NEON); /* The v6 checks above are not reliable so let higher flags trickle down. 
*/ if (flags & (AV_CPU_FLAG_VFPV3 | AV_CPU_FLAG_NEON)) flags |= AV_CPU_FLAG_ARMV6T2; if (flags & AV_CPU_FLAG_ARMV6T2) flags |= AV_CPU_FLAG_ARMV6; return flags; }"} {"target": 0, "idx": 19549, "func": "static void avc_luma_vt_4w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height) { int32_t loop_cnt; int16_t filt_const0 = 0xfb01; int16_t filt_const1 = 0x1414; int16_t filt_const2 = 0x1fb; v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r; v16i8 src87_r, src2110, src4332, src6554, src8776; v16i8 filt0, filt1, filt2; v8i16 out10, out32; v16u8 out; filt0 = (v16i8) __msa_fill_h(filt_const0); filt1 = (v16i8) __msa_fill_h(filt_const1); filt2 = (v16i8) __msa_fill_h(filt_const2); LD_SB5(src, src_stride, src0, src1, src2, src3, src4); src += (5 * src_stride); ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, src32_r, src43_r); ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332); XORI_B2_128_SB(src2110, src4332); for (loop_cnt = (height >> 2); loop_cnt--;) { LD_SB4(src, src_stride, src5, src6, src7, src8); src += (4 * src_stride); ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, src76_r, src87_r); ILVR_D2_SB(src65_r, src54_r, src87_r, src76_r, src6554, src8776); XORI_B2_128_SB(src6554, src8776); out10 = DPADD_SH3_SH(src2110, src4332, src6554, filt0, filt1, filt2); out32 = DPADD_SH3_SH(src4332, src6554, src8776, filt0, filt1, filt2); SRARI_H2_SH(out10, out32, 5); SAT_SH2_SH(out10, out32, 7); out = PCKEV_XORI128_UB(out10, out32); ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); dst += (4 * dst_stride); src2110 = src6554; src4332 = src8776; src4 = src8; } }"} {"target": 0, "idx": 19571, "func": "static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target) { assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0); return ((uintptr_t)target >> 2) & 0x3ffffff; }"} {"target": 0, "idx": 19573, "func": "static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu) { IRQSource *src; int retval, irq; DPRINTF(\"Lower OpenPIC INT output\\n\"); qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]); irq = IRQ_get_next(opp, &dst->raised); DPRINTF(\"IACK: irq=%d\\n\", irq); if (irq == -1) { /* No more interrupt pending */ return opp->spve; } src = &opp->src[irq]; if (!(src->ivpr & IVPR_ACTIVITY_MASK) || !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) { /* - Spurious level-sensitive IRQ * - Priorities has been changed * and the pending IRQ isn't allowed anymore */ src->ivpr &= ~IVPR_ACTIVITY_MASK; retval = opp->spve; } else { /* IRQ enter servicing state */ IRQ_setbit(&dst->servicing, irq); retval = IVPR_VECTOR(opp, src->ivpr); } IRQ_resetbit(&dst->raised, irq); if (!src->level) { /* edge-sensitive IRQ */ src->ivpr &= ~IVPR_ACTIVITY_MASK; src->pending = 0; } if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) { src->idr &= ~(1 << cpu); if (src->idr && !src->level) { /* trigger on CPUs that didn't know about it yet */ openpic_set_irq(opp, irq, 1); openpic_set_irq(opp, irq, 0); /* if all CPUs knew about it, set active bit again */ src->ivpr |= IVPR_ACTIVITY_MASK; } } return retval; }"} {"target": 1, "idx": 19603, "func": "static void realview_init(MachineState *machine, enum realview_board_type board_type) { ARMCPU *cpu = NULL; CPUARMState *env; ObjectClass *cpu_oc; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram_lo = g_new(MemoryRegion, 1); MemoryRegion *ram_hi = g_new(MemoryRegion, 1); 
MemoryRegion *ram_alias = g_new(MemoryRegion, 1); MemoryRegion *ram_hack = g_new(MemoryRegion, 1); DeviceState *dev, *sysctl, *gpio2, *pl041; SysBusDevice *busdev; qemu_irq pic[64]; qemu_irq mmc_irq[2]; PCIBus *pci_bus = NULL; NICInfo *nd; I2CBus *i2c; int n; int done_nic = 0; qemu_irq cpu_irq[4]; int is_mpcore = 0; int is_pb = 0; uint32_t proc_id = 0; uint32_t sys_id; ram_addr_t low_ram_size; ram_addr_t ram_size = machine->ram_size; hwaddr periphbase = 0; switch (board_type) { case BOARD_EB: break; case BOARD_EB_MPCORE: is_mpcore = 1; periphbase = 0x10100000; break; case BOARD_PB_A8: is_pb = 1; break; case BOARD_PBX_A9: is_mpcore = 1; is_pb = 1; periphbase = 0x1f000000; break; } cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, machine->cpu_model); if (!cpu_oc) { fprintf(stderr, \"Unable to find CPU definition\\n\"); exit(1); } for (n = 0; n < smp_cpus; n++) { Object *cpuobj = object_new(object_class_get_name(cpu_oc)); Error *err = NULL; if (is_pb && is_mpcore) { object_property_set_int(cpuobj, periphbase, \"reset-cbar\", &err); if (err) { error_report(\"%s\", error_get_pretty(err)); exit(1); } } object_property_set_bool(cpuobj, true, \"realized\", &err); if (err) { error_report(\"%s\", error_get_pretty(err)); exit(1); } cpu_irq[n] = qdev_get_gpio_in(DEVICE(cpuobj), ARM_CPU_IRQ); } cpu = ARM_CPU(first_cpu); env = &cpu->env; if (arm_feature(env, ARM_FEATURE_V7)) { if (is_mpcore) { proc_id = 0x0c000000; } else { proc_id = 0x0e000000; } } else if (arm_feature(env, ARM_FEATURE_V6K)) { proc_id = 0x06000000; } else if (arm_feature(env, ARM_FEATURE_V6)) { proc_id = 0x04000000; } else { proc_id = 0x02000000; } if (is_pb && ram_size > 0x20000000) { /* Core tile RAM. */ low_ram_size = ram_size - 0x20000000; ram_size = 0x20000000; memory_region_init_ram(ram_lo, NULL, \"realview.lowmem\", low_ram_size, &error_abort); vmstate_register_ram_global(ram_lo); memory_region_add_subregion(sysmem, 0x20000000, ram_lo); } memory_region_init_ram(ram_hi, NULL, \"realview.highmem\", ram_size, &error_abort); vmstate_register_ram_global(ram_hi); low_ram_size = ram_size; if (low_ram_size > 0x10000000) low_ram_size = 0x10000000; /* SDRAM at address zero. */ memory_region_init_alias(ram_alias, NULL, \"realview.alias\", ram_hi, 0, low_ram_size); memory_region_add_subregion(sysmem, 0, ram_alias); if (is_pb) { /* And again at a high address. */ memory_region_add_subregion(sysmem, 0x70000000, ram_hi); } else { ram_size = low_ram_size; } sys_id = is_pb ? 0x01780500 : 0xc1400400; sysctl = qdev_create(NULL, \"realview_sysctl\"); qdev_prop_set_uint32(sysctl, \"sys_id\", sys_id); qdev_prop_set_uint32(sysctl, \"proc_id\", proc_id); qdev_init_nofail(sysctl); sysbus_mmio_map(SYS_BUS_DEVICE(sysctl), 0, 0x10000000); if (is_mpcore) { dev = qdev_create(NULL, is_pb ? \"a9mpcore_priv\": \"realview_mpcore\"); qdev_prop_set_uint32(dev, \"num-cpu\", smp_cpus); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, periphbase); for (n = 0; n < smp_cpus; n++) { sysbus_connect_irq(busdev, n, cpu_irq[n]); } sysbus_create_varargs(\"l2x0\", periphbase + 0x2000, NULL); /* Both A9 and 11MPCore put the GIC CPU i/f at base + 0x100 */ realview_binfo.gic_cpu_if_addr = periphbase + 0x100; } else { uint32_t gic_addr = is_pb ? 0x1e000000 : 0x10040000; /* For now just create the nIRQ GIC, and ignore the others. 
*/ dev = sysbus_create_simple(\"realview_gic\", gic_addr, cpu_irq[0]); } for (n = 0; n < 64; n++) { pic[n] = qdev_get_gpio_in(dev, n); } pl041 = qdev_create(NULL, \"pl041\"); qdev_prop_set_uint32(pl041, \"nc_fifo_depth\", 512); qdev_init_nofail(pl041); sysbus_mmio_map(SYS_BUS_DEVICE(pl041), 0, 0x10004000); sysbus_connect_irq(SYS_BUS_DEVICE(pl041), 0, pic[19]); sysbus_create_simple(\"pl050_keyboard\", 0x10006000, pic[20]); sysbus_create_simple(\"pl050_mouse\", 0x10007000, pic[21]); sysbus_create_simple(\"pl011\", 0x10009000, pic[12]); sysbus_create_simple(\"pl011\", 0x1000a000, pic[13]); sysbus_create_simple(\"pl011\", 0x1000b000, pic[14]); sysbus_create_simple(\"pl011\", 0x1000c000, pic[15]); /* DMA controller is optional, apparently. */ sysbus_create_simple(\"pl081\", 0x10030000, pic[24]); sysbus_create_simple(\"sp804\", 0x10011000, pic[4]); sysbus_create_simple(\"sp804\", 0x10012000, pic[5]); sysbus_create_simple(\"pl061\", 0x10013000, pic[6]); sysbus_create_simple(\"pl061\", 0x10014000, pic[7]); gpio2 = sysbus_create_simple(\"pl061\", 0x10015000, pic[8]); sysbus_create_simple(\"pl111\", 0x10020000, pic[23]); dev = sysbus_create_varargs(\"pl181\", 0x10005000, pic[17], pic[18], NULL); /* Wire up MMC card detect and read-only signals. These have * to go to both the PL061 GPIO and the sysctl register. * Note that the PL181 orders these lines (readonly,inserted) * and the PL061 has them the other way about. Also the card * detect line is inverted. */ mmc_irq[0] = qemu_irq_split( qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_WPROT), qdev_get_gpio_in(gpio2, 1)); mmc_irq[1] = qemu_irq_split( qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_CARDIN), qemu_irq_invert(qdev_get_gpio_in(gpio2, 0))); qdev_connect_gpio_out(dev, 0, mmc_irq[0]); qdev_connect_gpio_out(dev, 1, mmc_irq[1]); sysbus_create_simple(\"pl031\", 0x10017000, pic[10]); if (!is_pb) { dev = qdev_create(NULL, \"realview_pci\"); busdev = SYS_BUS_DEVICE(dev); qdev_init_nofail(dev); sysbus_mmio_map(busdev, 0, 0x10019000); /* PCI controller registers */ sysbus_mmio_map(busdev, 1, 0x60000000); /* PCI self-config */ sysbus_mmio_map(busdev, 2, 0x61000000); /* PCI config */ sysbus_mmio_map(busdev, 3, 0x62000000); /* PCI I/O */ sysbus_mmio_map(busdev, 4, 0x63000000); /* PCI memory window 1 */ sysbus_mmio_map(busdev, 5, 0x64000000); /* PCI memory window 2 */ sysbus_mmio_map(busdev, 6, 0x68000000); /* PCI memory window 3 */ sysbus_connect_irq(busdev, 0, pic[48]); sysbus_connect_irq(busdev, 1, pic[49]); sysbus_connect_irq(busdev, 2, pic[50]); sysbus_connect_irq(busdev, 3, pic[51]); pci_bus = (PCIBus *)qdev_get_child_bus(dev, \"pci\"); if (usb_enabled(false)) { pci_create_simple(pci_bus, -1, \"pci-ohci\"); } n = drive_get_max_bus(IF_SCSI); while (n >= 0) { pci_create_simple(pci_bus, -1, \"lsi53c895a\"); n--; } } for(n = 0; n < nb_nics; n++) { nd = &nd_table[n]; if (!done_nic && (!nd->model || strcmp(nd->model, is_pb ? \"lan9118\" : \"smc91c111\") == 0)) { if (is_pb) { lan9118_init(nd, 0x4e000000, pic[28]); } else { smc91c111_init(nd, 0x4e000000, pic[28]); } done_nic = 1; } else { if (pci_bus) { pci_nic_init_nofail(nd, pci_bus, \"rtl8139\", NULL); } } } dev = sysbus_create_simple(\"versatile_i2c\", 0x10002000, NULL); i2c = (I2CBus *)qdev_get_child_bus(dev, \"i2c\"); i2c_create_slave(i2c, \"ds1338\", 0x68); /* Memory map for RealView Emulation Baseboard: */ /* 0x10000000 System registers. */ /* 0x10001000 System controller. */ /* 0x10002000 Two-Wire Serial Bus. */ /* 0x10003000 Reserved. */ /* 0x10004000 AACI. */ /* 0x10005000 MCI. */ /* 0x10006000 KMI0. 
*/ /* 0x10007000 KMI1. */ /* 0x10008000 Character LCD. (EB) */ /* 0x10009000 UART0. */ /* 0x1000a000 UART1. */ /* 0x1000b000 UART2. */ /* 0x1000c000 UART3. */ /* 0x1000d000 SSPI. */ /* 0x1000e000 SCI. */ /* 0x1000f000 Reserved. */ /* 0x10010000 Watchdog. */ /* 0x10011000 Timer 0+1. */ /* 0x10012000 Timer 2+3. */ /* 0x10013000 GPIO 0. */ /* 0x10014000 GPIO 1. */ /* 0x10015000 GPIO 2. */ /* 0x10002000 Two-Wire Serial Bus - DVI. (PB) */ /* 0x10017000 RTC. */ /* 0x10018000 DMC. */ /* 0x10019000 PCI controller config. */ /* 0x10020000 CLCD. */ /* 0x10030000 DMA Controller. */ /* 0x10040000 GIC1. (EB) */ /* 0x10050000 GIC2. (EB) */ /* 0x10060000 GIC3. (EB) */ /* 0x10070000 GIC4. (EB) */ /* 0x10080000 SMC. */ /* 0x1e000000 GIC1. (PB) */ /* 0x1e001000 GIC2. (PB) */ /* 0x1e002000 GIC3. (PB) */ /* 0x1e003000 GIC4. (PB) */ /* 0x40000000 NOR flash. */ /* 0x44000000 DoC flash. */ /* 0x48000000 SRAM. */ /* 0x4c000000 Configuration flash. */ /* 0x4e000000 Ethernet. */ /* 0x4f000000 USB. */ /* 0x50000000 PISMO. */ /* 0x54000000 PISMO. */ /* 0x58000000 PISMO. */ /* 0x5c000000 PISMO. */ /* 0x60000000 PCI. */ /* 0x60000000 PCI Self Config. */ /* 0x61000000 PCI Config. */ /* 0x62000000 PCI IO. */ /* 0x63000000 PCI mem 0. */ /* 0x64000000 PCI mem 1. */ /* 0x68000000 PCI mem 2. */ /* ??? Hack to map an additional page of ram for the secondary CPU startup code. I guess this works on real hardware because the BootROM happens to be in ROM/flash or in memory that isn't clobbered until after Linux boots the secondary CPUs. */ memory_region_init_ram(ram_hack, NULL, \"realview.hack\", 0x1000, &error_abort); vmstate_register_ram_global(ram_hack); memory_region_add_subregion(sysmem, SMP_BOOT_ADDR, ram_hack); realview_binfo.ram_size = ram_size; realview_binfo.kernel_filename = machine->kernel_filename; realview_binfo.kernel_cmdline = machine->kernel_cmdline; realview_binfo.initrd_filename = machine->initrd_filename; realview_binfo.nb_cpus = smp_cpus; realview_binfo.board_id = realview_board_id[board_type]; realview_binfo.loader_start = (board_type == BOARD_PB_A8 ? 
0x70000000 : 0); arm_load_kernel(ARM_CPU(first_cpu), &realview_binfo); }"} {"target": 1, "idx": 19615, "func": "static void gen_tlbie(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else if (unlikely(ctx->pr || !ctx->hv)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } if (NARROW_MODE(ctx)) { TCGv t0 = tcg_temp_new(); tcg_gen_ext32u_tl(t0, cpu_gpr[rB(ctx->opcode)]); gen_helper_tlbie(cpu_env, t0); tcg_temp_free(t0); } else { gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); } #endif }"} {"target": 1, "idx": 19620, "func": "static int flashsv_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { int buf_size = avpkt->size; FlashSVContext *s = avctx->priv_data; int h_blocks, v_blocks, h_part, v_part, i, j; GetBitContext gb; /* no supplementary picture */ if (buf_size == 0) return 0; if (buf_size < 4) return -1; init_get_bits(&gb, avpkt->data, buf_size * 8); /* start to parse the bitstream */ s->block_width = 16 * (get_bits(&gb, 4) + 1); s->image_width = get_bits(&gb, 12); s->block_height = 16 * (get_bits(&gb, 4) + 1); s->image_height = get_bits(&gb, 12); if (s->ver == 2) { skip_bits(&gb, 6); if (get_bits1(&gb)) { av_log_missing_feature(avctx, \"iframe\", 1); return AVERROR_PATCHWELCOME; } if (get_bits1(&gb)) { av_log_missing_feature(avctx, \"Custom palette\", 1); return AVERROR_PATCHWELCOME; } } /* calculate number of blocks and size of border (partial) blocks */ h_blocks = s->image_width / s->block_width; h_part = s->image_width % s->block_width; v_blocks = s->image_height / s->block_height; v_part = s->image_height % s->block_height; /* the block size could change between frames, make sure the buffer * is large enough, if not, get a larger one */ if (s->block_size < s->block_width * s->block_height) { int tmpblock_size = 3 * s->block_width * s->block_height; s->tmpblock = av_realloc(s->tmpblock, tmpblock_size); if (!s->tmpblock) { av_log(avctx, AV_LOG_ERROR, \"Can't allocate decompression buffer.\\n\"); return AVERROR(ENOMEM); } if (s->ver == 2) { s->deflate_block_size = calc_deflate_block_size(tmpblock_size); if (s->deflate_block_size <= 0) { av_log(avctx, AV_LOG_ERROR, \"Can't determine deflate buffer size.\\n\"); return -1; } s->deflate_block = av_realloc(s->deflate_block, s->deflate_block_size); if (!s->deflate_block) { av_log(avctx, AV_LOG_ERROR, \"Can't allocate deflate buffer.\\n\"); return AVERROR(ENOMEM); } } } s->block_size = s->block_width * s->block_height; /* initialize the image size once */ if (avctx->width == 0 && avctx->height == 0) { avcodec_set_dimensions(avctx, s->image_width, s->image_height); } /* check for changes of image width and image height */ if (avctx->width != s->image_width || avctx->height != s->image_height) { av_log(avctx, AV_LOG_ERROR, \"Frame width or height differs from first frame!\\n\"); av_log(avctx, AV_LOG_ERROR, \"fh = %d, fv %d vs ch = %d, cv = %d\\n\", avctx->height, avctx->width, s->image_height, s->image_width); return AVERROR_INVALIDDATA; } /* we care for keyframes only in Screen Video v2 */ s->is_keyframe = (avpkt->flags & AV_PKT_FLAG_KEY) && (s->ver == 2); if (s->is_keyframe) { s->keyframedata = av_realloc(s->keyframedata, avpkt->size); memcpy(s->keyframedata, avpkt->data, avpkt->size); } if(s->ver == 2) s->blocks = av_realloc(s->blocks, (v_blocks + !!v_part) * (h_blocks + !!h_part) * sizeof(s->blocks[0])); av_dlog(avctx, \"image: %dx%d block: %dx%d num: %dx%d part: %dx%d\\n\", s->image_width, s->image_height, s->block_width, s->block_height, 
h_blocks, v_blocks, h_part, v_part); s->frame.reference = 3; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame) < 0) { av_log(avctx, AV_LOG_ERROR, \"reget_buffer() failed\\n\"); return -1; } /* loop over all block columns */ for (j = 0; j < v_blocks + (v_part ? 1 : 0); j++) { int y_pos = j * s->block_height; // vertical position in frame int cur_blk_height = (j < v_blocks) ? s->block_height : v_part; /* loop over all block rows */ for (i = 0; i < h_blocks + (h_part ? 1 : 0); i++) { int x_pos = i * s->block_width; // horizontal position in frame int cur_blk_width = (i < h_blocks) ? s->block_width : h_part; int has_diff = 0; /* get the size of the compressed zlib chunk */ int size = get_bits(&gb, 16); s->color_depth = 0; s->zlibprime_curr = 0; s->zlibprime_prev = 0; s->diff_start = 0; s->diff_height = cur_blk_height; if (8 * size > get_bits_left(&gb)) { avctx->release_buffer(avctx, &s->frame); s->frame.data[0] = NULL; return AVERROR_INVALIDDATA; } if (s->ver == 2 && size) { skip_bits(&gb, 3); s->color_depth = get_bits(&gb, 2); has_diff = get_bits1(&gb); s->zlibprime_curr = get_bits1(&gb); s->zlibprime_prev = get_bits1(&gb); if (s->color_depth != 0 && s->color_depth != 2) { av_log(avctx, AV_LOG_ERROR, \"%dx%d invalid color depth %d\\n\", i, j, s->color_depth); return AVERROR_INVALIDDATA; } if (has_diff) { if (!s->keyframe) { av_log(avctx, AV_LOG_ERROR, \"inter frame without keyframe\\n\"); return AVERROR_INVALIDDATA; } s->diff_start = get_bits(&gb, 8); s->diff_height = get_bits(&gb, 8); av_log(avctx, AV_LOG_DEBUG, \"%dx%d diff start %d height %d\\n\", i, j, s->diff_start, s->diff_height); size -= 2; } if (s->zlibprime_prev) av_log(avctx, AV_LOG_DEBUG, \"%dx%d zlibprime_prev\\n\", i, j); if (s->zlibprime_curr) { int col = get_bits(&gb, 8); int row = get_bits(&gb, 8); av_log(avctx, AV_LOG_DEBUG, \"%dx%d zlibprime_curr %dx%d\\n\", i, j, col, row); size -= 2; av_log_missing_feature(avctx, \"zlibprime_curr\", 1); return AVERROR_PATCHWELCOME; } if (!s->blocks && (s->zlibprime_curr || s->zlibprime_prev)) { av_log(avctx, AV_LOG_ERROR, \"no data available for zlib \" \"priming\\n\"); return AVERROR_INVALIDDATA; } size--; // account for flags byte } if (has_diff) { int k; int off = (s->image_height - y_pos - 1) * s->frame.linesize[0]; for (k = 0; k < cur_blk_height; k++) memcpy(s->frame.data[0] + off - k*s->frame.linesize[0] + x_pos*3, s->keyframe + off - k*s->frame.linesize[0] + x_pos*3, cur_blk_width * 3); } /* skip unchanged blocks, which have size 0 */ if (size) { if (flashsv_decode_block(avctx, avpkt, &gb, size, cur_blk_width, cur_blk_height, x_pos, y_pos, i + j * (h_blocks + !!h_part))) av_log(avctx, AV_LOG_ERROR, \"error in decompression of block %dx%d\\n\", i, j); } } } if (s->is_keyframe && s->ver == 2) { if (!s->keyframe) { s->keyframe = av_malloc(s->frame.linesize[0] * avctx->height); if (!s->keyframe) { av_log(avctx, AV_LOG_ERROR, \"Cannot allocate image data\\n\"); return AVERROR(ENOMEM); } } memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height); } *got_frame = 1; *(AVFrame*)data = s->frame; if ((get_bits_count(&gb) / 8) != buf_size) av_log(avctx, AV_LOG_ERROR, \"buffer not fully consumed (%d != %d)\\n\", buf_size, (get_bits_count(&gb) / 8)); /* report that the buffer was completely consumed */ return buf_size; }"} {"target": 0, "idx": 19625, "func": "static void decode_subband(DiracContext *s, GetBitContext *gb, int quant, int slice_x, int slice_y, int bits_end, SubBand *b1, 
SubBand *b2) { int left = b1->width * slice_x / s->num_x; int right = b1->width *(slice_x+1) / s->num_x; int top = b1->height * slice_y / s->num_y; int bottom = b1->height *(slice_y+1) / s->num_y; int qfactor, qoffset; uint8_t *buf1 = b1->ibuf + top * b1->stride; uint8_t *buf2 = b2 ? b2->ibuf + top * b2->stride: NULL; int x, y; if (quant > 115) { av_log(s->avctx, AV_LOG_ERROR, \"Unsupported quant %d\\n\", quant); return; } qfactor = ff_dirac_qscale_tab[quant & 0x7f]; qoffset = ff_dirac_qoffset_intra_tab[quant & 0x7f] + 2; /* we have to constantly check for overread since the spec explicitly requires this, with the meaning that all remaining coeffs are set to 0 */ if (get_bits_count(gb) >= bits_end) return; if (s->pshift) { for (y = top; y < bottom; y++) { for (x = left; x < right; x++) { PARSE_VALUES(int32_t, x, gb, bits_end, buf1, buf2); } buf1 += b1->stride; if (buf2) buf2 += b2->stride; } } else { for (y = top; y < bottom; y++) { for (x = left; x < right; x++) { PARSE_VALUES(int16_t, x, gb, bits_end, buf1, buf2); } buf1 += b1->stride; if (buf2) buf2 += b2->stride; } } }"} {"target": 1, "idx": 19648, "func": "void qga_vss_fsfreeze(int *nr_volume, Error **errp, bool freeze) { const char *func_name = freeze ? \"requester_freeze\" : \"requester_thaw\"; QGAVSSRequesterFunc func; ErrorSet errset = { .error_setg_win32 = error_setg_win32, .errp = errp, }; g_assert(errp); /* requester.cpp requires it */ func = (QGAVSSRequesterFunc)GetProcAddress(provider_lib, func_name); if (!func) { error_setg_win32(errp, GetLastError(), \"failed to load %s from %s\", func_name, QGA_VSS_DLL); return; } func(nr_volume, &errset); }"} {"target": 1, "idx": 19659, "func": "static void do_change_block(const char *device, const char *filename) { BlockDriverState *bs; bs = bdrv_find(device); if (!bs) { term_printf(\"device not found\\n\"); return; } if (eject_device(bs, 0) < 0) return; bdrv_open(bs, filename, 0); qemu_key_check(bs, filename); }"} {"target": 1, "idx": 19662, "func": "static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int log2w, int log2h, int stride) { const int index = size2index[log2h][log2w]; const int h = 1 << log2h; int code = get_vlc2(&f->gb, block_type_vlc[1 - (f->version > 1)][index].table, BLOCK_TYPE_VLC_BITS, 1); uint16_t *start = (uint16_t *)f->last_picture->data[0]; uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w); int ret; if (code < 0 || code > 6 || log2w < 0) return AVERROR_INVALIDDATA; if (code == 0) { src += f->mv[bytestream2_get_byte(&f->g)]; if (start > src || src > end) { av_log(f->avctx, AV_LOG_ERROR, \"mv out of pic\\n\"); return AVERROR_INVALIDDATA; } mcdc(dst, src, log2w, h, stride, 1, 0); } else if (code == 1) { log2h--; if ((ret = decode_p_block(f, dst, src, log2w, log2h, stride)) < 0) return ret; if ((ret = decode_p_block(f, dst + (stride << log2h), src + (stride << log2h), log2w, log2h, stride)) < 0) return ret; } else if (code == 2) { log2w--; if ((ret = decode_p_block(f, dst , src, log2w, log2h, stride)) < 0) return ret; if ((ret = decode_p_block(f, dst + (1 << log2w), src + (1 << log2w), log2w, log2h, stride)) < 0) return ret; } else if (code == 3 && f->version < 2) { if (start > src || src > end) { av_log(f->avctx, AV_LOG_ERROR, \"mv out of pic\\n\"); return AVERROR_INVALIDDATA; } mcdc(dst, src, log2w, h, stride, 1, 0); } else if (code == 4) { src += f->mv[bytestream2_get_byte(&f->g)]; if (start > src || src > end) { av_log(f->avctx, AV_LOG_ERROR, \"mv out of pic\\n\"); return AVERROR_INVALIDDATA; } mcdc(dst, src, log2w, h, 
stride, 1, bytestream2_get_le16(&f->g2)); } else if (code == 5) { if (start > src || src > end) { av_log(f->avctx, AV_LOG_ERROR, \"mv out of pic\\n\"); return AVERROR_INVALIDDATA; } mcdc(dst, src, log2w, h, stride, 0, bytestream2_get_le16(&f->g2)); } else if (code == 6) { if (log2w) { dst[0] = bytestream2_get_le16(&f->g2); dst[1] = bytestream2_get_le16(&f->g2); } else { dst[0] = bytestream2_get_le16(&f->g2); dst[stride] = bytestream2_get_le16(&f->g2); } } return 0; }"} {"target": 1, "idx": 19666, "func": "static NFSServer *nfs_config(QDict *options, Error **errp) { NFSServer *server = NULL; QDict *addr = NULL; QObject *crumpled_addr = NULL; Visitor *iv = NULL; Error *local_error = NULL; qdict_extract_subqdict(options, &addr, \"server.\"); if (!qdict_size(addr)) { error_setg(errp, \"NFS server address missing\"); goto out; } crumpled_addr = qdict_crumple(addr, errp); if (!crumpled_addr) { goto out; } iv = qobject_input_visitor_new(crumpled_addr); visit_type_NFSServer(iv, NULL, &server, &local_error); if (local_error) { error_propagate(errp, local_error); goto out; } out: QDECREF(addr); qobject_decref(crumpled_addr); visit_free(iv); return server; }"} {"target": 1, "idx": 19674, "func": "static void gen_rfmci(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } /* Restore CPU state */ gen_helper_rfmci(cpu_env); gen_sync_exception(ctx); #endif }"} {"target": 1, "idx": 19682, "func": "static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void *dst, const int type) { int i, j, count = 0; int last, t; int A, S, T; int pos = s->pos; uint32_t crc = s->sc.crc; uint32_t crc_extra_bits = s->extra_sc.crc; int16_t *dst16 = dst; int32_t *dst32 = dst; float *dstfl = dst; const int channel_stride = s->avctx->channels; if(s->samples_left == s->samples) s->one = s->zero = s->zeroes = 0; do{ T = wv_get_value(s, gb, 0, &last); S = 0; if(last) break; for(i = 0; i < s->terms; i++){ t = s->decorr[i].value; if(t > 8){ if(t & 1) A = 2 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]; else A = (3 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1; s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0]; j = 0; }else{ A = s->decorr[i].samplesA[pos]; j = (pos + t) & 7; } if(type != AV_SAMPLE_FMT_S16) S = T + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10); else S = T + ((s->decorr[i].weightA * A + 512) >> 10); if(A && T) s->decorr[i].weightA -= ((((T ^ A) >> 30) & 2) - 1) * s->decorr[i].delta; s->decorr[i].samplesA[j] = T = S; } pos = (pos + 1) & 7; crc = crc * 3 + S; if(type == AV_SAMPLE_FMT_FLT){ *dstfl = wv_get_value_float(s, &crc_extra_bits, S); dstfl += channel_stride; }else if(type == AV_SAMPLE_FMT_S32){ *dst32 = wv_get_value_integer(s, &crc_extra_bits, S); dst32 += channel_stride; }else{ *dst16 = wv_get_value_integer(s, &crc_extra_bits, S); dst16 += channel_stride; } count++; }while(!last && count < s->max_samples); s->samples_left -= count; if(!s->samples_left){ wv_reset_saved_context(s); if(crc != s->CRC){ av_log(s->avctx, AV_LOG_ERROR, \"CRC error\\n\"); return -1; } if(s->got_extra_bits && crc_extra_bits != s->crc_extra_bits){ av_log(s->avctx, AV_LOG_ERROR, \"Extra bits CRC error\\n\"); return -1; } }else{ s->pos = pos; s->sc.crc = crc; s->sc.bits_used = get_bits_count(&s->gb); if(s->got_extra_bits){ s->extra_sc.crc = crc_extra_bits; s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits); } } return count; }"} {"target": 1, "idx": 
19689, "func": "void memory_region_transaction_commit(void) { AddressSpace *as; assert(memory_region_transaction_depth); --memory_region_transaction_depth; if (!memory_region_transaction_depth) { if (memory_region_update_pending) { MEMORY_LISTENER_CALL_GLOBAL(begin, Forward); QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { address_space_update_topology(as); } MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); } else if (ioeventfd_update_pending) { QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { address_space_update_ioeventfds(as); } } memory_region_clear_pending(); } }"} {"target": 1, "idx": 19707, "func": "static uint64_t block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size) { /* Estimate pending number of bytes to send */ uint64_t pending; qemu_mutex_lock_iothread(); blk_mig_lock(); pending = get_remaining_dirty() + block_mig_state.submitted * BLOCK_SIZE + block_mig_state.read_done * BLOCK_SIZE; /* Report at least one block pending during bulk phase */ if (pending == 0 && !block_mig_state.bulk_completed) { pending = BLOCK_SIZE; } blk_mig_unlock(); qemu_mutex_unlock_iothread(); DPRINTF(\"Enter save live pending %\" PRIu64 \"\\n\", pending); return pending; }"} {"target": 1, "idx": 19712, "func": "PPC_OP(cmpli) { if (T0 < PARAM(1)) { T0 = 0x08; } else if (T0 > PARAM(1)) { T0 = 0x04; } else { T0 = 0x02; } RETURN(); }"} {"target": 0, "idx": 19725, "func": "static void FUNC(hevc_loop_filter_luma)(uint8_t *_pix, ptrdiff_t _xstride, ptrdiff_t _ystride, int *_beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q) { int d, j; pixel *pix = (pixel *)_pix; ptrdiff_t xstride = _xstride / sizeof(pixel); ptrdiff_t ystride = _ystride / sizeof(pixel); for (j = 0; j < 2; j++) { const int dp0 = abs(P2 - 2 * P1 + P0); const int dq0 = abs(Q2 - 2 * Q1 + Q0); const int dp3 = abs(TP2 - 2 * TP1 + TP0); const int dq3 = abs(TQ2 - 2 * TQ1 + TQ0); const int d0 = dp0 + dq0; const int d3 = dp3 + dq3; const int beta = _beta[j] << (BIT_DEPTH - 8); const int tc = _tc[j] << (BIT_DEPTH - 8); const int no_p = _no_p[j]; const int no_q = _no_q[j]; if (d0 + d3 >= beta) { pix += 4 * ystride; continue; } else { const int beta_3 = beta >> 3; const int beta_2 = beta >> 2; const int tc25 = ((tc * 5 + 1) >> 1); if (abs(P3 - P0) + abs(Q3 - Q0) < beta_3 && abs(P0 - Q0) < tc25 && abs(TP3 - TP0) + abs(TQ3 - TQ0) < beta_3 && abs(TP0 - TQ0) < tc25 && (d0 << 1) < beta_2 && (d3 << 1) < beta_2) { // strong filtering const int tc2 = tc << 1; for (d = 0; d < 4; d++) { const int p3 = P3; const int p2 = P2; const int p1 = P1; const int p0 = P0; const int q0 = Q0; const int q1 = Q1; const int q2 = Q2; const int q3 = Q3; if (!no_p) { P0 = p0 + av_clip(((p2 + 2 * p1 + 2 * p0 + 2 * q0 + q1 + 4) >> 3) - p0, -tc2, tc2); P1 = p1 + av_clip(((p2 + p1 + p0 + q0 + 2) >> 2) - p1, -tc2, tc2); P2 = p2 + av_clip(((2 * p3 + 3 * p2 + p1 + p0 + q0 + 4) >> 3) - p2, -tc2, tc2); } if (!no_q) { Q0 = q0 + av_clip(((p1 + 2 * p0 + 2 * q0 + 2 * q1 + q2 + 4) >> 3) - q0, -tc2, tc2); Q1 = q1 + av_clip(((p0 + q0 + q1 + q2 + 2) >> 2) - q1, -tc2, tc2); Q2 = q2 + av_clip(((2 * q3 + 3 * q2 + q1 + q0 + p0 + 4) >> 3) - q2, -tc2, tc2); } pix += ystride; } } else { // normal filtering int nd_p = 1; int nd_q = 1; const int tc_2 = tc >> 1; if (dp0 + dp3 < ((beta + (beta >> 1)) >> 3)) nd_p = 2; if (dq0 + dq3 < ((beta + (beta >> 1)) >> 3)) nd_q = 2; for (d = 0; d < 4; d++) { const int p2 = P2; const int p1 = P1; const int p0 = P0; const int q0 = Q0; const int q1 = Q1; const int q2 = Q2; int delta0 = (9 * (q0 - p0) - 3 * (q1 - p1) + 8) >> 4; if (abs(delta0) < 10 * 
tc) { delta0 = av_clip(delta0, -tc, tc); if (!no_p) P0 = av_clip_pixel(p0 + delta0); if (!no_q) Q0 = av_clip_pixel(q0 - delta0); if (!no_p && nd_p > 1) { const int deltap1 = av_clip((((p2 + p0 + 1) >> 1) - p1 + delta0) >> 1, -tc_2, tc_2); P1 = av_clip_pixel(p1 + deltap1); } if (!no_q && nd_q > 1) { const int deltaq1 = av_clip((((q2 + q0 + 1) >> 1) - q1 - delta0) >> 1, -tc_2, tc_2); Q1 = av_clip_pixel(q1 + deltaq1); } } pix += ystride; } } } } }"} {"target": 1, "idx": 19734, "func": "bool qdict_get_try_bool(const QDict *qdict, const char *key, bool def_value) { QObject *obj; obj = qdict_get(qdict, key); if (!obj || qobject_type(obj) != QTYPE_QBOOL) return def_value; return qbool_get_bool(qobject_to_qbool(obj)); }"} {"target": 1, "idx": 19748, "func": "static void qemu_rdma_dump_id(const char *who, struct ibv_context *verbs) { struct ibv_port_attr port; if (ibv_query_port(verbs, 1, &port)) { fprintf(stderr, \"FAILED TO QUERY PORT INFORMATION!\\n\"); return; } printf(\"%s RDMA Device opened: kernel name %s \" \"uverbs device name %s, \" \"infiniband_verbs class device path %s, \" \"infiniband class device path %s, \" \"transport: (%d) %s\\n\", who, verbs->device->name, verbs->device->dev_name, verbs->device->dev_path, verbs->device->ibdev_path, port.link_layer, (port.link_layer == IBV_LINK_LAYER_INFINIBAND) ? \"Infiniband\" : ((port.link_layer == IBV_LINK_LAYER_ETHERNET) ? \"Ethernet\" : \"Unknown\")); }"} {"target": 1, "idx": 19751, "func": "static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size) { const uint8_t *s = src; const uint8_t *end; #ifdef HAVE_MMX const uint8_t *mm_end; #endif uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX __asm __volatile(PREFETCH\" %0\"::\"m\"(*src):\"memory\"); __asm __volatile( \"movq %0, %%mm7\\n\\t\" \"movq %1, %%mm6\\n\\t\" ::\"m\"(red_16mask),\"m\"(green_16mask)); mm_end = end - 11; while(s < mm_end) { __asm __volatile( PREFETCH\" 32%1\\n\\t\" \"movd %1, %%mm0\\n\\t\" \"movd 3%1, %%mm3\\n\\t\" \"punpckldq 6%1, %%mm0\\n\\t\" \"punpckldq 9%1, %%mm3\\n\\t\" \"movq %%mm0, %%mm1\\n\\t\" \"movq %%mm0, %%mm2\\n\\t\" \"movq %%mm3, %%mm4\\n\\t\" \"movq %%mm3, %%mm5\\n\\t\" \"psrlq $3, %%mm0\\n\\t\" \"psrlq $3, %%mm3\\n\\t\" \"pand %2, %%mm0\\n\\t\" \"pand %2, %%mm3\\n\\t\" \"psrlq $5, %%mm1\\n\\t\" \"psrlq $5, %%mm4\\n\\t\" \"pand %%mm6, %%mm1\\n\\t\" \"pand %%mm6, %%mm4\\n\\t\" \"psrlq $8, %%mm2\\n\\t\" \"psrlq $8, %%mm5\\n\\t\" \"pand %%mm7, %%mm2\\n\\t\" \"pand %%mm7, %%mm5\\n\\t\" \"por %%mm1, %%mm0\\n\\t\" \"por %%mm4, %%mm3\\n\\t\" \"por %%mm2, %%mm0\\n\\t\" \"por %%mm5, %%mm3\\n\\t\" \"psllq $16, %%mm3\\n\\t\" \"por %%mm3, %%mm0\\n\\t\" MOVNTQ\" %%mm0, %0\\n\\t\" :\"=m\"(*d):\"m\"(*s),\"m\"(blue_16mask):\"memory\"); d += 4; s += 12; } __asm __volatile(SFENCE:::\"memory\"); __asm __volatile(EMMS:::\"memory\"); #endif while(s < end) { const int b= *s++; const int g= *s++; const int r= *s++; *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); } }"} {"target": 0, "idx": 19752, "func": "static int mp3_read_probe(AVProbeData *p) { int max_frames, first_frames = 0; int fsize, frames, sample_rate; uint32_t header; uint8_t *buf, *buf2, *end; AVCodecContext avctx; if(id3v2_match(p->buf)) return AVPROBE_SCORE_MAX/2+1; // this must be less than mpeg-ps because some retards put id3v2 tags before mpeg-ps files max_frames = 0; buf = p->buf; end = buf + p->buf_size - sizeof(uint32_t); for(; buf < end; buf= buf2+1) { buf2 = buf; for(frames = 0; buf2 < end; frames++) { header = AV_RB32(buf2); fsize = ff_mpa_decode_header(&avctx, 
header, &sample_rate); if(fsize < 0) break; buf2 += fsize; } max_frames = FFMAX(max_frames, frames); if(buf == p->buf) first_frames= frames; } if (first_frames>=3) return AVPROBE_SCORE_MAX/2+1; else if(max_frames>500)return AVPROBE_SCORE_MAX/2; else if(max_frames>=3) return AVPROBE_SCORE_MAX/4; else if(max_frames>=1) return 1; else return 0; }"} {"target": 0, "idx": 19758, "func": "static int ljpeg_encode_yuv(AVCodecContext *avctx, PutBitContext *pb, const AVFrame *frame) { const int predictor = avctx->prediction_method + 1; LJpegEncContext *s = avctx->priv_data; const int mb_width = (avctx->width + s->hsample[0] - 1) / s->hsample[0]; const int mb_height = (avctx->height + s->vsample[0] - 1) / s->vsample[0]; int mb_x, mb_y; for (mb_y = 0; mb_y < mb_height; mb_y++) { if (pb->buf_end - pb->buf - (put_bits_count(pb) >> 3) < mb_width * 4 * 3 * s->hsample[0] * s->vsample[0]) { av_log(avctx, AV_LOG_ERROR, \"encoded frame too large\\n\"); return -1; } for (mb_x = 0; mb_x < mb_width; mb_x++) ljpeg_encode_yuv_mb(s, pb, frame, predictor, mb_x, mb_y); } return 0; }"} {"target": 1, "idx": 19766, "func": "static CharDriverState *qmp_chardev_open_serial(ChardevHostdev *serial, Error **errp) { #ifdef HAVE_CHARDEV_TTY int fd; fd = qmp_chardev_open_file_source(serial->device, O_RDWR, errp); if (error_is_set(errp)) { return NULL; } qemu_set_nonblock(fd); return qemu_chr_open_tty_fd(fd); #else error_setg(errp, \"character device backend type 'serial' not supported\"); return NULL; #endif }"} {"target": 1, "idx": 19785, "func": "static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) { TCGv sr_ov = tcg_temp_new(); TCGv t0 = tcg_temp_new(); tcg_gen_muls2_tl(dest, sr_ov, srca, srcb); tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1); tcg_gen_setcond_tl(TCG_COND_NE, sr_ov, sr_ov, t0); tcg_temp_free(t0); tcg_gen_deposit_tl(cpu_sr, cpu_sr, sr_ov, ctz32(SR_OV), 1); gen_ove_ov(dc, sr_ov); tcg_temp_free(sr_ov); }"} {"target": 1, "idx": 19788, "func": "static av_cold int ra144_decode_init(AVCodecContext * avctx) { RA144Context *ractx = avctx->priv_data; ractx->avctx = avctx; ractx->lpc_coef[0] = ractx->lpc_tables[0]; ractx->lpc_coef[1] = ractx->lpc_tables[1]; avctx->channels = 1; avctx->channel_layout = AV_CH_LAYOUT_MONO; avctx->sample_fmt = AV_SAMPLE_FMT_S16; return 0; }"} {"target": 1, "idx": 19791, "func": "static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPRMachineState *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { target_ulong id, start, r3; PowerPCCPU *cpu; if (nargs != 3 || nret != 1) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } id = rtas_ld(args, 0); start = rtas_ld(args, 1); r3 = rtas_ld(args, 2); cpu = spapr_find_cpu(id); if (cpu != NULL) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); if (!cs->halted) { rtas_st(rets, 0, RTAS_OUT_HW_ERROR); return; } /* This will make sure qemu state is up to date with kvm, and * mark it dirty so our changes get flushed back before the * new cpu enters */ kvm_cpu_synchronize_state(cs); env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME); env->nip = start; env->gpr[3] = r3; cs->halted = 0; spapr_cpu_set_endianness(cpu); spapr_cpu_update_tb_offset(cpu); qemu_cpu_kick(cs); rtas_st(rets, 0, RTAS_OUT_SUCCESS); return; } /* Didn't find a matching cpu */ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); }"} {"target": 1, "idx": 19823, "func": "void ide_flush_cache(IDEState *s) { if (s->bs == NULL) { ide_flush_cb(s, 0); return; } bdrv_acct_start(s->bs, &s->acct, 0, 
BDRV_ACCT_FLUSH); bdrv_aio_flush(s->bs, ide_flush_cb, s); }"} {"target": 1, "idx": 19825, "func": "static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler, DeviceState *plugged_dev, Error **errp) { sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler)); PCIDevice *pdev = PCI_DEVICE(plugged_dev); sPAPRDRConnectorClass *drck; sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev); Error *local_err = NULL; if (!phb->dr_enabled) { error_setg(errp, QERR_BUS_NO_HOTPLUG, object_get_typename(OBJECT(phb))); return; } g_assert(drc); drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); if (!drck->release_pending(drc)) { spapr_phb_remove_pci_device(drc, phb, pdev, &local_err); if (local_err) { error_propagate(errp, local_err); return; } spapr_hotplug_req_remove_by_index(drc); } }"} {"target": 1, "idx": 19833, "func": "int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) { if (bs->drv->bdrv_check == NULL) { return -ENOTSUP; memset(res, 0, sizeof(*res)); return bs->drv->bdrv_check(bs, res, fix);"} {"target": 1, "idx": 19835, "func": "static void map_linear_vram(CirrusVGAState *s) { if (!s->map_addr && s->lfb_addr && s->lfb_end) { s->map_addr = s->lfb_addr; s->map_end = s->lfb_end; cpu_register_physical_memory(s->map_addr, s->map_end - s->map_addr, s->vram_offset); } if (!s->map_addr) return; s->lfb_vram_mapped = 0; if (!(s->cirrus_srcptr != s->cirrus_srcptr_end) && !((s->sr[0x07] & 0x01) == 0) && !((s->gr[0x0B] & 0x14) == 0x14) && !(s->gr[0x0B] & 0x02)) { (s->vram_offset + s->cirrus_bank_base[0]) | IO_MEM_RAM); (s->vram_offset + s->cirrus_bank_base[1]) | IO_MEM_RAM); s->lfb_vram_mapped = 1; } else { cpu_register_physical_memory(isa_mem_base + 0xa0000, 0x20000, s->vga_io_memory); } vga_dirty_log_start((VGAState *)s); }"} {"target": 1, "idx": 19845, "func": "static int alloc_tables(H264Context *h){ MpegEncContext * const s = &h->s; const int big_mb_num= s->mb_stride * (s->mb_height+1); int x,y; CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8 * sizeof(uint8_t)) CHECKED_ALLOCZ(h->non_zero_count , big_mb_num * 16 * sizeof(uint8_t)) CHECKED_ALLOCZ(h->slice_table_base , (big_mb_num+s->mb_stride) * sizeof(uint8_t)) CHECKED_ALLOCZ(h->cbp_table, big_mb_num * sizeof(uint16_t)) if( h->pps.cabac ) { CHECKED_ALLOCZ(h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t)) CHECKED_ALLOCZ(h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t)); CHECKED_ALLOCZ(h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t)); CHECKED_ALLOCZ(h->direct_table, 32*big_mb_num * sizeof(uint8_t)); } memset(h->slice_table_base, -1, (big_mb_num+s->mb_stride) * sizeof(uint8_t)); h->slice_table= h->slice_table_base + s->mb_stride*2 + 1; CHECKED_ALLOCZ(h->mb2b_xy , big_mb_num * sizeof(uint32_t)); CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint32_t)); for(y=0; ymb_height; y++){ for(x=0; xmb_width; x++){ const int mb_xy= x + y*s->mb_stride; const int b_xy = 4*x + 4*y*h->b_stride; const int b8_xy= 2*x + 2*y*h->b8_stride; h->mb2b_xy [mb_xy]= b_xy; h->mb2b8_xy[mb_xy]= b8_xy; } } s->obmc_scratchpad = NULL; if(!h->dequant4_coeff[0]) init_dequant_tables(h); return 0; fail: free_tables(h); return -1; }"} {"target": 1, "idx": 19864, "func": "static void RENAME(swScale)(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int srcSliceY, int srcSliceH, uint8_t* dstParam[], int dstStrideParam[]){ /* load a few things into local vars to make the code more readable? 
and faster */ const int srcW= c->srcW; const int dstW= c->dstW; const int dstH= c->dstH; const int chrDstW= c->chrDstW; const int lumXInc= c->lumXInc; const int chrXInc= c->chrXInc; const int dstFormat= c->dstFormat; const int flags= c->flags; const int canMMX2BeUsed= c->canMMX2BeUsed; int16_t *vLumFilterPos= c->vLumFilterPos; int16_t *vChrFilterPos= c->vChrFilterPos; int16_t *hLumFilterPos= c->hLumFilterPos; int16_t *hChrFilterPos= c->hChrFilterPos; int16_t *vLumFilter= c->vLumFilter; int16_t *vChrFilter= c->vChrFilter; int16_t *hLumFilter= c->hLumFilter; int16_t *hChrFilter= c->hChrFilter; int16_t *lumMmxFilter= c->lumMmxFilter; int16_t *chrMmxFilter= c->chrMmxFilter; const int vLumFilterSize= c->vLumFilterSize; const int vChrFilterSize= c->vChrFilterSize; const int hLumFilterSize= c->hLumFilterSize; const int hChrFilterSize= c->hChrFilterSize; int16_t **lumPixBuf= c->lumPixBuf; int16_t **chrPixBuf= c->chrPixBuf; const int vLumBufSize= c->vLumBufSize; const int vChrBufSize= c->vChrBufSize; uint8_t *funnyYCode= c->funnyYCode; uint8_t *funnyUVCode= c->funnyUVCode; uint8_t *formatConvBuffer= c->formatConvBuffer; /* vars whch will change and which we need to storw back in the context */ int dstY= c->dstY; int lumBufIndex= c->lumBufIndex; int chrBufIndex= c->chrBufIndex; int lastInLumBuf= c->lastInLumBuf; int lastInChrBuf= c->lastInChrBuf; int srcStride[3]; int dstStride[3]; uint8_t *src[3]; uint8_t *dst[3]; if(c->srcFormat == IMGFMT_I420){ src[0]= srcParam[0]; src[1]= srcParam[2]; src[2]= srcParam[1]; srcStride[0]= srcStrideParam[0]; srcStride[1]= srcStrideParam[2]; srcStride[2]= srcStrideParam[1]; } else if(c->srcFormat==IMGFMT_YV12){ src[0]= srcParam[0]; src[1]= srcParam[1]; src[2]= srcParam[2]; srcStride[0]= srcStrideParam[0]; srcStride[1]= srcStrideParam[1]; srcStride[2]= srcStrideParam[2]; } else if(isPacked(c->srcFormat)){ src[0]= src[1]= src[2]= srcParam[0]; srcStride[0]= srcStrideParam[0]; srcStride[1]= srcStride[2]= srcStrideParam[0]<<1; } else if(isGray(c->srcFormat)){ src[0]= srcParam[0]; src[1]= src[2]= NULL; srcStride[0]= srcStrideParam[0]; srcStride[1]= srcStride[2]= 0; } if(dstFormat == IMGFMT_I420){ dst[0]= dstParam[0]; dst[1]= dstParam[2]; dst[2]= dstParam[1]; dstStride[0]= dstStrideParam[0]; dstStride[1]= dstStrideParam[2]; dstStride[2]= dstStrideParam[1]; }else{ dst[0]= dstParam[0]; dst[1]= dstParam[1]; dst[2]= dstParam[2]; dstStride[0]= dstStrideParam[0]; dstStride[1]= dstStrideParam[1]; dstStride[2]= dstStrideParam[2]; } //printf(\"sws Strides:%d %d %d -> %d %d %d\\n\", srcStride[0],srcStride[1],srcStride[2], //dstStride[0],dstStride[1],dstStride[2]); if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0) { static int firstTime=1; //FIXME move this into the context perhaps if(flags & SWS_PRINT_INFO && firstTime) { fprintf(stderr, \"SwScaler: Warning: dstStride is not aligned!\\n\" \"SwScaler: ->cannot do aligned memory acesses anymore\\n\"); firstTime=0; } } /* Note the user might start scaling the picture in the middle so this will not get executed this is not really intended but works currently, so ppl might do it */ if(srcSliceY ==0){ lumBufIndex=0; chrBufIndex=0; dstY=0; lastInLumBuf= -1; lastInChrBuf= -1; } for(;dstY < dstH; dstY++){ unsigned char *dest =dst[0]+dstStride[0]*dstY; unsigned char *uDest=dst[1]+dstStride[1]*(dstY>>1); unsigned char *vDest=dst[2]+dstStride[2]*(dstY>>1); const int chrDstY= isHalfChrV(dstFormat) ? 
(dstY>>1) : dstY; const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input //handle holes (FAST_BILINEAR & weird filters) if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1; if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1; //printf(\"%d %d %d\\n\", firstChrSrcY, lastInChrBuf, vChrBufSize); ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1) ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1) // Do we have enough lines in this slice to output the dstY line if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < ((srcSliceY + srcSliceH + 1)>>1)) { //Do horizontal scaling while(lastInLumBuf < lastLumSrcY) { uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; lumBufIndex++; // printf(\"%d %d %d %d\\n\", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY); ASSERT(lumBufIndex < 2*vLumBufSize) ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) // printf(\"%d %d\\n\", lumBufIndex, vLumBufSize); RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, funnyYCode, c->srcFormat, formatConvBuffer); lastInLumBuf++; } while(lastInChrBuf < lastChrSrcY) { uint8_t *src1= src[1]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[1]; uint8_t *src2= src[2]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[2]; chrBufIndex++; ASSERT(chrBufIndex < 2*vChrBufSize) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) < ((srcSliceH+1)>>1)) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) >= 0) //FIXME replace parameters through context struct (some at least) RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, (srcW+1)>>1, chrXInc, flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, funnyUVCode, c->srcFormat, formatConvBuffer); lastInChrBuf++; } //wrap buf index around to stay inside the ring buffer if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; } else // not enough lines left in this slice -> load the rest in the buffer { /* printf(\"%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\\n\", firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY, lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize, vChrBufSize, vLumBufSize); */ //Do horizontal scaling while(lastInLumBuf+1 < srcSliceY + srcSliceH) { uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; lumBufIndex++; ASSERT(lumBufIndex < 2*vLumBufSize) ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, funnyYCode, c->srcFormat, formatConvBuffer); lastInLumBuf++; } while(lastInChrBuf+1 < ((srcSliceY + srcSliceH)>>1)) { uint8_t *src1= src[1]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[1]; uint8_t *src2= src[2]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[2]; chrBufIndex++; ASSERT(chrBufIndex < 2*vChrBufSize) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) < ((srcSliceH+1)>>1)) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) >= 0) RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, (srcW+1)>>1, chrXInc, flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, 
hChrFilterSize, funnyUVCode, c->srcFormat, formatConvBuffer); lastInChrBuf++; } //wrap buf index around to stay inside the ring buffer if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; break; //we cant output a dstY line so lets try with the next slice } #ifdef HAVE_MMX b5Dither= dither8[dstY&1]; g6Dither= dither4[dstY&1]; g5Dither= dither8[dstY&1]; r5Dither= dither8[(dstY+1)&1]; #endif if(dstY < dstH-2) { if(isPlanarYUV(dstFormat)) //YV12 like { if(dstY&1) uDest=vDest= NULL; //FIXME split functions in lumi / chromi if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12 { int16_t *lumBuf = lumPixBuf[0]; int16_t *chrBuf= chrPixBuf[0]; RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW); } else //General YV12 { int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; RENAME(yuv2yuvX)( vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+(dstY>>1)*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, uDest, vDest, dstW, lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+(dstY>>1)*vChrFilterSize*4); } } else { int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB { int chrAlpha= vChrFilter[2*dstY+1]; RENAME(yuv2rgb1)(*lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1), dest, dstW, chrAlpha, dstFormat, flags); } else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB { int lumAlpha= vLumFilter[2*dstY+1]; int chrAlpha= vChrFilter[2*dstY+1]; RENAME(yuv2rgb2)(*lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1), dest, dstW, lumAlpha, chrAlpha, dstFormat, flags); } else //General RGB { RENAME(yuv2rgbX)( vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, dstW, dstFormat, lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+dstY*vChrFilterSize*4); } } } else // hmm looks like we cant use MMX here without overwriting this arrays tail { int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; if(isPlanarYUV(dstFormat)) //YV12 { if(dstY&1) uDest=vDest= NULL; //FIXME split functions in lumi / chromi yuv2yuvXinC( vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+(dstY>>1)*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, uDest, vDest, dstW); } else { ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); yuv2rgbXinC( vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, dstW, dstFormat); } } } #ifdef HAVE_MMX __asm __volatile(SFENCE:::\"memory\"); __asm __volatile(EMMS:::\"memory\"); #endif /* store changed local vars back in the context */ c->dstY= dstY; c->lumBufIndex= lumBufIndex; c->chrBufIndex= chrBufIndex; c->lastInLumBuf= lastInLumBuf; c->lastInChrBuf= lastInChrBuf; }"} {"target": 1, "idx": 19866, "func": "static BlockBackend *blockdev_init(const char *file, QDict *bs_opts, Error **errp) { const char 
*buf; int bdrv_flags = 0; int on_read_error, on_write_error; bool account_invalid, account_failed; BlockBackend *blk; BlockDriverState *bs; ThrottleConfig cfg; int snapshot = 0; Error *error = NULL; QemuOpts *opts; const char *id; bool has_driver_specific_opts; BlockdevDetectZeroesOptions detect_zeroes = BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF; const char *throttling_group = NULL; /* Check common options by copying from bs_opts to opts, all other options * stay in bs_opts for processing by bdrv_open(). */ id = qdict_get_try_str(bs_opts, \"id\"); opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, &error); if (error) { error_propagate(errp, error); goto err_no_opts; } qemu_opts_absorb_qdict(opts, bs_opts, &error); if (error) { error_propagate(errp, error); goto early_err; } if (id) { qdict_del(bs_opts, \"id\"); } has_driver_specific_opts = !!qdict_size(bs_opts); /* extract parameters */ snapshot = qemu_opt_get_bool(opts, \"snapshot\", 0); extract_common_blockdev_options(opts, &bdrv_flags, &throttling_group, &cfg, &detect_zeroes, &error); if (error) { error_propagate(errp, error); goto early_err; } if ((buf = qemu_opt_get(opts, \"format\")) != NULL) { if (is_help_option(buf)) { error_printf(\"Supported formats:\"); bdrv_iterate_format(bdrv_format_print, NULL); error_printf(\"\\n\"); goto early_err; } if (qdict_haskey(bs_opts, \"driver\")) { error_setg(errp, \"Cannot specify both 'driver' and 'format'\"); goto early_err; } qdict_put(bs_opts, \"driver\", qstring_from_str(buf)); } on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; if ((buf = qemu_opt_get(opts, \"werror\")) != NULL) { on_write_error = parse_block_error_action(buf, 0, &error); if (error) { error_propagate(errp, error); goto early_err; } } on_read_error = BLOCKDEV_ON_ERROR_REPORT; if ((buf = qemu_opt_get(opts, \"rerror\")) != NULL) { on_read_error = parse_block_error_action(buf, 1, &error); if (error) { error_propagate(errp, error); goto early_err; } } if (snapshot) { /* always use cache=unsafe with snapshot */ bdrv_flags &= ~BDRV_O_CACHE_MASK; bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH); } /* init */ if ((!file || !*file) && !has_driver_specific_opts) { BlockBackendRootState *blk_rs; blk = blk_new(qemu_opts_id(opts), errp); if (!blk) { goto early_err; } blk_rs = blk_get_root_state(blk); blk_rs->open_flags = bdrv_flags; blk_rs->read_only = !(bdrv_flags & BDRV_O_RDWR); blk_rs->detect_zeroes = detect_zeroes; if (throttle_enabled(&cfg)) { if (!throttling_group) { throttling_group = blk_name(blk); } blk_rs->throttle_group = g_strdup(throttling_group); blk_rs->throttle_state = throttle_group_incref(throttling_group); blk_rs->throttle_state->cfg = cfg; } QDECREF(bs_opts); } else { if (file && !*file) { file = NULL; } blk = blk_new_open(qemu_opts_id(opts), file, NULL, bs_opts, bdrv_flags, errp); if (!blk) { goto err_no_bs_opts; } bs = blk_bs(blk); bs->detect_zeroes = detect_zeroes; /* disk I/O throttling */ if (throttle_enabled(&cfg)) { if (!throttling_group) { throttling_group = blk_name(blk); } bdrv_io_limits_enable(bs, throttling_group); bdrv_set_io_limits(bs, &cfg); } if (bdrv_key_required(bs)) { autostart = 0; } block_acct_init(blk_get_stats(blk), account_invalid, account_failed); } blk_set_on_error(blk, on_read_error, on_write_error); err_no_bs_opts: qemu_opts_del(opts); return blk; early_err: qemu_opts_del(opts); err_no_opts: QDECREF(bs_opts); return NULL; }"} {"target": 1, "idx": 19875, "func": "static void escc_mem_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { ESCCState *serial = opaque; ChannelState 
*s; uint32_t saddr; int newreg, channel; val &= 0xff; saddr = (addr >> serial->it_shift) & 1; channel = (addr >> (serial->it_shift + 1)) & 1; s = &serial->chn[channel]; switch (saddr) { case SERIAL_CTRL: trace_escc_mem_writeb_ctrl(CHN_C(s), s->reg, val & 0xff); newreg = 0; switch (s->reg) { case W_CMD: newreg = val & CMD_PTR_MASK; val &= CMD_CMD_MASK; switch (val) { case CMD_HI: newreg |= CMD_HI; break; case CMD_CLR_TXINT: clr_txint(s); break; case CMD_CLR_IUS: if (s->rxint_under_svc) { s->rxint_under_svc = 0; if (s->txint) { set_txint(s); } } else if (s->txint_under_svc) { s->txint_under_svc = 0; } escc_update_irq(s); break; default: break; } break; case W_INTR ... W_RXCTRL: case W_SYNC1 ... W_TXBUF: case W_MISC1 ... W_CLOCK: case W_MISC2 ... W_EXTINT: s->wregs[s->reg] = val; break; case W_TXCTRL1: case W_TXCTRL2: s->wregs[s->reg] = val; escc_update_parameters(s); break; case W_BRGLO: case W_BRGHI: s->wregs[s->reg] = val; s->rregs[s->reg] = val; escc_update_parameters(s); break; case W_MINTR: switch (val & MINTR_RST_MASK) { case 0: default: break; case MINTR_RST_B: escc_reset_chn(&serial->chn[0]); return; case MINTR_RST_A: escc_reset_chn(&serial->chn[1]); return; case MINTR_RST_ALL: escc_reset(DEVICE(serial)); return; } break; default: break; } if (s->reg == 0) s->reg = newreg; else s->reg = 0; break; case SERIAL_DATA: trace_escc_mem_writeb_data(CHN_C(s), val); s->tx = val; if (s->wregs[W_TXCTRL2] & TXCTRL2_TXEN) { // tx enabled if (s->chr) qemu_chr_fe_write(s->chr, &s->tx, 1); else if (s->type == kbd && !s->disabled) { handle_kbd_command(s, val); } } s->rregs[R_STATUS] |= STATUS_TXEMPTY; // Tx buffer empty s->rregs[R_SPEC] |= SPEC_ALLSENT; // All sent set_txint(s); break; default: break; } }"} {"target": 1, "idx": 19893, "func": "static int vmdaudio_decode_init(AVCodecContext *avctx) { VmdAudioContext *s = (VmdAudioContext *)avctx->priv_data; int i; s->channels = avctx->channels; s->bits = avctx->bits_per_sample; s->block_align = avctx->block_align; printf (\" %d channels, %d bits/sample, block align = %d\\n\", s->channels, s->bits, s->block_align); /* set up the steps8 and steps16 tables */ for (i = 0; i < 8; i++) { if (i < 4) s->steps8[i] = i; else s->steps8[i] = s->steps8[i - 1] + i - 1; if (i == 0) s->steps16[i] = 0; else if (i == 1) s->steps16[i] = 4; else if (i == 2) s->steps16[i] = 16; else s->steps16[i] = 1 << (i + 4); } /* set up the step128 table */ s->steps128[0] = 0; s->steps128[1] = 8; for (i = 0x02; i <= 0x20; i++) s->steps128[i] = (i - 1) << 4; for (i = 0x21; i <= 0x60; i++) s->steps128[i] = (i + 0x1F) << 3; for (i = 0x61; i <= 0x70; i++) s->steps128[i] = (i - 0x51) << 6; for (i = 0x71; i <= 0x78; i++) s->steps128[i] = (i - 0x69) << 8; for (i = 0x79; i <= 0x7D; i++) s->steps128[i] = (i - 0x75) << 10; s->steps128[0x7E] = 0x3000; s->steps128[0x7F] = 0x4000; /* set up the negative half of each table */ for (i = 0; i < 8; i++) { s->steps8[i + 8] = -s->steps8[i]; s->steps16[i + 8] = -s->steps16[i]; } for (i = 0; i < 128; i++) s->steps128[i + 128] = -s->steps128[i]; return 0; }"} {"target": 1, "idx": 19907, "func": "static int adb_kbd_request(ADBDevice *d, uint8_t *obuf, const uint8_t *buf, int len) { KBDState *s = ADB_KEYBOARD(d); int cmd, reg, olen; if ((buf[0] & 0x0f) == ADB_FLUSH) { /* flush keyboard fifo */ s->wptr = s->rptr = s->count = 0; return 0; } cmd = buf[0] & 0xc; reg = buf[0] & 0x3; olen = 0; switch(cmd) { case ADB_WRITEREG: switch(reg) { case 2: /* LED status */ break; case 3: switch(buf[2]) { case ADB_CMD_SELF_TEST: break; case ADB_CMD_CHANGE_ID: case 
ADB_CMD_CHANGE_ID_AND_ACT: case ADB_CMD_CHANGE_ID_AND_ENABLE: d->devaddr = buf[1] & 0xf; break; default: d->devaddr = buf[1] & 0xf; /* we support handlers: * 1: Apple Standard Keyboard * 2: Apple Extended Keyboard (LShift = RShift) * 3: Apple Extended Keyboard (LShift != RShift) */ if (buf[2] == 1 || buf[2] == 2 || buf[2] == 3) { d->handler = buf[2]; } break; } } break; case ADB_READREG: switch(reg) { case 0: olen = adb_kbd_poll(d, obuf); break; case 1: break; case 2: obuf[0] = 0x00; /* XXX: check this */ obuf[1] = 0x07; /* led status */ olen = 2; break; case 3: obuf[0] = d->handler; obuf[1] = d->devaddr; olen = 2; break; } break; } return olen; }"} {"target": 1, "idx": 19910, "func": "static int decode_slice(AVCodecContext *c, void *arg) { FFV1Context *fs = *(void **)arg; FFV1Context *f = fs->avctx->priv_data; int width, height, x, y, ret; const int ps = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step; AVFrame * const p = f->cur; int i, si; for( si=0; fs != f->slice_context[si]; si ++) ; if(f->fsrc && !p->key_frame) ff_thread_await_progress(&f->last_picture, si, 0); if(f->fsrc && !p->key_frame) { FFV1Context *fssrc = f->fsrc->slice_context[si]; FFV1Context *fsdst = f->slice_context[si]; av_assert1(fsdst->plane_count == fssrc->plane_count); av_assert1(fsdst == fs); if (!p->key_frame) fsdst->slice_damaged |= fssrc->slice_damaged; for (i = 0; i < f->plane_count; i++) { PlaneContext *psrc = &fssrc->plane[i]; PlaneContext *pdst = &fsdst->plane[i]; av_free(pdst->state); av_free(pdst->vlc_state); memcpy(pdst, psrc, sizeof(*pdst)); pdst->state = NULL; pdst->vlc_state = NULL; if (fssrc->ac) { pdst->state = av_malloc_array(CONTEXT_SIZE, psrc->context_count); memcpy(pdst->state, psrc->state, CONTEXT_SIZE * psrc->context_count); } else { pdst->vlc_state = av_malloc_array(sizeof(*pdst->vlc_state), psrc->context_count); memcpy(pdst->vlc_state, psrc->vlc_state, sizeof(*pdst->vlc_state) * psrc->context_count); } } } fs->slice_rct_by_coef = 1; fs->slice_rct_ry_coef = 1; if (f->version > 2) { if (ff_ffv1_init_slice_state(f, fs) < 0) return AVERROR(ENOMEM); if (decode_slice_header(f, fs) < 0) { fs->slice_damaged = 1; return AVERROR_INVALIDDATA; } } if ((ret = ff_ffv1_init_slice_state(f, fs)) < 0) return ret; if (f->cur->key_frame || fs->slice_reset_contexts) ff_ffv1_clear_slice_state(f, fs); width = fs->slice_width; height = fs->slice_height; x = fs->slice_x; y = fs->slice_y; if (!fs->ac) { if (f->version == 3 && f->micro_version > 1 || f->version > 3) get_rac(&fs->c, (uint8_t[]) { 129 }); fs->ac_byte_count = f->version > 2 || (!x && !y) ? fs->c.bytestream - fs->c.bytestream_start - 1 : 0; init_get_bits(&fs->gb, fs->c.bytestream_start + fs->ac_byte_count, (fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count) * 8); } av_assert1(width && height); if (f->colorspace == 0) { const int chroma_width = FF_CEIL_RSHIFT(width, f->chroma_h_shift); const int chroma_height = FF_CEIL_RSHIFT(height, f->chroma_v_shift); const int cx = x >> f->chroma_h_shift; const int cy = y >> f->chroma_v_shift; decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0); if (f->chroma_planes) { decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1); decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1); } if (fs->transparency) decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], (f->version >= 4 && !f->chroma_planes) ? 
1 : 2); } else { uint8_t *planes[3] = { p->data[0] + ps * x + y * p->linesize[0], p->data[1] + ps * x + y * p->linesize[1], p->data[2] + ps * x + y * p->linesize[2] }; decode_rgb_frame(fs, planes, width, height, p->linesize); } if (fs->ac && f->version > 2) { int v; get_rac(&fs->c, (uint8_t[]) { 129 }); v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec; if (v) { av_log(f->avctx, AV_LOG_ERROR, \"bytestream end mismatching by %d\\n\", v); fs->slice_damaged = 1; } } emms_c(); ff_thread_report_progress(&f->picture, si, 0); return 0; }"} {"target": 0, "idx": 19919, "func": "static av_cold int bmv_aud_decode_init(AVCodecContext *avctx) { BMVAudioDecContext *c = avctx->priv_data; if (avctx->channels != 2) { av_log(avctx, AV_LOG_INFO, \"invalid number of channels\\n\"); return AVERROR(EINVAL); } avctx->sample_fmt = AV_SAMPLE_FMT_S16; avcodec_get_frame_defaults(&c->frame); avctx->coded_frame = &c->frame; return 0; }"} {"target": 0, "idx": 19932, "func": "static int cdrom_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVRawState *s = bs->opaque; int ret; s->type = FTYPE_CD; /* open will not fail even if no CD is inserted, so add O_NONBLOCK */ ret = raw_open_common(bs, options, flags, O_NONBLOCK, errp); return ret; }"} {"target": 0, "idx": 19933, "func": "void imx_serial_create(int uart, const target_phys_addr_t addr, qemu_irq irq) { DeviceState *dev; SysBusDevice *bus; CharDriverState *chr; const char chr_name[] = \"serial\"; char label[ARRAY_SIZE(chr_name) + 1]; dev = qdev_create(NULL, \"imx-serial\"); if (uart >= MAX_SERIAL_PORTS) { hw_error(\"Cannot assign uart %d: QEMU supports only %d ports\\n\", uart, MAX_SERIAL_PORTS); } chr = serial_hds[uart]; if (!chr) { snprintf(label, ARRAY_SIZE(label), \"%s%d\", chr_name, uart); chr = qemu_chr_new(label, \"null\", NULL); if (!(chr)) { hw_error(\"Can't assign serial port to imx-uart%d.\\n\", uart); } } qdev_prop_set_chr(dev, \"chardev\", chr); bus = sysbus_from_qdev(dev); qdev_init_nofail(dev); if (addr != (target_phys_addr_t)-1) { sysbus_mmio_map(bus, 0, addr); } sysbus_connect_irq(bus, 0, irq); }"} {"target": 0, "idx": 19935, "func": "static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg) { unsigned int hash = kvm_hash_msi(msg.data); KVMMSIRoute *route; QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) { if (route->kroute.u.msi.address_lo == (uint32_t)msg.address && route->kroute.u.msi.address_hi == (msg.address >> 32) && route->kroute.u.msi.data == msg.data) { return route; } } return NULL; }"} {"target": 0, "idx": 19949, "func": "static int pxb_bus_num(PCIBus *bus) { PXBDev *pxb = PXB_DEV(bus->parent_dev); return pxb->bus_nr; }"} {"target": 1, "idx": 19963, "func": "static void dec_user(DisasContext *dc) { LOG_DIS(\"user\"); cpu_abort(dc->env, \"user insn undefined\\n\"); }"} {"target": 1, "idx": 19970, "func": "target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb) { target_ulong rt; if (ppc_load_slb_esid(env, rb, &rt) < 0) { helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL); } return rt; }"} {"target": 1, "idx": 19991, "func": "void hmp_pci_del(Monitor *mon, const QDict *qdict) { pci_device_hot_remove(mon, qdict_get_str(qdict, \"pci_addr\")); }"} {"target": 1, "idx": 19993, "func": "QEMUPutMouseEntry *qemu_add_mouse_event_handler(QEMUPutMouseEvent *func, void *opaque, int absolute, const char *name) { QEMUPutMouseEntry *s; s = g_malloc0(sizeof(QEMUPutMouseEntry)); s->qemu_put_mouse_event = func; s->qemu_put_mouse_event_opaque = opaque; 
s->qemu_put_mouse_event_absolute = absolute; s->h.name = name; s->h.mask = INPUT_EVENT_MASK_BTN | (absolute ? INPUT_EVENT_MASK_ABS : INPUT_EVENT_MASK_REL); s->h.event = legacy_mouse_event; s->h.sync = legacy_mouse_sync; s->s = qemu_input_handler_register((DeviceState *)s, &s->h); return s; }"} {"target": 1, "idx": 19996, "func": "static int build_filter(ResampleContext *c, void *filter, double factor, int tap_count, int alloc, int phase_count, int scale, int filter_type, double kaiser_beta){ int ph, i; double x, y, w, t, s; double *tab = av_malloc_array(tap_count+1, sizeof(*tab)); double *sin_lut = av_malloc_array(phase_count / 2 + 1, sizeof(*sin_lut)); const int center= (tap_count-1)/2; if (!tab || !sin_lut) goto fail; /* if upsampling, only need to interpolate, no filter */ if (factor > 1.0) factor = 1.0; av_assert0(phase_count == 1 || phase_count % 2 == 0); if (factor == 1.0) { for (ph = 0; ph <= phase_count / 2; ph++) sin_lut[ph] = sin(M_PI * ph / phase_count); } for(ph = 0; ph <= phase_count / 2; ph++) { double norm = 0; s = sin_lut[ph]; for(i=0;i<=tap_count;i++) { x = M_PI * ((double)(i - center) - (double)ph / phase_count) * factor; if (x == 0) y = 1.0; else if (factor == 1.0) y = s / x; else y = sin(x) / x; switch(filter_type){ case SWR_FILTER_TYPE_CUBIC:{ const float d= -0.5; //first order derivative = -0.5 x = fabs(((double)(i - center) - (double)ph / phase_count) * factor); if(x<1.0) y= 1 - 3*x*x + 2*x*x*x + d*( -x*x + x*x*x); else y= d*(-4 + 8*x - 5*x*x + x*x*x); break;} case SWR_FILTER_TYPE_BLACKMAN_NUTTALL: w = 2.0*x / (factor*tap_count); t = -cos(w); y *= 0.3635819 - 0.4891775 * t + 0.1365995 * (2*t*t-1) - 0.0106411 * (4*t*t*t - 3*t); break; case SWR_FILTER_TYPE_KAISER: w = 2.0*x / (factor*tap_count*M_PI); y *= bessel(kaiser_beta*sqrt(FFMAX(1-w*w, 0))); break; default: av_assert0(0); } tab[i] = y; s = -s; if (i < tap_count) norm += y; } /* normalize so that an uniform color remains the same */ switch(c->format){ case AV_SAMPLE_FMT_S16P: for(i=0;icpu_model, icc_bridge); pc_acpi_init(\"q35-acpi-dsdt.aml\"); kvmclock_create(); if (args->ram_size >= 0xb0000000) { above_4g_mem_size = args->ram_size - 0xb0000000; below_4g_mem_size = 0xb0000000; } else { above_4g_mem_size = 0; below_4g_mem_size = args->ram_size; } /* pci enabled */ if (pci_enabled) { pci_memory = g_new(MemoryRegion, 1); memory_region_init(pci_memory, NULL, \"pci\", INT64_MAX); rom_memory = pci_memory; } else { pci_memory = NULL; rom_memory = get_system_memory(); } guest_info = pc_guest_info_init(below_4g_mem_size, above_4g_mem_size); guest_info->has_pci_info = has_pci_info; guest_info->isapc_ram_fw = false; guest_info->has_acpi_build = has_acpi_build; /* allocate ram and load rom/bios */ if (!xen_enabled()) { pc_memory_init(get_system_memory(), args->kernel_filename, args->kernel_cmdline, args->initrd_filename, below_4g_mem_size, above_4g_mem_size, rom_memory, &ram_memory, guest_info); } /* irq lines */ gsi_state = g_malloc0(sizeof(*gsi_state)); if (kvm_irqchip_in_kernel()) { kvm_pc_setup_irq_routing(pci_enabled); gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state, GSI_NUM_PINS); } else { gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS); } /* create pci host bus */ q35_host = Q35_HOST_DEVICE(qdev_create(NULL, TYPE_Q35_HOST_DEVICE)); object_property_add_child(qdev_get_machine(), \"q35\", OBJECT(q35_host), NULL); q35_host->mch.ram_memory = ram_memory; q35_host->mch.pci_address_space = pci_memory; q35_host->mch.system_memory = get_system_memory(); q35_host->mch.address_space_io = get_system_io(); 
q35_host->mch.below_4g_mem_size = below_4g_mem_size; q35_host->mch.above_4g_mem_size = above_4g_mem_size; q35_host->mch.guest_info = guest_info; /* pci */ qdev_init_nofail(DEVICE(q35_host)); phb = PCI_HOST_BRIDGE(q35_host); host_bus = phb->bus; /* create ISA bus */ lpc = pci_create_simple_multifunction(host_bus, PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC), true, TYPE_ICH9_LPC_DEVICE); ich9_lpc = ICH9_LPC_DEVICE(lpc); ich9_lpc->pic = gsi; ich9_lpc->ioapic = gsi_state->ioapic_irq; pci_bus_irqs(host_bus, ich9_lpc_set_irq, ich9_lpc_map_irq, ich9_lpc, ICH9_LPC_NB_PIRQS); pci_bus_set_route_irq_fn(host_bus, ich9_route_intx_pin_to_irq); isa_bus = ich9_lpc->isa_bus; /*end early*/ isa_bus_irqs(isa_bus, gsi); if (kvm_irqchip_in_kernel()) { i8259 = kvm_i8259_init(isa_bus); } else if (xen_enabled()) { i8259 = xen_interrupt_controller_init(); } else { cpu_irq = pc_allocate_cpu_irq(); i8259 = i8259_init(isa_bus, cpu_irq[0]); } for (i = 0; i < ISA_NUM_IRQS; i++) { gsi_state->i8259_irq[i] = i8259[i]; } if (pci_enabled) { ioapic_init_gsi(gsi_state, NULL); } qdev_init_nofail(icc_bridge); pc_register_ferr_irq(gsi[13]); /* init basic PC hardware */ pc_basic_device_init(isa_bus, gsi, &rtc_state, &floppy, false); /* connect pm stuff to lpc */ ich9_lpc_pm_init(lpc); /* ahci and SATA device, for q35 1 ahci controller is built-in */ ahci = pci_create_simple_multifunction(host_bus, PCI_DEVFN(ICH9_SATA1_DEV, ICH9_SATA1_FUNC), true, \"ich9-ahci\"); idebus[0] = qdev_get_child_bus(&ahci->qdev, \"ide.0\"); idebus[1] = qdev_get_child_bus(&ahci->qdev, \"ide.1\"); if (usb_enabled(false)) { /* Should we create 6 UHCI according to ich9 spec? */ ehci_create_ich9_with_companions(host_bus, 0x1d); } /* TODO: Populate SPD eeprom data. */ smbus_eeprom_init(ich9_smb_init(host_bus, PCI_DEVFN(ICH9_SMB_DEV, ICH9_SMB_FUNC), 0xb100), 8, NULL, 0); pc_cmos_init(below_4g_mem_size, above_4g_mem_size, args->boot_order, floppy, idebus[0], idebus[1], rtc_state); /* the rest devices to which pci devfn is automatically assigned */ pc_vga_init(isa_bus, host_bus); pc_nic_init(isa_bus, host_bus); if (pci_enabled) { pc_pci_device_init(host_bus); } if (has_pvpanic) { pvpanic_init(isa_bus); } }"} {"target": 1, "idx": 20034, "func": "static int mov_read_extradata(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st = c->fc->streams[c->fc->nb_streams-1]; uint64_t size= (uint64_t)st->codec->extradata_size + atom.size + 8 + FF_INPUT_BUFFER_PADDING_SIZE; uint8_t *buf; if(size > INT_MAX || (uint64_t)atom.size > INT_MAX) return -1; buf= av_realloc(st->codec->extradata, size); if(!buf) return -1; st->codec->extradata= buf; buf+= st->codec->extradata_size; st->codec->extradata_size= size - FF_INPUT_BUFFER_PADDING_SIZE; AV_WB32( buf , atom.size + 8); AV_WL32( buf + 4, atom.type); get_buffer(pb, buf + 8, atom.size); return 0; }"} {"target": 1, "idx": 20036, "func": "int tap_open(char *ifname, int ifname_size, int *vnet_hdr, int vnet_hdr_required, int mq_required, Error **errp) { /* FIXME error_setg(errp, ...) 
on failure */ char dev[10]=\"\"; int fd; if( (fd = tap_alloc(dev, sizeof(dev))) < 0 ){ fprintf(stderr, \"Cannot allocate TAP device\\n\"); return -1; } pstrcpy(ifname, ifname_size, dev); if (*vnet_hdr) { /* Solaris doesn't have IFF_VNET_HDR */ *vnet_hdr = 0; if (vnet_hdr_required && !*vnet_hdr) { error_report(\"vnet_hdr=1 requested, but no kernel \" \"support for IFF_VNET_HDR available\"); close(fd); return -1; } } fcntl(fd, F_SETFL, O_NONBLOCK); return fd; }"} {"target": 1, "idx": 20040, "func": "static void av_estimate_timings(AVFormatContext *ic) { int64_t file_size; /* get the file size, if possible */ if (ic->iformat->flags & AVFMT_NOFILE) { file_size = 0; } else { file_size = url_fsize(&ic->pb); if (file_size < 0) file_size = 0; } ic->file_size = file_size; if ((!strcmp(ic->iformat->name, \"mpeg\") || !strcmp(ic->iformat->name, \"mpegts\")) && file_size && !ic->pb.is_streamed) { /* get accurate estimate from the PTSes */ av_estimate_timings_from_pts(ic); } else if (av_has_timings(ic)) { /* at least one components has timings - we use them for all the components */ fill_all_stream_timings(ic); } else { /* less precise: use bit rate info */ av_estimate_timings_from_bit_rate(ic); } av_update_stream_timings(ic); #if 0 { int i; AVStream *st; for(i = 0;i < ic->nb_streams; i++) { st = ic->streams[i]; printf(\"%d: start_time: %0.3f duration: %0.3f\\n\", i, (double)st->start_time / AV_TIME_BASE, (double)st->duration / AV_TIME_BASE); } printf(\"stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\\n\", (double)ic->start_time / AV_TIME_BASE, (double)ic->duration / AV_TIME_BASE, ic->bit_rate / 1000); } #endif }"} {"target": 0, "idx": 20055, "func": "static av_cold int ac3_encode_init(AVCodecContext *avctx) { int freq = avctx->sample_rate; int bitrate = avctx->bit_rate; AC3EncodeContext *s = avctx->priv_data; int i, j, ch; int bw_code; avctx->frame_size = AC3_FRAME_SIZE; ac3_common_init(); if (!avctx->channel_layout) { av_log(avctx, AV_LOG_WARNING, \"No channel layout specified. 
The \" \"encoder will guess the layout, but it \" \"might be incorrect.\\n\"); } if (set_channel_info(s, avctx->channels, &avctx->channel_layout)) { av_log(avctx, AV_LOG_ERROR, \"invalid channel layout\\n\"); return -1; } /* frequency */ for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) if ((ff_ac3_sample_rate_tab[j] >> i) == freq) goto found; } return -1; found: s->sample_rate = freq; s->bit_alloc.sr_shift = i; s->bit_alloc.sr_code = j; s->bitstream_id = 8 + s->bit_alloc.sr_shift; s->bitstream_mode = 0; /* complete main audio service */ /* bitrate & frame size */ for (i = 0; i < 19; i++) { if ((ff_ac3_bitrate_tab[i] >> s->bit_alloc.sr_shift)*1000 == bitrate) break; } if (i == 19) return -1; s->bit_rate = bitrate; s->frame_size_code = i << 1; s->frame_size_min = 2 * ff_ac3_frame_size_tab[s->frame_size_code][s->bit_alloc.sr_code]; s->bits_written = 0; s->samples_written = 0; s->frame_size = s->frame_size_min; /* set bandwidth */ if (avctx->cutoff) { /* calculate bandwidth based on user-specified cutoff frequency */ int cutoff = av_clip(avctx->cutoff, 1, s->sample_rate >> 1); int fbw_coeffs = cutoff * 2 * AC3_MAX_COEFS / s->sample_rate; bw_code = av_clip((fbw_coeffs - 73) / 3, 0, 60); } else { /* use default bandwidth setting */ /* XXX: should compute the bandwidth according to the frame size, so that we avoid annoying high frequency artifacts */ bw_code = 50; } for (ch = 0; ch < s->fbw_channels; ch++) { /* bandwidth for each channel */ s->bandwidth_code[ch] = bw_code; s->nb_coefs[ch] = bw_code * 3 + 73; } if (s->lfe_on) s->nb_coefs[s->lfe_channel] = 7; /* LFE channel always has 7 coefs */ /* initial snr offset */ s->coarse_snr_offset = 40; mdct_init(9); avctx->coded_frame= avcodec_alloc_frame(); avctx->coded_frame->key_frame= 1; return 0; }"} {"target": 0, "idx": 20056, "func": "static void gen_connect(URLContext *s, RTMPContext *rt) { RTMPPacket pkt; uint8_t ver[64], *p; ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 4096); p = pkt.data; ff_amf_write_string(&p, \"connect\"); ff_amf_write_number(&p, ++rt->nb_invokes); ff_amf_write_object_start(&p); ff_amf_write_field_name(&p, \"app\"); ff_amf_write_string(&p, rt->app); if (rt->is_input) { snprintf(ver, sizeof(ver), \"%s %d,%d,%d,%d\", RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1, RTMP_CLIENT_VER2, RTMP_CLIENT_VER3, RTMP_CLIENT_VER4); } else { snprintf(ver, sizeof(ver), \"FMLE/3.0 (compatible; %s)\", LIBAVFORMAT_IDENT); ff_amf_write_field_name(&p, \"type\"); ff_amf_write_string(&p, \"nonprivate\"); } ff_amf_write_field_name(&p, \"flashVer\"); ff_amf_write_string(&p, ver); ff_amf_write_field_name(&p, \"tcUrl\"); ff_amf_write_string(&p, rt->tcurl); if (rt->is_input) { ff_amf_write_field_name(&p, \"fpad\"); ff_amf_write_bool(&p, 0); ff_amf_write_field_name(&p, \"capabilities\"); ff_amf_write_number(&p, 15.0); /* Tell the server we support all the audio codecs except * SUPPORT_SND_INTEL (0x0008) and SUPPORT_SND_UNUSED (0x0010) * which are unused in the RTMP protocol implementation. 
*/ ff_amf_write_field_name(&p, \"audioCodecs\"); ff_amf_write_number(&p, 4071.0); ff_amf_write_field_name(&p, \"videoCodecs\"); ff_amf_write_number(&p, 252.0); ff_amf_write_field_name(&p, \"videoFunction\"); ff_amf_write_number(&p, 1.0); } ff_amf_write_object_end(&p); pkt.data_size = p - pkt.data; ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]); ff_rtmp_packet_destroy(&pkt); }"} {"target": 0, "idx": 20060, "func": "static void pc_dimm_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { HotplugHandlerClass *hhc; Error *local_err = NULL; PCMachineState *pcms = PC_MACHINE(hotplug_dev); PCDIMMDevice *dimm = PC_DIMM(dev); PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm); MemoryRegion *mr = ddc->get_memory_region(dimm); uint64_t align = TARGET_PAGE_SIZE; if (memory_region_get_alignment(mr) && pcms->enforce_aligned_dimm) { align = memory_region_get_alignment(mr); } if (!pcms->acpi_dev) { error_setg(&local_err, \"memory hotplug is not enabled: missing acpi device\"); goto out; } pc_dimm_memory_plug(dev, &pcms->hotplug_memory, mr, align, &local_err); if (local_err) { goto out; } hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev); hhc->plug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &error_abort); out: error_propagate(errp, local_err); }"} {"target": 0, "idx": 20064, "func": "static void cmd_test_unit_ready(IDEState *s, uint8_t *buf) { if (bdrv_is_inserted(s->bs)) { ide_atapi_cmd_ok(s); } else { ide_atapi_cmd_error(s, SENSE_NOT_READY, ASC_MEDIUM_NOT_PRESENT); } }"} {"target": 0, "idx": 20067, "func": "static int wrapped_avframe_encode(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet) { AVFrame *wrapped = av_frame_clone(frame); if (!wrapped) return AVERROR(ENOMEM); pkt->buf = av_buffer_create((uint8_t *)wrapped, sizeof(*wrapped), wrapped_avframe_release_buffer, NULL, AV_BUFFER_FLAG_READONLY); if (!pkt->buf) { av_frame_free(&wrapped); return AVERROR(ENOMEM); } pkt->data = (uint8_t *)wrapped; pkt->size = sizeof(*wrapped); pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = 1; return 0; }"} {"target": 0, "idx": 20068, "func": "static int kvm_s390_check_enable_cmma(KVMState *s) { struct kvm_device_attr attr = { .group = KVM_S390_VM_MEM_CTRL, .attr = KVM_S390_VM_MEM_ENABLE_CMMA, }; return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr); }"} {"target": 0, "idx": 20077, "func": "void tcg_optimize(TCGContext *s) { int oi, oi_next, nb_temps, nb_globals; /* Array VALS has an element for each temp. If this temp holds a constant then its value is kept in VALS' element. If this temp is a copy of other ones then the other copies are available through the doubly linked circular list. 
*/ nb_temps = s->nb_temps; nb_globals = s->nb_globals; reset_all_temps(nb_temps); for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) { tcg_target_ulong mask, partmask, affected; int nb_oargs, nb_iargs, i; TCGArg tmp; TCGOp * const op = &s->gen_op_buf[oi]; TCGArg * const args = &s->gen_opparam_buf[op->args]; TCGOpcode opc = op->opc; const TCGOpDef *def = &tcg_op_defs[opc]; oi_next = op->next; if (opc == INDEX_op_call) { nb_oargs = op->callo; nb_iargs = op->calli; } else { nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; } /* Do copy propagation */ for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { if (temps[args[i]].state == TCG_TEMP_COPY) { args[i] = find_better_copy(s, args[i]); } } /* For commutative operations make constant second argument */ switch (opc) { CASE_OP_32_64(add): CASE_OP_32_64(mul): CASE_OP_32_64(and): CASE_OP_32_64(or): CASE_OP_32_64(xor): CASE_OP_32_64(eqv): CASE_OP_32_64(nand): CASE_OP_32_64(nor): CASE_OP_32_64(muluh): CASE_OP_32_64(mulsh): swap_commutative(args[0], &args[1], &args[2]); break; CASE_OP_32_64(brcond): if (swap_commutative(-1, &args[0], &args[1])) { args[2] = tcg_swap_cond(args[2]); } break; CASE_OP_32_64(setcond): if (swap_commutative(args[0], &args[1], &args[2])) { args[3] = tcg_swap_cond(args[3]); } break; CASE_OP_32_64(movcond): if (swap_commutative(-1, &args[1], &args[2])) { args[5] = tcg_swap_cond(args[5]); } /* For movcond, we canonicalize the \"false\" input reg to match the destination reg so that the tcg backend can implement a \"move if true\" operation. */ if (swap_commutative(args[0], &args[4], &args[3])) { args[5] = tcg_invert_cond(args[5]); } break; CASE_OP_32_64(add2): swap_commutative(args[0], &args[2], &args[4]); swap_commutative(args[1], &args[3], &args[5]); break; CASE_OP_32_64(mulu2): CASE_OP_32_64(muls2): swap_commutative(args[0], &args[2], &args[3]); break; case INDEX_op_brcond2_i32: if (swap_commutative2(&args[0], &args[2])) { args[4] = tcg_swap_cond(args[4]); } break; case INDEX_op_setcond2_i32: if (swap_commutative2(&args[1], &args[3])) { args[5] = tcg_swap_cond(args[5]); } break; default: break; } /* Simplify expressions for \"shift/rot r, 0, a => movi r, 0\", and \"sub r, 0, a => neg r, a\" case. */ switch (opc) { CASE_OP_32_64(shl): CASE_OP_32_64(shr): CASE_OP_32_64(sar): CASE_OP_32_64(rotl): CASE_OP_32_64(rotr): if (temps[args[1]].state == TCG_TEMP_CONST && temps[args[1]].val == 0) { tcg_opt_gen_movi(s, op, args, args[0], 0); continue; } break; CASE_OP_32_64(sub): { TCGOpcode neg_op; bool have_neg; if (temps[args[2]].state == TCG_TEMP_CONST) { /* Proceed with possible constant folding. 
*/ break; } if (opc == INDEX_op_sub_i32) { neg_op = INDEX_op_neg_i32; have_neg = TCG_TARGET_HAS_neg_i32; } else { neg_op = INDEX_op_neg_i64; have_neg = TCG_TARGET_HAS_neg_i64; } if (!have_neg) { break; } if (temps[args[1]].state == TCG_TEMP_CONST && temps[args[1]].val == 0) { op->opc = neg_op; reset_temp(args[0]); args[1] = args[2]; continue; } } break; CASE_OP_32_64(xor): CASE_OP_32_64(nand): if (temps[args[1]].state != TCG_TEMP_CONST && temps[args[2]].state == TCG_TEMP_CONST && temps[args[2]].val == -1) { i = 1; goto try_not; } break; CASE_OP_32_64(nor): if (temps[args[1]].state != TCG_TEMP_CONST && temps[args[2]].state == TCG_TEMP_CONST && temps[args[2]].val == 0) { i = 1; goto try_not; } break; CASE_OP_32_64(andc): if (temps[args[2]].state != TCG_TEMP_CONST && temps[args[1]].state == TCG_TEMP_CONST && temps[args[1]].val == -1) { i = 2; goto try_not; } break; CASE_OP_32_64(orc): CASE_OP_32_64(eqv): if (temps[args[2]].state != TCG_TEMP_CONST && temps[args[1]].state == TCG_TEMP_CONST && temps[args[1]].val == 0) { i = 2; goto try_not; } break; try_not: { TCGOpcode not_op; bool have_not; if (def->flags & TCG_OPF_64BIT) { not_op = INDEX_op_not_i64; have_not = TCG_TARGET_HAS_not_i64; } else { not_op = INDEX_op_not_i32; have_not = TCG_TARGET_HAS_not_i32; } if (!have_not) { break; } op->opc = not_op; reset_temp(args[0]); args[1] = args[i]; continue; } default: break; } /* Simplify expression for \"op r, a, const => mov r, a\" cases */ switch (opc) { CASE_OP_32_64(add): CASE_OP_32_64(sub): CASE_OP_32_64(shl): CASE_OP_32_64(shr): CASE_OP_32_64(sar): CASE_OP_32_64(rotl): CASE_OP_32_64(rotr): CASE_OP_32_64(or): CASE_OP_32_64(xor): CASE_OP_32_64(andc): if (temps[args[1]].state != TCG_TEMP_CONST && temps[args[2]].state == TCG_TEMP_CONST && temps[args[2]].val == 0) { tcg_opt_gen_mov(s, op, args, args[0], args[1]); continue; } break; CASE_OP_32_64(and): CASE_OP_32_64(orc): CASE_OP_32_64(eqv): if (temps[args[1]].state != TCG_TEMP_CONST && temps[args[2]].state == TCG_TEMP_CONST && temps[args[2]].val == -1) { tcg_opt_gen_mov(s, op, args, args[0], args[1]); continue; } break; default: break; } /* Simplify using known-zero bits. Currently only ops with a single output argument is supported. */ mask = -1; affected = -1; switch (opc) { CASE_OP_32_64(ext8s): if ((temps[args[1]].mask & 0x80) != 0) { break; } CASE_OP_32_64(ext8u): mask = 0xff; goto and_const; CASE_OP_32_64(ext16s): if ((temps[args[1]].mask & 0x8000) != 0) { break; } CASE_OP_32_64(ext16u): mask = 0xffff; goto and_const; case INDEX_op_ext32s_i64: if ((temps[args[1]].mask & 0x80000000) != 0) { break; } case INDEX_op_ext32u_i64: mask = 0xffffffffU; goto and_const; CASE_OP_32_64(and): mask = temps[args[2]].mask; if (temps[args[2]].state == TCG_TEMP_CONST) { and_const: affected = temps[args[1]].mask & ~mask; } mask = temps[args[1]].mask & mask; break; CASE_OP_32_64(andc): /* Known-zeros does not imply known-ones. Therefore unless args[2] is constant, we can't infer anything from it. */ if (temps[args[2]].state == TCG_TEMP_CONST) { mask = ~temps[args[2]].mask; goto and_const; } /* But we certainly know nothing outside args[1] may be set. 
*/ mask = temps[args[1]].mask; break; case INDEX_op_sar_i32: if (temps[args[2]].state == TCG_TEMP_CONST) { tmp = temps[args[2]].val & 31; mask = (int32_t)temps[args[1]].mask >> tmp; } break; case INDEX_op_sar_i64: if (temps[args[2]].state == TCG_TEMP_CONST) { tmp = temps[args[2]].val & 63; mask = (int64_t)temps[args[1]].mask >> tmp; } break; case INDEX_op_shr_i32: if (temps[args[2]].state == TCG_TEMP_CONST) { tmp = temps[args[2]].val & 31; mask = (uint32_t)temps[args[1]].mask >> tmp; } break; case INDEX_op_shr_i64: if (temps[args[2]].state == TCG_TEMP_CONST) { tmp = temps[args[2]].val & 63; mask = (uint64_t)temps[args[1]].mask >> tmp; } break; case INDEX_op_trunc_shr_i32: mask = (uint64_t)temps[args[1]].mask >> args[2]; break; CASE_OP_32_64(shl): if (temps[args[2]].state == TCG_TEMP_CONST) { tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1); mask = temps[args[1]].mask << tmp; } break; CASE_OP_32_64(neg): /* Set to 1 all bits to the left of the rightmost. */ mask = -(temps[args[1]].mask & -temps[args[1]].mask); break; CASE_OP_32_64(deposit): mask = deposit64(temps[args[1]].mask, args[3], args[4], temps[args[2]].mask); break; CASE_OP_32_64(or): CASE_OP_32_64(xor): mask = temps[args[1]].mask | temps[args[2]].mask; break; CASE_OP_32_64(setcond): case INDEX_op_setcond2_i32: mask = 1; break; CASE_OP_32_64(movcond): mask = temps[args[3]].mask | temps[args[4]].mask; break; CASE_OP_32_64(ld8u): mask = 0xff; break; CASE_OP_32_64(ld16u): mask = 0xffff; break; case INDEX_op_ld32u_i64: mask = 0xffffffffu; break; CASE_OP_32_64(qemu_ld): { TCGMemOpIdx oi = args[nb_oargs + nb_iargs]; TCGMemOp mop = get_memop(oi); if (!(mop & MO_SIGN)) { mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; } } break; default: break; } /* 32-bit ops generate 32-bit results. For the result is zero test below, we can ignore high bits, but for further optimizations we need to record that the high bits contain garbage. */ partmask = mask; if (!(def->flags & TCG_OPF_64BIT)) { mask |= ~(tcg_target_ulong)0xffffffffu; partmask &= 0xffffffffu; affected &= 0xffffffffu; } if (partmask == 0) { assert(nb_oargs == 1); tcg_opt_gen_movi(s, op, args, args[0], 0); continue; } if (affected == 0) { assert(nb_oargs == 1); tcg_opt_gen_mov(s, op, args, args[0], args[1]); continue; } /* Simplify expression for \"op r, a, 0 => movi r, 0\" cases */ switch (opc) { CASE_OP_32_64(and): CASE_OP_32_64(mul): CASE_OP_32_64(muluh): CASE_OP_32_64(mulsh): if ((temps[args[2]].state == TCG_TEMP_CONST && temps[args[2]].val == 0)) { tcg_opt_gen_movi(s, op, args, args[0], 0); continue; } break; default: break; } /* Simplify expression for \"op r, a, a => mov r, a\" cases */ switch (opc) { CASE_OP_32_64(or): CASE_OP_32_64(and): if (temps_are_copies(args[1], args[2])) { tcg_opt_gen_mov(s, op, args, args[0], args[1]); continue; } break; default: break; } /* Simplify expression for \"op r, a, a => movi r, 0\" cases */ switch (opc) { CASE_OP_32_64(andc): CASE_OP_32_64(sub): CASE_OP_32_64(xor): if (temps_are_copies(args[1], args[2])) { tcg_opt_gen_movi(s, op, args, args[0], 0); continue; } break; default: break; } /* Propagate constants through copy operations and do constant folding. Constants will be substituted to arguments by register allocator where needed and possible. Also detect copies. 
*/ switch (opc) { CASE_OP_32_64(mov): tcg_opt_gen_mov(s, op, args, args[0], args[1]); break; CASE_OP_32_64(movi): tcg_opt_gen_movi(s, op, args, args[0], args[1]); break; CASE_OP_32_64(not): CASE_OP_32_64(neg): CASE_OP_32_64(ext8s): CASE_OP_32_64(ext8u): CASE_OP_32_64(ext16s): CASE_OP_32_64(ext16u): case INDEX_op_ext32s_i64: case INDEX_op_ext32u_i64: if (temps[args[1]].state == TCG_TEMP_CONST) { tmp = do_constant_folding(opc, temps[args[1]].val, 0); tcg_opt_gen_movi(s, op, args, args[0], tmp); break; } goto do_default; case INDEX_op_trunc_shr_i32: if (temps[args[1]].state == TCG_TEMP_CONST) { tmp = do_constant_folding(opc, temps[args[1]].val, args[2]); tcg_opt_gen_movi(s, op, args, args[0], tmp); break; } goto do_default; CASE_OP_32_64(add): CASE_OP_32_64(sub): CASE_OP_32_64(mul): CASE_OP_32_64(or): CASE_OP_32_64(and): CASE_OP_32_64(xor): CASE_OP_32_64(shl): CASE_OP_32_64(shr): CASE_OP_32_64(sar): CASE_OP_32_64(rotl): CASE_OP_32_64(rotr): CASE_OP_32_64(andc): CASE_OP_32_64(orc): CASE_OP_32_64(eqv): CASE_OP_32_64(nand): CASE_OP_32_64(nor): CASE_OP_32_64(muluh): CASE_OP_32_64(mulsh): CASE_OP_32_64(div): CASE_OP_32_64(divu): CASE_OP_32_64(rem): CASE_OP_32_64(remu): if (temps[args[1]].state == TCG_TEMP_CONST && temps[args[2]].state == TCG_TEMP_CONST) { tmp = do_constant_folding(opc, temps[args[1]].val, temps[args[2]].val); tcg_opt_gen_movi(s, op, args, args[0], tmp); break; } goto do_default; CASE_OP_32_64(deposit): if (temps[args[1]].state == TCG_TEMP_CONST && temps[args[2]].state == TCG_TEMP_CONST) { tmp = deposit64(temps[args[1]].val, args[3], args[4], temps[args[2]].val); tcg_opt_gen_movi(s, op, args, args[0], tmp); break; } goto do_default; CASE_OP_32_64(setcond): tmp = do_constant_folding_cond(opc, args[1], args[2], args[3]); if (tmp != 2) { tcg_opt_gen_movi(s, op, args, args[0], tmp); break; } goto do_default; CASE_OP_32_64(brcond): tmp = do_constant_folding_cond(opc, args[0], args[1], args[2]); if (tmp != 2) { if (tmp) { reset_all_temps(nb_temps); op->opc = INDEX_op_br; args[0] = args[3]; } else { tcg_op_remove(s, op); } break; } goto do_default; CASE_OP_32_64(movcond): tmp = do_constant_folding_cond(opc, args[1], args[2], args[5]); if (tmp != 2) { tcg_opt_gen_mov(s, op, args, args[0], args[4-tmp]); break; } goto do_default; case INDEX_op_add2_i32: case INDEX_op_sub2_i32: if (temps[args[2]].state == TCG_TEMP_CONST && temps[args[3]].state == TCG_TEMP_CONST && temps[args[4]].state == TCG_TEMP_CONST && temps[args[5]].state == TCG_TEMP_CONST) { uint32_t al = temps[args[2]].val; uint32_t ah = temps[args[3]].val; uint32_t bl = temps[args[4]].val; uint32_t bh = temps[args[5]].val; uint64_t a = ((uint64_t)ah << 32) | al; uint64_t b = ((uint64_t)bh << 32) | bl; TCGArg rl, rh; TCGOp *op2 = insert_op_before(s, op, INDEX_op_movi_i32, 2); TCGArg *args2 = &s->gen_opparam_buf[op2->args]; if (opc == INDEX_op_add2_i32) { a += b; } else { a -= b; } rl = args[0]; rh = args[1]; tcg_opt_gen_movi(s, op, args, rl, (int32_t)a); tcg_opt_gen_movi(s, op2, args2, rh, (int32_t)(a >> 32)); /* We've done all we need to do with the movi. Skip it. 
*/ oi_next = op2->next; break; } goto do_default; case INDEX_op_mulu2_i32: if (temps[args[2]].state == TCG_TEMP_CONST && temps[args[3]].state == TCG_TEMP_CONST) { uint32_t a = temps[args[2]].val; uint32_t b = temps[args[3]].val; uint64_t r = (uint64_t)a * b; TCGArg rl, rh; TCGOp *op2 = insert_op_before(s, op, INDEX_op_movi_i32, 2); TCGArg *args2 = &s->gen_opparam_buf[op2->args]; rl = args[0]; rh = args[1]; tcg_opt_gen_movi(s, op, args, rl, (int32_t)r); tcg_opt_gen_movi(s, op2, args2, rh, (int32_t)(r >> 32)); /* We've done all we need to do with the movi. Skip it. */ oi_next = op2->next; break; } goto do_default; case INDEX_op_brcond2_i32: tmp = do_constant_folding_cond2(&args[0], &args[2], args[4]); if (tmp != 2) { if (tmp) { do_brcond_true: reset_all_temps(nb_temps); op->opc = INDEX_op_br; args[0] = args[5]; } else { do_brcond_false: tcg_op_remove(s, op); } } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE) && temps[args[2]].state == TCG_TEMP_CONST && temps[args[3]].state == TCG_TEMP_CONST && temps[args[2]].val == 0 && temps[args[3]].val == 0) { /* Simplify LT/GE comparisons vs zero to a single compare vs the high word of the input. */ do_brcond_high: reset_all_temps(nb_temps); op->opc = INDEX_op_brcond_i32; args[0] = args[1]; args[1] = args[3]; args[2] = args[4]; args[3] = args[5]; } else if (args[4] == TCG_COND_EQ) { /* Simplify EQ comparisons where one of the pairs can be simplified. */ tmp = do_constant_folding_cond(INDEX_op_brcond_i32, args[0], args[2], TCG_COND_EQ); if (tmp == 0) { goto do_brcond_false; } else if (tmp == 1) { goto do_brcond_high; } tmp = do_constant_folding_cond(INDEX_op_brcond_i32, args[1], args[3], TCG_COND_EQ); if (tmp == 0) { goto do_brcond_false; } else if (tmp != 1) { goto do_default; } do_brcond_low: reset_all_temps(nb_temps); op->opc = INDEX_op_brcond_i32; args[1] = args[2]; args[2] = args[4]; args[3] = args[5]; } else if (args[4] == TCG_COND_NE) { /* Simplify NE comparisons where one of the pairs can be simplified. */ tmp = do_constant_folding_cond(INDEX_op_brcond_i32, args[0], args[2], TCG_COND_NE); if (tmp == 0) { goto do_brcond_high; } else if (tmp == 1) { goto do_brcond_true; } tmp = do_constant_folding_cond(INDEX_op_brcond_i32, args[1], args[3], TCG_COND_NE); if (tmp == 0) { goto do_brcond_low; } else if (tmp == 1) { goto do_brcond_true; } goto do_default; } else { goto do_default; } break; case INDEX_op_setcond2_i32: tmp = do_constant_folding_cond2(&args[1], &args[3], args[5]); if (tmp != 2) { do_setcond_const: tcg_opt_gen_movi(s, op, args, args[0], tmp); } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE) && temps[args[3]].state == TCG_TEMP_CONST && temps[args[4]].state == TCG_TEMP_CONST && temps[args[3]].val == 0 && temps[args[4]].val == 0) { /* Simplify LT/GE comparisons vs zero to a single compare vs the high word of the input. */ do_setcond_high: reset_temp(args[0]); temps[args[0]].mask = 1; op->opc = INDEX_op_setcond_i32; args[1] = args[2]; args[2] = args[4]; args[3] = args[5]; } else if (args[5] == TCG_COND_EQ) { /* Simplify EQ comparisons where one of the pairs can be simplified. 
*/ tmp = do_constant_folding_cond(INDEX_op_setcond_i32, args[1], args[3], TCG_COND_EQ); if (tmp == 0) { goto do_setcond_const; } else if (tmp == 1) { goto do_setcond_high; } tmp = do_constant_folding_cond(INDEX_op_setcond_i32, args[2], args[4], TCG_COND_EQ); if (tmp == 0) { goto do_setcond_high; } else if (tmp != 1) { goto do_default; } do_setcond_low: reset_temp(args[0]); temps[args[0]].mask = 1; op->opc = INDEX_op_setcond_i32; args[2] = args[3]; args[3] = args[5]; } else if (args[5] == TCG_COND_NE) { /* Simplify NE comparisons where one of the pairs can be simplified. */ tmp = do_constant_folding_cond(INDEX_op_setcond_i32, args[1], args[3], TCG_COND_NE); if (tmp == 0) { goto do_setcond_high; } else if (tmp == 1) { goto do_setcond_const; } tmp = do_constant_folding_cond(INDEX_op_setcond_i32, args[2], args[4], TCG_COND_NE); if (tmp == 0) { goto do_setcond_low; } else if (tmp == 1) { goto do_setcond_const; } goto do_default; } else { goto do_default; } break; case INDEX_op_call: if (!(args[nb_oargs + nb_iargs + 1] & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { for (i = 0; i < nb_globals; i++) { reset_temp(i); } } goto do_reset_output; default: do_default: /* Default case: we know nothing about operation (or were unable to compute the operation result) so no propagation is done. We trash everything if the operation is the end of a basic block, otherwise we only trash the output args. \"mask\" is the non-zero bits mask for the first output arg. */ if (def->flags & TCG_OPF_BB_END) { reset_all_temps(nb_temps); } else { do_reset_output: for (i = 0; i < nb_oargs; i++) { reset_temp(args[i]); /* Save the corresponding known-zero bits mask for the first output argument (only one supported so far). */ if (i == 0) { temps[args[i]].mask = mask; } } } break; } } }"} {"target": 0, "idx": 20098, "func": "static int net_host_check_device(const char *device) { int i; const char *valid_param_list[] = { \"tap\", \"socket\", \"dump\" #ifdef CONFIG_NET_BRIDGE , \"bridge\" #endif #ifdef CONFIG_SLIRP ,\"user\" #endif #ifdef CONFIG_VDE ,\"vde\" #endif }; for (i = 0; i < ARRAY_SIZE(valid_param_list); i++) { if (!strncmp(valid_param_list[i], device, strlen(valid_param_list[i]))) return 1; } return 0; }"} {"target": 0, "idx": 20106, "func": "uint32_t pci_default_read_config(PCIDevice *d, uint32_t address, int len) { uint32_t val; switch(len) { default: case 4: if (address <= 0xfc) { val = pci_get_long(d->config + address); break; } /* fall through */ case 2: if (address <= 0xfe) { val = pci_get_word(d->config + address); break; } /* fall through */ case 1: val = pci_get_byte(d->config + address); break; } return val; }"} {"target": 1, "idx": 20130, "func": "static void l2x0_class_init(ObjectClass *klass, void *data) { SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); k->init = l2x0_priv_init; dc->vmsd = &vmstate_l2x0; dc->no_user = 1; dc->props = l2x0_properties; dc->reset = l2x0_priv_reset; }"} {"target": 0, "idx": 20136, "func": "int MPV_encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; int i; avctx->pix_fmt = PIX_FMT_YUV420P; s->bit_rate = avctx->bit_rate; s->bit_rate_tolerance = avctx->bit_rate_tolerance; s->frame_rate = avctx->frame_rate; s->width = avctx->width; s->height = avctx->height; s->gop_size = avctx->gop_size; s->rtp_mode = avctx->rtp_mode; s->rtp_payload_size = avctx->rtp_payload_size; if (avctx->rtp_callback) s->rtp_callback = avctx->rtp_callback; s->qmin= avctx->qmin; s->qmax= avctx->qmax; s->max_qdiff= avctx->max_qdiff; 
s->qcompress= avctx->qcompress; s->qblur= avctx->qblur; s->b_quant_factor= avctx->b_quant_factor; s->avctx = avctx; s->aspect_ratio_info= avctx->aspect_ratio_info; s->flags= avctx->flags; s->max_b_frames= avctx->max_b_frames; s->rc_strategy= avctx->rc_strategy; s->b_frame_strategy= avctx->b_frame_strategy; s->codec_id= avctx->codec->id; if (s->gop_size <= 1) { s->intra_only = 1; s->gop_size = 12; } else { s->intra_only = 0; } /* ME algorithm */ if (avctx->me_method == 0) /* For compatibility */ s->me_method = motion_estimation_method; else s->me_method = avctx->me_method; /* Fixed QSCALE */ s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE); switch(avctx->codec->id) { case CODEC_ID_MPEG1VIDEO: s->out_format = FMT_MPEG1; avctx->delay=0; //FIXME not sure, should check the spec break; case CODEC_ID_MJPEG: s->out_format = FMT_MJPEG; s->intra_only = 1; /* force intra only for jpeg */ s->mjpeg_write_tables = 1; /* write all tables */ s->mjpeg_data_only_frames = 0; /* write all the needed headers */ s->mjpeg_vsample[0] = 2; /* set up default sampling factors */ s->mjpeg_vsample[1] = 1; /* the only currently supported values */ s->mjpeg_vsample[2] = 1; s->mjpeg_hsample[0] = 2; s->mjpeg_hsample[1] = 1; s->mjpeg_hsample[2] = 1; if (mjpeg_init(s) < 0) return -1; avctx->delay=0; break; case CODEC_ID_H263: if (h263_get_picture_format(s->width, s->height) == 7) { printf(\"Input picture size isn't suitable for h263 codec! try h263+\\n\"); return -1; } s->out_format = FMT_H263; avctx->delay=0; break; case CODEC_ID_H263P: s->out_format = FMT_H263; s->rtp_mode = 1; s->rtp_payload_size = 1200; s->h263_plus = 1; s->unrestricted_mv = 1; s->h263_aic = 1; /* These are just to be sure */ s->umvplus = 0; s->umvplus_dec = 0; avctx->delay=0; break; case CODEC_ID_RV10: s->out_format = FMT_H263; s->h263_rv10 = 1; avctx->delay=0; break; case CODEC_ID_MPEG4: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; s->has_b_frames= s->max_b_frames ? 1 : 0; s->low_delay=0; avctx->delay= s->low_delay ? 
0 : (s->max_b_frames + 1); break; case CODEC_ID_MSMPEG4V1: s->out_format = FMT_H263; s->h263_msmpeg4 = 1; s->h263_pred = 1; s->unrestricted_mv = 1; s->msmpeg4_version= 1; avctx->delay=0; break; case CODEC_ID_MSMPEG4V2: s->out_format = FMT_H263; s->h263_msmpeg4 = 1; s->h263_pred = 1; s->unrestricted_mv = 1; s->msmpeg4_version= 2; avctx->delay=0; break; case CODEC_ID_MSMPEG4V3: s->out_format = FMT_H263; s->h263_msmpeg4 = 1; s->h263_pred = 1; s->unrestricted_mv = 1; s->msmpeg4_version= 3; avctx->delay=0; break; default: return -1; } if((s->flags&CODEC_FLAG_4MV) && !(s->flags&CODEC_FLAG_HQ)){ printf(\"4MV is currently only supported in HQ mode\\n\"); return -1; } { /* set up some save defaults, some codecs might override them later */ static int done=0; if(!done){ int i; done=1; memset(default_mv_penalty, 0, sizeof(UINT16)*(MAX_FCODE+1)*(2*MAX_MV+1)); memset(default_fcode_tab , 0, sizeof(UINT8)*(2*MAX_MV+1)); for(i=-16; i<16; i++){ default_fcode_tab[i + MAX_MV]= 1; } } } s->mv_penalty= default_mv_penalty; s->fcode_tab= default_fcode_tab; if (s->out_format == FMT_H263) h263_encode_init(s); else if (s->out_format == FMT_MPEG1) mpeg1_encode_init(s); /* dont use mv_penalty table for crap MV as it would be confused */ if (s->me_method < ME_EPZS) s->mv_penalty = default_mv_penalty; s->encoding = 1; /* init */ if (MPV_common_init(s) < 0) return -1; /* init default q matrix */ for(i=0;i<64;i++) { if(s->out_format == FMT_H263) s->intra_matrix[i] = default_non_intra_matrix[i]; else s->intra_matrix[i] = default_intra_matrix[i]; s->inter_matrix[i] = default_non_intra_matrix[i]; } /* precompute matrix */ /* for mjpeg, we do include qscale in the matrix */ if (s->out_format != FMT_MJPEG) { convert_matrix(s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias); convert_matrix(s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias, s->inter_matrix, s->inter_quant_bias); } if(ff_rate_control_init(s) < 0) return -1; s->picture_number = 0; s->picture_in_gop_number = 0; s->fake_picture_number = 0; /* motion detector init */ s->f_code = 1; s->b_code = 1; return 0; }"} {"target": 0, "idx": 20138, "func": "static void inline xan_wc3_output_pixel_run(XanContext *s, unsigned char *pixel_buffer, int x, int y, int pixel_count) { int stride; int line_inc; int index; int current_x; int width = s->avctx->width; unsigned char pix; unsigned char *palette_plane; unsigned char *y_plane; unsigned char *u_plane; unsigned char *v_plane; unsigned char *rgb_plane; unsigned short *rgb16_plane; unsigned short *palette16; unsigned int *rgb32_plane; unsigned int *palette32; switch (s->avctx->pix_fmt) { case PIX_FMT_PAL8: palette_plane = s->current_frame.data[0]; stride = s->current_frame.linesize[0]; line_inc = stride - width; index = y * stride + x; current_x = x; while(pixel_count--) { /* don't do a memcpy() here; keyframes generally copy an entire * frame of data and the stride needs to be accounted for */ palette_plane[index++] = *pixel_buffer++; ADVANCE_CURRENT_X(); } break; case PIX_FMT_RGB555: case PIX_FMT_RGB565: rgb16_plane = (unsigned short *)s->current_frame.data[0]; palette16 = (unsigned short *)s->palette; stride = s->current_frame.linesize[0] / 2; line_inc = stride - width; index = y * stride + x; current_x = x; while(pixel_count--) { rgb16_plane[index++] = palette16[*pixel_buffer++]; ADVANCE_CURRENT_X(); } break; case PIX_FMT_RGB24: case PIX_FMT_BGR24: rgb_plane = s->current_frame.data[0]; stride = s->current_frame.linesize[0]; line_inc = stride - width * 3; index 
= y * stride + x * 3; current_x = x; while(pixel_count--) { pix = *pixel_buffer++; rgb_plane[index++] = s->palette[pix * 4 + 0]; rgb_plane[index++] = s->palette[pix * 4 + 1]; rgb_plane[index++] = s->palette[pix * 4 + 2]; ADVANCE_CURRENT_X(); } break; case PIX_FMT_RGBA32: rgb32_plane = (unsigned int *)s->current_frame.data[0]; palette32 = (unsigned int *)s->palette; stride = s->current_frame.linesize[0] / 4; line_inc = stride - width; index = y * stride + x; current_x = x; while(pixel_count--) { rgb32_plane[index++] = palette32[*pixel_buffer++]; ADVANCE_CURRENT_X(); } break; case PIX_FMT_YUV444P: y_plane = s->current_frame.data[0]; u_plane = s->current_frame.data[1]; v_plane = s->current_frame.data[2]; stride = s->current_frame.linesize[0]; line_inc = stride - width; index = y * stride + x; current_x = x; while(pixel_count--) { pix = *pixel_buffer++; y_plane[index] = s->palette[pix * 4 + 0]; u_plane[index] = s->palette[pix * 4 + 1]; v_plane[index] = s->palette[pix * 4 + 2]; index++; ADVANCE_CURRENT_X(); } break; default: av_log(s->avctx, AV_LOG_ERROR, \" Xan WC3: Unhandled colorspace\\n\"); break; } }"} {"target": 0, "idx": 20144, "func": "const char *bdrv_get_format_name(BlockDriverState *bs) { return bs->drv ? bs->drv->format_name : NULL; }"} {"target": 1, "idx": 20174, "func": "static int mov_read_hdlr(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { AVStream *st = c->fc->streams[c->fc->nb_streams-1]; uint32_t type; uint32_t ctype; get_byte(pb); /* version */ get_be24(pb); /* flags */ /* component type */ ctype = get_le32(pb); type = get_le32(pb); /* component subtype */ dprintf(c->fc, \"ctype= %c%c%c%c (0x%08x)\\n\", *((char *)&ctype), ((char *)&ctype)[1], ((char *)&ctype)[2], ((char *)&ctype)[3], (int) ctype); dprintf(c->fc, \"stype= %c%c%c%c\\n\", *((char *)&type), ((char *)&type)[1], ((char *)&type)[2], ((char *)&type)[3]); if(!ctype) c->isom = 1; if (type == MKTAG('v','i','d','e')) st->codec->codec_type = CODEC_TYPE_VIDEO; else if(type == MKTAG('s','o','u','n')) st->codec->codec_type = CODEC_TYPE_AUDIO; else if(type == MKTAG('m','1','a',' ')) st->codec->codec_id = CODEC_ID_MP2; else if(type == MKTAG('s','u','b','p')) { st->codec->codec_type = CODEC_TYPE_SUBTITLE; } get_be32(pb); /* component manufacture */ get_be32(pb); /* component flags */ get_be32(pb); /* component flags mask */ if(atom.size <= 24) return 0; /* nothing left to read */ url_fskip(pb, atom.size - (url_ftell(pb) - atom.offset)); return 0; }"} {"target": 1, "idx": 20179, "func": "static inline void powerpc_excp(CPUPPCState *env, int excp_model, int excp) { target_ulong msr, new_msr, vector; int srr0, srr1, asrr0, asrr1; int lpes0, lpes1, lev; if (0) { /* XXX: find a suitable condition to enable the hypervisor mode */ lpes0 = (env->spr[SPR_LPCR] >> 1) & 1; lpes1 = (env->spr[SPR_LPCR] >> 2) & 1; } else { /* Those values ensure we won't enter the hypervisor mode */ lpes0 = 0; lpes1 = 1; } qemu_log_mask(CPU_LOG_INT, \"Raise exception at \" TARGET_FMT_lx \" => %08x (%02x)\\n\", env->nip, excp, env->error_code); /* new srr1 value excluding must-be-zero bits */ msr = env->msr & ~0x783f0000ULL; /* new interrupt handler msr */ new_msr = env->msr & ((target_ulong)1 << MSR_ME); /* target registers */ srr0 = SPR_SRR0; srr1 = SPR_SRR1; asrr0 = -1; asrr1 = -1; switch (excp) { case POWERPC_EXCP_NONE: /* Should never happen */ return; case POWERPC_EXCP_CRITICAL: /* Critical input */ switch (excp_model) { case POWERPC_EXCP_40x: srr0 = SPR_40x_SRR2; srr1 = SPR_40x_SRR3; break; case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_CSRR0; srr1 = 
SPR_BOOKE_CSRR1; break; case POWERPC_EXCP_G2: break; default: goto excp_invalid; } goto store_next; case POWERPC_EXCP_MCHECK: /* Machine check exception */ if (msr_me == 0) { /* Machine check exception is not enabled. * Enter checkstop state. */ if (qemu_log_enabled()) { qemu_log(\"Machine check while not allowed. \" \"Entering checkstop state\\n\"); } else { fprintf(stderr, \"Machine check while not allowed. \" \"Entering checkstop state\\n\"); } env->halted = 1; env->interrupt_request |= CPU_INTERRUPT_EXITTB; } if (0) { /* XXX: find a suitable condition to enable the hypervisor mode */ new_msr |= (target_ulong)MSR_HVB; } /* machine check exceptions don't have ME set */ new_msr &= ~((target_ulong)1 << MSR_ME); /* XXX: should also have something loaded in DAR / DSISR */ switch (excp_model) { case POWERPC_EXCP_40x: srr0 = SPR_40x_SRR2; srr1 = SPR_40x_SRR3; break; case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_MCSRR0; srr1 = SPR_BOOKE_MCSRR1; asrr0 = SPR_BOOKE_CSRR0; asrr1 = SPR_BOOKE_CSRR1; break; default: break; } goto store_next; case POWERPC_EXCP_DSI: /* Data storage exception */ LOG_EXCP(\"DSI exception: DSISR=\" TARGET_FMT_lx\" DAR=\" TARGET_FMT_lx \"\\n\", env->spr[SPR_DSISR], env->spr[SPR_DAR]); if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_ISI: /* Instruction storage exception */ LOG_EXCP(\"ISI exception: msr=\" TARGET_FMT_lx \", nip=\" TARGET_FMT_lx \"\\n\", msr, env->nip); if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; msr |= env->error_code; goto store_next; case POWERPC_EXCP_EXTERNAL: /* External input */ if (lpes0 == 1) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_ALIGN: /* Alignment exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; /* XXX: this is false */ /* Get rS/rD and rA from faulting opcode */ env->spr[SPR_DSISR] |= (ldl_code((env->nip - 4)) & 0x03FF0000) >> 16; goto store_current; case POWERPC_EXCP_PROGRAM: /* Program exception */ switch (env->error_code & ~0xF) { case POWERPC_EXCP_FP: if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) { LOG_EXCP(\"Ignore floating point exception\\n\"); env->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; return; } if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; msr |= 0x00100000; if (msr_fe0 == msr_fe1) goto store_next; msr |= 0x00010000; break; case POWERPC_EXCP_INVAL: LOG_EXCP(\"Invalid instruction at \" TARGET_FMT_lx \"\\n\", env->nip); if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; msr |= 0x00080000; env->spr[SPR_BOOKE_ESR] = ESR_PIL; break; case POWERPC_EXCP_PRIV: if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; msr |= 0x00040000; env->spr[SPR_BOOKE_ESR] = ESR_PPR; break; case POWERPC_EXCP_TRAP: if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; msr |= 0x00020000; env->spr[SPR_BOOKE_ESR] = ESR_PTR; break; default: /* Should never occur */ cpu_abort(env, \"Invalid program exception %d. 
Aborting\\n\", env->error_code); break; } goto store_current; case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_current; case POWERPC_EXCP_SYSCALL: /* System call exception */ dump_syscall(env); lev = env->error_code; if ((lev == 1) && cpu_ppc_hypercall) { cpu_ppc_hypercall(env); return; } if (lev == 1 || (lpes0 == 0 && lpes1 == 0)) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ goto store_current; case POWERPC_EXCP_DECR: /* Decrementer exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ /* FIT on 4xx */ LOG_EXCP(\"FIT exception\\n\"); goto store_next; case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ LOG_EXCP(\"WDT exception\\n\"); switch (excp_model) { case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_CSRR0; srr1 = SPR_BOOKE_CSRR1; break; default: break; } goto store_next; case POWERPC_EXCP_DTLB: /* Data TLB error */ goto store_next; case POWERPC_EXCP_ITLB: /* Instruction TLB error */ goto store_next; case POWERPC_EXCP_DEBUG: /* Debug interrupt */ switch (excp_model) { case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_DSRR0; srr1 = SPR_BOOKE_DSRR1; asrr0 = SPR_BOOKE_CSRR0; asrr1 = SPR_BOOKE_CSRR1; break; default: break; } /* XXX: TODO */ cpu_abort(env, \"Debug exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */ env->spr[SPR_BOOKE_ESR] = ESR_SPV; goto store_current; case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */ /* XXX: TODO */ cpu_abort(env, \"Embedded floating point data exception \" \"is not implemented yet !\\n\"); env->spr[SPR_BOOKE_ESR] = ESR_SPV; goto store_next; case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */ /* XXX: TODO */ cpu_abort(env, \"Embedded floating point round exception \" \"is not implemented yet !\\n\"); env->spr[SPR_BOOKE_ESR] = ESR_SPV; goto store_next; case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */ /* XXX: TODO */ cpu_abort(env, \"Performance counter exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ goto store_next; case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ srr0 = SPR_BOOKE_CSRR0; srr1 = SPR_BOOKE_CSRR1; goto store_next; case POWERPC_EXCP_RESET: /* System reset exception */ if (msr_pow) { /* indicate that we resumed from power save mode */ msr |= 0x10000; } else { new_msr &= ~((target_ulong)1 << MSR_ME); } if (0) { /* XXX: find a suitable condition to enable the hypervisor mode */ new_msr |= (target_ulong)MSR_HVB; } goto store_next; case POWERPC_EXCP_DSEG: /* Data segment exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_ISEG: /* Instruction segment exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); goto store_next; case POWERPC_EXCP_TRACE: /* Trace exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_next; case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); goto store_next; case POWERPC_EXCP_HISI: /* 
Hypervisor instruction storage exception */ srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); goto store_next; case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); goto store_next; case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */ srr0 = SPR_HSRR0; srr1 = SPR_HSRR1; new_msr |= (target_ulong)MSR_HVB; new_msr |= env->msr & ((target_ulong)1 << MSR_RI); goto store_next; case POWERPC_EXCP_VPU: /* Vector unavailable exception */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; goto store_current; case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ LOG_EXCP(\"PIT exception\\n\"); goto store_next; case POWERPC_EXCP_IO: /* IO error exception */ /* XXX: TODO */ cpu_abort(env, \"601 IO error exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_RUNM: /* Run mode exception */ /* XXX: TODO */ cpu_abort(env, \"601 run mode exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_EMUL: /* Emulation trap exception */ /* XXX: TODO */ cpu_abort(env, \"602 emulation trap exception \" \"is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ if (lpes1 == 0) /* XXX: check this */ new_msr |= (target_ulong)MSR_HVB; switch (excp_model) { case POWERPC_EXCP_602: case POWERPC_EXCP_603: case POWERPC_EXCP_603E: case POWERPC_EXCP_G2: goto tlb_miss_tgpr; case POWERPC_EXCP_7x5: goto tlb_miss; case POWERPC_EXCP_74xx: goto tlb_miss_74xx; default: cpu_abort(env, \"Invalid instruction TLB miss exception\\n\"); break; } break; case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ if (lpes1 == 0) /* XXX: check this */ new_msr |= (target_ulong)MSR_HVB; switch (excp_model) { case POWERPC_EXCP_602: case POWERPC_EXCP_603: case POWERPC_EXCP_603E: case POWERPC_EXCP_G2: goto tlb_miss_tgpr; case POWERPC_EXCP_7x5: goto tlb_miss; case POWERPC_EXCP_74xx: goto tlb_miss_74xx; default: cpu_abort(env, \"Invalid data load TLB miss exception\\n\"); break; } break; case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ if (lpes1 == 0) /* XXX: check this */ new_msr |= (target_ulong)MSR_HVB; switch (excp_model) { case POWERPC_EXCP_602: case POWERPC_EXCP_603: case POWERPC_EXCP_603E: case POWERPC_EXCP_G2: tlb_miss_tgpr: /* Swap temporary saved registers with GPRs */ if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) { new_msr |= (target_ulong)1 << MSR_TGPR; hreg_swap_gpr_tgpr(env); } goto tlb_miss; case POWERPC_EXCP_7x5: tlb_miss: #if defined (DEBUG_SOFTWARE_TLB) if (qemu_log_enabled()) { const char *es; target_ulong *miss, *cmp; int en; if (excp == POWERPC_EXCP_IFTLB) { es = \"I\"; en = 'I'; miss = &env->spr[SPR_IMISS]; cmp = &env->spr[SPR_ICMP]; } else { if (excp == POWERPC_EXCP_DLTLB) es = \"DL\"; else es = \"DS\"; en = 'D'; miss = &env->spr[SPR_DMISS]; cmp = &env->spr[SPR_DCMP]; } qemu_log(\"6xx %sTLB miss: %cM \" TARGET_FMT_lx \" %cC \" TARGET_FMT_lx \" H1 \" TARGET_FMT_lx \" H2 \" TARGET_FMT_lx \" %08x\\n\", es, en, *miss, en, *cmp, env->spr[SPR_HASH1], env->spr[SPR_HASH2], env->error_code); } #endif msr |= env->crf[0] << 28; msr |= env->error_code; /* key, D/I, S/L bits */ /* Set way using a LRU mechanism */ msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17; break; case POWERPC_EXCP_74xx: tlb_miss_74xx: #if defined (DEBUG_SOFTWARE_TLB) if (qemu_log_enabled()) { const char *es; target_ulong *miss, *cmp; int en; if 
(excp == POWERPC_EXCP_IFTLB) { es = \"I\"; en = 'I'; miss = &env->spr[SPR_TLBMISS]; cmp = &env->spr[SPR_PTEHI]; } else { if (excp == POWERPC_EXCP_DLTLB) es = \"DL\"; else es = \"DS\"; en = 'D'; miss = &env->spr[SPR_TLBMISS]; cmp = &env->spr[SPR_PTEHI]; } qemu_log(\"74xx %sTLB miss: %cM \" TARGET_FMT_lx \" %cC \" TARGET_FMT_lx \" %08x\\n\", es, en, *miss, en, *cmp, env->error_code); } #endif msr |= env->error_code; /* key bit */ break; default: cpu_abort(env, \"Invalid data store TLB miss exception\\n\"); break; } goto store_next; case POWERPC_EXCP_FPA: /* Floating-point assist exception */ /* XXX: TODO */ cpu_abort(env, \"Floating point assist exception \" \"is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_DABR: /* Data address breakpoint */ /* XXX: TODO */ cpu_abort(env, \"DABR exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ /* XXX: TODO */ cpu_abort(env, \"IABR exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_SMI: /* System management interrupt */ /* XXX: TODO */ cpu_abort(env, \"SMI exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_THERM: /* Thermal interrupt */ /* XXX: TODO */ cpu_abort(env, \"Thermal management exception \" \"is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */ if (lpes1 == 0) new_msr |= (target_ulong)MSR_HVB; /* XXX: TODO */ cpu_abort(env, \"Performance counter exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_VPUA: /* Vector assist exception */ /* XXX: TODO */ cpu_abort(env, \"VPU assist exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_SOFTP: /* Soft patch exception */ /* XXX: TODO */ cpu_abort(env, \"970 soft-patch exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_MAINT: /* Maintenance exception */ /* XXX: TODO */ cpu_abort(env, \"970 maintenance exception is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */ /* XXX: TODO */ cpu_abort(env, \"Maskable external exception \" \"is not implemented yet !\\n\"); goto store_next; case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */ /* XXX: TODO */ cpu_abort(env, \"Non maskable external exception \" \"is not implemented yet !\\n\"); goto store_next; default: excp_invalid: cpu_abort(env, \"Invalid PowerPC exception %d. 
Aborting\\n\", excp); break; store_current: /* save current instruction location */ env->spr[srr0] = env->nip - 4; break; store_next: /* save next instruction location */ env->spr[srr0] = env->nip; break; } /* Save MSR */ env->spr[srr1] = msr; /* If any alternate SRR register are defined, duplicate saved values */ if (asrr0 != -1) env->spr[asrr0] = env->spr[srr0]; if (asrr1 != -1) env->spr[asrr1] = env->spr[srr1]; /* If we disactivated any translation, flush TLBs */ if (new_msr & ((1 << MSR_IR) | (1 << MSR_DR))) tlb_flush(env, 1); if (msr_ile) { new_msr |= (target_ulong)1 << MSR_LE; } /* Jump to handler */ vector = env->excp_vectors[excp]; if (vector == (target_ulong)-1ULL) { cpu_abort(env, \"Raised an exception without defined vector %d\\n\", excp); } vector |= env->excp_prefix; #if defined(TARGET_PPC64) if (excp_model == POWERPC_EXCP_BOOKE) { if (!msr_icm) { vector = (uint32_t)vector; } else { new_msr |= (target_ulong)1 << MSR_CM; } } else { if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) { vector = (uint32_t)vector; } else { new_msr |= (target_ulong)1 << MSR_SF; } } #endif /* XXX: we don't use hreg_store_msr here as already have treated * any special case that could occur. Just store MSR and update hflags */ env->msr = new_msr & env->msr_mask; hreg_compute_hflags(env); env->nip = vector; /* Reset exception state */ env->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; if ((env->mmu_model == POWERPC_MMU_BOOKE) || (env->mmu_model == POWERPC_MMU_BOOKE206)) { /* XXX: The BookE changes address space when switching modes, we should probably implement that as different MMU indexes, but for the moment we do it the slow way and flush all. */ tlb_flush(env, 1); } }"} {"target": 1, "idx": 20181, "func": "static int mxf_read_cryptographic_context(MXFCryptoContext *cryptocontext, ByteIOContext *pb, int tag, int size, UID uid) { if (size != 16) return -1; if (IS_KLV_KEY(uid, mxf_crypto_source_container_ul)) get_buffer(pb, cryptocontext->source_container_ul, 16); return 0; }"} {"target": 1, "idx": 20183, "func": "static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr) { VhostUserMsg msg = { 0 }; int err; assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); /* If guest supports GUEST_ANNOUNCE do nothing */ if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) { return 0; } /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */ if (virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_RARP)) { msg.request = VHOST_USER_SEND_RARP; msg.flags = VHOST_USER_VERSION; memcpy((char *)&msg.u64, mac_addr, 6); msg.size = sizeof(m.u64); err = vhost_user_write(dev, &msg, NULL, 0); return err; } return -1; }"} {"target": 1, "idx": 20184, "func": "static void assert_file_overwrite(const char *filename) { if (file_overwrite && no_file_overwrite) { fprintf(stderr, \"Error, both -y and -n supplied. Exiting.\\n\"); exit_program(1); } if (!file_overwrite && (strchr(filename, ':') == NULL || filename[1] == ':' || av_strstart(filename, \"file:\", NULL))) { if (avio_check(filename, 0) == 0) { if (stdin_interaction && !no_file_overwrite) { fprintf(stderr,\"File '%s' already exists. Overwrite ? [y/N] \", filename); fflush(stderr); term_exit(); signal(SIGINT, SIG_DFL); if (!read_yesno()) { av_log(NULL, AV_LOG_FATAL, \"Not overwriting - exiting\\n\"); exit_program(1); } term_init(); } else { av_log(NULL, AV_LOG_FATAL, \"File '%s' already exists. 
Exiting.\\n\", filename); exit_program(1); } } } }"} {"target": 0, "idx": 20204, "func": "void do_tlbwr (void) { int r = cpu_mips_get_random(env); invalidate_tlb(r, 1); fill_tlb(r); }"} {"target": 0, "idx": 20214, "func": "static void raw_close(BlockDriverState *bs) { }"} {"target": 0, "idx": 20222, "func": "static void gen_interrupt(DisasContext *s, int intno, target_ulong cur_eip, target_ulong next_eip) { gen_update_cc_op(s); gen_jmp_im(cur_eip); gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno), tcg_const_i32(next_eip - cur_eip)); s->is_jmp = DISAS_TB_JUMP; }"} {"target": 0, "idx": 20240, "func": "static uint32_t platform_mmio_read(ReadWriteHandler *handler, pcibus_t addr, int len) { DPRINTF(\"Warning: attempted read from physical address \" \"0x\" TARGET_FMT_plx \" in xen platform mmio space\\n\", addr); return 0; }"} {"target": 0, "idx": 20241, "func": "int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw, int mmu_idx, int is_softmmu) { target_ulong virt_addr, vaddr; target_phys_addr_t paddr; int error_code = 0, prot, ret = 0, access_index; error_code = get_physical_address(env, &paddr, &prot, &access_index, address, rw, mmu_idx); if (error_code == 0) { virt_addr = address & TARGET_PAGE_MASK; vaddr = virt_addr + ((address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1)); #ifdef DEBUG_MMU printf(\"Translate at 0x%\" PRIx64 \" -> 0x%\" PRIx64 \", vaddr 0x%\" PRIx64 \"\\n\", address, paddr, vaddr); #endif ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu); return ret; } // XXX return 1; }"} {"target": 0, "idx": 20243, "func": "static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avctx) { OggVorbisContext *s = avctx->priv_data; double cfreq; int ret; if (avctx->flags & CODEC_FLAG_QSCALE) { /* variable bitrate * NOTE: we use the oggenc range of -1 to 10 for global_quality for * user convenience, but libvorbis uses -0.1 to 1.0 */ float q = avctx->global_quality / (float)FF_QP2LAMBDA; if ((ret = vorbis_encode_setup_vbr(vi, avctx->channels, avctx->sample_rate, q / 10.0))) goto error; } else { int minrate = avctx->rc_min_rate > 0 ? avctx->rc_min_rate : -1; int maxrate = avctx->rc_min_rate > 0 ? 
avctx->rc_max_rate : -1; /* average bitrate */ if ((ret = vorbis_encode_setup_managed(vi, avctx->channels, avctx->sample_rate, minrate, avctx->bit_rate, maxrate))) goto error; /* variable bitrate by estimate, disable slow rate management */ if (minrate == -1 && maxrate == -1) if ((ret = vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE2_SET, NULL))) goto error; } /* cutoff frequency */ if (avctx->cutoff > 0) { cfreq = avctx->cutoff / 1000.0; if ((ret = vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq))) goto error; } /* impulse block bias */ if (s->iblock) { if ((ret = vorbis_encode_ctl(vi, OV_ECTL_IBLOCK_SET, &s->iblock))) goto error; } if ((ret = vorbis_encode_setup_init(vi))) goto error; return 0; error: return vorbis_error_to_averror(ret); }"} {"target": 0, "idx": 20288, "func": "DriveInfo *drive_init(QemuOpts *opts, void *opaque, int *fatal_error) { const char *buf; const char *file = NULL; char devname[128]; const char *serial; const char *mediastr = \"\"; BlockInterfaceType type; enum { MEDIA_DISK, MEDIA_CDROM } media; int bus_id, unit_id; int cyls, heads, secs, translation; BlockDriver *drv = NULL; QEMUMachine *machine = opaque; int max_devs; int index; int cache; int aio = 0; int bdrv_flags, onerror; const char *devaddr; DriveInfo *dinfo; int snapshot = 0; *fatal_error = 1; translation = BIOS_ATA_TRANSLATION_AUTO; cache = 1; if (machine && machine->use_scsi) { type = IF_SCSI; max_devs = MAX_SCSI_DEVS; pstrcpy(devname, sizeof(devname), \"scsi\"); } else { type = IF_IDE; max_devs = MAX_IDE_DEVS; pstrcpy(devname, sizeof(devname), \"ide\"); } media = MEDIA_DISK; /* extract parameters */ bus_id = qemu_opt_get_number(opts, \"bus\", 0); unit_id = qemu_opt_get_number(opts, \"unit\", -1); index = qemu_opt_get_number(opts, \"index\", -1); cyls = qemu_opt_get_number(opts, \"cyls\", 0); heads = qemu_opt_get_number(opts, \"heads\", 0); secs = qemu_opt_get_number(opts, \"secs\", 0); snapshot = qemu_opt_get_bool(opts, \"snapshot\", 0); file = qemu_opt_get(opts, \"file\"); serial = qemu_opt_get(opts, \"serial\"); if ((buf = qemu_opt_get(opts, \"if\")) != NULL) { pstrcpy(devname, sizeof(devname), buf); if (!strcmp(buf, \"ide\")) { type = IF_IDE; max_devs = MAX_IDE_DEVS; } else if (!strcmp(buf, \"scsi\")) { type = IF_SCSI; max_devs = MAX_SCSI_DEVS; } else if (!strcmp(buf, \"floppy\")) { type = IF_FLOPPY; max_devs = 0; } else if (!strcmp(buf, \"pflash\")) { type = IF_PFLASH; max_devs = 0; } else if (!strcmp(buf, \"mtd\")) { type = IF_MTD; max_devs = 0; } else if (!strcmp(buf, \"sd\")) { type = IF_SD; max_devs = 0; } else if (!strcmp(buf, \"virtio\")) { type = IF_VIRTIO; max_devs = 0; } else if (!strcmp(buf, \"xen\")) { type = IF_XEN; max_devs = 0; } else if (!strcmp(buf, \"none\")) { type = IF_NONE; max_devs = 0; } else { fprintf(stderr, \"qemu: unsupported bus type '%s'\\n\", buf); return NULL; } } if (cyls || heads || secs) { if (cyls < 1 || cyls > 16383) { fprintf(stderr, \"qemu: '%s' invalid physical cyls number\\n\", buf); return NULL; } if (heads < 1 || heads > 16) { fprintf(stderr, \"qemu: '%s' invalid physical heads number\\n\", buf); return NULL; } if (secs < 1 || secs > 63) { fprintf(stderr, \"qemu: '%s' invalid physical secs number\\n\", buf); return NULL; } } if ((buf = qemu_opt_get(opts, \"trans\")) != NULL) { if (!cyls) { fprintf(stderr, \"qemu: '%s' trans must be used with cyls,heads and secs\\n\", buf); return NULL; } if (!strcmp(buf, \"none\")) translation = BIOS_ATA_TRANSLATION_NONE; else if (!strcmp(buf, \"lba\")) translation = BIOS_ATA_TRANSLATION_LBA; else if (!strcmp(buf, \"auto\")) 
translation = BIOS_ATA_TRANSLATION_AUTO; else { fprintf(stderr, \"qemu: '%s' invalid translation type\\n\", buf); return NULL; } } if ((buf = qemu_opt_get(opts, \"media\")) != NULL) { if (!strcmp(buf, \"disk\")) { media = MEDIA_DISK; } else if (!strcmp(buf, \"cdrom\")) { if (cyls || secs || heads) { fprintf(stderr, \"qemu: '%s' invalid physical CHS format\\n\", buf); return NULL; } media = MEDIA_CDROM; } else { fprintf(stderr, \"qemu: '%s' invalid media\\n\", buf); return NULL; } } if ((buf = qemu_opt_get(opts, \"cache\")) != NULL) { if (!strcmp(buf, \"off\") || !strcmp(buf, \"none\")) cache = 0; else if (!strcmp(buf, \"writethrough\")) cache = 1; else if (!strcmp(buf, \"writeback\")) cache = 2; else { fprintf(stderr, \"qemu: invalid cache option\\n\"); return NULL; } } #ifdef CONFIG_LINUX_AIO if ((buf = qemu_opt_get(opts, \"aio\")) != NULL) { if (!strcmp(buf, \"threads\")) aio = 0; else if (!strcmp(buf, \"native\")) aio = 1; else { fprintf(stderr, \"qemu: invalid aio option\\n\"); return NULL; } } #endif if ((buf = qemu_opt_get(opts, \"format\")) != NULL) { if (strcmp(buf, \"?\") == 0) { fprintf(stderr, \"qemu: Supported formats:\"); bdrv_iterate_format(bdrv_format_print, NULL); fprintf(stderr, \"\\n\"); return NULL; } drv = bdrv_find_format(buf); if (!drv) { fprintf(stderr, \"qemu: '%s' invalid format\\n\", buf); return NULL; } } onerror = BLOCK_ERR_STOP_ENOSPC; if ((buf = qemu_opt_get(opts, \"werror\")) != NULL) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO) { fprintf(stderr, \"werror is no supported by this format\\n\"); return NULL; } if (!strcmp(buf, \"ignore\")) onerror = BLOCK_ERR_IGNORE; else if (!strcmp(buf, \"enospc\")) onerror = BLOCK_ERR_STOP_ENOSPC; else if (!strcmp(buf, \"stop\")) onerror = BLOCK_ERR_STOP_ANY; else if (!strcmp(buf, \"report\")) onerror = BLOCK_ERR_REPORT; else { fprintf(stderr, \"qemu: '%s' invalid write error action\\n\", buf); return NULL; } } if ((devaddr = qemu_opt_get(opts, \"addr\")) != NULL) { if (type != IF_VIRTIO) { fprintf(stderr, \"addr is not supported\\n\"); return NULL; } } /* compute bus and unit according index */ if (index != -1) { if (bus_id != 0 || unit_id != -1) { fprintf(stderr, \"qemu: index cannot be used with bus and unit\\n\"); return NULL; } if (max_devs == 0) { unit_id = index; bus_id = 0; } else { unit_id = index % max_devs; bus_id = index / max_devs; } } /* if user doesn't specify a unit_id, * try to find the first free */ if (unit_id == -1) { unit_id = 0; while (drive_get(type, bus_id, unit_id) != NULL) { unit_id++; if (max_devs && unit_id >= max_devs) { unit_id -= max_devs; bus_id++; } } } /* check unit id */ if (max_devs && unit_id >= max_devs) { fprintf(stderr, \"qemu: unit %d too big (max is %d)\\n\", unit_id, max_devs - 1); return NULL; } /* * ignore multiple definitions */ if (drive_get(type, bus_id, unit_id) != NULL) { *fatal_error = 0; return NULL; } /* init */ dinfo = qemu_mallocz(sizeof(*dinfo)); if ((buf = qemu_opts_id(opts)) != NULL) { dinfo->id = qemu_strdup(buf); } else { /* no id supplied -> create one */ dinfo->id = qemu_mallocz(32); if (type == IF_IDE || type == IF_SCSI) mediastr = (media == MEDIA_CDROM) ? 
\"-cd\" : \"-hd\"; if (max_devs) snprintf(dinfo->id, 32, \"%s%i%s%i\", devname, bus_id, mediastr, unit_id); else snprintf(dinfo->id, 32, \"%s%s%i\", devname, mediastr, unit_id); } dinfo->bdrv = bdrv_new(dinfo->id); dinfo->devaddr = devaddr; dinfo->type = type; dinfo->bus = bus_id; dinfo->unit = unit_id; dinfo->onerror = onerror; dinfo->opts = opts; if (serial) strncpy(dinfo->serial, serial, sizeof(serial)); TAILQ_INSERT_TAIL(&drives, dinfo, next); switch(type) { case IF_IDE: case IF_SCSI: case IF_XEN: switch(media) { case MEDIA_DISK: if (cyls != 0) { bdrv_set_geometry_hint(dinfo->bdrv, cyls, heads, secs); bdrv_set_translation_hint(dinfo->bdrv, translation); } break; case MEDIA_CDROM: bdrv_set_type_hint(dinfo->bdrv, BDRV_TYPE_CDROM); break; } break; case IF_SD: /* FIXME: This isn't really a floppy, but it's a reasonable approximation. */ case IF_FLOPPY: bdrv_set_type_hint(dinfo->bdrv, BDRV_TYPE_FLOPPY); break; case IF_PFLASH: case IF_MTD: case IF_NONE: break; case IF_VIRTIO: /* add virtio block device */ opts = qemu_opts_create(&qemu_device_opts, NULL, 0); qemu_opt_set(opts, \"driver\", \"virtio-blk-pci\"); qemu_opt_set(opts, \"drive\", dinfo->id); if (devaddr) qemu_opt_set(opts, \"addr\", devaddr); break; case IF_COUNT: abort(); } if (!file) { *fatal_error = 0; return NULL; } bdrv_flags = 0; if (snapshot) { bdrv_flags |= BDRV_O_SNAPSHOT; cache = 2; /* always use write-back with snapshot */ } if (cache == 0) /* no caching */ bdrv_flags |= BDRV_O_NOCACHE; else if (cache == 2) /* write-back */ bdrv_flags |= BDRV_O_CACHE_WB; if (aio == 1) { bdrv_flags |= BDRV_O_NATIVE_AIO; } else { bdrv_flags &= ~BDRV_O_NATIVE_AIO; } if (bdrv_open2(dinfo->bdrv, file, bdrv_flags, drv) < 0) { fprintf(stderr, \"qemu: could not open disk image %s\\n\", file); return NULL; } if (bdrv_key_required(dinfo->bdrv)) autostart = 0; *fatal_error = 0; return dinfo; }"} {"target": 0, "idx": 20321, "func": "static int mpegts_write_header(AVFormatContext *s) { MpegTSWrite *ts = s->priv_data; MpegTSWriteStream *ts_st; MpegTSService *service; AVStream *st, *pcr_st = NULL; AVDictionaryEntry *title, *provider; int i, j; const char *service_name; const char *provider_name; int *pids; int ret; if (s->max_delay < 0) /* Not set by the caller */ s->max_delay = 0; // round up to a whole number of TS packets ts->pes_payload_size = (ts->pes_payload_size + 14 + 183) / 184 * 184 - 14; ts->tsid = ts->transport_stream_id; ts->onid = ts->original_network_id; /* allocate a single DVB service */ title = av_dict_get(s->metadata, \"service_name\", NULL, 0); if (!title) title = av_dict_get(s->metadata, \"title\", NULL, 0); service_name = title ? title->value : DEFAULT_SERVICE_NAME; provider = av_dict_get(s->metadata, \"service_provider\", NULL, 0); provider_name = provider ? 
provider->value : DEFAULT_PROVIDER_NAME; service = mpegts_add_service(ts, ts->service_id, provider_name, service_name); service->pmt.write_packet = section_write_packet; service->pmt.opaque = s; service->pmt.cc = 15; ts->pat.pid = PAT_PID; ts->pat.cc = 15; // Initialize at 15 so that it wraps and be equal to 0 for the first packet we write ts->pat.write_packet = section_write_packet; ts->pat.opaque = s; ts->sdt.pid = SDT_PID; ts->sdt.cc = 15; ts->sdt.write_packet = section_write_packet; ts->sdt.opaque = s; pids = av_malloc(s->nb_streams * sizeof(*pids)); if (!pids) return AVERROR(ENOMEM); /* assign pids to each stream */ for(i = 0;i < s->nb_streams; i++) { st = s->streams[i]; avpriv_set_pts_info(st, 33, 1, 90000); ts_st = av_mallocz(sizeof(MpegTSWriteStream)); if (!ts_st) { ret = AVERROR(ENOMEM); goto fail; } st->priv_data = ts_st; ts_st->payload = av_mallocz(ts->pes_payload_size); if (!ts_st->payload) { ret = AVERROR(ENOMEM); goto fail; } ts_st->service = service; /* MPEG pid values < 16 are reserved. Applications which set st->id in * this range are assigned a calculated pid. */ if (st->id < 16) { ts_st->pid = ts->start_pid + i; } else if (st->id < 0x1FFF) { ts_st->pid = st->id; } else { av_log(s, AV_LOG_ERROR, \"Invalid stream id %d, must be less than 8191\\n\", st->id); ret = AVERROR(EINVAL); goto fail; } if (ts_st->pid == service->pmt.pid) { av_log(s, AV_LOG_ERROR, \"Duplicate stream id %d\\n\", ts_st->pid); ret = AVERROR(EINVAL); goto fail; } for (j = 0; j < i; j++) if (pids[j] == ts_st->pid) { av_log(s, AV_LOG_ERROR, \"Duplicate stream id %d\\n\", ts_st->pid); ret = AVERROR(EINVAL); goto fail; } pids[i] = ts_st->pid; ts_st->payload_pts = AV_NOPTS_VALUE; ts_st->payload_dts = AV_NOPTS_VALUE; ts_st->first_pts_check = 1; ts_st->cc = 15; /* update PCR pid by using the first video stream */ if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && service->pcr_pid == 0x1fff) { service->pcr_pid = ts_st->pid; pcr_st = st; } if (st->codec->codec_id == AV_CODEC_ID_AAC && st->codec->extradata_size > 0) { AVStream *ast; ts_st->amux = avformat_alloc_context(); if (!ts_st->amux) { ret = AVERROR(ENOMEM); goto fail; } ts_st->amux->oformat = av_guess_format((ts->flags & MPEGTS_FLAG_AAC_LATM) ? 
\"latm\" : \"adts\", NULL, NULL); if (!ts_st->amux->oformat) { ret = AVERROR(EINVAL); goto fail; } ast = avformat_new_stream(ts_st->amux, NULL); ret = avcodec_copy_context(ast->codec, st->codec); if (ret != 0) goto fail; ret = avformat_write_header(ts_st->amux, NULL); if (ret < 0) goto fail; } } av_free(pids); /* if no video stream, use the first stream as PCR */ if (service->pcr_pid == 0x1fff && s->nb_streams > 0) { pcr_st = s->streams[0]; ts_st = pcr_st->priv_data; service->pcr_pid = ts_st->pid; } if (ts->mux_rate > 1) { service->pcr_packet_period = (ts->mux_rate * PCR_RETRANS_TIME) / (TS_PACKET_SIZE * 8 * 1000); ts->sdt_packet_period = (ts->mux_rate * SDT_RETRANS_TIME) / (TS_PACKET_SIZE * 8 * 1000); ts->pat_packet_period = (ts->mux_rate * PAT_RETRANS_TIME) / (TS_PACKET_SIZE * 8 * 1000); ts->first_pcr = av_rescale(s->max_delay, PCR_TIME_BASE, AV_TIME_BASE); } else { /* Arbitrary values, PAT/PMT could be written on key frames */ ts->sdt_packet_period = 200; ts->pat_packet_period = 40; if (pcr_st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if (!pcr_st->codec->frame_size) { av_log(s, AV_LOG_WARNING, \"frame size not set\\n\"); service->pcr_packet_period = pcr_st->codec->sample_rate/(10*512); } else { service->pcr_packet_period = pcr_st->codec->sample_rate/(10*pcr_st->codec->frame_size); } } else { // max delta PCR 0.1s service->pcr_packet_period = pcr_st->codec->time_base.den/(10*pcr_st->codec->time_base.num); } } // output a PCR as soon as possible service->pcr_packet_count = service->pcr_packet_period; ts->pat_packet_count = ts->pat_packet_period-1; ts->sdt_packet_count = ts->sdt_packet_period-1; if (ts->mux_rate == 1) av_log(s, AV_LOG_VERBOSE, \"muxrate VBR, \"); else av_log(s, AV_LOG_VERBOSE, \"muxrate %d, \", ts->mux_rate); av_log(s, AV_LOG_VERBOSE, \"pcr every %d pkts, \" \"sdt every %d, pat/pmt every %d pkts\\n\", service->pcr_packet_period, ts->sdt_packet_period, ts->pat_packet_period); avio_flush(s->pb); return 0; fail: av_free(pids); for(i = 0;i < s->nb_streams; i++) { MpegTSWriteStream *ts_st; st = s->streams[i]; ts_st = st->priv_data; if (ts_st) { av_freep(&ts_st->payload); if (ts_st->amux) { avformat_free_context(ts_st->amux); ts_st->amux = NULL; } } av_freep(&st->priv_data); } return ret; }"} {"target": 0, "idx": 20366, "func": "static int htab_save_setup(QEMUFile *f, void *opaque) { sPAPRMachineState *spapr = opaque; /* \"Iteration\" header */ qemu_put_be32(f, spapr->htab_shift); if (spapr->htab) { spapr->htab_save_index = 0; spapr->htab_first_pass = true; } else { assert(kvm_enabled()); spapr->htab_fd = kvmppc_get_htab_fd(false); spapr->htab_fd_stale = false; if (spapr->htab_fd < 0) { fprintf(stderr, \"Unable to open fd for reading hash table from KVM: %s\\n\", strerror(errno)); return -1; } } return 0; }"} {"target": 0, "idx": 20367, "func": "int pci_drive_hot_add(Monitor *mon, const QDict *qdict, DriveInfo *dinfo) { int dom, pci_bus; unsigned slot; PCIDevice *dev; const char *pci_addr = qdict_get_str(qdict, \"pci_addr\"); switch (dinfo->type) { case IF_SCSI: if (pci_read_devaddr(mon, pci_addr, &dom, &pci_bus, &slot)) { goto err; } dev = pci_find_device(pci_find_root_bus(dom), pci_bus, PCI_DEVFN(slot, 0)); if (!dev) { monitor_printf(mon, \"no pci device with address %s\\n\", pci_addr); goto err; } if (scsi_hot_add(mon, &dev->qdev, dinfo, 1) != 0) { goto err; } break; default: monitor_printf(mon, \"Can't hot-add drive to type %d\\n\", dinfo->type); goto err; } return 0; err: return -1; }"} {"target": 0, "idx": 20385, "func": "static int read_ir(AVFilterLink *link, AVFrame 
*frame) { AVFilterContext *ctx = link->dst; AudioFIRContext *s = ctx->priv; int nb_taps, max_nb_taps; av_audio_fifo_write(s->fifo[1], (void **)frame->extended_data, frame->nb_samples); av_frame_free(&frame); nb_taps = av_audio_fifo_size(s->fifo[1]); max_nb_taps = MAX_IR_DURATION * ctx->outputs[0]->sample_rate; if (nb_taps > max_nb_taps) { av_log(ctx, AV_LOG_ERROR, \"Too big number of coefficients: %d > %d.\\n\", nb_taps, max_nb_taps); return AVERROR(EINVAL); } return 0; }"} {"target": 0, "idx": 20395, "func": "void cpu_loop(CPUSH4State *env) { CPUState *cs = CPU(sh_env_get_cpu(env)); int trapnr, ret; target_siginfo_t info; while (1) { cpu_exec_start(cs); trapnr = cpu_sh4_exec(cs); cpu_exec_end(cs); switch (trapnr) { case 0x160: env->pc += 2; ret = do_syscall(env, env->gregs[3], env->gregs[4], env->gregs[5], env->gregs[6], env->gregs[7], env->gregs[0], env->gregs[1], 0, 0); env->gregs[0] = ret; break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; case EXCP_DEBUG: { int sig; sig = gdb_handlesig(cs, TARGET_SIGTRAP); if (sig) { info.si_signo = sig; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, &info); } } break; case 0xa0: case 0xc0: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; info._sifields._sigfault._addr = env->tea; queue_signal(env, info.si_signo, &info); break; default: printf (\"Unhandled trap: 0x%x\\n\", trapnr); cpu_dump_state(cs, stderr, fprintf, 0); exit(EXIT_FAILURE); } process_pending_signals (env); } }"} {"target": 0, "idx": 20402, "func": "static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2) { gen_helper_fcmps(cpu_env, r_rs1, r_rs2); }"} {"target": 0, "idx": 20403, "func": "static void set_sigp_status(SigpInfo *si, uint64_t status) { *si->status_reg &= 0xffffffff00000000ULL; *si->status_reg |= status; si->cc = SIGP_CC_STATUS_STORED; }"} {"target": 0, "idx": 20407, "func": "static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq) { VirtIOSCSIReq *req; req = g_malloc(sizeof(*req)); if (!virtqueue_pop(vq, &req->elem)) { g_free(req); return NULL; } virtio_scsi_parse_req(s, vq, req); return req; }"} {"target": 1, "idx": 20417, "func": "void OPPROTO op_divb_AL_T0(void) { unsigned int num, den, q, r; num = (EAX & 0xffff); den = (T0 & 0xff); if (den == 0) { raise_exception(EXCP00_DIVZ); } q = (num / den) & 0xff; r = (num % den) & 0xff; EAX = (EAX & ~0xffff) | (r << 8) | q; }"} {"target": 0, "idx": 20436, "func": "static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src) { gen_mov_reg_Z(cpu_tmp0, src); gen_mov_reg_C(dst, src); tcg_gen_or_tl(dst, dst, cpu_tmp0); tcg_gen_xori_tl(dst, dst, 0x1); }"} {"target": 0, "idx": 20444, "func": "static void test_pxe_virtio_pci(void) { test_pxe_one(\"-device virtio-net-pci,netdev=\" NETNAME, false); }"} {"target": 1, "idx": 20455, "func": "int ff_scale_eval_dimensions(void *log_ctx, const char *w_expr, const char *h_expr, AVFilterLink *inlink, AVFilterLink *outlink, int *ret_w, int *ret_h) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format); const char *expr; int w, h; int factor_w, factor_h; int eval_w, eval_h; int ret; double var_values[VARS_NB], res; var_values[VAR_PI] = M_PI; var_values[VAR_PHI] = M_PHI; var_values[VAR_E] = M_E; var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w; var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h; var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN; var_values[VAR_OUT_H] = 
var_values[VAR_OH] = NAN; var_values[VAR_A] = (double) inlink->w / inlink->h; var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1; var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR]; var_values[VAR_HSUB] = 1 << desc->log2_chroma_w; var_values[VAR_VSUB] = 1 << desc->log2_chroma_h; var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w; var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h; /* evaluate width and height */ av_expr_parse_and_eval(&res, (expr = w_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, log_ctx); eval_w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res; if ((ret = av_expr_parse_and_eval(&res, (expr = h_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, log_ctx)) < 0) goto fail; eval_h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res; /* evaluate again the width, as it may depend on the output height */ if ((ret = av_expr_parse_and_eval(&res, (expr = w_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, log_ctx)) < 0) goto fail; eval_w = res; w = eval_w; h = eval_h; /* Check if it is requested that the result has to be divisible by a some * factor (w or h = -n with n being the factor). */ factor_w = 1; factor_h = 1; if (w < -1) { factor_w = -w; } if (h < -1) { factor_h = -h; } if (w < 0 && h < 0) eval_w = eval_h = 0; if (!(w = eval_w)) w = inlink->w; if (!(h = eval_h)) h = inlink->h; /* Make sure that the result is divisible by the factor we determined * earlier. If no factor was set, it is nothing will happen as the default * factor is 1 */ if (w < 0) w = av_rescale(h, inlink->w, inlink->h * factor_w) * factor_w; if (h < 0) h = av_rescale(w, inlink->h, inlink->w * factor_h) * factor_h; *ret_w = w; *ret_h = h; return 0; fail: av_log(log_ctx, AV_LOG_ERROR, \"Error when evaluating the expression '%s'.\\n\" \"Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\\n\", expr, w_expr, h_expr); return ret; }"} {"target": 1, "idx": 20472, "func": "static int parse_strk(AVFormatContext *s, FourxmDemuxContext *fourxm, uint8_t *buf, int size) { AVStream *st; int track; /* check that there is enough data */ if (size != strk_SIZE) return AVERROR_INVALIDDATA; track = AV_RL32(buf + 8); if (track + 1 > fourxm->track_count) { if (av_reallocp_array(&fourxm->tracks, track + 1, sizeof(AudioTrack))) return AVERROR(ENOMEM); memset(&fourxm->tracks[fourxm->track_count], 0, sizeof(AudioTrack) * (track + 1 - fourxm->track_count)); fourxm->track_count = track + 1; } fourxm->tracks[track].adpcm = AV_RL32(buf + 12); fourxm->tracks[track].channels = AV_RL32(buf + 36); fourxm->tracks[track].sample_rate = AV_RL32(buf + 40); fourxm->tracks[track].bits = AV_RL32(buf + 44); fourxm->tracks[track].audio_pts = 0; if (fourxm->tracks[track].channels <= 0 || fourxm->tracks[track].sample_rate <= 0 || fourxm->tracks[track].bits < 0) { av_log(s, AV_LOG_ERROR, \"audio header invalid\\n\"); return AVERROR_INVALIDDATA; } /* allocate a new AVStream */ st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->id = track; avpriv_set_pts_info(st, 60, 1, fourxm->tracks[track].sample_rate); fourxm->tracks[track].stream_index = st->index; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_tag = 0; st->codec->channels = fourxm->tracks[track].channels; st->codec->sample_rate = fourxm->tracks[track].sample_rate; st->codec->bits_per_coded_sample = fourxm->tracks[track].bits; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * 
st->codec->bits_per_coded_sample; st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample; if (fourxm->tracks[track].adpcm){ st->codec->codec_id = AV_CODEC_ID_ADPCM_4XM; } else if (st->codec->bits_per_coded_sample == 8) { st->codec->codec_id = AV_CODEC_ID_PCM_U8; } else st->codec->codec_id = AV_CODEC_ID_PCM_S16LE; return 0; }"} {"target": 1, "idx": 20478, "func": "static inline void gen_add_data_offset(DisasContext *s, unsigned int insn, TCGv var) { int val, rm, shift, shiftop; TCGv offset; if (!(insn & (1 << 25))) { /* immediate */ val = insn & 0xfff; if (!(insn & (1 << 23))) val = -val; if (val != 0) tcg_gen_addi_i32(var, var, val); } else { /* shift/register */ rm = (insn) & 0xf; shift = (insn >> 7) & 0x1f; shiftop = (insn >> 5) & 3; offset = load_reg(s, rm); gen_arm_shift_im(offset, shiftop, shift, 0); if (!(insn & (1 << 23))) tcg_gen_sub_i32(var, var, offset); else tcg_gen_add_i32(var, var, offset); dead_tmp(offset); } }"} {"target": 0, "idx": 20497, "func": "static int count_contiguous_clusters(int nb_clusters, int cluster_size, uint64_t *l2_table, uint64_t stop_flags) { int i; QCow2ClusterType first_cluster_type; uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED; uint64_t first_entry = be64_to_cpu(l2_table[0]); uint64_t offset = first_entry & mask; if (!offset) { return 0; } /* must be allocated */ first_cluster_type = qcow2_get_cluster_type(first_entry); assert(first_cluster_type == QCOW2_CLUSTER_NORMAL || (first_cluster_type == QCOW2_CLUSTER_ZERO && (first_entry & L2E_OFFSET_MASK) != 0)); for (i = 0; i < nb_clusters; i++) { uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask; if (offset + (uint64_t) i * cluster_size != l2_entry) { break; } } return i; }"} {"target": 0, "idx": 20530, "func": "static inline int name_to_handle(int dirfd, const char *name, struct file_handle *fh, int *mnt_id, int flags) { return syscall(__NR_name_to_handle_at, dirfd, name, fh, mnt_id, flags); }"} {"target": 0, "idx": 20532, "func": "static void generate_bootsect(uint32_t gpr[8], uint16_t segs[6], uint16_t ip) { uint8_t bootsect[512], *p; int i; int hda; hda = drive_get_index(IF_IDE, 0, 0); if (hda == -1) { fprintf(stderr, \"A disk image must be given for 'hda' when booting \" \"a Linux kernel\\n(if you really don't want it, use /dev/zero)\\n\"); exit(1); } memset(bootsect, 0, sizeof(bootsect)); /* Copy the MSDOS partition table if possible */ bdrv_read(drives_table[hda].bdrv, 0, bootsect, 1); /* Make sure we have a partition signature */ bootsect[510] = 0x55; bootsect[511] = 0xaa; /* Actual code */ p = bootsect; *p++ = 0xfa; /* CLI */ *p++ = 0xfc; /* CLD */ for (i = 0; i < 6; i++) { if (i == 1) /* Skip CS */ continue; *p++ = 0xb8; /* MOV AX,imm16 */ *p++ = segs[i]; *p++ = segs[i] >> 8; *p++ = 0x8e; /* MOV ,AX */ *p++ = 0xc0 + (i << 3); } for (i = 0; i < 8; i++) { *p++ = 0x66; /* 32-bit operand size */ *p++ = 0xb8 + i; /* MOV ,imm32 */ *p++ = gpr[i]; *p++ = gpr[i] >> 8; *p++ = gpr[i] >> 16; *p++ = gpr[i] >> 24; } *p++ = 0xea; /* JMP FAR */ *p++ = ip; /* IP */ *p++ = ip >> 8; *p++ = segs[1]; /* CS */ *p++ = segs[1] >> 8; bdrv_set_boot_sector(drives_table[hda].bdrv, bootsect, sizeof(bootsect)); }"} {"target": 0, "idx": 20536, "func": "static inline int blah (int32_t i) { if (i > 0x43c07fff) return 32767; else if (i < 0x43bf8000) return -32768; else return i - 0x43c00000; }"} {"target": 0, "idx": 20550, "func": "uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, uint64_t arg3) { CPU_DoubleU farg1, farg2, farg3; farg1.ll = arg1; farg2.ll = arg2; 
farg3.ll = arg3; if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { /* Multiplication of zero by infinity */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d) || float64_is_signaling_nan(farg3.d))) { /* sNaN operation */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); } /* This is the way the PowerPC specification defines it */ float128 ft0_128, ft1_128; ft0_128 = float64_to_float128(farg1.d, &env->fp_status); ft1_128 = float64_to_float128(farg2.d, &env->fp_status); ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) && float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) { /* Magnitude subtraction of infinities */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); } else { ft1_128 = float64_to_float128(farg3.d, &env->fp_status); ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status); farg1.d = float128_to_float64(ft0_128, &env->fp_status); } } return farg1.ll; }"} {"target": 0, "idx": 20557, "func": "static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors, bool is_write, double elapsed_time, uint64_t *wait) { uint64_t bps_limit = 0; double bytes_limit, bytes_base, bytes_res; double slice_time, wait_time; if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) { bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]; } else if (bs->io_limits.bps[is_write]) { bps_limit = bs->io_limits.bps[is_write]; } else { if (wait) { *wait = 0; } return false; } slice_time = bs->slice_end - bs->slice_start; slice_time /= (NANOSECONDS_PER_SECOND); bytes_limit = bps_limit * slice_time; bytes_base = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write]; if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) { bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write]; } /* bytes_base: the bytes of data which have been read/written; and * it is obtained from the history statistic info. * bytes_res: the remaining bytes of data which need to be read/written. * (bytes_base + bytes_res) / bps_limit: used to calcuate * the total time for completing reading/writting all data. */ bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE; if (bytes_base + bytes_res <= bytes_limit) { if (wait) { *wait = 0; } return false; } /* Calc approx time to dispatch */ wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time; /* When the I/O rate at runtime exceeds the limits, * bs->slice_end need to be extended in order that the current statistic * info can be kept until the timer fire, so it is increased and tuned * based on the result of experiment. */ bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10; bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME; if (wait) { *wait = wait_time * BLOCK_IO_SLICE_TIME * 10; } return true; }"} {"target": 1, "idx": 20596, "func": "static int decode_tns(AACContext * ac, TemporalNoiseShaping * tns, GetBitContext * gb, const IndividualChannelStream * ics) { int w, filt, i, coef_len, coef_res, coef_compress; const int is8 = ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE; const int tns_max_order = is8 ? 7 : ac->m4ac.object_type == AOT_AAC_MAIN ? 
20 : 12; for (w = 0; w < ics->num_windows; w++) { tns->n_filt[w] = get_bits(gb, 2 - is8); if (tns->n_filt[w]) coef_res = get_bits1(gb); for (filt = 0; filt < tns->n_filt[w]; filt++) { int tmp2_idx; tns->length[w][filt] = get_bits(gb, 6 - 2*is8); if ((tns->order[w][filt] = get_bits(gb, 5 - 2*is8)) > tns_max_order) { av_log(ac->avccontext, AV_LOG_ERROR, \"TNS filter order %d is greater than maximum %d.\", tns->order[w][filt], tns_max_order); tns->order[w][filt] = 0; return -1; } tns->direction[w][filt] = get_bits1(gb); coef_compress = get_bits1(gb); coef_len = coef_res + 3 - coef_compress; tmp2_idx = 2*coef_compress + coef_res; for (i = 0; i < tns->order[w][filt]; i++) tns->coef[w][filt][i] = tns_tmp2_map[tmp2_idx][get_bits(gb, coef_len)]; } } return 0; }"} {"target": 1, "idx": 20603, "func": "static void xilinx_enet_realize(DeviceState *dev, Error **errp) { XilinxAXIEnet *s = XILINX_AXI_ENET(dev); XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(&s->rx_data_dev); XilinxAXIEnetStreamSlave *cs = XILINX_AXI_ENET_CONTROL_STREAM( &s->rx_control_dev); Error *local_errp = NULL; object_property_add_link(OBJECT(ds), \"enet\", \"xlnx.axi-ethernet\", (Object **) &ds->enet, OBJ_PROP_LINK_UNREF_ON_RELEASE, &local_errp); object_property_add_link(OBJECT(cs), \"enet\", \"xlnx.axi-ethernet\", (Object **) &cs->enet, OBJ_PROP_LINK_UNREF_ON_RELEASE, &local_errp); if (local_errp) { goto xilinx_enet_realize_fail; } object_property_set_link(OBJECT(ds), OBJECT(s), \"enet\", &local_errp); object_property_set_link(OBJECT(cs), OBJECT(s), \"enet\", &local_errp); if (local_errp) { goto xilinx_enet_realize_fail; } qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_xilinx_enet_info, &s->conf, object_get_typename(OBJECT(dev)), dev->id, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); tdk_init(&s->TEMAC.phy); mdio_attach(&s->TEMAC.mdio_bus, &s->TEMAC.phy, s->c_phyaddr); s->TEMAC.parent = s; s->rxmem = g_malloc(s->c_rxmem); return; xilinx_enet_realize_fail: if (!*errp) { *errp = local_errp; } }"} {"target": 1, "idx": 20610, "func": "static int mpc8_read_header(AVFormatContext *s) { MPCContext *c = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; int tag = 0; int64_t size, pos; c->header_pos = avio_tell(pb); if(avio_rl32(pb) != TAG_MPCK){ av_log(s, AV_LOG_ERROR, \"Not a Musepack8 file\\n\"); while(!avio_feof(pb)){ pos = avio_tell(pb); mpc8_get_chunk_header(pb, &tag, &size); if(tag == TAG_STREAMHDR) break; mpc8_handle_chunk(s, tag, pos, size); if(tag != TAG_STREAMHDR){ av_log(s, AV_LOG_ERROR, \"Stream header not found\\n\"); pos = avio_tell(pb); avio_skip(pb, 4); //CRC c->ver = avio_r8(pb); if(c->ver != 8){ av_log(s, AV_LOG_ERROR, \"Unknown stream version %d\\n\", c->ver); return AVERROR_PATCHWELCOME; c->samples = ffio_read_varlen(pb); ffio_read_varlen(pb); //silence samples at the beginning st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = AV_CODEC_ID_MUSEPACK8; st->codec->bits_per_coded_sample = 16; if (ff_get_extradata(st->codec, pb, 2) < 0) return AVERROR(ENOMEM); st->codec->channels = (st->codec->extradata[1] >> 4) + 1; st->codec->sample_rate = mpc8_rate[st->codec->extradata[0] >> 5]; avpriv_set_pts_info(st, 32, 1152 << (st->codec->extradata[1]&3)*2, st->codec->sample_rate); st->start_time = 0; st->duration = c->samples / (1152 << (st->codec->extradata[1]&3)*2); size -= avio_tell(pb) - pos; if (size > 0) avio_skip(pb, size); if (pb->seekable) { int64_t pos = avio_tell(s->pb); 
c->apetag_start = ff_ape_parse_tag(s); avio_seek(s->pb, pos, SEEK_SET); return 0;"} {"target": 1, "idx": 20614, "func": "static DriveInfo *blockdev_init(const char *file, QDict *bs_opts, Error **errp) { const char *buf; const char *serial; int ro = 0; int bdrv_flags = 0; int on_read_error, on_write_error; DriveInfo *dinfo; ThrottleConfig cfg; int snapshot = 0; bool copy_on_read; int ret; Error *error = NULL; QemuOpts *opts; const char *id; bool has_driver_specific_opts; BlockdevDetectZeroesOptions detect_zeroes; BlockDriver *drv = NULL; /* Check common options by copying from bs_opts to opts, all other options * stay in bs_opts for processing by bdrv_open(). */ id = qdict_get_try_str(bs_opts, \"id\"); opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, &error); if (error) { error_propagate(errp, error); return NULL; } qemu_opts_absorb_qdict(opts, bs_opts, &error); if (error) { error_propagate(errp, error); goto early_err; } if (id) { qdict_del(bs_opts, \"id\"); } has_driver_specific_opts = !!qdict_size(bs_opts); /* extract parameters */ snapshot = qemu_opt_get_bool(opts, \"snapshot\", 0); ro = qemu_opt_get_bool(opts, \"read-only\", 0); copy_on_read = qemu_opt_get_bool(opts, \"copy-on-read\", false); serial = qemu_opt_get(opts, \"serial\"); if ((buf = qemu_opt_get(opts, \"discard\")) != NULL) { if (bdrv_parse_discard_flags(buf, &bdrv_flags) != 0) { error_setg(errp, \"invalid discard option\"); goto early_err; } } if (qemu_opt_get_bool(opts, \"cache.writeback\", true)) { bdrv_flags |= BDRV_O_CACHE_WB; } if (qemu_opt_get_bool(opts, \"cache.direct\", false)) { bdrv_flags |= BDRV_O_NOCACHE; } if (qemu_opt_get_bool(opts, \"cache.no-flush\", false)) { bdrv_flags |= BDRV_O_NO_FLUSH; } #ifdef CONFIG_LINUX_AIO if ((buf = qemu_opt_get(opts, \"aio\")) != NULL) { if (!strcmp(buf, \"native\")) { bdrv_flags |= BDRV_O_NATIVE_AIO; } else if (!strcmp(buf, \"threads\")) { /* this is the default */ } else { error_setg(errp, \"invalid aio option\"); goto early_err; } } #endif if ((buf = qemu_opt_get(opts, \"format\")) != NULL) { if (is_help_option(buf)) { error_printf(\"Supported formats:\"); bdrv_iterate_format(bdrv_format_print, NULL); error_printf(\"\\n\"); goto early_err; } drv = bdrv_find_format(buf); if (!drv) { error_setg(errp, \"'%s' invalid format\", buf); goto early_err; } } /* disk I/O throttling */ memset(&cfg, 0, sizeof(cfg)); cfg.buckets[THROTTLE_BPS_TOTAL].avg = qemu_opt_get_number(opts, \"throttling.bps-total\", 0); cfg.buckets[THROTTLE_BPS_READ].avg = qemu_opt_get_number(opts, \"throttling.bps-read\", 0); cfg.buckets[THROTTLE_BPS_WRITE].avg = qemu_opt_get_number(opts, \"throttling.bps-write\", 0); cfg.buckets[THROTTLE_OPS_TOTAL].avg = qemu_opt_get_number(opts, \"throttling.iops-total\", 0); cfg.buckets[THROTTLE_OPS_READ].avg = qemu_opt_get_number(opts, \"throttling.iops-read\", 0); cfg.buckets[THROTTLE_OPS_WRITE].avg = qemu_opt_get_number(opts, \"throttling.iops-write\", 0); cfg.buckets[THROTTLE_BPS_TOTAL].max = qemu_opt_get_number(opts, \"throttling.bps-total-max\", 0); cfg.buckets[THROTTLE_BPS_READ].max = qemu_opt_get_number(opts, \"throttling.bps-read-max\", 0); cfg.buckets[THROTTLE_BPS_WRITE].max = qemu_opt_get_number(opts, \"throttling.bps-write-max\", 0); cfg.buckets[THROTTLE_OPS_TOTAL].max = qemu_opt_get_number(opts, \"throttling.iops-total-max\", 0); cfg.buckets[THROTTLE_OPS_READ].max = qemu_opt_get_number(opts, \"throttling.iops-read-max\", 0); cfg.buckets[THROTTLE_OPS_WRITE].max = qemu_opt_get_number(opts, \"throttling.iops-write-max\", 0); cfg.op_size = qemu_opt_get_number(opts, 
\"throttling.iops-size\", 0); if (!check_throttle_config(&cfg, &error)) { error_propagate(errp, error); goto early_err; } on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; if ((buf = qemu_opt_get(opts, \"werror\")) != NULL) { on_write_error = parse_block_error_action(buf, 0, &error); if (error) { error_propagate(errp, error); goto early_err; } } on_read_error = BLOCKDEV_ON_ERROR_REPORT; if ((buf = qemu_opt_get(opts, \"rerror\")) != NULL) { on_read_error = parse_block_error_action(buf, 1, &error); if (error) { error_propagate(errp, error); goto early_err; } } detect_zeroes = parse_enum_option(BlockdevDetectZeroesOptions_lookup, qemu_opt_get(opts, \"detect-zeroes\"), BLOCKDEV_DETECT_ZEROES_OPTIONS_MAX, BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF, &error); if (error) { error_propagate(errp, error); goto early_err; } if (detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP && !(bdrv_flags & BDRV_O_UNMAP)) { error_setg(errp, \"setting detect-zeroes to unmap is not allowed \" \"without setting discard operation to unmap\"); goto early_err; } /* init */ dinfo = g_malloc0(sizeof(*dinfo)); dinfo->id = g_strdup(qemu_opts_id(opts)); dinfo->bdrv = bdrv_new(dinfo->id, &error); if (error) { error_propagate(errp, error); goto bdrv_new_err; } dinfo->bdrv->open_flags = snapshot ? BDRV_O_SNAPSHOT : 0; dinfo->bdrv->read_only = ro; dinfo->bdrv->detect_zeroes = detect_zeroes; dinfo->refcount = 1; if (serial != NULL) { dinfo->serial = g_strdup(serial); } QTAILQ_INSERT_TAIL(&drives, dinfo, next); bdrv_set_on_error(dinfo->bdrv, on_read_error, on_write_error); /* disk I/O throttling */ if (throttle_enabled(&cfg)) { bdrv_io_limits_enable(dinfo->bdrv); bdrv_set_io_limits(dinfo->bdrv, &cfg); } if (!file || !*file) { if (has_driver_specific_opts) { file = NULL; } else { QDECREF(bs_opts); qemu_opts_del(opts); return dinfo; } } if (snapshot) { /* always use cache=unsafe with snapshot */ bdrv_flags &= ~BDRV_O_CACHE_MASK; bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH); } if (copy_on_read) { bdrv_flags |= BDRV_O_COPY_ON_READ; } if (runstate_check(RUN_STATE_INMIGRATE)) { bdrv_flags |= BDRV_O_INCOMING; } bdrv_flags |= ro ? 
0 : BDRV_O_RDWR; QINCREF(bs_opts); ret = bdrv_open(&dinfo->bdrv, file, NULL, bs_opts, bdrv_flags, drv, &error); if (ret < 0) { error_setg(errp, \"could not open disk image %s: %s\", file ?: dinfo->id, error_get_pretty(error)); error_free(error); goto err; } if (bdrv_key_required(dinfo->bdrv)) autostart = 0; QDECREF(bs_opts); qemu_opts_del(opts); return dinfo; err: bdrv_unref(dinfo->bdrv); QTAILQ_REMOVE(&drives, dinfo, next); bdrv_new_err: g_free(dinfo->id); g_free(dinfo); early_err: QDECREF(bs_opts); qemu_opts_del(opts); return NULL; }"} {"target": 1, "idx": 20638, "func": "static int fourxm_read_header(AVFormatContext *s, AVFormatParameters *ap) { AVIOContext *pb = s->pb; unsigned int fourcc_tag; unsigned int size; int header_size; FourxmDemuxContext *fourxm = s->priv_data; unsigned char *header; int i, ret; AVStream *st; fourxm->track_count = 0; fourxm->tracks = NULL; fourxm->fps = 1.0; /* skip the first 3 32-bit numbers */ avio_skip(pb, 12); /* check for LIST-HEAD */ GET_LIST_HEADER(); header_size = size - 4; if (fourcc_tag != HEAD_TAG || header_size < 0) return AVERROR_INVALIDDATA; /* allocate space for the header and load the whole thing */ header = av_malloc(header_size); if (!header) return AVERROR(ENOMEM); if (avio_read(pb, header, header_size) != header_size){ av_free(header); return AVERROR(EIO); } /* take the lazy approach and search for any and all vtrk and strk chunks */ for (i = 0; i < header_size - 8; i++) { fourcc_tag = AV_RL32(&header[i]); size = AV_RL32(&header[i + 4]); if (fourcc_tag == std__TAG) { fourxm->fps = av_int2flt(AV_RL32(&header[i + 12])); } else if (fourcc_tag == vtrk_TAG) { /* check that there is enough data */ if (size != vtrk_SIZE) { ret= AVERROR_INVALIDDATA; goto fail; } fourxm->width = AV_RL32(&header[i + 36]); fourxm->height = AV_RL32(&header[i + 40]); /* allocate a new AVStream */ st = av_new_stream(s, 0); if (!st){ ret= AVERROR(ENOMEM); goto fail; } av_set_pts_info(st, 60, 1, fourxm->fps); fourxm->video_stream_index = st->index; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_4XM; st->codec->extradata_size = 4; st->codec->extradata = av_malloc(4); AV_WL32(st->codec->extradata, AV_RL32(&header[i + 16])); st->codec->width = fourxm->width; st->codec->height = fourxm->height; i += 8 + size; } else if (fourcc_tag == strk_TAG) { int current_track; /* check that there is enough data */ if (size != strk_SIZE) { ret= AVERROR_INVALIDDATA; goto fail; } current_track = AV_RL32(&header[i + 8]); if((unsigned)current_track >= UINT_MAX / sizeof(AudioTrack) - 1){ av_log(s, AV_LOG_ERROR, \"current_track too large\\n\"); ret= -1; goto fail; } if (current_track + 1 > fourxm->track_count) { fourxm->track_count = current_track + 1; fourxm->tracks = av_realloc(fourxm->tracks, fourxm->track_count * sizeof(AudioTrack)); if (!fourxm->tracks) { ret= AVERROR(ENOMEM); goto fail; } } fourxm->tracks[current_track].adpcm = AV_RL32(&header[i + 12]); fourxm->tracks[current_track].channels = AV_RL32(&header[i + 36]); fourxm->tracks[current_track].sample_rate = AV_RL32(&header[i + 40]); fourxm->tracks[current_track].bits = AV_RL32(&header[i + 44]); fourxm->tracks[current_track].audio_pts = 0; if( fourxm->tracks[current_track].channels <= 0 || fourxm->tracks[current_track].sample_rate <= 0 || fourxm->tracks[current_track].bits < 0){ av_log(s, AV_LOG_ERROR, \"audio header invalid\\n\"); ret= -1; goto fail; } i += 8 + size; /* allocate a new AVStream */ st = av_new_stream(s, current_track); if (!st){ ret= AVERROR(ENOMEM); goto fail; } av_set_pts_info(st, 60, 1, 
fourxm->tracks[current_track].sample_rate); fourxm->tracks[current_track].stream_index = st->index; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_tag = 0; st->codec->channels = fourxm->tracks[current_track].channels; st->codec->sample_rate = fourxm->tracks[current_track].sample_rate; st->codec->bits_per_coded_sample = fourxm->tracks[current_track].bits; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample; st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample; if (fourxm->tracks[current_track].adpcm){ st->codec->codec_id = CODEC_ID_ADPCM_4XM; }else if (st->codec->bits_per_coded_sample == 8){ st->codec->codec_id = CODEC_ID_PCM_U8; }else st->codec->codec_id = CODEC_ID_PCM_S16LE; } } /* skip over the LIST-MOVI chunk (which is where the stream should be */ GET_LIST_HEADER(); if (fourcc_tag != MOVI_TAG){ ret= AVERROR_INVALIDDATA; goto fail; } av_free(header); /* initialize context members */ fourxm->video_pts = -1; /* first frame will push to 0 */ return 0; fail: av_freep(&fourxm->tracks); av_free(header); return ret; }"} {"target": 1, "idx": 20642, "func": "static int read_extra_header(FFV1Context *f) { RangeCoder *const c = &f->c; uint8_t state[CONTEXT_SIZE]; int i, j, k, ret; uint8_t state2[32][CONTEXT_SIZE]; unsigned crc = 0; memset(state2, 128, sizeof(state2)); memset(state, 128, sizeof(state)); ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size); ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8); f->version = get_symbol(c, state, 0); if (f->version < 2) { av_log(f->avctx, AV_LOG_ERROR, \"Invalid version in global header\\n\"); return AVERROR_INVALIDDATA; } if (f->version > 2) { c->bytestream_end -= 4; f->micro_version = get_symbol(c, state, 0); if (f->micro_version < 0) return AVERROR_INVALIDDATA; } f->ac = f->avctx->coder_type = get_symbol(c, state, 0); if (f->ac > 1) { for (i = 1; i < 256; i++) f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i]; } f->colorspace = get_symbol(c, state, 0); //YUV cs type f->avctx->bits_per_raw_sample = get_symbol(c, state, 0); f->chroma_planes = get_rac(c, state); f->chroma_h_shift = get_symbol(c, state, 0); f->chroma_v_shift = get_symbol(c, state, 0); f->transparency = get_rac(c, state); f->plane_count = 1 + (f->chroma_planes || f->version<4) + f->transparency; f->num_h_slices = 1 + get_symbol(c, state, 0); f->num_v_slices = 1 + get_symbol(c, state, 0); if (f->chroma_h_shift > 4U || f->chroma_v_shift > 4U) { av_log(f->avctx, AV_LOG_ERROR, \"chroma shift parameters %d %d are invalid\\n\", f->chroma_h_shift, f->chroma_v_shift); return AVERROR_INVALIDDATA; } if (f->num_h_slices > (unsigned)f->width || !f->num_h_slices || f->num_v_slices > (unsigned)f->height || !f->num_v_slices ) { av_log(f->avctx, AV_LOG_ERROR, \"slice count invalid\\n\"); return AVERROR_INVALIDDATA; } f->quant_table_count = get_symbol(c, state, 0); if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES) return AVERROR_INVALIDDATA; for (i = 0; i < f->quant_table_count; i++) { f->context_count[i] = read_quant_tables(c, f->quant_tables[i]); if (f->context_count[i] < 0) { av_log(f->avctx, AV_LOG_ERROR, \"read_quant_table error\\n\"); return AVERROR_INVALIDDATA; } } if ((ret = ff_ffv1_allocate_initial_states(f)) < 0) return ret; for (i = 0; i < f->quant_table_count; i++) if (get_rac(c, state)) { for (j = 0; j < f->context_count[i]; j++) for (k = 0; k < CONTEXT_SIZE; k++) { int pred = j ? 
f->initial_states[i][j - 1][k] : 128; f->initial_states[i][j][k] = (pred + get_symbol(c, state2[k], 1)) & 0xFF; } } if (f->version > 2) { f->ec = get_symbol(c, state, 0); if (f->micro_version > 2) f->intra = get_symbol(c, state, 0); } if (f->version > 2) { unsigned v; v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size); if (v || f->avctx->extradata_size < 4) { av_log(f->avctx, AV_LOG_ERROR, \"CRC mismatch %X!\\n\", v); return AVERROR_INVALIDDATA; } crc = AV_RB32(f->avctx->extradata + f->avctx->extradata_size - 4); } if (f->avctx->debug & FF_DEBUG_PICT_INFO) av_log(f->avctx, AV_LOG_DEBUG, \"global: ver:%d.%d, coder:%d, colorspace: %d bpr:%d chroma:%d(%d:%d), alpha:%d slices:%dx%d qtabs:%d ec:%d intra:%d CRC:0x%08X\\n\", f->version, f->micro_version, f->ac, f->colorspace, f->avctx->bits_per_raw_sample, f->chroma_planes, f->chroma_h_shift, f->chroma_v_shift, f->transparency, f->num_h_slices, f->num_v_slices, f->quant_table_count, f->ec, f->intra, crc ); return 0; }"} {"target": 1, "idx": 20657, "func": "static void qemu_rbd_aio_event_reader(void *opaque) { BDRVRBDState *s = opaque; ssize_t ret; do { char *p = (char *)&s->event_rcb; /* now read the rcb pointer that was sent from a non qemu thread */ ret = read(s->fds[RBD_FD_READ], p + s->event_reader_pos, sizeof(s->event_rcb) - s->event_reader_pos); if (ret > 0) { s->event_reader_pos += ret; if (s->event_reader_pos == sizeof(s->event_rcb)) { s->event_reader_pos = 0; qemu_rbd_complete_aio(s->event_rcb); } } } while (ret < 0 && errno == EINTR); }"} {"target": 1, "idx": 20670, "func": "void ff_vorbis_ready_floor1_list(vorbis_floor1_entry * list, int values) { int i; list[0].sort = 0; list[1].sort = 1; for (i = 2; i < values; i++) { int j; list[i].low = 0; list[i].high = 1; list[i].sort = i; for (j = 2; j < i; j++) { int tmp = list[j].x; if (tmp < list[i].x) { if (tmp > list[list[i].low].x) list[i].low = j; } else { if (tmp < list[list[i].high].x) list[i].high = j; } } } for (i = 0; i < values - 1; i++) { int j; for (j = i + 1; j < values; j++) { if (list[list[i].sort].x > list[list[j].sort].x) { int tmp = list[i].sort; list[i].sort = list[j].sort; list[j].sort = tmp; } } } }"} {"target": 1, "idx": 20676, "func": "static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v) { gen_helper_wsr_windowbase(cpu_env, v); reset_used_window(dc); }"} {"target": 1, "idx": 20692, "func": "static void virtio_pci_exit(PCIDevice *pci_dev) { VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); virtio_pci_stop_ioeventfd(proxy); memory_region_destroy(&proxy->bar); msix_uninit_exclusive_bar(pci_dev); }"} {"target": 1, "idx": 20712, "func": "static av_cold void init_cavlc_level_tab(void){ int suffix_length, mask; unsigned int i; for(suffix_length=0; suffix_length<7; suffix_length++){ for(i=0; i<(1<>(LEVEL_TAB_BITS-prefix-1-suffix_length)) - (1<>1) ^ mask) - mask; if(prefix + 1 + suffix_length <= LEVEL_TAB_BITS){ cavlc_level_tab[suffix_length][i][0]= level_code; cavlc_level_tab[suffix_length][i][1]= prefix + 1 + suffix_length; }else if(prefix + 1 <= LEVEL_TAB_BITS){ cavlc_level_tab[suffix_length][i][0]= prefix+100; cavlc_level_tab[suffix_length][i][1]= prefix + 1; }else{ cavlc_level_tab[suffix_length][i][0]= LEVEL_TAB_BITS+100; cavlc_level_tab[suffix_length][i][1]= LEVEL_TAB_BITS; } } } }"} {"target": 1, "idx": 20720, "func": "static void yuv_a_to_rgba(const uint8_t *ycbcr, const uint8_t *alpha, uint32_t *rgba, int num_values) { const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; uint8_t r, g, b; int i, y, cb, cr; int r_add, 
g_add, b_add; for (i = num_values; i > 0; i--) { y = *ycbcr++; cr = *ycbcr++; cb = *ycbcr++; YUV_TO_RGB1_CCIR(cb, cr); YUV_TO_RGB2_CCIR(r, g, b, y); *rgba++ = (*alpha++ << 24) | (r << 16) | (g << 8) | b; } }"} {"target": 0, "idx": 20725, "func": "static int init_muxer(AVFormatContext *s, AVDictionary **options) { int ret = 0, i; AVStream *st; AVDictionary *tmp = NULL; AVCodecParameters *par = NULL; AVOutputFormat *of = s->oformat; const AVCodecDescriptor *desc; AVDictionaryEntry *e; if (options) av_dict_copy(&tmp, *options, 0); if ((ret = av_opt_set_dict(s, &tmp)) < 0) goto fail; if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class && (ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0) goto fail; #if FF_API_LAVF_AVCTX FF_DISABLE_DEPRECATION_WARNINGS if (s->nb_streams && s->streams[0]->codec->flags & AV_CODEC_FLAG_BITEXACT) { if (!(s->flags & AVFMT_FLAG_BITEXACT)) { #if FF_API_LAVF_BITEXACT av_log(s, AV_LOG_WARNING, \"Setting the AVFormatContext to bitexact mode, because \" \"the AVCodecContext is in that mode. This behavior will \" \"change in the future. To keep the current behavior, set \" \"AVFormatContext.flags |= AVFMT_FLAG_BITEXACT.\\n\"); s->flags |= AVFMT_FLAG_BITEXACT; #else av_log(s, AV_LOG_WARNING, \"The AVFormatContext is not in set to bitexact mode, only \" \"the AVCodecContext. If this is not intended, set \" \"AVFormatContext.flags |= AVFMT_FLAG_BITEXACT.\\n\"); #endif } } FF_ENABLE_DEPRECATION_WARNINGS #endif // some sanity checks if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) { av_log(s, AV_LOG_ERROR, \"No streams to mux were specified\\n\"); ret = AVERROR(EINVAL); goto fail; } for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; par = st->codecpar; #if FF_API_LAVF_CODEC_TB && FF_API_LAVF_AVCTX FF_DISABLE_DEPRECATION_WARNINGS if (!st->time_base.num && st->codec->time_base.num) { av_log(s, AV_LOG_WARNING, \"Using AVStream.codec.time_base as a \" \"timebase hint to the muxer is deprecated. 
Set \" \"AVStream.time_base instead.\\n\"); avpriv_set_pts_info(st, 64, st->codec->time_base.num, st->codec->time_base.den); } FF_ENABLE_DEPRECATION_WARNINGS #endif #if FF_API_LAVF_AVCTX FF_DISABLE_DEPRECATION_WARNINGS if (st->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN && st->codec->codec_type != AVMEDIA_TYPE_UNKNOWN) { av_log(s, AV_LOG_WARNING, \"Using AVStream.codec to pass codec \" \"parameters to muxers is deprecated, use AVStream.codecpar \" \"instead.\\n\"); ret = avcodec_parameters_from_context(st->codecpar, st->codec); if (ret < 0) goto fail; } FF_ENABLE_DEPRECATION_WARNINGS #endif /* update internal context from codecpar, old bsf api needs this * FIXME: remove when autobsf uses new bsf API */ ret = avcodec_parameters_to_context(st->internal->avctx, st->codecpar); if (ret < 0) goto fail; if (!st->time_base.num) { /* fall back on the default timebase values */ if (par->codec_type == AVMEDIA_TYPE_AUDIO && par->sample_rate) avpriv_set_pts_info(st, 64, 1, par->sample_rate); else avpriv_set_pts_info(st, 33, 1, 90000); } switch (par->codec_type) { case AVMEDIA_TYPE_AUDIO: if (par->sample_rate <= 0) { av_log(s, AV_LOG_ERROR, \"sample rate not set\\n\"); ret = AVERROR(EINVAL); goto fail; } if (!par->block_align) par->block_align = par->channels * av_get_bits_per_sample(par->codec_id) >> 3; break; case AVMEDIA_TYPE_VIDEO: if ((par->width <= 0 || par->height <= 0) && !(of->flags & AVFMT_NODIMENSIONS)) { av_log(s, AV_LOG_ERROR, \"dimensions not set\\n\"); ret = AVERROR(EINVAL); goto fail; } if (av_cmp_q(st->sample_aspect_ratio, par->sample_aspect_ratio) && fabs(av_q2d(st->sample_aspect_ratio) - av_q2d(par->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio) ) { if (st->sample_aspect_ratio.num != 0 && st->sample_aspect_ratio.den != 0 && par->sample_aspect_ratio.num != 0 && par->sample_aspect_ratio.den != 0) { av_log(s, AV_LOG_ERROR, \"Aspect ratio mismatch between muxer \" \"(%d/%d) and encoder layer (%d/%d)\\n\", st->sample_aspect_ratio.num, st->sample_aspect_ratio.den, par->sample_aspect_ratio.num, par->sample_aspect_ratio.den); ret = AVERROR(EINVAL); goto fail; } } break; } desc = avcodec_descriptor_get(par->codec_id); if (desc && desc->props & AV_CODEC_PROP_REORDER) st->internal->reorder = 1; if (of->codec_tag) { if ( par->codec_tag && par->codec_id == AV_CODEC_ID_RAWVIDEO && ( av_codec_get_tag(of->codec_tag, par->codec_id) == 0 || av_codec_get_tag(of->codec_tag, par->codec_id) == MKTAG('r', 'a', 'w', ' ')) && !validate_codec_tag(s, st)) { // the current rawvideo encoding system ends up setting // the wrong codec_tag for avi/mov, we override it here par->codec_tag = 0; } if (par->codec_tag) { if (!validate_codec_tag(s, st)) { char tagbuf[32], tagbuf2[32]; av_get_codec_tag_string(tagbuf, sizeof(tagbuf), par->codec_tag); av_get_codec_tag_string(tagbuf2, sizeof(tagbuf2), av_codec_get_tag(s->oformat->codec_tag, par->codec_id)); av_log(s, AV_LOG_ERROR, \"Tag %s/0x%08x incompatible with output codec id '%d' (%s)\\n\", tagbuf, par->codec_tag, par->codec_id, tagbuf2); ret = AVERROR_INVALIDDATA; goto fail; } } else par->codec_tag = av_codec_get_tag(of->codec_tag, par->codec_id); } if (par->codec_type != AVMEDIA_TYPE_ATTACHMENT) s->internal->nb_interleaved_streams++; } if (!s->priv_data && of->priv_data_size > 0) { s->priv_data = av_mallocz(of->priv_data_size); if (!s->priv_data) { ret = AVERROR(ENOMEM); goto fail; } if (of->priv_class) { *(const AVClass **)s->priv_data = of->priv_class; av_opt_set_defaults(s->priv_data); if ((ret = av_opt_set_dict2(s->priv_data, &tmp, 
AV_OPT_SEARCH_CHILDREN)) < 0) goto fail; } } /* set muxer identification string */ if (!(s->flags & AVFMT_FLAG_BITEXACT)) { av_dict_set(&s->metadata, \"encoder\", LIBAVFORMAT_IDENT, 0); } else { av_dict_set(&s->metadata, \"encoder\", NULL, 0); } for (e = NULL; e = av_dict_get(s->metadata, \"encoder-\", e, AV_DICT_IGNORE_SUFFIX); ) { av_dict_set(&s->metadata, e->key, NULL, 0); } if (options) { av_dict_free(options); *options = tmp; } if (s->oformat->init) { if ((ret = s->oformat->init(s)) < 0) { if (s->oformat->deinit) s->oformat->deinit(s); return ret; } return ret == 0; } return 0; fail: av_dict_free(&tmp); return ret; }"} {"target": 0, "idx": 20726, "func": "static void gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd) { TCGv_i32 r_asi, r_size, r_rd; r_asi = gen_get_asi(dc, insn); r_size = tcg_const_i32(size); r_rd = tcg_const_i32(rd); gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd); tcg_temp_free_i32(r_rd); tcg_temp_free_i32(r_size); tcg_temp_free_i32(r_asi); }"} {"target": 0, "idx": 20744, "func": "static int kvm_arch_set_tsc_khz(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; int r; if (!env->tsc_khz) { return 0; } r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ? kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) : -ENOTSUP; if (r < 0) { /* When KVM_SET_TSC_KHZ fails, it's an error only if the current * TSC frequency doesn't match the one we want. */ int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ? kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP; if (cur_freq <= 0 || cur_freq != env->tsc_khz) { error_report(\"warning: TSC frequency mismatch between \" \"VM (%\" PRId64 \" kHz) and host (%d kHz), \" \"and TSC scaling unavailable\", env->tsc_khz, cur_freq); return r; } } return 0; }"} {"target": 0, "idx": 20772, "func": "int ff_frame_thread_encoder_init(AVCodecContext *avctx, AVDictionary *options){ int i=0; ThreadContext *c; if( !(avctx->thread_type & FF_THREAD_FRAME) || !(avctx->codec->capabilities & CODEC_CAP_INTRA_ONLY)) return 0; if( !avctx->thread_count && avctx->codec_id == AV_CODEC_ID_MJPEG && !(avctx->flags & CODEC_FLAG_QSCALE)) { av_log(avctx, AV_LOG_DEBUG, \"Forcing thread count to 1 for MJPEG encoding, use -thread_type slice \" \"or a constant quantizer if you want to use multiple cpu cores\\n\"); avctx->thread_count = 1; } if( avctx->thread_count > 1 && avctx->codec_id == AV_CODEC_ID_MJPEG && !(avctx->flags & CODEC_FLAG_QSCALE)) av_log(avctx, AV_LOG_WARNING, \"MJPEG CBR encoding works badly with frame multi-threading, consider \" \"using -threads 1, -thread_type slice or a constant quantizer.\\n\"); if(!avctx->thread_count) { avctx->thread_count = av_cpu_count(); avctx->thread_count = FFMIN(avctx->thread_count, MAX_THREADS); } if(avctx->thread_count <= 1) return 0; if (avctx->codec_id == AV_CODEC_ID_HUFFYUV || avctx->codec_id == AV_CODEC_ID_FFVHUFF) { // huffyuv doesnt support these with multiple frame threads currently if (avctx->context_model > 0 || (avctx->flags & CODEC_FLAG_PASS1)) return 0; } if(avctx->thread_count > MAX_THREADS) return AVERROR(EINVAL); av_assert0(!avctx->internal->frame_thread_encoder); c = avctx->internal->frame_thread_encoder = av_mallocz(sizeof(ThreadContext)); if(!c) return AVERROR(ENOMEM); c->parent_avctx = avctx; c->task_fifo = av_fifo_alloc(sizeof(Task) * BUFFER_SIZE); if(!c->task_fifo) goto fail; pthread_mutex_init(&c->task_fifo_mutex, NULL); pthread_mutex_init(&c->finished_task_mutex, NULL); pthread_mutex_init(&c->buffer_mutex, NULL); 
pthread_cond_init(&c->task_fifo_cond, NULL); pthread_cond_init(&c->finished_task_cond, NULL); for(i=0; ithread_count ; i++){ AVDictionary *tmp = NULL; void *tmpv; AVCodecContext *thread_avctx = avcodec_alloc_context3(avctx->codec); if(!thread_avctx) goto fail; tmpv = thread_avctx->priv_data; *thread_avctx = *avctx; thread_avctx->priv_data = tmpv; thread_avctx->internal = NULL; memcpy(thread_avctx->priv_data, avctx->priv_data, avctx->codec->priv_data_size); thread_avctx->thread_count = 1; thread_avctx->active_thread_type &= ~FF_THREAD_FRAME; av_dict_copy(&tmp, options, 0); av_dict_set(&tmp, \"threads\", \"1\", 0); if(avcodec_open2(thread_avctx, avctx->codec, &tmp) < 0) { av_dict_free(&tmp); goto fail; } av_dict_free(&tmp); av_assert0(!thread_avctx->internal->frame_thread_encoder); thread_avctx->internal->frame_thread_encoder = c; if(pthread_create(&c->worker[i], NULL, worker, thread_avctx)) { goto fail; } } avctx->active_thread_type = FF_THREAD_FRAME; return 0; fail: avctx->thread_count = i; av_log(avctx, AV_LOG_ERROR, \"ff_frame_thread_encoder_init failed\\n\"); ff_frame_thread_encoder_free(avctx); return -1; }"} {"target": 0, "idx": 20773, "func": "static void intra_predict_mad_cow_dc_0l0_8x8_msa(uint8_t *src, int32_t stride) { uint8_t lp_cnt; uint32_t src0 = 0; uint64_t out0, out1; for (lp_cnt = 0; lp_cnt < 4; lp_cnt++) { src0 += src[(4 + lp_cnt) * stride - 1]; } src0 = (src0 + 2) >> 2; out0 = 0x8080808080808080; out1 = src0 * 0x0101010101010101; for (lp_cnt = 4; lp_cnt--;) { SD(out0, src); SD(out1, src + stride * 4); src += stride; } }"} {"target": 0, "idx": 20777, "func": "static void srt_to_ass(AVCodecContext *avctx, AVBPrint *dst, const char *in, int x1, int y1, int x2, int y2) { if (x1 >= 0 && y1 >= 0) { /* XXX: here we rescale coordinate assuming they are in DVD resolution * (720x480) since we don't have anything better */ if (x2 >= 0 && y2 >= 0 && (x2 != x1 || y2 != y1) && x2 >= x1 && y2 >= y1) { /* text rectangle defined, write the text at the center of the rectangle */ const int cx = x1 + (x2 - x1)/2; const int cy = y1 + (y2 - y1)/2; const int scaled_x = cx * (int64_t)ASS_DEFAULT_PLAYRESX / 720; const int scaled_y = cy * (int64_t)ASS_DEFAULT_PLAYRESY / 480; av_bprintf(dst, \"{\\\\an5}{\\\\pos(%d,%d)}\", scaled_x, scaled_y); } else { /* only the top left corner, assume the text starts in that corner */ const int scaled_x = x1 * (int64_t)ASS_DEFAULT_PLAYRESX / 720; const int scaled_y = y1 * (int64_t)ASS_DEFAULT_PLAYRESY / 480; av_bprintf(dst, \"{\\\\an1}{\\\\pos(%d,%d)}\", scaled_x, scaled_y); } } ff_htmlmarkup_to_ass(avctx, dst, in); }"} {"target": 0, "idx": 20779, "func": "static int msvideo1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; Msvideo1Context *s = avctx->priv_data; int ret; s->buf = buf; s->size = buf_size; if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) return ret; if (s->mode_8bit) { const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL); if (pal) { memcpy(s->pal, pal, AVPALETTE_SIZE); s->frame->palette_has_changed = 1; } } if (s->mode_8bit) msvideo1_decode_8bit(s); else msvideo1_decode_16bit(s); if ((ret = av_frame_ref(data, s->frame)) < 0) return ret; *got_frame = 1; /* report that the buffer was completely consumed */ return buf_size; }"} {"target": 0, "idx": 20823, "func": "pvscsi_realizefn(PCIDevice *pci_dev, Error **errp) { PVSCSIState *s = PVSCSI(pci_dev); trace_pvscsi_state(\"init\"); /* PCI subsystem ID, subsystem vendor ID, revision 
*/ if (PVSCSI_USE_OLD_PCI_CONFIGURATION(s)) { pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 0x1000); } else { pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, PCI_VENDOR_ID_VMWARE); pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, PCI_DEVICE_ID_VMWARE_PVSCSI); pci_config_set_revision(pci_dev->config, 0x2); } /* PCI latency timer = 255 */ pci_dev->config[PCI_LATENCY_TIMER] = 0xff; /* Interrupt pin A */ pci_config_set_interrupt_pin(pci_dev->config, 1); memory_region_init_io(&s->io_space, OBJECT(s), &pvscsi_ops, s, \"pvscsi-io\", PVSCSI_MEM_SPACE_SIZE); pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->io_space); pvscsi_init_msi(s); if (pci_is_express(pci_dev) && pci_bus_is_express(pci_dev->bus)) { pcie_endpoint_cap_init(pci_dev, PVSCSI_EXP_EP_OFFSET); } s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s); scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info, NULL); /* override default SCSI bus hotplug-handler, with pvscsi's one */ qbus_set_hotplug_handler(BUS(&s->bus), DEVICE(s), &error_abort); pvscsi_reset_state(s); }"} {"target": 0, "idx": 20826, "func": "static void vde_from_qemu(void *opaque, const uint8_t *buf, int size) { VDEState *s = opaque; int ret; for(;;) { ret = vde_send(s->vde, (const char *)buf, size, 0); if (ret < 0 && errno == EINTR) { } else { break; } } }"} {"target": 1, "idx": 20869, "func": "int ff_dirac_golomb_read_32bit(DiracGolombLUT *lut_ctx, const uint8_t *buf, int bytes, uint8_t *_dst, int coeffs) { int i, b, c_idx = 0; int32_t *dst = (int32_t *)_dst; DiracGolombLUT *future[4], *l = &lut_ctx[2*LUT_SIZE + buf[0]]; INIT_RESIDUE(res, 0, 0); #define APPEND_RESIDUE(N, M) \\ N |= M >> (N ## _bits); \\ N ## _bits += (M ## _bits) for (b = 1; b <= bytes; b++) { future[0] = &lut_ctx[buf[b]]; future[1] = future[0] + 1*LUT_SIZE; future[2] = future[0] + 2*LUT_SIZE; future[3] = future[0] + 3*LUT_SIZE; if ((c_idx + 1) > coeffs) return c_idx; /* res_bits is a hint for better branch prediction */ if (res_bits && l->sign) { int32_t coeff = 1; APPEND_RESIDUE(res, l->preamble); for (i = 0; i < (res_bits >> 1) - 1; i++) { coeff <<= 1; coeff |= (res >> (RSIZE_BITS - 2*i - 2)) & 1; } dst[c_idx++] = l->sign * (coeff - 1); res_bits = res = 0; } memcpy(&dst[c_idx], l->ready, LUT_BITS*sizeof(int32_t)); c_idx += l->ready_num; APPEND_RESIDUE(res, l->leftover); l = future[l->need_s ? 3 : !res_bits ? 2 : res_bits & 1]; } return c_idx; }"} {"target": 1, "idx": 20879, "func": "static int dirac_unpack_idwt_params(DiracContext *s) { GetBitContext *gb = &s->gb; int i, level; unsigned tmp; #define CHECKEDREAD(dst, cond, errmsg) \\ tmp = svq3_get_ue_golomb(gb); \\ if (cond) { \\ av_log(s->avctx, AV_LOG_ERROR, errmsg); \\ return -1; \\ }\\ dst = tmp; align_get_bits(gb); s->zero_res = s->num_refs ? get_bits1(gb) : 0; if (s->zero_res) return 0; /*[DIRAC_STD] 11.3.1 Transform parameters. 
transform_parameters() */ CHECKEDREAD(s->wavelet_idx, tmp > 6, \"wavelet_idx is too big\\n\") CHECKEDREAD(s->wavelet_depth, tmp > MAX_DWT_LEVELS || tmp < 1, \"invalid number of DWT decompositions\\n\") if (!s->low_delay) { /* Codeblock paramaters (core syntax only) */ if (get_bits1(gb)) { for (i = 0; i <= s->wavelet_depth; i++) { CHECKEDREAD(s->codeblock[i].width , tmp < 1, \"codeblock width invalid\\n\") CHECKEDREAD(s->codeblock[i].height, tmp < 1, \"codeblock height invalid\\n\") CHECKEDREAD(s->codeblock_mode, tmp > 1, \"unknown codeblock mode\\n\") } else for (i = 0; i <= s->wavelet_depth; i++) s->codeblock[i].width = s->codeblock[i].height = 1; } else { /* Slice parameters + quantization matrix*/ /*[DIRAC_STD] 11.3.4 Slice coding Parameters (low delay syntax only). slice_parameters() */ s->lowdelay.num_x = svq3_get_ue_golomb(gb); s->lowdelay.num_y = svq3_get_ue_golomb(gb); s->lowdelay.bytes.num = svq3_get_ue_golomb(gb); s->lowdelay.bytes.den = svq3_get_ue_golomb(gb); /* [DIRAC_STD] 11.3.5 Quantisation matrices (low-delay syntax). quant_matrix() */ if (get_bits1(gb)) { av_log(s->avctx,AV_LOG_DEBUG,\"Low Delay: Has Custom Quantization Matrix!\\n\"); /* custom quantization matrix */ s->lowdelay.quant[0][0] = svq3_get_ue_golomb(gb); for (level = 0; level < s->wavelet_depth; level++) { s->lowdelay.quant[level][1] = svq3_get_ue_golomb(gb); s->lowdelay.quant[level][2] = svq3_get_ue_golomb(gb); s->lowdelay.quant[level][3] = svq3_get_ue_golomb(gb); } else { /* default quantization matrix */ for (level = 0; level < s->wavelet_depth; level++) for (i = 0; i < 4; i++) { s->lowdelay.quant[level][i] = default_qmat[s->wavelet_idx][level][i]; /* haar with no shift differs for different depths */ if (s->wavelet_idx == 3) s->lowdelay.quant[level][i] += 4*(s->wavelet_depth-1 - level); return 0;"} {"target": 1, "idx": 20883, "func": "static int ffm_read_data(AVFormatContext *s, uint8_t *buf, int size, int header) { FFMContext *ffm = s->priv_data; AVIOContext *pb = s->pb; int len, fill_size, size1, frame_offset; uint32_t id; int64_t last_pos = -1; size1 = size; while (size > 0) { redo: len = ffm->packet_end - ffm->packet_ptr; if (len < 0) return -1; if (len > size) len = size; if (len == 0) { if (avio_tell(pb) == ffm->file_size) { if (ffm->server_attached) { avio_seek(pb, ffm->packet_size, SEEK_SET); } else return AVERROR_EOF; } retry_read: if (pb->buffer_size != ffm->packet_size) { int64_t tell = avio_tell(pb); int ret = ffio_set_buf_size(pb, ffm->packet_size); if (ret < 0) return ret; avio_seek(pb, tell, SEEK_SET); } id = avio_rb16(pb); /* PACKET_ID */ if (id != PACKET_ID) { if (ffm_resync(s, id) < 0) return -1; last_pos = avio_tell(pb); } fill_size = avio_rb16(pb); ffm->dts = avio_rb64(pb); frame_offset = avio_rb16(pb); avio_read(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE); if (ffm->packet_size < FFM_HEADER_SIZE + fill_size || frame_offset < 0) { return -1; } ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size); /* if first packet or resynchronization packet, we must handle it specifically */ if (ffm->first_packet || (frame_offset & 0x8000)) { if (!frame_offset) { /* This packet has no frame headers in it */ if (avio_tell(pb) >= ffm->packet_size * 3LL) { int64_t seekback = FFMIN(ffm->packet_size * 2LL, avio_tell(pb) - last_pos); seekback = FFMAX(seekback, 0); avio_seek(pb, -seekback, SEEK_CUR); goto retry_read; } /* This is bad, we cannot find a valid frame header */ return 0; } ffm->first_packet = 0; if ((frame_offset & 0x7fff) < FFM_HEADER_SIZE) { ffm->packet_end = 
ffm->packet_ptr; return -1; } ffm->packet_ptr = ffm->packet + (frame_offset & 0x7fff) - FFM_HEADER_SIZE; if (!header) break; } else { ffm->packet_ptr = ffm->packet; } goto redo; } memcpy(buf, ffm->packet_ptr, len); buf += len; ffm->packet_ptr += len; size -= len; header = 0; } return size1 - size; }"} {"target": 0, "idx": 20895, "func": "static void mipsnet_ioport_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned int size) { MIPSnetState *s = opaque; addr &= 0x3f; trace_mipsnet_write(addr, val); switch (addr) { case MIPSNET_TX_DATA_COUNT: s->tx_count = (val <= MAX_ETH_FRAME_SIZE) ? val : 0; s->tx_written = 0; break; case MIPSNET_INT_CTL: if (val & MIPSNET_INTCTL_TXDONE) { s->intctl &= ~MIPSNET_INTCTL_TXDONE; } else if (val & MIPSNET_INTCTL_RXDONE) { s->intctl &= ~MIPSNET_INTCTL_RXDONE; } else if (val & MIPSNET_INTCTL_TESTBIT) { mipsnet_reset(s); s->intctl |= MIPSNET_INTCTL_TESTBIT; } else if (!val) { /* ACK testbit interrupt, flag was cleared on read. */ } s->busy = !!s->intctl; mipsnet_update_irq(s); break; case MIPSNET_TX_DATA_BUFFER: s->tx_buffer[s->tx_written++] = val; if (s->tx_written == s->tx_count) { /* Send buffer. */ trace_mipsnet_send(s->tx_count); qemu_send_packet(&s->nic->nc, s->tx_buffer, s->tx_count); s->tx_count = s->tx_written = 0; s->intctl |= MIPSNET_INTCTL_TXDONE; s->busy = 1; mipsnet_update_irq(s); } break; /* Read-only registers */ case MIPSNET_DEV_ID: case MIPSNET_BUSY: case MIPSNET_RX_DATA_COUNT: case MIPSNET_INTERRUPT_INFO: case MIPSNET_RX_DATA_BUFFER: default: break; } }"} {"target": 0, "idx": 20898, "func": "mac_writereg(E1000State *s, int index, uint32_t val) { uint32_t macaddr[2]; s->mac_reg[index] = val; if (index == RA || index == RA + 1) { macaddr[0] = cpu_to_le32(s->mac_reg[RA]); macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]); qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr); } }"} {"target": 0, "idx": 20899, "func": "static int xen_pt_pmcsr_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, uint16_t *val, uint16_t dev_value, uint16_t valid_mask) { XenPTRegInfo *reg = cfg_entry->reg; uint16_t writable_mask = 0; uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask); /* modify emulate register */ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); /* create value for writing to I/O device register */ *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~PCI_PM_CTRL_PME_STATUS, throughable_mask); return 0; }"} {"target": 0, "idx": 20906, "func": "static int net_socket_connect_init(VLANState *vlan, const char *model, const char *name, const char *host_str) { NetSocketState *s; int fd, connected, ret, err; struct sockaddr_in saddr; if (parse_host_port(&saddr, host_str) < 0) return -1; fd = qemu_socket(PF_INET, SOCK_STREAM, 0); if (fd < 0) { perror(\"socket\"); return -1; } socket_set_nonblock(fd); connected = 0; for(;;) { ret = connect(fd, (struct sockaddr *)&saddr, sizeof(saddr)); if (ret < 0) { err = socket_error(); if (err == EINTR || err == EWOULDBLOCK) { } else if (err == EINPROGRESS) { break; #ifdef _WIN32 } else if (err == WSAEALREADY) { break; #endif } else { perror(\"connect\"); closesocket(fd); return -1; } } else { connected = 1; break; } } s = net_socket_fd_init(vlan, model, name, fd, connected); if (!s) return -1; snprintf(s->nc.info_str, sizeof(s->nc.info_str), \"socket: connect to %s:%d\", inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); return 0; }"} {"target": 0, "idx": 20910, "func": "static int 
usb_device_del(const char *devname) { USBPort *port; USBPort **lastp; USBDevice *dev; int bus_num, addr; const char *p; if (!used_usb_ports) return -1; p = strchr(devname, '.'); if (!p) return -1; bus_num = strtoul(devname, NULL, 0); addr = strtoul(p + 1, NULL, 0); if (bus_num != 0) return -1; lastp = &used_usb_ports; port = used_usb_ports; while (port && port->dev->addr != addr) { lastp = &port->next; port = port->next; } if (!port) return -1; dev = port->dev; *lastp = port->next; usb_attach(port, NULL); dev->handle_destroy(dev); port->next = free_usb_ports; free_usb_ports = port; return 0; }"} {"target": 1, "idx": 20916, "func": "void qmp_migrate_set_parameters(bool has_compress_level, int64_t compress_level, bool has_compress_threads, int64_t compress_threads, bool has_decompress_threads, int64_t decompress_threads, bool has_cpu_throttle_initial, int64_t cpu_throttle_initial, bool has_cpu_throttle_increment, int64_t cpu_throttle_increment, bool has_tls_creds, const char *tls_creds, bool has_tls_hostname, const char *tls_hostname, Error **errp) { MigrationState *s = migrate_get_current(); if (has_compress_level && (compress_level < 0 || compress_level > 9)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, \"compress_level\", \"is invalid, it should be in the range of 0 to 9\"); return; if (has_compress_threads && (compress_threads < 1 || compress_threads > 255)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, \"compress_threads\", \"is invalid, it should be in the range of 1 to 255\"); return; if (has_decompress_threads && (decompress_threads < 1 || decompress_threads > 255)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, \"decompress_threads\", \"is invalid, it should be in the range of 1 to 255\"); return; if (has_cpu_throttle_initial && (cpu_throttle_initial < 1 || cpu_throttle_initial > 99)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, \"cpu_throttle_initial\", \"an integer in the range of 1 to 99\"); if (has_cpu_throttle_increment && (cpu_throttle_increment < 1 || cpu_throttle_increment > 99)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, \"cpu_throttle_increment\", \"an integer in the range of 1 to 99\"); if (has_compress_level) { s->parameters.compress_level = compress_level; if (has_compress_threads) { s->parameters.compress_threads = compress_threads; if (has_decompress_threads) { s->parameters.decompress_threads = decompress_threads; if (has_cpu_throttle_initial) { s->parameters.cpu_throttle_initial = cpu_throttle_initial; if (has_cpu_throttle_increment) { s->parameters.cpu_throttle_increment = cpu_throttle_increment;"} {"target": 0, "idx": 20929, "func": "static int drop_sync(QIOChannel *ioc, size_t size, Error **errp) { ssize_t ret = 0; char small[1024]; char *buffer; buffer = sizeof(small) >= size ? 
small : g_malloc(MIN(65536, size)); while (size > 0) { ssize_t count = MIN(65536, size); ret = read_sync(ioc, buffer, MIN(65536, size), errp); if (ret < 0) { goto cleanup; } size -= count; } cleanup: if (buffer != small) { g_free(buffer); } return ret; }"} {"target": 0, "idx": 20957, "func": "static int check_refcounts_l1(BlockDriverState *bs, BdrvCheckResult *res, void **refcount_table, int64_t *refcount_table_size, int64_t l1_table_offset, int l1_size, int flags) { BDRVQcow2State *s = bs->opaque; uint64_t *l1_table = NULL, l2_offset, l1_size2; int i, ret; l1_size2 = l1_size * sizeof(uint64_t); /* Mark L1 table as used */ ret = inc_refcounts(bs, res, refcount_table, refcount_table_size, l1_table_offset, l1_size2); if (ret < 0) { goto fail; } /* Read L1 table entries from disk */ if (l1_size2 > 0) { l1_table = g_try_malloc(l1_size2); if (l1_table == NULL) { ret = -ENOMEM; res->check_errors++; goto fail; } ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2); if (ret < 0) { fprintf(stderr, \"ERROR: I/O error in check_refcounts_l1\\n\"); res->check_errors++; goto fail; } for(i = 0;i < l1_size; i++) be64_to_cpus(&l1_table[i]); } /* Do the actual checks */ for(i = 0; i < l1_size; i++) { l2_offset = l1_table[i]; if (l2_offset) { /* Mark L2 table as used */ l2_offset &= L1E_OFFSET_MASK; ret = inc_refcounts(bs, res, refcount_table, refcount_table_size, l2_offset, s->cluster_size); if (ret < 0) { goto fail; } /* L2 tables are cluster aligned */ if (offset_into_cluster(s, l2_offset)) { fprintf(stderr, \"ERROR l2_offset=%\" PRIx64 \": Table is not \" \"cluster aligned; L1 entry corrupted\\n\", l2_offset); res->corruptions++; } /* Process and check L2 entries */ ret = check_refcounts_l2(bs, res, refcount_table, refcount_table_size, l2_offset, flags); if (ret < 0) { goto fail; } } } g_free(l1_table); return 0; fail: g_free(l1_table); return ret; }"} {"target": 0, "idx": 20963, "func": "static void pl061_update(pl061_state *s) { uint8_t changed; uint8_t mask; uint8_t out; int i; /* Outputs float high. */ /* FIXME: This is board dependent. */ out = (s->data & s->dir) | ~s->dir; changed = s->old_data ^ out; if (!changed) return; s->old_data = out; for (i = 0; i < 8; i++) { mask = 1 << i; if ((changed & mask) && s->out) { DPRINTF(\"Set output %d = %d\\n\", i, (out & mask) != 0); qemu_set_irq(s->out[i], (out & mask) != 0); } } /* FIXME: Implement input interrupts. */ }"} {"target": 0, "idx": 20981, "func": "void gic_set_pending_private(GICState *s, int cpu, int irq) { int cm = 1 << cpu; if (GIC_TEST_PENDING(irq, cm)) return; DPRINTF(\"Set %d pending cpu %d\\n\", irq, cpu); GIC_SET_PENDING(irq, cm); gic_update(s); }"} {"target": 0, "idx": 20987, "func": "static uint32_t pxa2xx_pm_read(void *opaque, target_phys_addr_t addr) { struct pxa2xx_state_s *s = (struct pxa2xx_state_s *) opaque; if (addr > s->pm_base + PCMD31) { /* Special case: PWRI2C registers appear in the same range. */ return pxa2xx_i2c_read(s->i2c[1], addr); } addr -= s->pm_base; switch (addr) { case PMCR ... 
PCMD31: if (addr & 3) goto fail; return s->pm_regs[addr >> 2]; default: fail: printf(\"%s: Bad register \" REG_FMT \"\\n\", __FUNCTION__, addr); break; } return 0; }"} {"target": 0, "idx": 20992, "func": "static void ssim_4x4x2_core(const uint8_t *main, int main_stride, const uint8_t *ref, int ref_stride, int sums[2][4]) { int x, y, z; for (z = 0; z < 2; z++) { uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0; for (y = 0; y < 4; y++) { for (x = 0; x < 4; x++) { int a = main[x + y * main_stride]; int b = ref[x + y * ref_stride]; s1 += a; s2 += b; ss += a*a; ss += b*b; s12 += a*b; } } sums[z][0] = s1; sums[z][1] = s2; sums[z][2] = ss; sums[z][3] = s12; main += 4; ref += 4; } }"} {"target": 0, "idx": 20997, "func": "static void ivshmem_check_memdev_is_busy(Object *obj, const char *name, Object *val, Error **errp) { MemoryRegion *mr; mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), &error_abort); if (memory_region_is_mapped(mr)) { char *path = object_get_canonical_path_component(val); error_setg(errp, \"can't use already busy memdev: %s\", path); g_free(path); } else { qdev_prop_allow_set_link_before_realize(obj, name, val, errp); } }"} {"target": 1, "idx": 21006, "func": "static int seq_parse_frame_data(SeqDemuxContext *seq, ByteIOContext *pb) { unsigned int offset_table[4], buffer_num[4]; TiertexSeqFrameBuffer *seq_buffer; int i, e; seq->current_frame_offs += SEQ_FRAME_SIZE; url_fseek(pb, seq->current_frame_offs, SEEK_SET); /* sound data */ seq->current_audio_data_offs = get_le16(pb); if (seq->current_audio_data_offs != 0) { seq->current_audio_data_size = SEQ_AUDIO_BUFFER_SIZE * 2; } else { seq->current_audio_data_size = 0; } /* palette data */ seq->current_pal_data_offs = get_le16(pb); if (seq->current_pal_data_offs != 0) { seq->current_pal_data_size = 768; } else { seq->current_pal_data_size = 0; } /* video data */ for (i = 0; i < 4; i++) buffer_num[i] = get_byte(pb); for (i = 0; i < 4; i++) offset_table[i] = get_le16(pb); for (i = 0; i < 3; i++) { if (offset_table[i] != 0) { for (e = i + 1; e < 4 && offset_table[e] == 0; e++); seq_fill_buffer(seq, pb, buffer_num[1 + i], offset_table[i], offset_table[e] - offset_table[i]); } } if (buffer_num[0] != 255) { seq_buffer = &seq->frame_buffers[buffer_num[0]]; seq->current_video_data_size = seq_buffer->fill_size; seq->current_video_data_ptr = seq_buffer->data; seq_buffer->fill_size = 0; } else { seq->current_video_data_size = 0; seq->current_video_data_ptr = 0; } return 0; }"} {"target": 1, "idx": 21008, "func": "static void fd_chr_close(struct CharDriverState *chr) { FDCharDriver *s = chr->opaque; if (s->fd_in_tag) { g_source_remove(s->fd_in_tag); s->fd_in_tag = 0; } if (s->fd_in) { g_io_channel_unref(s->fd_in); } if (s->fd_out) { g_io_channel_unref(s->fd_out); } g_free(s); qemu_chr_be_event(chr, CHR_EVENT_CLOSED); }"} {"target": 1, "idx": 21009, "func": "static void async_complete(void *opaque) { USBHostDevice *s = opaque; AsyncURB *aurb; int urbs = 0; while (1) { USBPacket *p; int r = ioctl(s->fd, USBDEVFS_REAPURBNDELAY, &aurb); if (r < 0) { if (errno == EAGAIN) { if (urbs > 2) { fprintf(stderr, \"husb: %d iso urbs finished at once\\n\", urbs); } return; } if (errno == ENODEV && !s->closing) { do_disconnect(s); return; } DPRINTF(\"husb: async. reap urb failed errno %d\\n\", errno); return; } DPRINTF(\"husb: async completed. 
aurb %p status %d alen %d\\n\", aurb, aurb->urb.status, aurb->urb.actual_length); /* If this is a buffered iso urb mark it as complete and don't do anything else (it is handled further in usb_host_handle_iso_data) */ if (aurb->iso_frame_idx == -1) { int inflight; if (aurb->urb.status == -EPIPE) { set_halt(s, aurb->urb.endpoint & 0xf); } aurb->iso_frame_idx = 0; urbs++; inflight = change_iso_inflight(s, aurb->urb.endpoint & 0xf, -1); if (inflight == 0 && is_iso_started(s, aurb->urb.endpoint & 0xf)) { fprintf(stderr, \"husb: out of buffers for iso stream\\n\"); } continue; } p = aurb->packet; if (p) { switch (aurb->urb.status) { case 0: p->len += aurb->urb.actual_length; break; case -EPIPE: set_halt(s, p->devep); p->len = USB_RET_STALL; break; default: p->len = USB_RET_NAK; break; } if (aurb->urb.type == USBDEVFS_URB_TYPE_CONTROL) { usb_generic_async_ctrl_complete(&s->dev, p); } else if (!aurb->more) { usb_packet_complete(&s->dev, p); } } async_free(aurb); } }"} {"target": 1, "idx": 21020, "func": "static void dhcp_decode(const struct bootp_t *bp, int *pmsg_type, struct in_addr *preq_addr) { const uint8_t *p, *p_end; int len, tag; *pmsg_type = 0; preq_addr->s_addr = htonl(0L); p = bp->bp_vend; p_end = p + DHCP_OPT_LEN; if (memcmp(p, rfc1533_cookie, 4) != 0) return; p += 4; while (p < p_end) { tag = p[0]; if (tag == RFC1533_PAD) { p++; } else if (tag == RFC1533_END) { } else { p++; if (p >= p_end) len = *p++; DPRINTF(\"dhcp: tag=%d len=%d\\n\", tag, len); switch(tag) { case RFC2132_MSG_TYPE: if (len >= 1) *pmsg_type = p[0]; case RFC2132_REQ_ADDR: if (len >= 4) { memcpy(&(preq_addr->s_addr), p, 4); default: p += len; if (*pmsg_type == DHCPREQUEST && preq_addr->s_addr == htonl(0L) && bp->bp_ciaddr.s_addr) { memcpy(&(preq_addr->s_addr), &bp->bp_ciaddr, 4);"} {"target": 1, "idx": 21026, "func": "static void GCC_FMT_ATTR(2, 3) qtest_sendf(CharBackend *chr, const char *fmt, ...) { va_list ap; gchar *buffer; va_start(ap, fmt); buffer = g_strdup_vprintf(fmt, ap); qtest_send(chr, buffer); va_end(ap); }"} {"target": 1, "idx": 21033, "func": "static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f, int version_id) { VirtIOBlock *s = VIRTIO_BLK(vdev); while (qemu_get_sbyte(f)) { VirtIOBlockReq *req = virtio_blk_alloc_request(s); qemu_get_buffer(f, (unsigned char *)req->elem, sizeof(VirtQueueElement)); req->next = s->rq; s->rq = req; virtqueue_map_sg(req->elem->in_sg, req->elem->in_addr, req->elem->in_num, 1); virtqueue_map_sg(req->elem->out_sg, req->elem->out_addr, req->elem->out_num, 0); } return 0; }"} {"target": 1, "idx": 21045, "func": "struct AACISError ff_aac_is_encoding_err(AACEncContext *s, ChannelElement *cpe, int start, int w, int g, float ener0, float ener1, float ener01, int use_pcoeffs, int phase) { int i, w2; SingleChannelElement *sce0 = &cpe->ch[0]; SingleChannelElement *sce1 = &cpe->ch[1]; float *L = use_pcoeffs ? sce0->pcoeffs : sce0->coeffs; float *R = use_pcoeffs ? 
sce1->pcoeffs : sce1->coeffs; float *L34 = &s->scoefs[256*0], *R34 = &s->scoefs[256*1]; float *IS = &s->scoefs[256*2], *I34 = &s->scoefs[256*3]; float dist1 = 0.0f, dist2 = 0.0f; struct AACISError is_error = {0}; for (w2 = 0; w2 < sce0->ics.group_len[w]; w2++) { FFPsyBand *band0 = &s->psy.ch[s->cur_channel+0].psy_bands[(w+w2)*16+g]; FFPsyBand *band1 = &s->psy.ch[s->cur_channel+1].psy_bands[(w+w2)*16+g]; int is_band_type, is_sf_idx = FFMAX(1, sce0->sf_idx[(w+w2)*16+g]-4); float e01_34 = phase*pow(ener1/ener0, 3.0/4.0); float maxval, dist_spec_err = 0.0f; float minthr = FFMIN(band0->threshold, band1->threshold); for (i = 0; i < sce0->ics.swb_sizes[g]; i++) IS[i] = (L[start+(w+w2)*128+i] + phase*R[start+(w+w2)*128+i])*sqrt(ener0/ener01); abs_pow34_v(L34, &L[start+(w+w2)*128], sce0->ics.swb_sizes[g]); abs_pow34_v(R34, &R[start+(w+w2)*128], sce0->ics.swb_sizes[g]); abs_pow34_v(I34, IS, sce0->ics.swb_sizes[g]); maxval = find_max_val(1, sce0->ics.swb_sizes[g], I34); is_band_type = find_min_book(maxval, is_sf_idx); dist1 += quantize_band_cost(s, &L[start + (w+w2)*128], L34, sce0->ics.swb_sizes[g], sce0->sf_idx[(w+w2)*16+g], sce0->band_type[(w+w2)*16+g], s->lambda / band0->threshold, INFINITY, NULL, 0); dist1 += quantize_band_cost(s, &R[start + (w+w2)*128], R34, sce1->ics.swb_sizes[g], sce1->sf_idx[(w+w2)*16+g], sce1->band_type[(w+w2)*16+g], s->lambda / band1->threshold, INFINITY, NULL, 0); dist2 += quantize_band_cost(s, IS, I34, sce0->ics.swb_sizes[g], is_sf_idx, is_band_type, s->lambda / minthr, INFINITY, NULL, 0); for (i = 0; i < sce0->ics.swb_sizes[g]; i++) { dist_spec_err += (L34[i] - I34[i])*(L34[i] - I34[i]); dist_spec_err += (R34[i] - I34[i]*e01_34)*(R34[i] - I34[i]*e01_34); } dist_spec_err *= s->lambda / minthr; dist2 += dist_spec_err; } is_error.pass = dist2 <= dist1; is_error.phase = phase; is_error.error = fabsf(dist1 - dist2); is_error.dist1 = dist1; is_error.dist2 = dist2; return is_error; }"} {"target": 1, "idx": 21059, "func": "static Picture * remove_short(H264Context *h, int frame_num){ MpegEncContext * const s = &h->s; int i; if(s->avctx->debug&FF_DEBUG_MMCO) av_log(h->s.avctx, AV_LOG_DEBUG, \"remove short %d count %d\\n\", frame_num, h->short_ref_count); for(i=0; ishort_ref_count; i++){ Picture *pic= h->short_ref[i]; if(s->avctx->debug&FF_DEBUG_MMCO) av_log(h->s.avctx, AV_LOG_DEBUG, \"%d %d %p\\n\", i, pic->frame_num, pic); if(pic->frame_num == frame_num){ h->short_ref[i]= NULL; memmove(&h->short_ref[i], &h->short_ref[i+1], (h->short_ref_count - i - 1)*sizeof(Picture*)); h->short_ref_count--; return pic; } } return NULL; }"} {"target": 0, "idx": 21068, "func": "static int cook_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; COOKContext *q = avctx->priv_data; int i; int offset = 0; int chidx = 0; if (buf_size < avctx->block_align) return buf_size; /* estimate subpacket sizes */ q->subpacket[0].size = avctx->block_align; for(i=1;inum_subpackets;i++){ q->subpacket[i].size = 2 * buf[avctx->block_align - q->num_subpackets + i]; q->subpacket[0].size -= q->subpacket[i].size + 1; if (q->subpacket[0].size < 0) { av_log(avctx,AV_LOG_DEBUG,\"frame subpacket size total > avctx->block_align!\\n\"); return AVERROR_INVALIDDATA; } } /* decode supbackets */ for(i=0;inum_subpackets;i++){ q->subpacket[i].bits_per_subpacket = (q->subpacket[i].size*8)>>q->subpacket[i].bits_per_subpdiv; q->subpacket[i].ch_idx = chidx; av_log(avctx,AV_LOG_DEBUG,\"subpacket[%i] size %i js %i %i block_align 
%i\\n\",i,q->subpacket[i].size,q->subpacket[i].joint_stereo,offset,avctx->block_align); decode_subpacket(q, &q->subpacket[i], buf + offset, data); offset += q->subpacket[i].size; chidx += q->subpacket[i].num_channels; av_log(avctx,AV_LOG_DEBUG,\"subpacket[%i] %i %i\\n\",i,q->subpacket[i].size * 8,get_bits_count(&q->gb)); } *data_size = q->nb_channels * q->samples_per_channel * av_get_bytes_per_sample(avctx->sample_fmt); /* Discard the first two frames: no valid audio. */ if (avctx->frame_number < 2) *data_size = 0; return avctx->block_align; }"} {"target": 0, "idx": 21091, "func": "struct pxa2xx_state_s *pxa270_init(DisplayState *ds, const char *revision) { struct pxa2xx_state_s *s; struct pxa2xx_ssp_s *ssp; char cpu_model[16]; int iomemtype, i; s = (struct pxa2xx_state_s *) qemu_mallocz(sizeof(struct pxa2xx_state_s)); s->env = cpu_init(); snprintf(cpu_model, sizeof(cpu_model), \"pxa270-%s\", revision); cpu_arm_set_model(s->env, cpu_model); s->pic = pxa2xx_pic_init(0x40d00000, s->env); s->dma = pxa27x_dma_init(0x40000000, s->pic[PXA2XX_PIC_DMA]); pxa27x_timer_init(0x40a00000, &s->pic[PXA2XX_PIC_OST_0], s->pic[PXA27X_PIC_OST_4_11], s->env); s->gpio = pxa2xx_gpio_init(0x40e00000, s->env, s->pic, 121); s->mmc = pxa2xx_mmci_init(0x41100000, s->pic[PXA2XX_PIC_MMC], s->dma); for (i = 0; pxa270_serial[i].io_base; i ++) if (serial_hds[i]) serial_mm_init(pxa270_serial[i].io_base, 2, s->pic[pxa270_serial[i].irqn], serial_hds[i], 1); else break; if (serial_hds[i]) s->fir = pxa2xx_fir_init(0x40800000, s->pic[PXA2XX_PIC_ICP], s->dma, serial_hds[i]); if (ds) s->lcd = pxa2xx_lcdc_init(0x44000000, s->pic[PXA2XX_PIC_LCD], ds); s->cm_base = 0x41300000; s->cm_regs[CCCR >> 4] = 0x02000210; /* 416.0 MHz */ s->clkcfg = 0x00000009; /* Turbo mode active */ iomemtype = cpu_register_io_memory(0, pxa2xx_cm_readfn, pxa2xx_cm_writefn, s); cpu_register_physical_memory(s->cm_base, 0xfff, iomemtype); cpu_arm_set_cp_io(s->env, 14, pxa2xx_cp14_read, pxa2xx_cp14_write, s); s->mm_base = 0x48000000; s->mm_regs[MDMRS >> 2] = 0x00020002; s->mm_regs[MDREFR >> 2] = 0x03ca4000; s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */ iomemtype = cpu_register_io_memory(0, pxa2xx_mm_readfn, pxa2xx_mm_writefn, s); cpu_register_physical_memory(s->mm_base, 0xfff, iomemtype); for (i = 0; pxa27x_ssp[i].io_base; i ++); s->ssp = (struct pxa2xx_ssp_s **) qemu_mallocz(sizeof(struct pxa2xx_ssp_s *) * i); ssp = (struct pxa2xx_ssp_s *) qemu_mallocz(sizeof(struct pxa2xx_ssp_s) * i); for (i = 0; pxa27x_ssp[i].io_base; i ++) { s->ssp[i] = &ssp[i]; ssp[i].base = pxa27x_ssp[i].io_base; ssp[i].irq = s->pic[pxa27x_ssp[i].irqn]; iomemtype = cpu_register_io_memory(0, pxa2xx_ssp_readfn, pxa2xx_ssp_writefn, &ssp[i]); cpu_register_physical_memory(ssp[i].base, 0xfff, iomemtype); } if (usb_enabled) { usb_ohci_init_pxa(0x4c000000, 3, -1, s->pic[PXA2XX_PIC_USBH1]); } s->pcmcia[0] = pxa2xx_pcmcia_init(0x20000000); s->pcmcia[1] = pxa2xx_pcmcia_init(0x30000000); s->rtc_base = 0x40900000; iomemtype = cpu_register_io_memory(0, pxa2xx_rtc_readfn, pxa2xx_rtc_writefn, s); cpu_register_physical_memory(s->rtc_base, 0xfff, iomemtype); pxa2xx_rtc_reset(s); s->pm_base = 0x40f00000; iomemtype = cpu_register_io_memory(0, pxa2xx_pm_readfn, pxa2xx_pm_writefn, s); cpu_register_physical_memory(s->pm_base, 0xfff, iomemtype); s->i2s = pxa2xx_i2s_init(0x40400000, s->pic[PXA2XX_PIC_I2S], s->dma); /* GPIO1 resets the processor */ /* The handler can be overriden by board-specific code */ pxa2xx_gpio_handler_set(s->gpio, 1, pxa2xx_reset, s); return s; }"} {"target": 1, "idx": 
21124, "func": "static int hdev_has_zero_init(BlockDriverState *bs) { return 0; }"} {"target": 1, "idx": 21141, "func": "static int read_thread(void *arg) { VideoState *is = arg; AVFormatContext *ic = NULL; int err, i, ret; int st_index[AVMEDIA_TYPE_NB]; AVPacket pkt1, *pkt = &pkt1; int64_t stream_start_time; int pkt_in_play_range = 0; AVDictionaryEntry *t; AVDictionary **opts; int orig_nb_streams; SDL_mutex *wait_mutex = SDL_CreateMutex(); int scan_all_pmts_set = 0; int64_t pkt_ts; if (!wait_mutex) { av_log(NULL, AV_LOG_FATAL, \"SDL_CreateMutex(): %s\\n\", SDL_GetError()); ret = AVERROR(ENOMEM); goto fail; } memset(st_index, -1, sizeof(st_index)); is->last_video_stream = is->video_stream = -1; is->last_audio_stream = is->audio_stream = -1; is->last_subtitle_stream = is->subtitle_stream = -1; is->eof = 0; ic = avformat_alloc_context(); if (!ic) { av_log(NULL, AV_LOG_FATAL, \"Could not allocate context.\\n\"); ret = AVERROR(ENOMEM); goto fail; } ic->interrupt_callback.callback = decode_interrupt_cb; ic->interrupt_callback.opaque = is; if (!av_dict_get(format_opts, \"scan_all_pmts\", NULL, AV_DICT_MATCH_CASE)) { av_dict_set(&format_opts, \"scan_all_pmts\", \"1\", AV_DICT_DONT_OVERWRITE); scan_all_pmts_set = 1; } err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts); if (err < 0) { print_error(is->filename, err); ret = -1; goto fail; } if (scan_all_pmts_set) av_dict_set(&format_opts, \"scan_all_pmts\", NULL, AV_DICT_MATCH_CASE); if ((t = av_dict_get(format_opts, \"\", NULL, AV_DICT_IGNORE_SUFFIX))) { av_log(NULL, AV_LOG_ERROR, \"Option %s not found.\\n\", t->key); ret = AVERROR_OPTION_NOT_FOUND; goto fail; } is->ic = ic; if (genpts) ic->flags |= AVFMT_FLAG_GENPTS; av_format_inject_global_side_data(ic); opts = setup_find_stream_info_opts(ic, codec_opts); orig_nb_streams = ic->nb_streams; err = avformat_find_stream_info(ic, opts); for (i = 0; i < orig_nb_streams; i++) av_dict_free(&opts[i]); av_freep(&opts); if (err < 0) { av_log(NULL, AV_LOG_WARNING, \"%s: could not find codec parameters\\n\", is->filename); ret = -1; goto fail; } if (ic->pb) ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end if (seek_by_bytes < 0) seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp(\"ogg\", ic->iformat->name); is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 
10.0 : 3600.0; if (!window_title && (t = av_dict_get(ic->metadata, \"title\", NULL, 0))) window_title = av_asprintf(\"%s - %s\", t->value, input_filename); /* if seeking requested, we execute it */ if (start_time != AV_NOPTS_VALUE) { int64_t timestamp; timestamp = start_time; /* add the stream start time */ if (ic->start_time != AV_NOPTS_VALUE) timestamp += ic->start_time; ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0); if (ret < 0) { av_log(NULL, AV_LOG_WARNING, \"%s: could not seek to position %0.3f\\n\", is->filename, (double)timestamp / AV_TIME_BASE); } } is->realtime = is_realtime(ic); if (show_status) av_dump_format(ic, 0, is->filename, 0); for (i = 0; i < ic->nb_streams; i++) { AVStream *st = ic->streams[i]; enum AVMediaType type = st->codec->codec_type; st->discard = AVDISCARD_ALL; if (wanted_stream_spec[type] && st_index[type] == -1) if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0) st_index[type] = i; } for (i = 0; i < AVMEDIA_TYPE_NB; i++) { if (wanted_stream_spec[i] && st_index[i] == -1) { av_log(NULL, AV_LOG_ERROR, \"Stream specifier %s does not match any %s stream\\n\", wanted_stream_spec[i], av_get_media_type_string(i)); st_index[i] = INT_MAX; } } if (!video_disable) st_index[AVMEDIA_TYPE_VIDEO] = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0); if (!audio_disable) st_index[AVMEDIA_TYPE_AUDIO] = av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, st_index[AVMEDIA_TYPE_AUDIO], st_index[AVMEDIA_TYPE_VIDEO], NULL, 0); if (!video_disable && !subtitle_disable) st_index[AVMEDIA_TYPE_SUBTITLE] = av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE, st_index[AVMEDIA_TYPE_SUBTITLE], (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ? st_index[AVMEDIA_TYPE_AUDIO] : st_index[AVMEDIA_TYPE_VIDEO]), NULL, 0); is->show_mode = show_mode; if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) { AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]]; AVCodecContext *avctx = st->codec; AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL); if (avctx->width) set_default_window_size(avctx->width, avctx->height, sar); } /* open the streams */ if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) { stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]); } ret = -1; if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) { ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]); } if (is->show_mode == SHOW_MODE_NONE) is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT; if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) { stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]); } if (is->video_stream < 0 && is->audio_stream < 0) { av_log(NULL, AV_LOG_FATAL, \"Failed to open file '%s' or configure filtergraph\\n\", is->filename); ret = -1; goto fail; } if (infinite_buffer < 0 && is->realtime) infinite_buffer = 1; for (;;) { if (is->abort_request) break; if (is->paused != is->last_paused) { is->last_paused = is->paused; if (is->paused) is->read_pause_return = av_read_pause(ic); else av_read_play(ic); } #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL if (is->paused && (!strcmp(ic->iformat->name, \"rtsp\") || (ic->pb && !strncmp(input_filename, \"mmsh:\", 5)))) { /* wait 10 ms to avoid trying to get another packet */ /* XXX: horrible */ SDL_Delay(10); continue; } #endif if (is->seek_req) { int64_t seek_target = is->seek_pos; int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN; int64_t seek_max = is->seek_rel < 0 ? 
seek_target - is->seek_rel - 2: INT64_MAX; // FIXME the +-2 is due to rounding being not done in the correct direction in generation // of the seek_pos/seek_rel variables ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, \"%s: error while seeking\\n\", is->ic->filename); } else { if (is->audio_stream >= 0) { packet_queue_flush(&is->audioq); packet_queue_put(&is->audioq, &flush_pkt); } if (is->subtitle_stream >= 0) { packet_queue_flush(&is->subtitleq); packet_queue_put(&is->subtitleq, &flush_pkt); } if (is->video_stream >= 0) { packet_queue_flush(&is->videoq); packet_queue_put(&is->videoq, &flush_pkt); } if (is->seek_flags & AVSEEK_FLAG_BYTE) { set_clock(&is->extclk, NAN, 0); } else { set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0); } } is->seek_req = 0; is->queue_attachments_req = 1; is->eof = 0; if (is->paused) step_to_next_frame(is); } if (is->queue_attachments_req) { if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) { AVPacket copy; if ((ret = av_copy_packet(©, &is->video_st->attached_pic)) < 0) goto fail; packet_queue_put(&is->videoq, ©); packet_queue_put_nullpacket(&is->videoq, is->video_stream); } is->queue_attachments_req = 0; } /* if the queue are full, no need to read more */ if (infinite_buffer<1 && (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request) && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) { /* wait 10 ms */ SDL_LockMutex(wait_mutex); SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10); SDL_UnlockMutex(wait_mutex); continue; } if (!is->paused && (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) && (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) { if (loop != 1 && (!loop || --loop)) { stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0); } else if (autoexit) { ret = AVERROR_EOF; goto fail; } } ret = av_read_frame(ic, pkt); if (ret < 0) { if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) { if (is->video_stream >= 0) packet_queue_put_nullpacket(&is->videoq, is->video_stream); if (is->audio_stream >= 0) packet_queue_put_nullpacket(&is->audioq, is->audio_stream); if (is->subtitle_stream >= 0) packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream); is->eof = 1; } if (ic->pb && ic->pb->error) break; SDL_LockMutex(wait_mutex); SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10); SDL_UnlockMutex(wait_mutex); continue; } else { is->eof = 0; } /* check if packet is in play range specified by user, then queue, otherwise discard */ stream_start_time = ic->streams[pkt->stream_index]->start_time; pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts; pkt_in_play_range = duration == AV_NOPTS_VALUE || (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) * av_q2d(ic->streams[pkt->stream_index]->time_base) - (double)(start_time != AV_NOPTS_VALUE ? 
start_time : 0) / 1000000 <= ((double)duration / 1000000); if (pkt->stream_index == is->audio_stream && pkt_in_play_range) { packet_queue_put(&is->audioq, pkt); } else if (pkt->stream_index == is->video_stream && pkt_in_play_range && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) { packet_queue_put(&is->videoq, pkt); } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) { packet_queue_put(&is->subtitleq, pkt); } else { av_free_packet(pkt); } } /* wait until the end */ while (!is->abort_request) { SDL_Delay(100); } ret = 0; fail: /* close each stream */ if (is->audio_stream >= 0) stream_component_close(is, is->audio_stream); if (is->video_stream >= 0) stream_component_close(is, is->video_stream); if (is->subtitle_stream >= 0) stream_component_close(is, is->subtitle_stream); if (ic) { avformat_close_input(&ic); is->ic = NULL; } if (ret != 0) { SDL_Event event; event.type = FF_QUIT_EVENT; event.user.data1 = is; SDL_PushEvent(&event); } SDL_DestroyMutex(wait_mutex); return 0; }"} {"target": 0, "idx": 21148, "func": "static inline void RENAME(yuv2yuv1_ar)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc, const int16_t *chrVSrc, const int16_t *alpSrc, uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, int dstW, int chrDstW) { int p= 4; const uint8_t *src[4]= { alpSrc + dstW, lumSrc + dstW, chrUSrc + chrDstW, chrVSrc + chrDstW }; uint8_t *dst[4]= { aDest, dest, uDest, vDest }; x86_reg counter[4]= { dstW, dstW, chrDstW, chrDstW }; while (p--) { if (dst[p]) { __asm__ volatile( \"mov %2, %%\"REG_a\" \\n\\t\" \"pcmpeqw %%mm7, %%mm7 \\n\\t\" \"psrlw $15, %%mm7 \\n\\t\" \"psllw $6, %%mm7 \\n\\t\" \".p2align 4 \\n\\t\" /* FIXME Unroll? */ \"1: \\n\\t\" \"movq (%0, %%\"REG_a\", 2), %%mm0 \\n\\t\" \"movq 8(%0, %%\"REG_a\", 2), %%mm1 \\n\\t\" \"paddsw %%mm7, %%mm0 \\n\\t\" \"paddsw %%mm7, %%mm1 \\n\\t\" \"psraw $7, %%mm0 \\n\\t\" \"psraw $7, %%mm1 \\n\\t\" \"packuswb %%mm1, %%mm0 \\n\\t\" MOVNTQ(%%mm0, (%1, %%REGa)) \"add $8, %%\"REG_a\" \\n\\t\" \"jnc 1b \\n\\t\" :: \"r\" (src[p]), \"r\" (dst[p] + counter[p]), \"g\" (-counter[p]) : \"%\"REG_a ); } } }"} {"target": 1, "idx": 21160, "func": "void visit_start_struct(Visitor *v, const char *name, void **obj, size_t size, Error **errp) { Error *err = NULL; if (obj) { assert(size); assert(v->type != VISITOR_OUTPUT || *obj); } v->start_struct(v, name, obj, size, &err); if (obj && v->type == VISITOR_INPUT) { assert(!err != !*obj); } error_propagate(errp, err); }"} {"target": 1, "idx": 21171, "func": "static void pmac_ide_transfer_cb(void *opaque, int ret) { DBDMA_io *io = opaque; MACIOIDEState *m = io->opaque; IDEState *s = idebus_active_if(&m->bus); int64_t sector_num; int nsector, remainder; int64_t offset; MACIO_DPRINTF(\"pmac_ide_transfer_cb\\n\"); if (ret < 0) { MACIO_DPRINTF(\"DMA error\\n\"); m->aiocb = NULL; ide_dma_error(s); io->remainder_len = 0; goto done; } if (!m->dma_active) { MACIO_DPRINTF(\"waiting for data (%#x - %#x - %x)\\n\", s->nsector, io->len, s->status); /* data not ready yet, wait for the channel to get restarted */ io->processing = false; return; } if (s->io_buffer_size <= 0) { MACIO_DPRINTF(\"end of transfer\\n\"); s->status = READY_STAT | SEEK_STAT; ide_set_irq(s->bus); m->dma_active = false; goto done; } if (io->len == 0) { MACIO_DPRINTF(\"End of DMA transfer\\n\"); goto done; } /* Calculate number of sectors */ sector_num = ide_get_sector(s) + (s->io_buffer_index >> 9); offset = (ide_get_sector(s) << 9) + s->io_buffer_index; nsector = (io->len + 0x1ff) >> 9; remainder = io->len 
& 0x1ff; s->nsector -= nsector; MACIO_DPRINTF(\"nsector: %d remainder: %x\\n\", nsector, remainder); MACIO_DPRINTF(\"sector: %\"PRIx64\" %x\\n\", sector_num, nsector); switch (s->dma_cmd) { case IDE_DMA_READ: pmac_dma_read(s->blk, offset, io->len, pmac_ide_transfer_cb, io); break; case IDE_DMA_WRITE: pmac_dma_write(s->blk, sector_num, nsector, pmac_ide_transfer_cb, io); break; case IDE_DMA_TRIM: MACIO_DPRINTF(\"TRIM command issued!\"); break; } return; done: if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) { block_acct_done(blk_get_stats(s->blk), &s->acct); } io->dma_end(opaque); }"} {"target": 1, "idx": 21173, "func": "static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, unsigned src_size) { const uint16_t *end; #ifdef HAVE_MMX const uint16_t *mm_end; #endif uint8_t *d = (uint8_t *)dst; const uint16_t *s = (uint16_t *)src; end = s + src_size/2; #ifdef HAVE_MMX __asm __volatile(PREFETCH\" %0\"::\"m\"(*s):\"memory\"); __asm __volatile(\"pxor %%mm7,%%mm7\\n\\t\":::\"memory\"); mm_end = end - 3; while(s < mm_end) { __asm __volatile( PREFETCH\" 32%1\\n\\t\" \"movq %1, %%mm0\\n\\t\" \"movq %1, %%mm1\\n\\t\" \"movq %1, %%mm2\\n\\t\" \"pand %2, %%mm0\\n\\t\" \"pand %3, %%mm1\\n\\t\" \"pand %4, %%mm2\\n\\t\" \"psllq $3, %%mm0\\n\\t\" \"psrlq $3, %%mm1\\n\\t\" \"psrlq $8, %%mm2\\n\\t\" \"movq %%mm0, %%mm3\\n\\t\" \"movq %%mm1, %%mm4\\n\\t\" \"movq %%mm2, %%mm5\\n\\t\" \"punpcklwd %%mm7, %%mm0\\n\\t\" \"punpcklwd %%mm7, %%mm1\\n\\t\" \"punpcklwd %%mm7, %%mm2\\n\\t\" \"punpckhwd %%mm7, %%mm3\\n\\t\" \"punpckhwd %%mm7, %%mm4\\n\\t\" \"punpckhwd %%mm7, %%mm5\\n\\t\" \"psllq $8, %%mm1\\n\\t\" \"psllq $16, %%mm2\\n\\t\" \"por %%mm1, %%mm0\\n\\t\" \"por %%mm2, %%mm0\\n\\t\" \"psllq $8, %%mm4\\n\\t\" \"psllq $16, %%mm5\\n\\t\" \"por %%mm4, %%mm3\\n\\t\" \"por %%mm5, %%mm3\\n\\t\" MOVNTQ\" %%mm0, %0\\n\\t\" MOVNTQ\" %%mm3, 8%0\\n\\t\" :\"=m\"(*d) :\"m\"(*s),\"m\"(mask16b),\"m\"(mask16g),\"m\"(mask16r) :\"memory\"); d += 16; s += 4; } __asm __volatile(SFENCE:::\"memory\"); __asm __volatile(EMMS:::\"memory\"); #endif while(s < end) { register uint16_t bgr; bgr = *s++; #ifdef WORDS_BIGENDIAN *d++ = 0; *d++ = (bgr&0x1F)<<3; *d++ = (bgr&0x7E0)>>3; *d++ = (bgr&0xF800)>>8; #else *d++ = (bgr&0x1F)<<3; *d++ = (bgr&0x7E0)>>3; *d++ = (bgr&0xF800)>>8; *d++ = 0; #endif } }"} {"target": 1, "idx": 21176, "func": "static void celt_search_for_dual_stereo(OpusPsyContext *s, CeltFrame *f) { float td1, td2; f->dual_stereo = 0; bands_dist(s, f, &td1); f->dual_stereo = 1; bands_dist(s, f, &td2); f->dual_stereo = td2 < td1; s->dual_stereo_used += td2 < td1; }"} {"target": 1, "idx": 21189, "func": "void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val, int l) { int i, was_irq_disabled = pci_irq_disabled(d); uint32_t config_size = pci_config_size(d); for (i = 0; i < l && addr + i < config_size; val >>= 8, ++i) { uint8_t wmask = d->wmask[addr + i]; uint8_t w1cmask = d->w1cmask[addr + i]; assert(!(wmask & w1cmask)); d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask); d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */ } if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) || ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) || ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) || range_covers_byte(addr, l, PCI_COMMAND)) pci_update_mappings(d); if (range_covers_byte(addr, l, PCI_COMMAND)) pci_update_irq_disabled(d, was_irq_disabled); }"} {"target": 0, "idx": 21196, "func": "static inline int check_fit_i32(uint32_t val, unsigned int bits) { return ((val << (32 - 
bits)) >> (32 - bits)) == val; }"} {"target": 0, "idx": 21200, "func": "static void v9fs_getattr(void *opaque) { int32_t fid; size_t offset = 7; ssize_t retval = 0; struct stat stbuf; V9fsFidState *fidp; uint64_t request_mask; V9fsStatDotl v9stat_dotl; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; pdu_unmarshal(pdu, offset, \"dq\", &fid, &request_mask); trace_v9fs_getattr(pdu->tag, pdu->id, fid, request_mask); fidp = get_fid(pdu, fid); if (fidp == NULL) { retval = -ENOENT; goto out_nofid; } /* * Currently we only support BASIC fields in stat, so there is no * need to look at request_mask. */ retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf); if (retval < 0) { goto out; } stat_to_v9stat_dotl(s, &stbuf, &v9stat_dotl); /* fill st_gen if requested and supported by underlying fs */ if (request_mask & P9_STATS_GEN) { retval = v9fs_co_st_gen(pdu, &fidp->path, stbuf.st_mode, &v9stat_dotl); if (retval < 0) { goto out; } v9stat_dotl.st_result_mask |= P9_STATS_GEN; } retval = offset; retval += pdu_marshal(pdu, offset, \"A\", &v9stat_dotl); trace_v9fs_getattr_return(pdu->tag, pdu->id, v9stat_dotl.st_result_mask, v9stat_dotl.st_mode, v9stat_dotl.st_uid, v9stat_dotl.st_gid); out: put_fid(pdu, fidp); out_nofid: complete_pdu(s, pdu, retval); }"} {"target": 0, "idx": 21215, "func": "int qemu_timeout_ns_to_ms(int64_t ns) { int64_t ms; if (ns < 0) { return -1; } if (!ns) { return 0; } /* Always round up, because it's better to wait too long than to wait too * little and effectively busy-wait */ ms = DIV_ROUND_UP(ns, SCALE_MS); /* To avoid overflow problems, limit this to 2^31, i.e. approx 25 days */ if (ms > (int64_t) INT32_MAX) { ms = INT32_MAX; } return (int) ms; }"} {"target": 0, "idx": 21222, "func": "void ff_er_frame_end(MpegEncContext *s){ int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error; int distance; int threshold_part[4]= {100,100,100}; int threshold= 50; int is_intra_likely; int size = s->b8_stride * 2 * s->mb_height; Picture *pic= s->current_picture_ptr; if(!s->error_recognition || s->error_count==0 || s->avctx->lowres || s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU || s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return; if(s->current_picture.motion_val[0] == NULL){ av_log(s->avctx, AV_LOG_ERROR, \"Warning MVs not available\\n\"); for(i=0; i<2; i++){ pic->ref_index[i]= av_mallocz(size * sizeof(uint8_t)); pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t)); pic->motion_val[i]= pic->motion_val_base[i]+4; } pic->motion_subsample_log2= 3; s->current_picture= *s->current_picture_ptr; } for(i=0; i<2; i++){ if(pic->ref_index[i]) memset(pic->ref_index[i], 0, size * sizeof(uint8_t)); } if(s->avctx->debug&FF_DEBUG_ER){ for(mb_y=0; mb_ymb_height; mb_y++){ for(mb_x=0; mb_xmb_width; mb_x++){ int status= s->error_status_table[mb_x + mb_y*s->mb_stride]; av_log(s->avctx, AV_LOG_DEBUG, \"%2X \", status); } av_log(s->avctx, AV_LOG_DEBUG, \"\\n\"); } } #if 1 /* handle overlapping slices */ for(error_type=1; error_type<=3; error_type++){ int end_ok=0; for(i=s->mb_num-1; i>=0; i--){ const int mb_xy= s->mb_index2xy[i]; int error= s->error_status_table[mb_xy]; if(error&(1<error_status_table[mb_xy]|= 1<partitioned_frame){ int end_ok=0; for(i=s->mb_num-1; i>=0; i--){ const int mb_xy= s->mb_index2xy[i]; int error= s->error_status_table[mb_xy]; if(error&AC_END) end_ok=0; if((error&MV_END) || (error&DC_END) || (error&AC_ERROR)) end_ok=1; if(!end_ok) s->error_status_table[mb_xy]|= AC_ERROR; if(error&VP_START) end_ok=0; } } #endif /* handle missing 
slices */ if(s->error_recognition>=4){ int end_ok=1; for(i=s->mb_num-2; i>=s->mb_width+100; i--){ //FIXME +100 hack const int mb_xy= s->mb_index2xy[i]; int error1= s->error_status_table[mb_xy ]; int error2= s->error_status_table[s->mb_index2xy[i+1]]; if(error1&VP_START) end_ok=1; if( error2==(VP_START|DC_ERROR|AC_ERROR|MV_ERROR|AC_END|DC_END|MV_END) && error1!=(VP_START|DC_ERROR|AC_ERROR|MV_ERROR|AC_END|DC_END|MV_END) && ((error1&AC_END) || (error1&DC_END) || (error1&MV_END))){ //end & uninit end_ok=0; } if(!end_ok) s->error_status_table[mb_xy]|= DC_ERROR|AC_ERROR|MV_ERROR; } } #if 1 /* backward mark errors */ distance=9999999; for(error_type=1; error_type<=3; error_type++){ for(i=s->mb_num-1; i>=0; i--){ const int mb_xy= s->mb_index2xy[i]; int error= s->error_status_table[mb_xy]; if(!s->mbskip_table[mb_xy]) //FIXME partition specific distance++; if(error&(1<partitioned_frame){ if(distance < threshold_part[error_type-1]) s->error_status_table[mb_xy]|= 1<error_status_table[mb_xy]|= 1<mb_num; i++){ const int mb_xy= s->mb_index2xy[i]; int old_error= s->error_status_table[mb_xy]; if(old_error&VP_START) error= old_error& (DC_ERROR|AC_ERROR|MV_ERROR); else{ error|= old_error& (DC_ERROR|AC_ERROR|MV_ERROR); s->error_status_table[mb_xy]|= error; } } #if 1 /* handle not partitioned case */ if(!s->partitioned_frame){ for(i=0; imb_num; i++){ const int mb_xy= s->mb_index2xy[i]; error= s->error_status_table[mb_xy]; if(error&(AC_ERROR|DC_ERROR|MV_ERROR)) error|= AC_ERROR|DC_ERROR|MV_ERROR; s->error_status_table[mb_xy]= error; } } #endif dc_error= ac_error= mv_error=0; for(i=0; imb_num; i++){ const int mb_xy= s->mb_index2xy[i]; error= s->error_status_table[mb_xy]; if(error&DC_ERROR) dc_error ++; if(error&AC_ERROR) ac_error ++; if(error&MV_ERROR) mv_error ++; } av_log(s->avctx, AV_LOG_INFO, \"concealing %d DC, %d AC, %d MV errors\\n\", dc_error, ac_error, mv_error); is_intra_likely= is_intra_more_likely(s); /* set unknown mb-type to most likely */ for(i=0; imb_num; i++){ const int mb_xy= s->mb_index2xy[i]; error= s->error_status_table[mb_xy]; if(!((error&DC_ERROR) && (error&MV_ERROR))) continue; if(is_intra_likely) s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4; else s->current_picture.mb_type[mb_xy]= MB_TYPE_16x16 | MB_TYPE_L0; } /* handle inter blocks with damaged AC */ for(mb_y=0; mb_ymb_height; mb_y++){ for(mb_x=0; mb_xmb_width; mb_x++){ const int mb_xy= mb_x + mb_y * s->mb_stride; const int mb_type= s->current_picture.mb_type[mb_xy]; error= s->error_status_table[mb_xy]; if(IS_INTRA(mb_type)) continue; //intra if(error&MV_ERROR) continue; //inter with damaged MV if(!(error&AC_ERROR)) continue; //undamaged inter s->mv_dir = MV_DIR_FORWARD; s->mb_intra=0; s->mb_skipped=0; if(IS_8X8(mb_type)){ int mb_index= mb_x*2 + mb_y*2*s->b8_stride; int j; s->mv_type = MV_TYPE_8X8; for(j=0; j<4; j++){ s->mv[0][j][0] = s->current_picture.motion_val[0][ mb_index + (j&1) + (j>>1)*s->b8_stride ][0]; s->mv[0][j][1] = s->current_picture.motion_val[0][ mb_index + (j&1) + (j>>1)*s->b8_stride ][1]; } }else{ s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = s->current_picture.motion_val[0][ mb_x*2 + mb_y*2*s->b8_stride ][0]; s->mv[0][0][1] = s->current_picture.motion_val[0][ mb_x*2 + mb_y*2*s->b8_stride ][1]; } s->dsp.clear_blocks(s->block[0]); s->mb_x= mb_x; s->mb_y= mb_y; decode_mb(s); } } /* guess MVs */ if(s->pict_type==FF_B_TYPE){ for(mb_y=0; mb_ymb_height; mb_y++){ for(mb_x=0; mb_xmb_width; mb_x++){ int xy= mb_x*2 + mb_y*2*s->b8_stride; const int mb_xy= mb_x + mb_y * s->mb_stride; const int mb_type= 
s->current_picture.mb_type[mb_xy]; error= s->error_status_table[mb_xy]; if(IS_INTRA(mb_type)) continue; if(!(error&MV_ERROR)) continue; //inter with undamaged MV if(!(error&AC_ERROR)) continue; //undamaged inter s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD; s->mb_intra=0; s->mv_type = MV_TYPE_16X16; s->mb_skipped=0; if(s->pp_time){ int time_pp= s->pp_time; int time_pb= s->pb_time; s->mv[0][0][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp; s->mv[0][0][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp; s->mv[1][0][0] = s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp; s->mv[1][0][1] = s->next_picture.motion_val[0][xy][1]*(time_pb - time_pp)/time_pp; }else{ s->mv[0][0][0]= 0; s->mv[0][0][1]= 0; s->mv[1][0][0]= 0; s->mv[1][0][1]= 0; } s->dsp.clear_blocks(s->block[0]); s->mb_x= mb_x; s->mb_y= mb_y; decode_mb(s); } } }else guess_mv(s); #if CONFIG_MPEG_XVMC_DECODER /* the filters below are not XvMC compatible, skip them */ if(s->avctx->xvmc_acceleration) goto ec_clean; #endif /* fill DC for inter blocks */ for(mb_y=0; mb_ymb_height; mb_y++){ for(mb_x=0; mb_xmb_width; mb_x++){ int dc, dcu, dcv, y, n; int16_t *dc_ptr; uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy= mb_x + mb_y * s->mb_stride; const int mb_type= s->current_picture.mb_type[mb_xy]; error= s->error_status_table[mb_xy]; if(IS_INTRA(mb_type) && s->partitioned_frame) continue; // if(error&MV_ERROR) continue; //inter data damaged FIXME is this good? dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize; dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize; dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize; dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride]; for(n=0; n<4; n++){ dc=0; for(y=0; y<8; y++){ int x; for(x=0; x<8; x++){ dc+= dest_y[x + (n&1)*8 + (y + (n>>1)*8)*s->linesize]; } } dc_ptr[(n&1) + (n>>1)*s->b8_stride]= (dc+4)>>3; } dcu=dcv=0; for(y=0; y<8; y++){ int x; for(x=0; x<8; x++){ dcu+=dest_cb[x + y*(s->uvlinesize)]; dcv+=dest_cr[x + y*(s->uvlinesize)]; } } s->dc_val[1][mb_x + mb_y*s->mb_stride]= (dcu+4)>>3; s->dc_val[2][mb_x + mb_y*s->mb_stride]= (dcv+4)>>3; } } #if 1 /* guess DC for damaged blocks */ guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1); guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0); guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0); #endif /* filter luma DC */ filter181(s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride); #if 1 /* render DC only intra */ for(mb_y=0; mb_ymb_height; mb_y++){ for(mb_x=0; mb_xmb_width; mb_x++){ uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy= mb_x + mb_y * s->mb_stride; const int mb_type= s->current_picture.mb_type[mb_xy]; error= s->error_status_table[mb_xy]; if(IS_INTER(mb_type)) continue; if(!(error&AC_ERROR)) continue; //undamaged dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize; dest_cb= s->current_picture.data[1] + mb_x*8 + mb_y*8 *s->uvlinesize; dest_cr= s->current_picture.data[2] + mb_x*8 + mb_y*8 *s->uvlinesize; put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y); } } #endif if(s->avctx->error_concealment&FF_EC_DEBLOCK){ /* filter horizontal block boundaries */ h_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1); h_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0); h_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0); /* filter vertical block boundaries */ 
v_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1); v_block_filter(s, s->current_picture.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0); v_block_filter(s, s->current_picture.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0); } #if CONFIG_MPEG_XVMC_DECODER ec_clean: #endif /* clean a few tables */ for(i=0; imb_num; i++){ const int mb_xy= s->mb_index2xy[i]; int error= s->error_status_table[mb_xy]; if(s->pict_type!=FF_B_TYPE && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){ s->mbskip_table[mb_xy]=0; } s->mbintra_table[mb_xy]=1; } }"} {"target": 1, "idx": 21230, "func": "static void qxl_init_ramsize(PCIQXLDevice *qxl, uint32_t ram_min_mb) { /* vga ram (bar 0) */ if (qxl->ram_size_mb != -1) { qxl->vga.vram_size = qxl->ram_size_mb * 1024 * 1024; } if (qxl->vga.vram_size < ram_min_mb * 1024 * 1024) { qxl->vga.vram_size = ram_min_mb * 1024 * 1024; } /* vram32 (surfaces, 32bit, bar 1) */ if (qxl->vram32_size_mb != -1) { qxl->vram32_size = qxl->vram32_size_mb * 1024 * 1024; } if (qxl->vram32_size < 4096) { qxl->vram32_size = 4096; } /* vram (surfaces, 64bit, bar 4+5) */ if (qxl->vram_size_mb != -1) { qxl->vram_size = qxl->vram_size_mb * 1024 * 1024; } if (qxl->vram_size < qxl->vram32_size) { qxl->vram_size = qxl->vram32_size; } if (qxl->revision == 1) { qxl->vram32_size = 4096; qxl->vram_size = 4096; } qxl->vga.vram_size = msb_mask(qxl->vga.vram_size * 2 - 1); qxl->vram32_size = msb_mask(qxl->vram32_size * 2 - 1); qxl->vram_size = msb_mask(qxl->vram_size * 2 - 1); }"} {"target": 0, "idx": 21236, "func": "void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func (*pix_op)[4], int motion_x, int motion_y, int h) { Wmv2Context * const w= (Wmv2Context*)s; uint8_t *ptr; int dxy, offset, mx, my, src_x, src_y, v_edge_pos, linesize, uvlinesize; int emu=0; dxy = ((motion_y & 1) << 1) | (motion_x & 1); dxy = 2*dxy + w->hshift; src_x = s->mb_x * 16 + (motion_x >> 1); src_y = s->mb_y * 16 + (motion_y >> 1); /* WARNING: do no forget half pels */ v_edge_pos = s->v_edge_pos; src_x = av_clip(src_x, -16, s->width); src_y = av_clip(src_y, -16, s->height); if(src_x<=-16 || src_x >= s->width) dxy &= ~3; if(src_y<=-16 || src_y >= s->height) dxy &= ~4; linesize = s->linesize; uvlinesize = s->uvlinesize; ptr = ref_picture[0] + (src_y * linesize) + src_x; if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<1 || src_y<1 || src_x + 17 >= s->h_edge_pos || src_y + h+1 >= v_edge_pos){ s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr - 1 - s->linesize, s->linesize, 19, 19, src_x-1, src_y-1, s->h_edge_pos, s->v_edge_pos); ptr= s->edge_emu_buffer + 1 + s->linesize; emu=1; } } s->dsp.put_mspel_pixels_tab[dxy](dest_y , ptr , linesize); s->dsp.put_mspel_pixels_tab[dxy](dest_y+8 , ptr+8 , linesize); s->dsp.put_mspel_pixels_tab[dxy](dest_y +8*linesize, ptr +8*linesize, linesize); s->dsp.put_mspel_pixels_tab[dxy](dest_y+8+8*linesize, ptr+8+8*linesize, linesize); if(s->flags&CODEC_FLAG_GRAY) return; if (s->out_format == FMT_H263) { dxy = 0; if ((motion_x & 3) != 0) dxy |= 1; if ((motion_y & 3) != 0) dxy |= 2; mx = motion_x >> 2; my = motion_y >> 2; } else { mx = motion_x / 2; my = motion_y / 2; dxy = ((my & 1) << 1) | (mx & 1); mx >>= 1; my >>= 1; } src_x = s->mb_x * 8 + mx; src_y = s->mb_y * 8 + my; src_x = av_clip(src_x, -8, s->width >> 1); if (src_x == (s->width >> 1)) dxy &= ~1; src_y = av_clip(src_y, -8, s->height >> 1); if (src_y == (s->height >> 1)) dxy &= ~2; offset = (src_y * uvlinesize) + src_x; ptr = ref_picture[1] 
+ offset; if(emu){ s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; } pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1); ptr = ref_picture[2] + offset; if(emu){ s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; } pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1); }"} {"target": 1, "idx": 21238, "func": "static void register_types(void) { register_char_driver(\"null\", CHARDEV_BACKEND_KIND_NULL, NULL, qemu_chr_open_null); register_char_driver(\"socket\", CHARDEV_BACKEND_KIND_SOCKET, qemu_chr_parse_socket, qmp_chardev_open_socket); register_char_driver(\"udp\", CHARDEV_BACKEND_KIND_UDP, qemu_chr_parse_udp, qmp_chardev_open_udp); register_char_driver(\"ringbuf\", CHARDEV_BACKEND_KIND_RINGBUF, qemu_chr_parse_ringbuf, qemu_chr_open_ringbuf); register_char_driver(\"file\", CHARDEV_BACKEND_KIND_FILE, qemu_chr_parse_file_out, qmp_chardev_open_file); register_char_driver(\"stdio\", CHARDEV_BACKEND_KIND_STDIO, qemu_chr_parse_stdio, qemu_chr_open_stdio); #if defined HAVE_CHARDEV_SERIAL register_char_driver(\"serial\", CHARDEV_BACKEND_KIND_SERIAL, qemu_chr_parse_serial, qmp_chardev_open_serial); register_char_driver(\"tty\", CHARDEV_BACKEND_KIND_SERIAL, qemu_chr_parse_serial, qmp_chardev_open_serial); #endif #ifdef HAVE_CHARDEV_PARPORT register_char_driver(\"parallel\", CHARDEV_BACKEND_KIND_PARALLEL, qemu_chr_parse_parallel, qmp_chardev_open_parallel); register_char_driver(\"parport\", CHARDEV_BACKEND_KIND_PARALLEL, qemu_chr_parse_parallel, qmp_chardev_open_parallel); #endif #ifdef HAVE_CHARDEV_PTY register_char_driver(\"pty\", CHARDEV_BACKEND_KIND_PTY, NULL, qemu_chr_open_pty); #endif #ifdef _WIN32 register_char_driver(\"console\", CHARDEV_BACKEND_KIND_CONSOLE, NULL, qemu_chr_open_win_con); #endif register_char_driver(\"pipe\", CHARDEV_BACKEND_KIND_PIPE, qemu_chr_parse_pipe, qemu_chr_open_pipe); register_char_driver(\"mux\", CHARDEV_BACKEND_KIND_MUX, qemu_chr_parse_mux, qemu_chr_open_mux); /* Bug-compatibility: */ register_char_driver(\"memory\", CHARDEV_BACKEND_KIND_MEMORY, qemu_chr_parse_ringbuf, qemu_chr_open_ringbuf); /* this must be done after machine init, since we register FEs with muxes * as part of realize functions like serial_isa_realizefn when -nographic * is specified */ qemu_add_machine_init_done_notifier(&muxes_realize_notify); atexit(qemu_chr_cleanup); }"} {"target": 1, "idx": 21251, "func": "static int scaling_list_data(HEVCContext *s, ScalingList *sl) { GetBitContext *gb = &s->HEVClc.gb; uint8_t scaling_list_pred_mode_flag[4][6]; int32_t scaling_list_dc_coef[2][6]; int size_id, matrix_id, i, pos, delta; for (size_id = 0; size_id < 4; size_id++) for (matrix_id = 0; matrix_id < (size_id == 3 ? 2 : 6); matrix_id++) { scaling_list_pred_mode_flag[size_id][matrix_id] = get_bits1(gb); if (!scaling_list_pred_mode_flag[size_id][matrix_id]) { delta = get_ue_golomb_long(gb); /* Only need to handle non-zero delta. Zero means default, * which should already be in the arrays. */ if (delta) { // Copy from previous array. if (matrix_id - delta < 0) { av_log(s->avctx, AV_LOG_ERROR, \"Invalid delta in scaling list data: %d.\\n\", delta); return AVERROR_INVALIDDATA; } memcpy(sl->sl[size_id][matrix_id], sl->sl[size_id][matrix_id - delta], size_id > 0 ? 
64 : 16); if (size_id > 1) sl->sl_dc[size_id - 2][matrix_id] = sl->sl_dc[size_id - 2][matrix_id - delta]; } } else { int next_coef, coef_num; int32_t scaling_list_delta_coef; next_coef = 8; coef_num = FFMIN(64, 1 << (4 + (size_id << 1))); if (size_id > 1) { scaling_list_dc_coef[size_id - 2][matrix_id] = get_se_golomb(gb) + 8; next_coef = scaling_list_dc_coef[size_id - 2][matrix_id]; sl->sl_dc[size_id - 2][matrix_id] = next_coef; } for (i = 0; i < coef_num; i++) { if (size_id == 0) pos = 4 * ff_hevc_diag_scan4x4_y[i] + ff_hevc_diag_scan4x4_x[i]; else pos = 8 * ff_hevc_diag_scan8x8_y[i] + ff_hevc_diag_scan8x8_x[i]; scaling_list_delta_coef = get_se_golomb(gb); next_coef = (next_coef + scaling_list_delta_coef + 256) % 256; sl->sl[size_id][matrix_id][pos] = next_coef; } } } return 0; }"} {"target": 0, "idx": 21280, "func": "static int nvdec_mpeg12_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) { MpegEncContext *s = avctx->priv_data; NVDECContext *ctx = avctx->internal->hwaccel_priv_data; CUVIDPICPARAMS *pp = &ctx->pic_params; CUVIDMPEG2PICPARAMS *ppc = &pp->CodecSpecific.mpeg2; FrameDecodeData *fdd; NVDECFrame *cf; AVFrame *cur_frame = s->current_picture.f; int ret, i; ret = ff_nvdec_start_frame(avctx, cur_frame); if (ret < 0) return ret; fdd = (FrameDecodeData*)cur_frame->private_ref->data; cf = (NVDECFrame*)fdd->hwaccel_priv; *pp = (CUVIDPICPARAMS) { .PicWidthInMbs = (cur_frame->width + 15) / 16, .FrameHeightInMbs = (cur_frame->height + 15) / 16, .CurrPicIdx = cf->idx, .intra_pic_flag = s->pict_type == AV_PICTURE_TYPE_I, .ref_pic_flag = s->pict_type == AV_PICTURE_TYPE_I || s->pict_type == AV_PICTURE_TYPE_P, .CodecSpecific.mpeg2 = { .ForwardRefIdx = get_ref_idx(s->last_picture.f), .BackwardRefIdx = get_ref_idx(s->next_picture.f), .picture_coding_type = s->pict_type, .full_pel_forward_vector = s->full_pel[0], .full_pel_backward_vector = s->full_pel[1], .f_code = { { s->mpeg_f_code[0][0], s->mpeg_f_code[0][1] }, { s->mpeg_f_code[1][0], s->mpeg_f_code[1][1] } }, .intra_dc_precision = s->intra_dc_precision, .frame_pred_frame_dct = s->frame_pred_frame_dct, .concealment_motion_vectors = s->concealment_motion_vectors, .q_scale_type = s->q_scale_type, .intra_vlc_format = s->intra_vlc_format, .alternate_scan = s->alternate_scan, .top_field_first = s->top_field_first, } }; for (i = 0; i < 64; ++i) { ppc->QuantMatrixIntra[i] = s->intra_matrix[i]; ppc->QuantMatrixInter[i] = s->inter_matrix[i]; } return 0; }"} {"target": 1, "idx": 21282, "func": "static void on_vcpu(CPUState *env, void (*func)(void *data), void *data) { if (env == cpu_single_env) { func(data); return; } abort(); }"} {"target": 1, "idx": 21288, "func": "static int svq1_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MpegEncContext *s=avctx->priv_data; uint8_t *current, *previous; int result, i, x, y, width, height; AVFrame *pict = data; svq1_pmv *pmv; /* initialize bit buffer */ init_get_bits(&s->gb,buf,buf_size*8); /* decode frame header */ s->f_code = get_bits (&s->gb, 22); if ((s->f_code & ~0x70) || !(s->f_code & 0x60)) return -1; /* swap some header bytes (why?) 
*/ if (s->f_code != 0x20) { uint32_t *src = (uint32_t *) (buf + 4); for (i=0; i < 4; i++) { src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i]; } } result = svq1_decode_frame_header (&s->gb, s); if (result != 0) { av_dlog(s->avctx, \"Error in svq1_decode_frame_header %i\\n\",result); return result; } avcodec_set_dimensions(avctx, s->width, s->height); //FIXME this avoids some confusion for \"B frames\" without 2 references //this should be removed after libavcodec can handle more flexible picture types & ordering if(s->pict_type==AV_PICTURE_TYPE_B && s->last_picture_ptr==NULL) return buf_size; if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B) ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) return buf_size; if(ff_MPV_frame_start(s, avctx) < 0) return -1; pmv = av_malloc((FFALIGN(s->width, 16)/8 + 3) * sizeof(*pmv)); if (!pmv) return -1; /* decode y, u and v components */ for (i=0; i < 3; i++) { int linesize; if (i == 0) { width = FFALIGN(s->width, 16); height = FFALIGN(s->height, 16); linesize= s->linesize; } else { if(s->flags&CODEC_FLAG_GRAY) break; width = FFALIGN(s->width/4, 16); height = FFALIGN(s->height/4, 16); linesize= s->uvlinesize; } current = s->current_picture.f.data[i]; if(s->pict_type==AV_PICTURE_TYPE_B){ previous = s->next_picture.f.data[i]; }else{ previous = s->last_picture.f.data[i]; } if (s->pict_type == AV_PICTURE_TYPE_I) { /* keyframe */ for (y=0; y < height; y+=16) { for (x=0; x < width; x+=16) { result = svq1_decode_block_intra (&s->gb, &current[x], linesize); if (result != 0) { av_log(s->avctx, AV_LOG_ERROR, \"Error in svq1_decode_block %i (keyframe)\\n\",result); goto err; } } current += 16*linesize; } } else { /* delta frame */ memset (pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv)); for (y=0; y < height; y+=16) { for (x=0; x < width; x+=16) { result = svq1_decode_delta_block (s, &s->gb, &current[x], previous, linesize, pmv, x, y); if (result != 0) { av_dlog(s->avctx, \"Error in svq1_decode_delta_block %i\\n\",result); goto err; } } pmv[0].x = pmv[0].y = 0; current += 16*linesize; } } } *pict = s->current_picture.f; ff_MPV_frame_end(s); *data_size=sizeof(AVFrame); result = buf_size; err: av_free(pmv); return result; }"} {"target": 1, "idx": 21290, "func": "static void ogg_free(AVFormatContext *s) { int i; for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; OGGStreamContext *oggstream = st->priv_data; if (st->codecpar->codec_id == AV_CODEC_ID_FLAC || st->codecpar->codec_id == AV_CODEC_ID_SPEEX || st->codecpar->codec_id == AV_CODEC_ID_OPUS || st->codecpar->codec_id == AV_CODEC_ID_VP8) { av_freep(&oggstream->header[0]); } av_freep(&oggstream->header[1]); av_freep(&st->priv_data); } }"} {"target": 0, "idx": 21313, "func": "static void bdrv_drain_recurse(BlockDriverState *bs) { BdrvChild *child; if (bs->drv && bs->drv->bdrv_drain) { bs->drv->bdrv_drain(bs); } QLIST_FOREACH(child, &bs->children, next) { bdrv_drain_recurse(child->bs); } }"} {"target": 0, "idx": 21319, "func": "static void dump_qobject(fprintf_function func_fprintf, void *f, int comp_indent, QObject *obj) { switch (qobject_type(obj)) { case QTYPE_QINT: { QInt *value = qobject_to_qint(obj); func_fprintf(f, \"%\" PRId64, qint_get_int(value)); break; } case QTYPE_QSTRING: { QString *value = qobject_to_qstring(obj); func_fprintf(f, \"%s\", qstring_get_str(value)); break; } case QTYPE_QDICT: { QDict *value = qobject_to_qdict(obj); dump_qdict(func_fprintf, f, comp_indent, value); break; } case QTYPE_QLIST: { 
QList *value = qobject_to_qlist(obj); dump_qlist(func_fprintf, f, comp_indent, value); break; } case QTYPE_QFLOAT: { QFloat *value = qobject_to_qfloat(obj); func_fprintf(f, \"%g\", qfloat_get_double(value)); break; } case QTYPE_QBOOL: { QBool *value = qobject_to_qbool(obj); func_fprintf(f, \"%s\", qbool_get_int(value) ? \"true\" : \"false\"); break; } case QTYPE_QERROR: { QString *value = qerror_human((QError *)obj); func_fprintf(f, \"%s\", qstring_get_str(value)); QDECREF(value); break; } case QTYPE_NONE: break; case QTYPE_MAX: default: abort(); } }"} {"target": 1, "idx": 21334, "func": "vmxnet3_dump_tx_descr(struct Vmxnet3_TxDesc *descr) { VMW_PKPRN(\"TX DESCR: \" \"addr %\" PRIx64 \", len: %d, gen: %d, rsvd: %d, \" \"dtype: %d, ext1: %d, msscof: %d, hlen: %d, om: %d, \" \"eop: %d, cq: %d, ext2: %d, ti: %d, tci: %d\", le64_to_cpu(descr->addr), descr->len, descr->gen, descr->rsvd, descr->dtype, descr->ext1, descr->msscof, descr->hlen, descr->om, descr->eop, descr->cq, descr->ext2, descr->ti, descr->tci); }"} {"target": 0, "idx": 21345, "func": "build_dsdt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu, AcpiPmInfo *pm, AcpiMiscInfo *misc, PcPciInfo *pci) { CrsRangeEntry *entry; Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs; GPtrArray *mem_ranges = g_ptr_array_new_with_free_func(crs_range_free); GPtrArray *io_ranges = g_ptr_array_new_with_free_func(crs_range_free); MachineState *machine = MACHINE(qdev_get_machine()); PCMachineState *pcms = PC_MACHINE(machine); uint32_t nr_mem = machine->ram_slots; int root_bus_limit = 0xFF; PCIBus *bus = NULL; int i; dsdt = init_aml_allocator(); /* Reserve space for header */ acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader)); build_dbg_aml(dsdt); if (misc->is_piix4) { sb_scope = aml_scope(\"_SB\"); dev = aml_device(\"PCI0\"); aml_append(dev, aml_name_decl(\"_HID\", aml_eisaid(\"PNP0A03\"))); aml_append(dev, aml_name_decl(\"_ADR\", aml_int(0))); aml_append(dev, aml_name_decl(\"_UID\", aml_int(1))); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); build_hpet_aml(dsdt); build_piix4_pm(dsdt); build_piix4_isa_bridge(dsdt); build_isa_devices_aml(dsdt); build_piix4_pci_hotplug(dsdt); build_piix4_pci0_int(dsdt); } else { sb_scope = aml_scope(\"_SB\"); aml_append(sb_scope, aml_operation_region(\"PCST\", AML_SYSTEM_IO, 0xae00, 0x0c)); aml_append(sb_scope, aml_operation_region(\"PCSB\", AML_SYSTEM_IO, 0xae0c, 0x01)); field = aml_field(\"PCSB\", AML_ANY_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); aml_append(field, aml_named_field(\"PCIB\", 8)); aml_append(sb_scope, field); aml_append(dsdt, sb_scope); sb_scope = aml_scope(\"_SB\"); dev = aml_device(\"PCI0\"); aml_append(dev, aml_name_decl(\"_HID\", aml_eisaid(\"PNP0A08\"))); aml_append(dev, aml_name_decl(\"_CID\", aml_eisaid(\"PNP0A03\"))); aml_append(dev, aml_name_decl(\"_ADR\", aml_int(0))); aml_append(dev, aml_name_decl(\"_UID\", aml_int(1))); aml_append(dev, aml_name_decl(\"SUPP\", aml_int(0))); aml_append(dev, aml_name_decl(\"CTRL\", aml_int(0))); aml_append(dev, build_q35_osc_method()); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); build_hpet_aml(dsdt); build_q35_isa_bridge(dsdt); build_isa_devices_aml(dsdt); build_q35_pci0_int(dsdt); } build_cpu_hotplug_aml(dsdt); build_memory_hotplug_aml(dsdt, nr_mem, pm->mem_hp_io_base, pm->mem_hp_io_len); scope = aml_scope(\"_GPE\"); { aml_append(scope, aml_name_decl(\"_HID\", aml_string(\"ACPI0006\"))); aml_append(scope, aml_method(\"_L00\", 0, AML_NOTSERIALIZED)); if (misc->is_piix4) { method = aml_method(\"_E01\", 0, AML_NOTSERIALIZED); 
aml_append(method, aml_acquire(aml_name(\"\\\\_SB.PCI0.BLCK\"), 0xFFFF)); aml_append(method, aml_call0(\"\\\\_SB.PCI0.PCNT\")); aml_append(method, aml_release(aml_name(\"\\\\_SB.PCI0.BLCK\"))); aml_append(scope, method); } else { aml_append(scope, aml_method(\"_L01\", 0, AML_NOTSERIALIZED)); } method = aml_method(\"_E02\", 0, AML_NOTSERIALIZED); aml_append(method, aml_call0(\"\\\\_SB.\" CPU_SCAN_METHOD)); aml_append(scope, method); method = aml_method(\"_E03\", 0, AML_NOTSERIALIZED); aml_append(method, aml_call0(MEMORY_HOTPLUG_HANDLER_PATH)); aml_append(scope, method); aml_append(scope, aml_method(\"_L04\", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method(\"_L05\", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method(\"_L06\", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method(\"_L07\", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method(\"_L08\", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method(\"_L09\", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method(\"_L0A\", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method(\"_L0B\", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method(\"_L0C\", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method(\"_L0D\", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method(\"_L0E\", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method(\"_L0F\", 0, AML_NOTSERIALIZED)); } aml_append(dsdt, scope); bus = PC_MACHINE(machine)->bus; if (bus) { QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); /* look only for expander root buses */ if (!pci_bus_is_root(bus)) { continue; } if (bus_num < root_bus_limit) { root_bus_limit = bus_num - 1; } scope = aml_scope(\"\\\\_SB\"); dev = aml_device(\"PC%.02X\", bus_num); aml_append(dev, aml_name_decl(\"_UID\", aml_int(bus_num))); aml_append(dev, aml_name_decl(\"_HID\", aml_eisaid(\"PNP0A03\"))); aml_append(dev, aml_name_decl(\"_BBN\", aml_int(bus_num))); if (numa_node != NUMA_NODE_UNASSIGNED) { aml_append(dev, aml_name_decl(\"_PXM\", aml_int(numa_node))); } aml_append(dev, build_prt(false)); crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), io_ranges, mem_ranges); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(scope, dev); aml_append(dsdt, scope); } } scope = aml_scope(\"\\\\_SB.PCI0\"); /* build PCI0._CRS */ crs = aml_resource_template(); aml_append(crs, aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, 0x0000, 0x0, root_bus_limit, 0x0000, root_bus_limit + 1)); aml_append(crs, aml_io(AML_DECODE16, 0x0CF8, 0x0CF8, 0x01, 0x08)); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, 0x0000, 0x0CF7, 0x0000, 0x0CF8)); crs_replace_with_free_ranges(io_ranges, 0x0D00, 0xFFFF); for (i = 0; i < io_ranges->len; i++) { entry = g_ptr_array_index(io_ranges, i); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, entry->base, entry->limit, 0x0000, entry->limit - entry->base + 1)); } aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, 0x000A0000, 0x000BFFFF, 0, 0x00020000)); crs_replace_with_free_ranges(mem_ranges, pci->w32.begin, pci->w32.end - 1); for (i = 0; i < mem_ranges->len; i++) { entry = g_ptr_array_index(mem_ranges, i); aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0, entry->base, entry->limit, 0, entry->limit - entry->base + 1)); } if (pci->w64.begin) { aml_append(crs, aml_qword_memory(AML_POS_DECODE, 
AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, pci->w64.begin, pci->w64.end - 1, 0, pci->w64.end - pci->w64.begin)); } aml_append(scope, aml_name_decl(\"_CRS\", crs)); /* reserve GPE0 block resources */ dev = aml_device(\"GPE0\"); aml_append(dev, aml_name_decl(\"_HID\", aml_string(\"PNP0A06\"))); aml_append(dev, aml_name_decl(\"_UID\", aml_string(\"GPE0 resources\"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->gpe0_blk, pm->gpe0_blk, 1, pm->gpe0_blk_len) ); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(scope, dev); g_ptr_array_free(io_ranges, true); g_ptr_array_free(mem_ranges, true); /* reserve PCIHP resources */ if (pm->pcihp_io_len) { dev = aml_device(\"PHPR\"); aml_append(dev, aml_name_decl(\"_HID\", aml_string(\"PNP0A06\"))); aml_append(dev, aml_name_decl(\"_UID\", aml_string(\"PCI Hotplug resources\"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->pcihp_io_base, pm->pcihp_io_base, 1, pm->pcihp_io_len) ); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(scope, dev); } aml_append(dsdt, scope); /* create S3_ / S4_ / S5_ packages if necessary */ scope = aml_scope(\"\\\\\"); if (!pm->s3_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(1)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(1)); /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl(\"_S3\", pkg)); } if (!pm->s4_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(pm->s4_val)); /* PM1a_CNT.SLP_TYP */ /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ aml_append(pkg, aml_int(pm->s4_val)); aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl(\"_S4\", pkg)); } pkg = aml_package(4); aml_append(pkg, aml_int(0)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(0)); /* PM1b_CNT.SLP_TYP not impl. 
*/ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl(\"_S5\", pkg)); aml_append(dsdt, scope); if (misc->applesmc_io_base) { scope = aml_scope(\"\\\\_SB.PCI0.ISA\"); dev = aml_device(\"SMC\"); aml_append(dev, aml_name_decl(\"_HID\", aml_eisaid(\"APP0001\"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->applesmc_io_base, misc->applesmc_io_base, 0x01, APPLESMC_MAX_DATA_LENGTH) ); aml_append(crs, aml_irq_no_flags(6)); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(scope, dev); aml_append(dsdt, scope); } if (misc->pvpanic_port) { scope = aml_scope(\"\\\\_SB.PCI0.ISA\"); dev = aml_device(\"PEVT\"); aml_append(dev, aml_name_decl(\"_HID\", aml_string(\"QEMU0001\"))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->pvpanic_port, misc->pvpanic_port, 1, 1) ); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(dev, aml_operation_region(\"PEOR\", AML_SYSTEM_IO, misc->pvpanic_port, 1)); field = aml_field(\"PEOR\", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); aml_append(field, aml_named_field(\"PEPT\", 8)); aml_append(dev, field); /* device present, functioning, decoding, shown in UI */ aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xF))); method = aml_method(\"RDPT\", 0, AML_NOTSERIALIZED); aml_append(method, aml_store(aml_name(\"PEPT\"), aml_local(0))); aml_append(method, aml_return(aml_local(0))); aml_append(dev, method); method = aml_method(\"WRPT\", 1, AML_NOTSERIALIZED); aml_append(method, aml_store(aml_arg(0), aml_name(\"PEPT\"))); aml_append(dev, method); aml_append(scope, dev); aml_append(dsdt, scope); } sb_scope = aml_scope(\"\\\\_SB\"); { build_processor_devices(sb_scope, pcms->apic_id_limit, cpu, pm); build_memory_devices(sb_scope, nr_mem, pm->mem_hp_io_base, pm->mem_hp_io_len); { Object *pci_host; PCIBus *bus = NULL; pci_host = acpi_get_i386_pci_host(); if (pci_host) { bus = PCI_HOST_BRIDGE(pci_host)->bus; } if (bus) { Aml *scope = aml_scope(\"PCI0\"); /* Scan all PCI buses. Generate tables to support hotplug. 
*/ build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en); if (misc->tpm_version != TPM_VERSION_UNSPEC) { dev = aml_device(\"ISA.TPM\"); aml_append(dev, aml_name_decl(\"_HID\", aml_eisaid(\"PNP0C31\"))); aml_append(dev, aml_name_decl(\"_STA\", aml_int(0xF))); crs = aml_resource_template(); aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE, TPM_TIS_ADDR_SIZE, AML_READ_WRITE)); aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); aml_append(dev, aml_name_decl(\"_CRS\", crs)); aml_append(scope, dev); } aml_append(sb_scope, scope); } } aml_append(dsdt, sb_scope); } /* copy AML table into ACPI tables blob and patch header there */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); build_header(linker, table_data, (void *)(table_data->data + table_data->len - dsdt->buf->len), \"DSDT\", dsdt->buf->len, 1, NULL); free_aml_allocator(); }"} {"target": 0, "idx": 21369, "func": "INLINE bits64 extractFloat64Frac( float64 a ) { return a & LIT64( 0x000FFFFFFFFFFFFF ); }"} {"target": 1, "idx": 21372, "func": "setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate, CPUX86State *env, unsigned long mask) { int err = 0; err |= __put_user(env->segs[R_GS], (unsigned int *)&sc->gs); err |= __put_user(env->segs[R_FS], (unsigned int *)&sc->fs); err |= __put_user(env->segs[R_ES], (unsigned int *)&sc->es); err |= __put_user(env->segs[R_DS], (unsigned int *)&sc->ds); err |= __put_user(env->regs[R_EDI], &sc->edi); err |= __put_user(env->regs[R_ESI], &sc->esi); err |= __put_user(env->regs[R_EBP], &sc->ebp); err |= __put_user(env->regs[R_ESP], &sc->esp); err |= __put_user(env->regs[R_EBX], &sc->ebx); err |= __put_user(env->regs[R_EDX], &sc->edx); err |= __put_user(env->regs[R_ECX], &sc->ecx); err |= __put_user(env->regs[R_EAX], &sc->eax); err |= __put_user(env->exception_index, &sc->trapno); err |= __put_user(env->error_code, &sc->err); err |= __put_user(env->eip, &sc->eip); err |= __put_user(env->segs[R_CS], (unsigned int *)&sc->cs); err |= __put_user(env->eflags, &sc->eflags); err |= __put_user(env->regs[R_ESP], &sc->esp_at_signal); err |= __put_user(env->segs[R_SS], (unsigned int *)&sc->ss); #if 0 tmp = save_i387(fpstate); if (tmp < 0) err = 1; else err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate); #else err |= __put_user(0, &sc->fpstate); #endif /* non-iBCS2 extensions.. */ err |= __put_user(mask, &sc->oldmask); err |= __put_user(/*current->thread.cr2*/ 0, &sc->cr2); return err; }"} {"target": 1, "idx": 21389, "func": "static void esp_command_complete(SCSIBus *bus, int reason, uint32_t tag, uint32_t arg) { ESPState *s = DO_UPCAST(ESPState, busdev.qdev, bus->qbus.parent); if (reason == SCSI_REASON_DONE) { DPRINTF(\"SCSI Command complete\\n\"); if (s->ti_size != 0) DPRINTF(\"SCSI command completed unexpectedly\\n\"); s->ti_size = 0; s->dma_left = 0; s->async_len = 0; if (arg) DPRINTF(\"Command failed\\n\"); s->sense = arg; s->rregs[ESP_RSTAT] = STAT_ST; esp_dma_done(s); s->current_dev = NULL; } else { DPRINTF(\"transfer %d/%d\\n\", s->dma_left, s->ti_size); s->async_len = arg; s->async_buf = s->current_dev->info->get_buf(s->current_dev, 0); if (s->dma_left) { esp_do_dma(s); } else if (s->dma_counter != 0 && s->ti_size <= 0) { /* If this was the last part of a DMA transfer then the completion interrupt is deferred to here. 
*/ esp_dma_done(s); } } }"} {"target": 1, "idx": 21390, "func": "static inline CopyRet receive_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t second_field) { BC_STATUS ret; BC_DTS_PROC_OUT output = { .PicInfo.width = avctx->width, .PicInfo.height = avctx->height, }; CHDContext *priv = avctx->priv_data; HANDLE dev = priv->dev; *data_size = 0; // Request decoded data from the driver ret = DtsProcOutputNoCopy(dev, OUTPUT_PROC_TIMEOUT, &output); if (ret == BC_STS_FMT_CHANGE) { av_log(avctx, AV_LOG_VERBOSE, \"CrystalHD: Initial format change\\n\"); avctx->width = output.PicInfo.width; avctx->height = output.PicInfo.height; return RET_COPY_AGAIN; } else if (ret == BC_STS_SUCCESS) { int copy_ret = -1; if (output.PoutFlags & BC_POUT_FLAGS_PIB_VALID) { if (priv->last_picture == -1) { /* * Init to one less, so that the incrementing code doesn't * need to be special-cased. */ priv->last_picture = output.PicInfo.picture_number - 1; } if (avctx->codec->id == CODEC_ID_MPEG4 && output.PicInfo.timeStamp == 0) { av_log(avctx, AV_LOG_VERBOSE, \"CrystalHD: Not returning packed frame twice.\\n\"); priv->last_picture++; DtsReleaseOutputBuffs(dev, NULL, FALSE); return RET_COPY_AGAIN; } print_frame_info(priv, &output); if (priv->last_picture + 1 < output.PicInfo.picture_number) { av_log(avctx, AV_LOG_WARNING, \"CrystalHD: Picture Number discontinuity\\n\"); /* * Have we lost frames? If so, we need to shrink the * pipeline length appropriately. * * XXX: I have no idea what the semantics of this situation * are so I don't even know if we've lost frames or which * ones. * * In any case, only warn the first time. */ priv->last_picture = output.PicInfo.picture_number - 1; } copy_ret = copy_frame(avctx, &output, data, data_size, second_field); if (*data_size > 0) { avctx->has_b_frames--; priv->last_picture++; av_log(avctx, AV_LOG_VERBOSE, \"CrystalHD: Pipeline length: %u\\n\", avctx->has_b_frames); } } else { /* * An invalid frame has been consumed. */ av_log(avctx, AV_LOG_ERROR, \"CrystalHD: ProcOutput succeeded with \" \"invalid PIB\\n\"); avctx->has_b_frames--; copy_ret = RET_OK; } DtsReleaseOutputBuffs(dev, NULL, FALSE); return copy_ret; } else if (ret == BC_STS_BUSY) { return RET_COPY_AGAIN; } else { av_log(avctx, AV_LOG_ERROR, \"CrystalHD: ProcOutput failed %d\\n\", ret); return RET_ERROR; } }"} {"target": 0, "idx": 21399, "func": "static int encode_frame(AVCodecContext *avctx, QSVEncContext *q, const AVFrame *frame) { AVPacket new_pkt = { 0 }; mfxBitstream *bs; mfxFrameSurface1 *surf = NULL; mfxSyncPoint sync = NULL; int ret; if (frame) { ret = submit_frame(q, frame, &surf); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Error submitting the frame for encoding.\\n\"); return ret; } } ret = av_new_packet(&new_pkt, q->packet_size); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, \"Error allocating the output packet\\n\"); return ret; } bs = av_mallocz(sizeof(*bs)); if (!bs) { av_packet_unref(&new_pkt); return AVERROR(ENOMEM); } bs->Data = new_pkt.data; bs->MaxLength = new_pkt.size; do { ret = MFXVideoENCODE_EncodeFrameAsync(q->session, NULL, surf, bs, &sync); if (ret == MFX_WRN_DEVICE_BUSY) av_usleep(1); } while (ret > 0); if (ret < 0) { av_packet_unref(&new_pkt); av_freep(&bs); return (ret == MFX_ERR_MORE_DATA) ? 
0 : ff_qsv_error(ret); } if (ret == MFX_WRN_INCOMPATIBLE_VIDEO_PARAM && frame->interlaced_frame) print_interlace_msg(avctx, q); if (sync) { av_fifo_generic_write(q->async_fifo, &new_pkt, sizeof(new_pkt), NULL); av_fifo_generic_write(q->async_fifo, &sync, sizeof(sync), NULL); av_fifo_generic_write(q->async_fifo, &bs, sizeof(bs), NULL); } else { av_packet_unref(&new_pkt); av_freep(&bs); } return 0; }"} {"target": 1, "idx": 21407, "func": "static void ehci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); EHCIPCIInfo *i = data; k->init = usb_ehci_pci_initfn; k->vendor_id = i->vendor_id; k->device_id = i->device_id; k->revision = i->revision; k->class_id = PCI_CLASS_SERIAL_USB; k->config_write = usb_ehci_pci_write_config; dc->vmsd = &vmstate_ehci_pci; dc->props = ehci_pci_properties; }"} {"target": 0, "idx": 21417, "func": "static void curses_refresh(DisplayChangeListener *dcl) { int chr, nextchr, keysym, keycode, keycode_alt; curses_winch_check(); if (invalidate) { clear(); refresh(); curses_calc_pad(); graphic_hw_invalidate(NULL); invalidate = 0; } graphic_hw_text_update(NULL, screen); nextchr = ERR; while (1) { /* while there are any pending key strokes to process */ if (nextchr == ERR) chr = getch(); else { chr = nextchr; nextchr = ERR; } if (chr == ERR) break; #ifdef KEY_RESIZE /* this shouldn't occur when we use a custom SIGWINCH handler */ if (chr == KEY_RESIZE) { clear(); refresh(); curses_calc_pad(); curses_update(dcl, 0, 0, width, height); continue; } #endif keycode = curses2keycode[chr]; keycode_alt = 0; /* alt key */ if (keycode == 1) { nextchr = getch(); if (nextchr != ERR) { chr = nextchr; keycode_alt = ALT; keycode = curses2keycode[nextchr]; nextchr = ERR; if (keycode != -1) { keycode |= ALT; /* process keys reserved for qemu */ if (keycode >= QEMU_KEY_CONSOLE0 && keycode < QEMU_KEY_CONSOLE0 + 9) { erase(); wnoutrefresh(stdscr); console_select(keycode - QEMU_KEY_CONSOLE0); invalidate = 1; continue; } } } } if (kbd_layout) { keysym = -1; if (chr < CURSES_KEYS) keysym = curses2keysym[chr]; if (keysym == -1) { if (chr < ' ') { keysym = chr + '@'; if (keysym >= 'A' && keysym <= 'Z') keysym += 'a' - 'A'; keysym |= KEYSYM_CNTRL; } else keysym = chr; } keycode = keysym2scancode(kbd_layout, keysym & KEYSYM_MASK); if (keycode == 0) continue; keycode |= (keysym & ~KEYSYM_MASK) >> 16; keycode |= keycode_alt; } if (keycode == -1) continue; if (qemu_console_is_graphic(NULL)) { /* since terminals don't know about key press and release * events, we need to emit both for each key received */ if (keycode & SHIFT) { qemu_input_event_send_key_number(NULL, SHIFT_CODE, true); qemu_input_event_send_key_delay(0); } if (keycode & CNTRL) { qemu_input_event_send_key_number(NULL, CNTRL_CODE, true); qemu_input_event_send_key_delay(0); } if (keycode & ALT) { qemu_input_event_send_key_number(NULL, ALT_CODE, true); qemu_input_event_send_key_delay(0); } if (keycode & ALTGR) { qemu_input_event_send_key_number(NULL, GREY | ALT_CODE, true); qemu_input_event_send_key_delay(0); } qemu_input_event_send_key_number(NULL, keycode & KEY_MASK, true); qemu_input_event_send_key_delay(0); qemu_input_event_send_key_number(NULL, keycode & KEY_MASK, false); qemu_input_event_send_key_delay(0); if (keycode & ALTGR) { qemu_input_event_send_key_number(NULL, GREY | ALT_CODE, false); qemu_input_event_send_key_delay(0); } if (keycode & ALT) { qemu_input_event_send_key_number(NULL, ALT_CODE, false); qemu_input_event_send_key_delay(0); } if (keycode & CNTRL) 
{ qemu_input_event_send_key_number(NULL, CNTRL_CODE, false); qemu_input_event_send_key_delay(0); } if (keycode & SHIFT) { qemu_input_event_send_key_number(NULL, SHIFT_CODE, false); qemu_input_event_send_key_delay(0); } } else { keysym = -1; if (chr < CURSES_KEYS) { keysym = curses2qemu[chr]; } if (keysym == -1) keysym = chr; kbd_put_keysym(keysym); } } }"} {"target": 0, "idx": 21434, "func": "static ssize_t proxy_lgetxattr(FsContext *ctx, V9fsPath *fs_path, const char *name, void *value, size_t size) { int retval; V9fsString xname; v9fs_string_init(&xname); v9fs_string_sprintf(&xname, \"%s\", name); retval = v9fs_request(ctx->private, T_LGETXATTR, value, \"dss\", size, fs_path, &xname); v9fs_string_free(&xname); if (retval < 0) { errno = -retval; } return retval; }"} {"target": 0, "idx": 21436, "func": "static void cadence_timer_sync(CadenceTimerState *s) { int i; int64_t r, x; int64_t interval = ((s->reg_count & COUNTER_CTRL_INT) ? (int64_t)s->reg_interval + 1 : 0x10000ULL) << 16; uint64_t old_time = s->cpu_time; s->cpu_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); DB_PRINT(\"cpu time: %lld ns\\n\", (long long)old_time); if (!s->cpu_time_valid || old_time == s->cpu_time) { s->cpu_time_valid = 1; return; } r = (int64_t)cadence_timer_get_steps(s, s->cpu_time - old_time); x = (int64_t)s->reg_value + ((s->reg_count & COUNTER_CTRL_DEC) ? -r : r); for (i = 0; i < 3; ++i) { int64_t m = (int64_t)s->reg_match[i] << 16; if (m > interval) { continue; } /* check to see if match event has occurred. check m +/- interval * to account for match events in wrap around cases */ if (is_between(m, s->reg_value, x) || is_between(m + interval, s->reg_value, x) || is_between(m - interval, s->reg_value, x)) { s->reg_intr |= (2 << i); } } while (x < 0) { x += interval; } s->reg_value = (uint32_t)(x % interval); if (s->reg_value != x) { s->reg_intr |= (s->reg_count & COUNTER_CTRL_INT) ? COUNTER_INTR_IV : COUNTER_INTR_OV; } cadence_timer_update(s); }"} {"target": 0, "idx": 21440, "func": "static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1, int32_t arg2, int const_arg2, int label) { /* For 64-bit signed comparisons vs zero, we can avoid the compare. */ if (arg2 == 0 && !is_unsigned_cond(cond)) { TCGLabel *l = &s->labels[label]; int off16; if (l->has_value) { off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr)); } else { /* Make sure to preserve destinations during retranslation. 
*/ off16 = *s->code_ptr & INSN_OFF16(-1); tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0); } tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1) | INSN_COND(tcg_cond_to_rcond[cond]) | off16); } else { tcg_out_cmp(s, arg1, arg2, const_arg2); tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label); } tcg_out_nop(s); }"} {"target": 0, "idx": 21453, "func": "int pci_bridge_initfn(PCIDevice *dev) { PCIBus *parent = dev->bus; PCIBridge *br = DO_UPCAST(PCIBridge, dev, dev); PCIBus *sec_bus = &br->sec_bus; pci_set_word(dev->config + PCI_STATUS, PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK); pci_config_set_class(dev->config, PCI_CLASS_BRIDGE_PCI); dev->config[PCI_HEADER_TYPE] = (dev->config[PCI_HEADER_TYPE] & PCI_HEADER_TYPE_MULTI_FUNCTION) | PCI_HEADER_TYPE_BRIDGE; pci_set_word(dev->config + PCI_SEC_STATUS, PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK); qbus_create_inplace(&sec_bus->qbus, &pci_bus_info, &dev->qdev, br->bus_name); sec_bus->parent_dev = dev; sec_bus->map_irq = br->map_irq; sec_bus->address_space_mem = &br->address_space_mem; memory_region_init(&br->address_space_mem, \"pci_bridge_pci\", INT64_MAX); sec_bus->address_space_io = &br->address_space_io; memory_region_init(&br->address_space_io, \"pci_bridge_io\", 65536); pci_bridge_region_init(br); QLIST_INIT(&sec_bus->child); QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling); return 0; }"} {"target": 1, "idx": 21468, "func": "static void clear_sdr_rep(IPMIBmcSim *ibs, uint8_t *cmd, unsigned int cmd_len, uint8_t *rsp, unsigned int *rsp_len, unsigned int max_rsp_len) { IPMI_CHECK_CMD_LEN(8); IPMI_CHECK_RESERVATION(2, ibs->sdr.reservation); if (cmd[4] != 'C' || cmd[5] != 'L' || cmd[6] != 'R') { rsp[2] = IPMI_CC_INVALID_DATA_FIELD; return; } if (cmd[7] == 0xaa) { ibs->sdr.next_free = 0; ibs->sdr.overflow = 0; set_timestamp(ibs, ibs->sdr.last_clear); IPMI_ADD_RSP_DATA(1); /* Erasure complete */ sdr_inc_reservation(&ibs->sdr); } else if (cmd[7] == 0) { IPMI_ADD_RSP_DATA(1); /* Erasure complete */ } else { rsp[2] = IPMI_CC_INVALID_DATA_FIELD; return; } }"} {"target": 1, "idx": 21470, "func": "static inline void RENAME(initFilter)(int16_t *dstFilter, int16_t *filterPos, int *filterSize, int xInc, int srcW, int dstW, int filterAlign, int one) { int i; double filter[8000]; #ifdef HAVE_MMX asm volatile(\"emms\\n\\t\"::: \"memory\"); //FIXME this shouldnt be required but it IS (even for non mmx versions) #endif if(ABS(xInc - 0x10000) <10) // unscaled { int i; *filterSize= (1 +(filterAlign-1)) & (~(filterAlign-1)); // 1 or 4 normaly for(i=0; i>16) - (*filterSize>>1) + 1; int j; filterPos[i]= xx; if(sws_flags == SWS_BICUBIC) { double d= ABS(((xx+1)<<16) - xDstInSrc)/(double)(1<<16); double y1,y2,y3,y4; double A= -0.75; // Equation is from VirtualDub y1 = ( + A*d - 2.0*A*d*d + A*d*d*d); y2 = (+ 1.0 - (A+3.0)*d*d + (A+2.0)*d*d*d); y3 = ( - A*d + (2.0*A+3.0)*d*d - (A+2.0)*d*d*d); y4 = ( + A*d*d - A*d*d*d); // printf(\"%d %d %d \\n\", coeff, (int)d, xDstInSrc); filter[i*(*filterSize) + 0]= y1; filter[i*(*filterSize) + 1]= y2; filter[i*(*filterSize) + 2]= y3; filter[i*(*filterSize) + 3]= y4; // printf(\"%1.3f %d, %d, %d, %d\\n\",d , y1, y2, y3, y4); } else { for(j=0; j<*filterSize; j++) { double d= ABS((xx<<16) - xDstInSrc)/(double)(1<<16); double coeff= 1.0 - d; if(coeff<0) coeff=0; // printf(\"%d %d %d \\n\", coeff, (int)d, xDstInSrc); filter[i*(*filterSize) + j]= coeff; xx++; } } xDstInSrc+= xInc; } } else // downscale { int xDstInSrc; if(sws_flags==SWS_BICUBIC) *filterSize= (int)ceil(1 + 4.0*srcW / (double)dstW); else 
*filterSize= (int)ceil(1 + 2.0*srcW / (double)dstW); // printf(\"%d %d %d\\n\", *filterSize, srcW, dstW); *filterSize= (*filterSize +(filterAlign-1)) & (~(filterAlign-1)); xDstInSrc= xInc/2 - 0x8000; for(i=0; i srcW) { int shift= filterPos[i] + (*filterSize) - srcW; // Move filter coeffs right to compensate for filterPos for(j=(*filterSize)-2; j>=0; j--) { int right= MIN(j + shift, (*filterSize)-1); filter[i*(*filterSize) +right] += filter[i*(*filterSize) +j]; filter[i*(*filterSize) +j]=0; } filterPos[i]= srcW - (*filterSize); } } //FIXME try to align filterpos if possible / try to shift filterpos to put zeros at the end // and skip these than later //Normalize for(i=0; iu.file; ChardevCommon *common = qapi_ChardevFile_base(file); HANDLE out; if (file->has_in) { error_setg(errp, \"input file not supported\"); return NULL; } out = CreateFile(file->out, GENERIC_WRITE, FILE_SHARE_READ, NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if (out == INVALID_HANDLE_VALUE) { error_setg(errp, \"open %s failed\", file->out); return NULL; } return qemu_chr_open_win_file(out, common, errp); }"} {"target": 0, "idx": 21531, "func": "void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event) { if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) { return; } bs->drv->bdrv_debug_event(bs, event); }"} {"target": 0, "idx": 21555, "func": "static bool pc_machine_get_aligned_dimm(Object *obj, Error **errp) { PCMachineState *pcms = PC_MACHINE(obj); return pcms->enforce_aligned_dimm; }"} {"target": 0, "idx": 21561, "func": "static int refresh_total_sectors(BlockDriverState *bs, int64_t hint) { BlockDriver *drv = bs->drv; /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */ if (bs->sg) return 0; /* query actual device if possible, otherwise just trust the hint */ if (drv->bdrv_getlength) { int64_t length = drv->bdrv_getlength(bs); if (length < 0) { return length; } hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE); } bs->total_sectors = hint; return 0; }"} {"target": 1, "idx": 21564, "func": "BlockStatsList *qmp_query_blockstats(Error **errp) { BlockStatsList *head = NULL, **p_next = &head; BlockDriverState *bs = NULL; while ((bs = bdrv_next(bs))) { BlockStatsList *info = g_malloc0(sizeof(*info)); info->value = bdrv_query_stats(bs); *p_next = info; p_next = &info->next; } return head; }"} {"target": 0, "idx": 21586, "func": "static void opt_pad_color(const char *arg) { /* Input is expected to be six hex digits similar to how colors are expressed in html tags (but without the #) */ int rgb = strtol(arg, NULL, 16); int r,g,b; r = (rgb >> 16); g = ((rgb >> 8) & 255); b = (rgb & 255); padcolor[0] = RGB_TO_Y(r,g,b); padcolor[1] = RGB_TO_U(r,g,b,0); padcolor[2] = RGB_TO_V(r,g,b,0); }"} {"target": 1, "idx": 21587, "func": "static void gen_spr_74xx (CPUPPCState *env) { /* Processor identification */ spr_register(env, SPR_PIR, \"PIR\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pir, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MMCR2, \"MMCR2\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_UMMCR2, \"UMMCR2\", &spr_read_ureg, SPR_NOACCESS, &spr_read_ureg, SPR_NOACCESS, 0x00000000); /* XXX: not implemented */ spr_register(env, SPR_BAMR, \"BAMR\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MSSCR0, \"MSSCR0\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware 
implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, \"HID0\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, \"HID1\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Altivec */ spr_register(env, SPR_VRSAVE, \"VRSAVE\", &spr_read_generic, &spr_write_generic, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_L2CR, \"L2CR\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, NULL, 0x00000000); /* Not strictly an SPR */ vscr_init(env, 0x00010000); }"} {"target": 1, "idx": 21589, "func": "static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int *nb, double **c, double **cache) { char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL; int i, ret; p = old_str = av_strdup(item_str); if (!p) return AVERROR(ENOMEM); for (i = 0; i < channels; i++) { if (!(arg = av_strtok(p, \"|\", &saveptr))) arg = prev_arg; p = NULL; count_coefficients(arg, &nb[i]); cache[i] = av_calloc(nb[i], sizeof(cache[i])); c[i] = av_calloc(nb[i], sizeof(c[i])); if (!c[i] || !cache[i]) return AVERROR(ENOMEM); ret = read_coefficients(ctx, arg, nb[i], c[i]); if (ret < 0) return ret; prev_arg = arg; } av_freep(&old_str); return 0; }"} {"target": 0, "idx": 21601, "func": "av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact) { #if HAVE_YASM int mm_flags = av_get_cpu_flags(); if (mm_flags & AV_CPU_FLAG_MMX) { c->ac3_exponent_min = ff_ac3_exponent_min_mmx; c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx; c->ac3_lshift_int16 = ff_ac3_lshift_int16_mmx; c->ac3_rshift_int32 = ff_ac3_rshift_int32_mmx; } if (mm_flags & AV_CPU_FLAG_3DNOW && HAVE_AMD3DNOW) { c->extract_exponents = ff_ac3_extract_exponents_3dnow; if (!bit_exact) { c->float_to_fixed24 = ff_float_to_fixed24_3dnow; } } if (mm_flags & AV_CPU_FLAG_MMX2 && HAVE_MMX2) { c->ac3_exponent_min = ff_ac3_exponent_min_mmxext; c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmxext; } if (mm_flags & AV_CPU_FLAG_SSE && HAVE_SSE) { c->float_to_fixed24 = ff_float_to_fixed24_sse; } if (mm_flags & AV_CPU_FLAG_SSE2 && HAVE_SSE) { c->ac3_exponent_min = ff_ac3_exponent_min_sse2; c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_sse2; c->float_to_fixed24 = ff_float_to_fixed24_sse2; c->compute_mantissa_size = ff_ac3_compute_mantissa_size_sse2; c->extract_exponents = ff_ac3_extract_exponents_sse2; if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) { c->ac3_lshift_int16 = ff_ac3_lshift_int16_sse2; c->ac3_rshift_int32 = ff_ac3_rshift_int32_sse2; } } if (mm_flags & AV_CPU_FLAG_SSSE3 && HAVE_SSSE3) { c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_ssse3; if (!(mm_flags & AV_CPU_FLAG_ATOM)) { c->extract_exponents = ff_ac3_extract_exponents_ssse3; } } #endif }"} {"target": 0, "idx": 21607, "func": "static int xan_decode_end(AVCodecContext *avctx) { XanContext *s = avctx->priv_data; /* release the last frame */ avctx->release_buffer(avctx, &s->last_frame); av_free(s->buffer1); av_free(s->buffer2); return 0; }"} {"target": 0, "idx": 21619, "func": "static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, int flags, int send) { abi_long ret, len; struct msghdr msg; int count; struct iovec *vec; abi_ulong target_vec; if (msgp->msg_name) { msg.msg_namelen = tswap32(msgp->msg_namelen); msg.msg_name = alloca(msg.msg_namelen+1); ret = target_to_host_sockaddr(fd, msg.msg_name, tswapal(msgp->msg_name), msg.msg_namelen); if (ret) { goto out2; } } else { 
msg.msg_name = NULL; msg.msg_namelen = 0; } msg.msg_controllen = 2 * tswapal(msgp->msg_controllen); msg.msg_control = alloca(msg.msg_controllen); msg.msg_flags = tswap32(msgp->msg_flags); count = tswapal(msgp->msg_iovlen); target_vec = tswapal(msgp->msg_iov); vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, target_vec, count, send); if (vec == NULL) { ret = -host_to_target_errno(errno); goto out2; } msg.msg_iovlen = count; msg.msg_iov = vec; if (send) { ret = target_to_host_cmsg(&msg, msgp); if (ret == 0) ret = get_errno(sendmsg(fd, &msg, flags)); } else { ret = get_errno(recvmsg(fd, &msg, flags)); if (!is_error(ret)) { len = ret; ret = host_to_target_cmsg(msgp, &msg); if (!is_error(ret)) { msgp->msg_namelen = tswap32(msg.msg_namelen); if (msg.msg_name != NULL) { ret = host_to_target_sockaddr(tswapal(msgp->msg_name), msg.msg_name, msg.msg_namelen); if (ret) { goto out; } } ret = len; } } } out: unlock_iovec(vec, target_vec, count, !send); out2: return ret; }"} {"target": 0, "idx": 21621, "func": "static void omap_clkm_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; uint16_t diff; omap_clk clk; static const char *clkschemename[8] = { \"fully synchronous\", \"fully asynchronous\", \"synchronous scalable\", \"mix mode 1\", \"mix mode 2\", \"bypass mode\", \"mix mode 3\", \"mix mode 4\", }; if (size != 2) { omap_badwidth_write16(opaque, addr, value); return; } switch (addr) { case 0x00: /* ARM_CKCTL */ diff = s->clkm.arm_ckctl ^ value; s->clkm.arm_ckctl = value & 0x7fff; omap_clkm_ckctl_update(s, diff, value); return; case 0x04: /* ARM_IDLECT1 */ diff = s->clkm.arm_idlect1 ^ value; s->clkm.arm_idlect1 = value & 0x0fff; omap_clkm_idlect1_update(s, diff, value); return; case 0x08: /* ARM_IDLECT2 */ diff = s->clkm.arm_idlect2 ^ value; s->clkm.arm_idlect2 = value & 0x07ff; omap_clkm_idlect2_update(s, diff, value); return; case 0x0c: /* ARM_EWUPCT */ s->clkm.arm_ewupct = value & 0x003f; return; case 0x10: /* ARM_RSTCT1 */ diff = s->clkm.arm_rstct1 ^ value; s->clkm.arm_rstct1 = value & 0x0007; if (value & 9) { qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); s->clkm.cold_start = 0xa; } if (diff & ~value & 4) { /* DSP_RST */ omap_mpui_reset(s); omap_tipb_bridge_reset(s->private_tipb); omap_tipb_bridge_reset(s->public_tipb); } if (diff & 2) { /* DSP_EN */ clk = omap_findclk(s, \"dsp_ck\"); omap_clk_canidle(clk, (~value >> 1) & 1); } return; case 0x14: /* ARM_RSTCT2 */ s->clkm.arm_rstct2 = value & 0x0001; return; case 0x18: /* ARM_SYSST */ if ((s->clkm.clocking_scheme ^ (value >> 11)) & 7) { s->clkm.clocking_scheme = (value >> 11) & 7; printf(\"%s: clocking scheme set to %s\\n\", __FUNCTION__, clkschemename[s->clkm.clocking_scheme]); } s->clkm.cold_start &= value & 0x3f; return; case 0x1c: /* ARM_CKOUT1 */ diff = s->clkm.arm_ckout1 ^ value; s->clkm.arm_ckout1 = value & 0x003f; omap_clkm_ckout1_update(s, diff, value); return; case 0x20: /* ARM_CKOUT2 */ default: OMAP_BAD_REG(addr); } }"} {"target": 0, "idx": 21627, "func": "static void ppce500_reset_device_tree(void *opaque) { DeviceTreeParams *p = opaque; ppce500_load_device_tree(p->machine, &p->params, p->addr, p->initrd_base, p->initrd_size, false); }"} {"target": 1, "idx": 21633, "func": "iscsi_aio_write16_cb(struct iscsi_context *iscsi, int status, void *command_data, void *opaque) { IscsiAIOCB *acb = opaque; trace_iscsi_aio_write16_cb(iscsi, status, acb, acb->canceled); g_free(acb->buf); if (acb->canceled != 0) { qemu_aio_release(acb); 
scsi_free_scsi_task(acb->task); acb->task = NULL; return; } acb->status = 0; if (status < 0) { error_report(\"Failed to write16 data to iSCSI lun. %s\", iscsi_get_error(iscsi)); acb->status = -EIO; } iscsi_schedule_bh(acb); scsi_free_scsi_task(acb->task); acb->task = NULL; }"} {"target": 0, "idx": 21664, "func": "static int apc_probe(AVProbeData *p) { if (p->buf_size < 8) return 0; if (!strncmp(p->buf, \"CRYO_APC\", 8)) return AVPROBE_SCORE_MAX; return 0; }"} {"target": 1, "idx": 21683, "func": "static int hnm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *frame = data; Hnm4VideoContext *hnm = avctx->priv_data; int ret; uint16_t chunk_id; if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) return ret; chunk_id = AV_RL16(avpkt->data + 4); if (chunk_id == HNM4_CHUNK_ID_PL) { hnm_update_palette(avctx, avpkt->data, avpkt->size); frame->palette_has_changed = 1; } else if (chunk_id == HNM4_CHUNK_ID_IZ) { unpack_intraframe(avctx, avpkt->data + 12, avpkt->size - 12); memcpy(hnm->previous, hnm->current, hnm->width * hnm->height); if (hnm->version == 0x4a) memcpy(hnm->processed, hnm->current, hnm->width * hnm->height); else postprocess_current_frame(avctx); copy_processed_frame(avctx, frame); frame->pict_type = AV_PICTURE_TYPE_I; frame->key_frame = 1; memcpy(frame->data[1], hnm->palette, 256 * 4); *got_frame = 1; } else if (chunk_id == HNM4_CHUNK_ID_IU) { if (hnm->version == 0x4a) { decode_interframe_v4a(avctx, avpkt->data + 8, avpkt->size - 8); memcpy(hnm->processed, hnm->current, hnm->width * hnm->height); } else { decode_interframe_v4(avctx, avpkt->data + 8, avpkt->size - 8); postprocess_current_frame(avctx); copy_processed_frame(avctx, frame); frame->pict_type = AV_PICTURE_TYPE_P; frame->key_frame = 0; memcpy(frame->data[1], hnm->palette, 256 * 4); *got_frame = 1; hnm_flip_buffers(hnm); } else { av_log(avctx, AV_LOG_ERROR, \"invalid chunk id: %d\\n\", chunk_id); return avpkt->size;"} {"target": 0, "idx": 21685, "func": "int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7) { VP8Context *s = avctx->priv_data; int ret, i, referenced, num_jobs; enum AVDiscard skip_thresh; VP8Frame *av_uninit(curframe), *prev_frame; if (is_vp7) ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size); else ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size); if (ret < 0) goto err; if (!is_vp7 && s->pix_fmt == AV_PIX_FMT_NONE) { enum AVPixelFormat pix_fmts[] = { #if CONFIG_VP8_VAAPI_HWACCEL AV_PIX_FMT_VAAPI, #endif AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, }; s->pix_fmt = ff_get_format(s->avctx, pix_fmts); if (s->pix_fmt < 0) { ret = AVERROR(EINVAL); goto err; } avctx->pix_fmt = s->pix_fmt; } prev_frame = s->framep[VP56_FRAME_CURRENT]; referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT || s->update_altref == VP56_FRAME_CURRENT; skip_thresh = !referenced ? AVDISCARD_NONREF : !s->keyframe ? 
AVDISCARD_NONKEY : AVDISCARD_ALL; if (avctx->skip_frame >= skip_thresh) { s->invisible = 1; memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4); goto skip_decode; } s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh; // release no longer referenced frames for (i = 0; i < 5; i++) if (s->frames[i].tf.f->data[0] && &s->frames[i] != prev_frame && &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) vp8_release_frame(s, &s->frames[i]); curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s); if (!s->colorspace) avctx->colorspace = AVCOL_SPC_BT470BG; if (s->fullrange) avctx->color_range = AVCOL_RANGE_JPEG; else avctx->color_range = AVCOL_RANGE_MPEG; /* Given that arithmetic probabilities are updated every frame, it's quite * likely that the values we have on a random interframe are complete * junk if we didn't start decode on a keyframe. So just don't display * anything rather than junk. */ if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] || !s->framep[VP56_FRAME_GOLDEN] || !s->framep[VP56_FRAME_GOLDEN2])) { av_log(avctx, AV_LOG_WARNING, \"Discarding interframe without a prior keyframe!\\n\"); ret = AVERROR_INVALIDDATA; goto err; } curframe->tf.f->key_frame = s->keyframe; curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; if ((ret = vp8_alloc_frame(s, curframe, referenced))) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed!\\n\"); goto err; } // check if golden and altref are swapped if (s->update_altref != VP56_FRAME_NONE) s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref]; else s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2]; if (s->update_golden != VP56_FRAME_NONE) s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden]; else s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN]; if (s->update_last) s->next_framep[VP56_FRAME_PREVIOUS] = curframe; else s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS]; s->next_framep[VP56_FRAME_CURRENT] = curframe; ff_thread_finish_setup(avctx); if (avctx->hwaccel) { ret = avctx->hwaccel->start_frame(avctx, avpkt->data, avpkt->size); if (ret < 0) goto err; ret = avctx->hwaccel->decode_slice(avctx, avpkt->data, avpkt->size); if (ret < 0) goto err; ret = avctx->hwaccel->end_frame(avctx); if (ret < 0) goto err; } else { s->linesize = curframe->tf.f->linesize[0]; s->uvlinesize = curframe->tf.f->linesize[1]; memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz)); /* Zero macroblock structures for top/top-left prediction * from outside the frame. */ if (!s->mb_layout) memset(s->macroblocks + s->mb_height * 2 - 1, 0, (s->mb_width + 1) * sizeof(*s->macroblocks)); if (!s->mb_layout && s->keyframe) memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4); memset(s->ref_count, 0, sizeof(s->ref_count)); if (s->mb_layout == 1) { // Make sure the previous frame has read its segmentation map, // if we re-use the same map. 
if (prev_frame && s->segmentation.enabled && !s->segmentation.update_map) ff_thread_await_progress(&prev_frame->tf, 1, 0); if (is_vp7) vp7_decode_mv_mb_modes(avctx, curframe, prev_frame); else vp8_decode_mv_mb_modes(avctx, curframe, prev_frame); } if (avctx->active_thread_type == FF_THREAD_FRAME) num_jobs = 1; else num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count); s->num_jobs = num_jobs; s->curframe = curframe; s->prev_frame = prev_frame; s->mv_min.y = -MARGIN; s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN; for (i = 0; i < MAX_THREADS; i++) { s->thread_data[i].thread_mb_pos = 0; s->thread_data[i].wait_mb_pos = INT_MAX; } if (is_vp7) avctx->execute2(avctx, vp7_decode_mb_row_sliced, s->thread_data, NULL, num_jobs); else avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL, num_jobs); } ff_thread_report_progress(&curframe->tf, INT_MAX, 0); memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4); skip_decode: // if future frames don't use the updated probabilities, // reset them to the values we saved if (!s->update_probabilities) s->prob[0] = s->prob[1]; if (!s->invisible) { if ((ret = av_frame_ref(data, curframe->tf.f)) < 0) return ret; *got_frame = 1; } return avpkt->size; err: memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4); return ret; }"} {"target": 1, "idx": 21691, "func": "NBDExport *nbd_export_new(BlockDriverState *bs, off_t dev_offset, off_t size, uint32_t nbdflags, void (*close)(NBDExport *)) { NBDExport *exp = g_malloc0(sizeof(NBDExport)); exp->refcount = 1; QTAILQ_INIT(&exp->clients); exp->bs = bs; exp->dev_offset = dev_offset; exp->nbdflags = nbdflags; exp->size = size == -1 ? bdrv_getlength(bs) : size; exp->close = close; exp->ctx = bdrv_get_aio_context(bs); bdrv_ref(bs); bdrv_add_aio_context_notifier(bs, bs_aio_attached, bs_aio_detach, exp); return exp; }"} {"target": 1, "idx": 21704, "func": "static int qemu_reset_requested(void) { int r = reset_requested; if (r && replay_checkpoint(CHECKPOINT_RESET_REQUESTED)) { reset_requested = 0; return r; } return false; }"} {"target": 0, "idx": 21726, "func": "static int add_calxeda_midway_xgmac_fdt_node(SysBusDevice *sbdev, void *opaque) { PlatformBusFDTData *data = opaque; PlatformBusDevice *pbus = data->pbus; void *fdt = data->fdt; const char *parent_node = data->pbus_node_name; int compat_str_len, i, ret = -1; char *nodename; uint32_t *irq_attr, *reg_attr; uint64_t mmio_base, irq_number; VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); VFIODevice *vbasedev = &vdev->vbasedev; mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); nodename = g_strdup_printf(\"%s/%s@%\" PRIx64, parent_node, vbasedev->name, mmio_base); qemu_fdt_add_subnode(fdt, nodename); compat_str_len = strlen(vdev->compat) + 1; qemu_fdt_setprop(fdt, nodename, \"compatible\", vdev->compat, compat_str_len); qemu_fdt_setprop(fdt, nodename, \"dma-coherent\", \"\", 0); reg_attr = g_new(uint32_t, vbasedev->num_regions * 2); for (i = 0; i < vbasedev->num_regions; i++) { mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, i); reg_attr[2 * i] = cpu_to_be32(mmio_base); reg_attr[2 * i + 1] = cpu_to_be32( memory_region_size(&vdev->regions[i]->mem)); } ret = qemu_fdt_setprop(fdt, nodename, \"reg\", reg_attr, vbasedev->num_regions * 2 * sizeof(uint32_t)); if (ret) { error_report(\"could not set reg property of node %s\", nodename); goto fail_reg; } irq_attr = g_new(uint32_t, vbasedev->num_irqs * 3); for (i = 0; i < vbasedev->num_irqs; i++) { irq_number = platform_bus_get_irqn(pbus, sbdev , i) + 
data->irq_start; irq_attr[3 * i] = cpu_to_be32(GIC_FDT_IRQ_TYPE_SPI); irq_attr[3 * i + 1] = cpu_to_be32(irq_number); irq_attr[3 * i + 2] = cpu_to_be32(GIC_FDT_IRQ_FLAGS_LEVEL_HI); } ret = qemu_fdt_setprop(fdt, nodename, \"interrupts\", irq_attr, vbasedev->num_irqs * 3 * sizeof(uint32_t)); if (ret) { error_report(\"could not set interrupts property of node %s\", nodename); } g_free(irq_attr); fail_reg: g_free(reg_attr); g_free(nodename); return ret; }"} {"target": 0, "idx": 21738, "func": "static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, long width, long height, long lumStride, long chromStride, long srcStride) { long y; const x86_reg chromWidth= width>>1; for (y=0; yclk = clk; omap_pwt_reset(s); memory_region_init_io(&s->iomem, &omap_pwt_ops, s, \"omap-pwt\", 0x800); memory_region_add_subregion(system_memory, base, &s->iomem); return s; }"} {"target": 0, "idx": 21760, "func": "static int init_er(MpegEncContext *s) { ERContext *er = &s->er; int mb_array_size = s->mb_height * s->mb_stride; int i; er->avctx = s->avctx; er->mecc = &s->mecc; er->mb_index2xy = s->mb_index2xy; er->mb_num = s->mb_num; er->mb_width = s->mb_width; er->mb_height = s->mb_height; er->mb_stride = s->mb_stride; er->b8_stride = s->b8_stride; er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride); er->error_status_table = av_mallocz(mb_array_size); if (!er->er_temp_buffer || !er->error_status_table) goto fail; er->mbskip_table = s->mbskip_table; er->mbintra_table = s->mbintra_table; for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++) er->dc_val[i] = s->dc_val[i]; er->decode_mb = mpeg_er_decode_mb; er->opaque = s; return 0; fail: av_freep(&er->er_temp_buffer); av_freep(&er->error_status_table); return AVERROR(ENOMEM); }"} {"target": 0, "idx": 21765, "func": "static av_cold int xbm_encode_close(AVCodecContext *avctx) { av_frame_free(&avctx->coded_frame); return 0; }"} {"target": 1, "idx": 21815, "func": "int ff_lzw_decode_init(LZWState *p, int csize, uint8_t *buf, int buf_size, int mode) { struct LZWState *s = (struct LZWState *)p; if(csize < 1 || csize > LZW_MAXBITS) return -1; /* read buffer */ s->eob_reached = 0; s->pbuf = buf; s->ebuf = s->pbuf + buf_size; s->bbuf = 0; s->bbits = 0; s->bs = 0; /* decoder */ s->codesize = csize; s->cursize = s->codesize + 1; s->curmask = mask[s->cursize]; s->top_slot = 1 << s->cursize; s->clear_code = 1 << s->codesize; s->end_code = s->clear_code + 1; s->slot = s->newcodes = s->clear_code + 2; s->oc = s->fc = 0; s->sp = s->stack; s->mode = mode; switch(s->mode){ case FF_LZW_GIF: s->extra_slot= 0; break; case FF_LZW_TIFF: s->extra_slot= 1; break; default: return -1; } return 0; }"} {"target": 1, "idx": 21823, "func": "static target_long monitor_get_tbl (const struct MonitorDef *md, int val) { CPUState *env = mon_get_cpu(); if (!env) return 0; return cpu_ppc_load_tbl(env); }"} {"target": 0, "idx": 21843, "func": "static void sub2video_update(InputStream *ist, AVSubtitle *sub) { AVFrame *frame = ist->sub2video.frame; int8_t *dst; int dst_linesize; int num_rects, i; int64_t pts, end_pts; if (!frame) return; if (sub) { pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL, AV_TIME_BASE_Q, ist->st->time_base); end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL, AV_TIME_BASE_Q, ist->st->time_base); num_rects = sub->num_rects; } else { pts = ist->sub2video.end_pts; end_pts = INT64_MAX; num_rects = 0; } if (sub2video_get_blank_frame(ist) < 0) { av_log(ist->dec_ctx, AV_LOG_ERROR, \"Impossible to get a blank canvas.\\n\"); 
return; } dst = frame->data [0]; dst_linesize = frame->linesize[0]; for (i = 0; i < num_rects; i++) sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]); sub2video_push_ref(ist, pts); ist->sub2video.end_pts = end_pts; }"} {"target": 0, "idx": 21858, "func": "static void sdl_grab_start(void) { if (guest_cursor) { SDL_SetCursor(guest_sprite); SDL_WarpMouse(guest_x, guest_y); } else sdl_hide_cursor(); if (SDL_WM_GrabInput(SDL_GRAB_ON) == SDL_GRAB_ON) { gui_grab = 1; sdl_update_caption(); } else sdl_show_cursor(); }"} {"target": 0, "idx": 21860, "func": "VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, VirtIOHandleOutput handle_output) { int i; for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { if (vdev->vq[i].vring.num == 0) break; } if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) abort(); vdev->vq[i].vring.num = queue_size; vdev->vq[i].vring.num_default = queue_size; vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN; vdev->vq[i].handle_output = handle_output; vdev->vq[i].handle_aio_output = NULL; return &vdev->vq[i]; }"} {"target": 0, "idx": 21864, "func": "static void discard_vq_data(VirtQueue *vq, VirtIODevice *vdev) { VirtQueueElement elem; if (!virtio_queue_ready(vq)) { return; } while (virtqueue_pop(vq, &elem)) { virtqueue_push(vq, &elem, 0); } virtio_notify(vdev, vq); }"} {"target": 0, "idx": 21871, "func": "static void usbredir_chardev_open(USBRedirDevice *dev) { uint32_t caps[USB_REDIR_CAPS_SIZE] = { 0, }; char version[32]; int flags = 0; /* Make sure any pending closes are handled (no-op if none pending) */ usbredir_chardev_close_bh(dev); qemu_bh_cancel(dev->chardev_close_bh); DPRINTF(\"creating usbredirparser\\n\"); strcpy(version, \"qemu usb-redir guest \"); pstrcat(version, sizeof(version), qemu_get_version()); dev->parser = qemu_oom_check(usbredirparser_create()); dev->parser->priv = dev; dev->parser->log_func = usbredir_log; dev->parser->read_func = usbredir_read; dev->parser->write_func = usbredir_write; dev->parser->hello_func = usbredir_hello; dev->parser->device_connect_func = usbredir_device_connect; dev->parser->device_disconnect_func = usbredir_device_disconnect; dev->parser->interface_info_func = usbredir_interface_info; dev->parser->ep_info_func = usbredir_ep_info; dev->parser->configuration_status_func = usbredir_configuration_status; dev->parser->alt_setting_status_func = usbredir_alt_setting_status; dev->parser->iso_stream_status_func = usbredir_iso_stream_status; dev->parser->interrupt_receiving_status_func = usbredir_interrupt_receiving_status; dev->parser->bulk_streams_status_func = usbredir_bulk_streams_status; dev->parser->control_packet_func = usbredir_control_packet; dev->parser->bulk_packet_func = usbredir_bulk_packet; dev->parser->iso_packet_func = usbredir_iso_packet; dev->parser->interrupt_packet_func = usbredir_interrupt_packet; dev->read_buf = NULL; dev->read_buf_size = 0; usbredirparser_caps_set_cap(caps, usb_redir_cap_connect_device_version); usbredirparser_caps_set_cap(caps, usb_redir_cap_filter); usbredirparser_caps_set_cap(caps, usb_redir_cap_ep_info_max_packet_size); usbredirparser_caps_set_cap(caps, usb_redir_cap_64bits_ids); if (runstate_check(RUN_STATE_INMIGRATE)) { flags |= usbredirparser_fl_no_hello; } usbredirparser_init(dev->parser, version, caps, USB_REDIR_CAPS_SIZE, flags); usbredirparser_do_write(dev->parser); }"} {"target": 0, "idx": 21875, "func": "void migrate_compress_threads_create(void) { int i, thread_count; if (!migrate_use_compression()) { return; } quit_comp_thread = false; 
compression_switch = true; thread_count = migrate_compress_threads(); compress_threads = g_new0(QemuThread, thread_count); comp_param = g_new0(CompressParam, thread_count); comp_done_cond = g_new0(QemuCond, 1); comp_done_lock = g_new0(QemuMutex, 1); qemu_cond_init(comp_done_cond); qemu_mutex_init(comp_done_lock); for (i = 0; i < thread_count; i++) { /* com_param[i].file is just used as a dummy buffer to save data, set * it's ops to empty. */ comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops); comp_param[i].done = true; qemu_mutex_init(&comp_param[i].mutex); qemu_cond_init(&comp_param[i].cond); qemu_thread_create(compress_threads + i, \"compress\", do_data_compress, comp_param + i, QEMU_THREAD_JOINABLE); } }"} {"target": 0, "idx": 21876, "func": "void validate_bootdevices(const char *devices) { /* We just do some generic consistency checks */ const char *p; int bitmap = 0; for (p = devices; *p != '\\0'; p++) { /* Allowed boot devices are: * a-b: floppy disk drives * c-f: IDE disk drives * g-m: machine implementation dependent drives * n-p: network devices * It's up to each machine implementation to check if the given boot * devices match the actual hardware implementation and firmware * features. */ if (*p < 'a' || *p > 'p') { fprintf(stderr, \"Invalid boot device '%c'\\n\", *p); exit(1); } if (bitmap & (1 << (*p - 'a'))) { fprintf(stderr, \"Boot device '%c' was given twice\\n\", *p); exit(1); } bitmap |= 1 << (*p - 'a'); } }"} {"target": 0, "idx": 21880, "func": "static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, TranslationBlock **last_tb, int *tb_exit, SyncClocks *sc) { uintptr_t ret; int32_t insns_left; if (unlikely(atomic_read(&cpu->exit_request))) { return; } trace_exec_tb(tb, tb->pc); ret = cpu_tb_exec(cpu, tb); tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK); *tb_exit = ret & TB_EXIT_MASK; if (*tb_exit != TB_EXIT_REQUESTED) { *last_tb = tb; return; } *last_tb = NULL; insns_left = atomic_read(&cpu->icount_decr.u32); atomic_set(&cpu->icount_decr.u16.high, 0); if (insns_left < 0) { /* Something asked us to stop executing * chained TBs; just continue round the main * loop. Whatever requested the exit will also * have set something else (eg exit_request or * interrupt_request) which we will handle * next time around the loop. But we need to * ensure the zeroing of tcg_exit_req (see cpu_tb_exec) * comes before the next read of cpu->exit_request * or cpu->interrupt_request. */ smp_mb(); return; } /* Instruction counter expired. */ assert(use_icount); #ifndef CONFIG_USER_ONLY if (cpu->icount_extra) { /* Refill decrementer and continue execution. */ cpu->icount_extra += insns_left; insns_left = MIN(0xffff, cpu->icount_extra); cpu->icount_extra -= insns_left; cpu->icount_decr.u16.low = insns_left; } else { /* Execute any remaining instructions, then let the main loop * handle the next event. 
*/ if (insns_left > 0) { cpu_exec_nocache(cpu, insns_left, tb, false); align_clocks(sc, cpu); } cpu->exception_index = EXCP_INTERRUPT; cpu_loop_exit(cpu); } #endif }"} {"target": 0, "idx": 21883, "func": "static int test_butterflies_float(AVFloatDSPContext *fdsp, AVFloatDSPContext *cdsp, const float *v1, const float *v2) { LOCAL_ALIGNED(32, float, cv1, [LEN]); LOCAL_ALIGNED(32, float, cv2, [LEN]); LOCAL_ALIGNED(32, float, ov1, [LEN]); LOCAL_ALIGNED(32, float, ov2, [LEN]); int ret; memcpy(cv1, v1, LEN * sizeof(*v1)); memcpy(cv2, v2, LEN * sizeof(*v2)); memcpy(ov1, v1, LEN * sizeof(*v1)); memcpy(ov2, v2, LEN * sizeof(*v2)); cdsp->butterflies_float(cv1, cv2, LEN); fdsp->butterflies_float(ov1, ov2, LEN); if ((ret = compare_floats(cv1, ov1, LEN, FLT_EPSILON)) || (ret = compare_floats(cv2, ov2, LEN, FLT_EPSILON))) av_log(NULL, AV_LOG_ERROR, \"butterflies_float failed\\n\"); return ret; }"} {"target": 0, "idx": 21885, "func": "static int64_t mpegps_read_dts(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit) { int len, startcode; int64_t pos, pts, dts; pos = *ppos; #ifdef DEBUG_SEEK printf(\"read_dts: pos=0x%\"PRIx64\" next=%d -> \", pos, find_next); #endif url_fseek(s->pb, pos, SEEK_SET); for(;;) { len = mpegps_read_pes_header(s, &pos, &startcode, &pts, &dts); if (len < 0) { #ifdef DEBUG_SEEK printf(\"none (ret=%d)\\n\", len); #endif return AV_NOPTS_VALUE; } if (startcode == s->streams[stream_index]->id && dts != AV_NOPTS_VALUE) { break; } url_fskip(s->pb, len); } #ifdef DEBUG_SEEK printf(\"pos=0x%\"PRIx64\" dts=0x%\"PRIx64\" %0.3f\\n\", pos, dts, dts / 90000.0); #endif *ppos = pos; return dts; }"} {"target": 1, "idx": 21901, "func": "static void pc_dimm_realize(DeviceState *dev, Error **errp) { PCDIMMDevice *dimm = PC_DIMM(dev); if (!dimm->hostmem) { error_setg(errp, \"'\" PC_DIMM_MEMDEV_PROP \"' property is not set\"); return; } if ((nb_numa_nodes > 0) && (dimm->node >= nb_numa_nodes)) { error_setg(errp, \"'DIMM property \" PC_DIMM_NODE_PROP \" has value %\" PRIu32 \"' which exceeds the number of numa nodes: %d\", dimm->node, nb_numa_nodes); return; } }"} {"target": 1, "idx": 21914, "func": "static int libssh_open(URLContext *h, const char *url, int flags) { static const int verbosity = SSH_LOG_NOLOG; LIBSSHContext *s = h->priv_data; char proto[10], path[MAX_URL_SIZE], hostname[1024], credencials[1024]; int port = 22, access, ret; long timeout = s->rw_timeout * 1000; const char *user = NULL, *pass = NULL; char *end = NULL; sftp_attributes stat; av_url_split(proto, sizeof(proto), credencials, sizeof(credencials), hostname, sizeof(hostname), &port, path, sizeof(path), url); if (port <= 0 || port > 65535) port = 22; if (!(s->session = ssh_new())) { ret = AVERROR(ENOMEM); goto fail; } user = av_strtok(credencials, \":\", &end); pass = av_strtok(end, \":\", &end); ssh_options_set(s->session, SSH_OPTIONS_HOST, hostname); ssh_options_set(s->session, SSH_OPTIONS_PORT, &port); ssh_options_set(s->session, SSH_OPTIONS_LOG_VERBOSITY, &verbosity); if (timeout > 0) ssh_options_set(s->session, SSH_OPTIONS_TIMEOUT_USEC, &timeout); if (user) ssh_options_set(s->session, SSH_OPTIONS_USER, user); if (ssh_connect(s->session) != SSH_OK) { av_log(h, AV_LOG_ERROR, \"Connection failed. 
%s\\n\", ssh_get_error(s->session)); ret = AVERROR(EIO); goto fail; } if (pass && ssh_userauth_password(s->session, NULL, pass) != SSH_AUTH_SUCCESS) { av_log(h, AV_LOG_ERROR, \"Error authenticating with password: %s\\n\", ssh_get_error(s->session)); ret = AVERROR(EACCES); goto fail; } if (!(s->sftp = sftp_new(s->session))) { av_log(h, AV_LOG_ERROR, \"SFTP session creation failed: %s\\n\", ssh_get_error(s->session)); ret = AVERROR(ENOMEM); goto fail; } if (sftp_init(s->sftp) != SSH_OK) { av_log(h, AV_LOG_ERROR, \"Error initializing sftp session: %s\\n\", ssh_get_error(s->session)); ret = AVERROR(EIO); goto fail; } if ((flags & AVIO_FLAG_WRITE) && (flags & AVIO_FLAG_READ)) { access = O_CREAT | O_RDWR; if (s->trunc) access |= O_TRUNC; } else if (flags & AVIO_FLAG_WRITE) { access = O_CREAT | O_WRONLY; if (s->trunc) access |= O_TRUNC; } else { access = O_RDONLY; } /* 0666 = -rw-rw-rw- = read+write for everyone, minus umask */ if (!(s->file = sftp_open(s->sftp, path, access, 0666))) { av_log(h, AV_LOG_ERROR, \"Error opening sftp file: %s\\n\", ssh_get_error(s->session)); ret = AVERROR(EIO); goto fail; } if (!(stat = sftp_fstat(s->file))) { av_log(h, AV_LOG_WARNING, \"Cannot stat remote file %s.\\n\", path); s->filesize = -1; } else { s->filesize = stat->size; sftp_attributes_free(stat); } return 0; fail: libssh_close(h); return ret; }"} {"target": 1, "idx": 21918, "func": "void cpu_write_xer(CPUPPCState *env, target_ulong xer) { env->so = (xer >> XER_SO) & 1; env->ov = (xer >> XER_OV) & 1; env->ca = (xer >> XER_CA) & 1; env->xer = xer & ~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_CA)); }"} {"target": 1, "idx": 21921, "func": "void helper_divq_EAX_T0(void) { uint64_t r0, r1; if (T0 == 0) { raise_exception(EXCP00_DIVZ); } r0 = EAX; r1 = EDX; div64(&r0, &r1, T0); EAX = r0; EDX = r1; }"} {"target": 1, "idx": 21948, "func": "static int hevc_init(AVCodecParserContext *s) { HEVCContext *h = &((HEVCParseContext *)s->priv_data)->h; h->HEVClc = av_mallocz(sizeof(HEVCLocalContext)); h->skipped_bytes_pos_size = INT_MAX; return 0; }"} {"target": 1, "idx": 21951, "func": "matroska_probe (AVProbeData *p) { uint64_t total = 0; int len_mask = 0x80, size = 1, n = 1; uint8_t probe_data[] = { 'm', 'a', 't', 'r', 'o', 's', 'k', 'a' }; if (p->buf_size < 5) return 0; /* ebml header? */ if ((p->buf[0] << 24 | p->buf[1] << 16 | p->buf[2] << 8 | p->buf[3]) != EBML_ID_HEADER) return 0; /* length of header */ total = p->buf[4]; while (size <= 8 && !(total & len_mask)) { size++; len_mask >>= 1; } if (size > 8) return 0; total &= (len_mask - 1); while (n < size) total = (total << 8) | p->buf[4 + n++]; /* does the probe data contain the whole header? */ if (p->buf_size < 4 + size + total) return 0; /* the header must contain the document type 'matroska'. For now, * we don't parse the whole header but simply check for the * availability of that array of characters inside the header. * Not fully fool-proof, but good enough. 
*/ for (n = 4 + size; n < 4 + size + total - sizeof(probe_data); n++) if (!memcmp (&p->buf[n], probe_data, sizeof(probe_data))) return AVPROBE_SCORE_MAX; return 0; }"} {"target": 1, "idx": 21972, "func": "static void FUNCC(pred16x16_vertical)(uint8_t *_src, int _stride){ int i; pixel *src = (pixel*)_src; int stride = _stride/sizeof(pixel); const pixel4 a = ((pixel4*)(src-stride))[0]; const pixel4 b = ((pixel4*)(src-stride))[1]; const pixel4 c = ((pixel4*)(src-stride))[2]; const pixel4 d = ((pixel4*)(src-stride))[3]; for(i=0; i<16; i++){ ((pixel4*)(src+i*stride))[0] = a; ((pixel4*)(src+i*stride))[1] = b; ((pixel4*)(src+i*stride))[2] = c; ((pixel4*)(src+i*stride))[3] = d; } }"} {"target": 0, "idx": 21974, "func": "static void rgb24_to_pal8(AVPicture *dst, AVPicture *src, int width, int height) { const unsigned char *p; unsigned char *q; int r, g, b, dst_wrap, src_wrap; int x, y, i; static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff }; uint32_t *pal; p = src->data[0]; src_wrap = src->linesize[0] - 3 * width; q = dst->data[0]; dst_wrap = dst->linesize[0] - width; for(y=0;ydata[1]; i = 0; for(r = 0; r < 6; r++) { for(g = 0; g < 6; g++) { for(b = 0; b < 6; b++) { pal[i++] = (0xff << 24) | (pal_value[r] << 16) | (pal_value[g] << 8) | pal_value[b]; } } } while (i < 256) pal[i++] = 0; }"} {"target": 1, "idx": 21980, "func": "vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count) { uint16_t old, new; if (unlikely(dev->broken)) { return; } /* Make sure buffer is written before we update index. */ smp_wmb(); old = vq->used_idx; new = old + count; vring_used_idx_set(dev, vq, new); vq->inuse -= count; if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) { vq->signalled_used_valid = false; } }"} {"target": 1, "idx": 22002, "func": "static int ioh3420_initfn(PCIDevice *d) { PCIEPort *p = PCIE_PORT(d); PCIESlot *s = PCIE_SLOT(d); int rc; pci_bridge_initfn(d, TYPE_PCIE_BUS); pcie_port_init_reg(d); rc = pci_bridge_ssvid_init(d, IOH_EP_SSVID_OFFSET, IOH_EP_SSVID_SVID, IOH_EP_SSVID_SSID); if (rc < 0) { goto err_bridge; } rc = msi_init(d, IOH_EP_MSI_OFFSET, IOH_EP_MSI_NR_VECTOR, IOH_EP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT, IOH_EP_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT); if (rc < 0) { goto err_bridge; } rc = pcie_cap_init(d, IOH_EP_EXP_OFFSET, PCI_EXP_TYPE_ROOT_PORT, p->port); if (rc < 0) { goto err_msi; } pcie_cap_arifwd_init(d); pcie_cap_deverr_init(d); pcie_cap_slot_init(d, s->slot); pcie_cap_root_init(d); pcie_chassis_create(s->chassis); rc = pcie_chassis_add_slot(s); if (rc < 0) { goto err_pcie_cap; } rc = pcie_aer_init(d, IOH_EP_AER_OFFSET, PCI_ERR_SIZEOF); if (rc < 0) { goto err; } pcie_aer_root_init(d); ioh3420_aer_vector_update(d); return 0; err: pcie_chassis_del_slot(s); err_pcie_cap: pcie_cap_exit(d); err_msi: msi_uninit(d); err_bridge: pci_bridge_exitfn(d); return rc; }"} {"target": 1, "idx": 22028, "func": "int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl) { int mb_xy; int mb_type, partition_count, cbp = 0; int dct8x8_allowed= h->pps.transform_8x8_mode; int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2; const int pixel_shift = h->pixel_shift; mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride; ff_tlog(h->avctx, \"pic:%d mb:%d/%d\\n\", h->frame_num, sl->mb_x, sl->mb_y); if (sl->slice_type_nos != AV_PICTURE_TYPE_I) { int skip; /* a skipped mb needs the aff flag from the following mb */ if (FRAME_MBAFF(h) && (sl->mb_y & 1) == 1 && sl->prev_mb_skipped) skip = sl->next_mb_skipped; else skip 
= decode_cabac_mb_skip(h, sl, sl->mb_x, sl->mb_y ); /* read skip flags */ if( skip ) { if (FRAME_MBAFF(h) && (sl->mb_y & 1) == 0) { h->cur_pic.mb_type[mb_xy] = MB_TYPE_SKIP; sl->next_mb_skipped = decode_cabac_mb_skip(h, sl, sl->mb_x, sl->mb_y+1 ); if(!sl->next_mb_skipped) sl->mb_mbaff = sl->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h, sl); } decode_mb_skip(h, sl); h->cbp_table[mb_xy] = 0; h->chroma_pred_mode_table[mb_xy] = 0; sl->last_qscale_diff = 0; return 0; } } if (FRAME_MBAFF(h)) { if ((sl->mb_y & 1) == 0) sl->mb_mbaff = sl->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h, sl); } sl->prev_mb_skipped = 0; fill_decode_neighbors(h, sl, -(MB_FIELD(sl))); if (sl->slice_type_nos == AV_PICTURE_TYPE_B) { int ctx = 0; av_assert2(sl->slice_type_nos == AV_PICTURE_TYPE_B); if (!IS_DIRECT(sl->left_type[LTOP] - 1)) ctx++; if (!IS_DIRECT(sl->top_type - 1)) ctx++; if( !get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+ctx] ) ){ mb_type= 0; /* B_Direct_16x16 */ }else if( !get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+3] ) ) { mb_type= 1 + get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+5] ); /* B_L[01]_16x16 */ }else{ int bits; bits = get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+4] ) << 3; bits+= get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+5] ) << 2; bits+= get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+5] ) << 1; bits+= get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+5] ); if( bits < 8 ){ mb_type= bits + 3; /* B_Bi_16x16 through B_L1_L0_16x8 */ }else if( bits == 13 ){ mb_type = decode_cabac_intra_mb_type(sl, 32, 0); goto decode_intra_mb; }else if( bits == 14 ){ mb_type= 11; /* B_L1_L0_8x16 */ }else if( bits == 15 ){ mb_type= 22; /* B_8x8 */ }else{ bits= ( bits<<1 ) + get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+5] ); mb_type= bits - 4; /* B_L0_Bi_* through B_Bi_Bi_* */ } } partition_count= b_mb_type_info[mb_type].partition_count; mb_type= b_mb_type_info[mb_type].type; } else if (sl->slice_type_nos == AV_PICTURE_TYPE_P) { if( get_cabac_noinline( &sl->cabac, &sl->cabac_state[14] ) == 0 ) { /* P-type */ if( get_cabac_noinline( &sl->cabac, &sl->cabac_state[15] ) == 0 ) { /* P_L0_D16x16, P_8x8 */ mb_type= 3 * get_cabac_noinline( &sl->cabac, &sl->cabac_state[16] ); } else { /* P_L0_D8x16, P_L0_D16x8 */ mb_type= 2 - get_cabac_noinline( &sl->cabac, &sl->cabac_state[17] ); } partition_count= p_mb_type_info[mb_type].partition_count; mb_type= p_mb_type_info[mb_type].type; } else { mb_type = decode_cabac_intra_mb_type(sl, 17, 0); goto decode_intra_mb; } } else { mb_type = decode_cabac_intra_mb_type(sl, 3, 1); if (sl->slice_type == AV_PICTURE_TYPE_SI && mb_type) mb_type--; av_assert2(sl->slice_type_nos == AV_PICTURE_TYPE_I); decode_intra_mb: partition_count = 0; cbp= i_mb_type_info[mb_type].cbp; sl->intra16x16_pred_mode = i_mb_type_info[mb_type].pred_mode; mb_type= i_mb_type_info[mb_type].type; } if (MB_FIELD(sl)) mb_type |= MB_TYPE_INTERLACED; h->slice_table[mb_xy] = sl->slice_num; if(IS_INTRA_PCM(mb_type)) { const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] * h->sps.bit_depth_luma >> 3; const uint8_t *ptr; // We assume these blocks are very rare so we do not optimize it. // FIXME The two following lines get the bitstream position in the cabac // decode, I think it should be done by a function in cabac.h (or cabac.c). ptr= sl->cabac.bytestream; if(sl->cabac.low&0x1) ptr--; if(CABAC_BITS==16){ if(sl->cabac.low&0x1FF) ptr--; } // The pixels are stored in the same order as levels in h->mb array. 
if ((int) (sl->cabac.bytestream_end - ptr) < mb_size) return -1; sl->intra_pcm_ptr = ptr; ptr += mb_size; ff_init_cabac_decoder(&sl->cabac, ptr, sl->cabac.bytestream_end - ptr); // All blocks are present h->cbp_table[mb_xy] = 0xf7ef; h->chroma_pred_mode_table[mb_xy] = 0; // In deblocking, the quantizer is 0 h->cur_pic.qscale_table[mb_xy] = 0; // All coeffs are present memset(h->non_zero_count[mb_xy], 16, 48); h->cur_pic.mb_type[mb_xy] = mb_type; sl->last_qscale_diff = 0; return 0; } fill_decode_caches(h, sl, mb_type); if( IS_INTRA( mb_type ) ) { int i, pred_mode; if( IS_INTRA4x4( mb_type ) ) { if (dct8x8_allowed && get_cabac_noinline(&sl->cabac, &sl->cabac_state[399 + sl->neighbor_transform_size])) { mb_type |= MB_TYPE_8x8DCT; for( i = 0; i < 16; i+=4 ) { int pred = pred_intra_mode(h, sl, i); int mode = decode_cabac_mb_intra4x4_pred_mode(sl, pred); fill_rectangle(&sl->intra4x4_pred_mode_cache[scan8[i]], 2, 2, 8, mode, 1); } } else { for( i = 0; i < 16; i++ ) { int pred = pred_intra_mode(h, sl, i); sl->intra4x4_pred_mode_cache[scan8[i]] = decode_cabac_mb_intra4x4_pred_mode(sl, pred); ff_tlog(h->avctx, \"i4x4 pred=%d mode=%d\\n\", pred, sl->intra4x4_pred_mode_cache[scan8[i]]); } } write_back_intra_pred_mode(h, sl); if (ff_h264_check_intra4x4_pred_mode(h, sl) < 0 ) return -1; } else { sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, sl, sl->intra16x16_pred_mode, 0); if (sl->intra16x16_pred_mode < 0) return -1; } if(decode_chroma){ h->chroma_pred_mode_table[mb_xy] = pred_mode = decode_cabac_mb_chroma_pre_mode(h, sl); pred_mode= ff_h264_check_intra_pred_mode(h, sl, pred_mode, 1 ); if( pred_mode < 0 ) return -1; sl->chroma_pred_mode = pred_mode; } else { sl->chroma_pred_mode = DC_128_PRED8x8; } } else if( partition_count == 4 ) { int i, j, sub_partition_count[4], list, ref[2][4]; if (sl->slice_type_nos == AV_PICTURE_TYPE_B ) { for( i = 0; i < 4; i++ ) { sl->sub_mb_type[i] = decode_cabac_b_mb_sub_type(sl); sub_partition_count[i] = b_sub_mb_type_info[sl->sub_mb_type[i]].partition_count; sl->sub_mb_type[i] = b_sub_mb_type_info[sl->sub_mb_type[i]].type; } if (IS_DIRECT(sl->sub_mb_type[0] | sl->sub_mb_type[1] | sl->sub_mb_type[2] | sl->sub_mb_type[3])) { ff_h264_pred_direct_motion(h, sl, &mb_type); sl->ref_cache[0][scan8[4]] = sl->ref_cache[1][scan8[4]] = sl->ref_cache[0][scan8[12]] = sl->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE; for( i = 0; i < 4; i++ ) fill_rectangle(&sl->direct_cache[scan8[4*i]], 2, 2, 8, (sl->sub_mb_type[i] >> 1) & 0xFF, 1); } } else { for( i = 0; i < 4; i++ ) { sl->sub_mb_type[i] = decode_cabac_p_mb_sub_type(sl); sub_partition_count[i] = p_sub_mb_type_info[sl->sub_mb_type[i]].partition_count; sl->sub_mb_type[i] = p_sub_mb_type_info[sl->sub_mb_type[i]].type; } } for( list = 0; list < sl->list_count; list++ ) { for( i = 0; i < 4; i++ ) { if(IS_DIRECT(sl->sub_mb_type[i])) continue; if(IS_DIR(sl->sub_mb_type[i], 0, list)){ unsigned rc = sl->ref_count[list] << MB_MBAFF(sl); if (rc > 1) { ref[list][i] = decode_cabac_mb_ref(sl, list, 4 * i); if (ref[list][i] >= rc) { av_log(h->avctx, AV_LOG_ERROR, \"Reference %d >= %d\\n\", ref[list][i], rc); return -1; } }else ref[list][i] = 0; } else { ref[list][i] = -1; } sl->ref_cache[list][scan8[4 * i] + 1] = sl->ref_cache[list][scan8[4 * i] + 8] = sl->ref_cache[list][scan8[4 * i] + 9] = ref[list][i]; } } if(dct8x8_allowed) dct8x8_allowed = get_dct8x8_allowed(h, sl); for (list = 0; list < sl->list_count; list++) { for(i=0; i<4; i++){ sl->ref_cache[list][scan8[4 * i]] = sl->ref_cache[list][scan8[4 * i] + 1]; 
if(IS_DIRECT(sl->sub_mb_type[i])){ fill_rectangle(sl->mvd_cache[list][scan8[4*i]], 2, 2, 8, 0, 2); continue; } if(IS_DIR(sl->sub_mb_type[i], 0, list) && !IS_DIRECT(sl->sub_mb_type[i])){ const int sub_mb_type= sl->sub_mb_type[i]; const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1; for(j=0; jmv_cache[list][ scan8[index] ]; uint8_t (* mvd_cache)[2]= &sl->mvd_cache[list][ scan8[index] ]; pred_motion(h, sl, index, block_width, list, sl->ref_cache[list][ scan8[index] ], &mx, &my); DECODE_CABAC_MB_MVD(sl, list, index) ff_tlog(h->avctx, \"final mv:%d %d\\n\", mx, my); if(IS_SUB_8X8(sub_mb_type)){ mv_cache[ 1 ][0]= mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx; mv_cache[ 1 ][1]= mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my; mvd_cache[ 1 ][0]= mvd_cache[ 8 ][0]= mvd_cache[ 9 ][0]= mpx; mvd_cache[ 1 ][1]= mvd_cache[ 8 ][1]= mvd_cache[ 9 ][1]= mpy; }else if(IS_SUB_8X4(sub_mb_type)){ mv_cache[ 1 ][0]= mx; mv_cache[ 1 ][1]= my; mvd_cache[ 1 ][0]= mpx; mvd_cache[ 1 ][1]= mpy; }else if(IS_SUB_4X8(sub_mb_type)){ mv_cache[ 8 ][0]= mx; mv_cache[ 8 ][1]= my; mvd_cache[ 8 ][0]= mpx; mvd_cache[ 8 ][1]= mpy; } mv_cache[ 0 ][0]= mx; mv_cache[ 0 ][1]= my; mvd_cache[ 0 ][0]= mpx; mvd_cache[ 0 ][1]= mpy; } }else{ fill_rectangle(sl->mv_cache [list][ scan8[4*i] ], 2, 2, 8, 0, 4); fill_rectangle(sl->mvd_cache[list][ scan8[4*i] ], 2, 2, 8, 0, 2); } } } } else if( IS_DIRECT(mb_type) ) { ff_h264_pred_direct_motion(h, sl, &mb_type); fill_rectangle(sl->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 2); fill_rectangle(sl->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 2); dct8x8_allowed &= h->sps.direct_8x8_inference_flag; } else { int list, i; if(IS_16X16(mb_type)){ for (list = 0; list < sl->list_count; list++) { if(IS_DIR(mb_type, 0, list)){ int ref; unsigned rc = sl->ref_count[list] << MB_MBAFF(sl); if (rc > 1) { ref= decode_cabac_mb_ref(sl, list, 0); if (ref >= rc) { av_log(h->avctx, AV_LOG_ERROR, \"Reference %d >= %d\\n\", ref, rc); return -1; } }else ref=0; fill_rectangle(&sl->ref_cache[list][ scan8[0] ], 4, 4, 8, ref, 1); } } for (list = 0; list < sl->list_count; list++) { if(IS_DIR(mb_type, 0, list)){ int mx,my,mpx,mpy; pred_motion(h, sl, 0, 4, list, sl->ref_cache[list][ scan8[0] ], &mx, &my); DECODE_CABAC_MB_MVD(sl, list, 0) ff_tlog(h->avctx, \"final mv:%d %d\\n\", mx, my); fill_rectangle(sl->mvd_cache[list][ scan8[0] ], 4, 4, 8, pack8to16(mpx,mpy), 2); fill_rectangle(sl->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4); } } } else if(IS_16X8(mb_type)){ for (list = 0; list < sl->list_count; list++) { for(i=0; i<2; i++){ if(IS_DIR(mb_type, i, list)){ int ref; unsigned rc = sl->ref_count[list] << MB_MBAFF(sl); if (rc > 1) { ref= decode_cabac_mb_ref(sl, list, 8 * i); if (ref >= rc) { av_log(h->avctx, AV_LOG_ERROR, \"Reference %d >= %d\\n\", ref, rc); return -1; } }else ref=0; fill_rectangle(&sl->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, ref, 1); }else fill_rectangle(&sl->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, (LIST_NOT_USED&0xFF), 1); } } for (list = 0; list < sl->list_count; list++) { for(i=0; i<2; i++){ if(IS_DIR(mb_type, i, list)){ int mx,my,mpx,mpy; pred_16x8_motion(h, sl, 8*i, list, sl->ref_cache[list][scan8[0] + 16*i], &mx, &my); DECODE_CABAC_MB_MVD(sl, list, 8*i) ff_tlog(h->avctx, \"final mv:%d %d\\n\", mx, my); fill_rectangle(sl->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack8to16(mpx,mpy), 2); fill_rectangle(sl->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx,my), 4); }else{ fill_rectangle(sl->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, 0, 2); fill_rectangle(sl->mv_cache[list][ scan8[0] 
+ 16*i ], 4, 2, 8, 0, 4); } } } }else{ av_assert2(IS_8X16(mb_type)); for (list = 0; list < sl->list_count; list++) { for(i=0; i<2; i++){ if(IS_DIR(mb_type, i, list)){ //FIXME optimize int ref; unsigned rc = sl->ref_count[list] << MB_MBAFF(sl); if (rc > 1) { ref = decode_cabac_mb_ref(sl, list, 4 * i); if (ref >= rc) { av_log(h->avctx, AV_LOG_ERROR, \"Reference %d >= %d\\n\", ref, rc); return -1; } }else ref=0; fill_rectangle(&sl->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, ref, 1); }else fill_rectangle(&sl->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, (LIST_NOT_USED&0xFF), 1); } } for (list = 0; list < sl->list_count; list++) { for(i=0; i<2; i++){ if(IS_DIR(mb_type, i, list)){ int mx,my,mpx,mpy; pred_8x16_motion(h, sl, i*4, list, sl->ref_cache[list][ scan8[0] + 2*i ], &mx, &my); DECODE_CABAC_MB_MVD(sl, list, 4*i) ff_tlog(h->avctx, \"final mv:%d %d\\n\", mx, my); fill_rectangle(sl->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack8to16(mpx,mpy), 2); fill_rectangle(sl->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx,my), 4); }else{ fill_rectangle(sl->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 2); fill_rectangle(sl->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 4); } } } } } if( IS_INTER( mb_type ) ) { h->chroma_pred_mode_table[mb_xy] = 0; write_back_motion(h, sl, mb_type); } if( !IS_INTRA16x16( mb_type ) ) { cbp = decode_cabac_mb_cbp_luma(sl); if(decode_chroma) cbp |= decode_cabac_mb_cbp_chroma(sl) << 4; } else { if (!decode_chroma && cbp>15) { av_log(h->avctx, AV_LOG_ERROR, \"gray chroma\\n\"); return AVERROR_INVALIDDATA; } } h->cbp_table[mb_xy] = sl->cbp = cbp; if( dct8x8_allowed && (cbp&15) && !IS_INTRA( mb_type ) ) { mb_type |= MB_TYPE_8x8DCT * get_cabac_noinline(&sl->cabac, &sl->cabac_state[399 + sl->neighbor_transform_size]); } /* It would be better to do this in fill_decode_caches, but we don't know * the transform mode of the current macroblock there. */ if (CHROMA444(h) && IS_8x8DCT(mb_type)){ int i; uint8_t *nnz_cache = sl->non_zero_count_cache; for (i = 0; i < 2; i++){ if (sl->left_type[LEFT(i)] && !IS_8x8DCT(sl->left_type[LEFT(i)])) { nnz_cache[3+8* 1 + 2*8*i]= nnz_cache[3+8* 2 + 2*8*i]= nnz_cache[3+8* 6 + 2*8*i]= nnz_cache[3+8* 7 + 2*8*i]= nnz_cache[3+8*11 + 2*8*i]= nnz_cache[3+8*12 + 2*8*i]= IS_INTRA(mb_type) ? 64 : 0; } } if (sl->top_type && !IS_8x8DCT(sl->top_type)){ uint32_t top_empty = CABAC(h) && !IS_INTRA(mb_type) ? 0 : 0x40404040; AV_WN32A(&nnz_cache[4+8* 0], top_empty); AV_WN32A(&nnz_cache[4+8* 5], top_empty); AV_WN32A(&nnz_cache[4+8*10], top_empty); } } h->cur_pic.mb_type[mb_xy] = mb_type; if( cbp || IS_INTRA16x16( mb_type ) ) { const uint8_t *scan, *scan8x8; const uint32_t *qmul; if(IS_INTERLACED(mb_type)){ scan8x8 = sl->qscale ? h->field_scan8x8 : h->field_scan8x8_q0; scan = sl->qscale ? h->field_scan : h->field_scan_q0; }else{ scan8x8 = sl->qscale ? h->zigzag_scan8x8 : h->zigzag_scan8x8_q0; scan = sl->qscale ? 
h->zigzag_scan : h->zigzag_scan_q0; } // decode_cabac_mb_dqp if(get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + (sl->last_qscale_diff != 0)])){ int val = 1; int ctx= 2; const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8); while( get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + ctx] ) ) { ctx= 3; val++; if(val > 2*max_qp){ //prevent infinite loop av_log(h->avctx, AV_LOG_ERROR, \"cabac decode of qscale diff failed at %d %d\\n\", sl->mb_x, sl->mb_y); return -1; } } if( val&0x01 ) val= (val + 1)>>1 ; else val= -((val + 1)>>1); sl->last_qscale_diff = val; sl->qscale += val; if (((unsigned)sl->qscale) > max_qp){ if (sl->qscale < 0) sl->qscale += max_qp + 1; else sl->qscale -= max_qp + 1; } sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale); sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale); }else sl->last_qscale_diff=0; decode_cabac_luma_residual(h, sl, scan, scan8x8, pixel_shift, mb_type, cbp, 0); if (CHROMA444(h)) { decode_cabac_luma_residual(h, sl, scan, scan8x8, pixel_shift, mb_type, cbp, 1); decode_cabac_luma_residual(h, sl, scan, scan8x8, pixel_shift, mb_type, cbp, 2); } else if (CHROMA422(h)) { if( cbp&0x30 ){ int c; for (c = 0; c < 2; c++) decode_cabac_residual_dc_422(h, sl, sl->mb + ((256 + 16*16*c) << pixel_shift), 3, CHROMA_DC_BLOCK_INDEX + c, chroma422_dc_scan, 8); } if( cbp&0x20 ) { int c, i, i8x8; for( c = 0; c < 2; c++ ) { int16_t *mb = sl->mb + (16*(16 + 16*c) << pixel_shift); qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]]; for (i8x8 = 0; i8x8 < 2; i8x8++) { for (i = 0; i < 4; i++) { const int index = 16 + 16 * c + 8*i8x8 + i; decode_cabac_residual_nondc(h, sl, mb, 4, index, scan + 1, qmul, 15); mb += 16<non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1); fill_rectangle(&sl->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1); } } else /* yuv420 */ { if( cbp&0x30 ){ int c; for (c = 0; c < 2; c++) decode_cabac_residual_dc(h, sl, sl->mb + ((256 + 16*16*c) << pixel_shift), 3, CHROMA_DC_BLOCK_INDEX+c, chroma_dc_scan, 4); } if( cbp&0x20 ) { int c, i; for( c = 0; c < 2; c++ ) { qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 
0:3)][sl->chroma_qp[c]]; for( i = 0; i < 4; i++ ) { const int index = 16 + 16 * c + i; decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), 4, index, scan + 1, qmul, 15); } } } else { fill_rectangle(&sl->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1); fill_rectangle(&sl->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1); } } } else { fill_rectangle(&sl->non_zero_count_cache[scan8[ 0]], 4, 4, 8, 0, 1); fill_rectangle(&sl->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1); fill_rectangle(&sl->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1); sl->last_qscale_diff = 0; } h->cur_pic.qscale_table[mb_xy] = sl->qscale; write_back_non_zero_count(h, sl); return 0; }"} {"target": 1, "idx": 22029, "func": "static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid) { int32_t datalen; int lun; DPRINTF(\"do_busid_cmd: busid 0x%x\\n\", busid); lun = busid & 7; datalen = s->current_dev->info->send_command(s->current_dev, 0, buf, lun); s->ti_size = datalen; if (datalen != 0) { s->rregs[ESP_RSTAT] = STAT_TC; s->dma_left = 0; s->dma_counter = 0; if (datalen > 0) { s->rregs[ESP_RSTAT] |= STAT_DI; s->current_dev->info->read_data(s->current_dev, 0); } else { s->rregs[ESP_RSTAT] |= STAT_DO; s->current_dev->info->write_data(s->current_dev, 0); } } s->rregs[ESP_RINTR] = INTR_BS | INTR_FC; s->rregs[ESP_RSEQ] = SEQ_CD; esp_raise_irq(s); }"} {"target": 1, "idx": 22031, "func": "static int swf_write_header(AVFormatContext *s) { SWFContext *swf = s->priv_data; AVIOContext *pb = s->pb; PutBitContext p; uint8_t buf1[256]; int i, width, height, rate, rate_base; int version; swf->sound_samples = 0; swf->swf_frame_number = 0; swf->video_frame_number = 0; for(i=0;inb_streams;i++) { AVCodecContext *enc = s->streams[i]->codec; if (enc->codec_type == AVMEDIA_TYPE_AUDIO) { if (swf->audio_enc) { av_log(s, AV_LOG_ERROR, \"SWF muxer only supports 1 audio stream\\n\"); return AVERROR_INVALIDDATA; if (enc->codec_id == AV_CODEC_ID_MP3) { if (!enc->frame_size) { av_log(s, AV_LOG_ERROR, \"audio frame size not set\\n\"); return -1; swf->audio_enc = enc; swf->audio_fifo= av_fifo_alloc(AUDIO_FIFO_SIZE); if (!swf->audio_fifo) return AVERROR(ENOMEM); } else { av_log(s, AV_LOG_ERROR, \"SWF muxer only supports MP3\\n\"); return -1; } else { if (swf->video_enc) { av_log(s, AV_LOG_ERROR, \"SWF muxer only supports 1 video stream\\n\"); return AVERROR_INVALIDDATA; if (enc->codec_id == AV_CODEC_ID_VP6F || enc->codec_id == AV_CODEC_ID_FLV1 || enc->codec_id == AV_CODEC_ID_MJPEG) { swf->video_st = s->streams[i]; swf->video_enc = enc; } else { av_log(s, AV_LOG_ERROR, \"SWF muxer only supports VP6, FLV1 and MJPEG\\n\"); return -1; if (!swf->video_enc) { /* currently, cannot work correctly if audio only */ width = 320; height = 200; rate = 10; rate_base= 1; } else { width = swf->video_enc->width; height = swf->video_enc->height; // TODO: should be avg_frame_rate rate = swf->video_st->time_base.den; rate_base = swf->video_st->time_base.num; if (!swf->audio_enc) swf->samples_per_frame = (44100LL * rate_base) / rate; else swf->samples_per_frame = (swf->audio_enc->sample_rate * rate_base) / rate; avio_write(pb, \"FWS\", 3); if (!strcmp(\"avm2\", s->oformat->name)) version = 9; else if (swf->video_enc && swf->video_enc->codec_id == AV_CODEC_ID_VP6F) version = 8; /* version 8 and above support VP6 codec */ else if (swf->video_enc && swf->video_enc->codec_id == AV_CODEC_ID_FLV1) version = 6; /* version 6 and above support FLV1 codec */ else version = 4; /* version 4 for mpeg audio support */ avio_w8(pb, version); avio_wl32(pb, 
DUMMY_FILE_SIZE); /* dummy size (will be patched if not streamed) */ put_swf_rect(pb, 0, width * 20, 0, height * 20); avio_wl16(pb, (rate * 256) / rate_base); /* frame rate */ swf->duration_pos = avio_tell(pb); avio_wl16(pb, (uint16_t)(DUMMY_DURATION * (int64_t)rate / rate_base)); /* frame count */ /* avm2/swf v9 (also v8?) files require a file attribute tag */ if (version == 9) { put_swf_tag(s, TAG_FILEATTRIBUTES); avio_wl32(pb, 1<<3); /* set ActionScript v3/AVM2 flag */ put_swf_end_tag(s); /* define a shape with the jpeg inside */ if (swf->video_enc && swf->video_enc->codec_id == AV_CODEC_ID_MJPEG) { put_swf_tag(s, TAG_DEFINESHAPE); avio_wl16(pb, SHAPE_ID); /* ID of shape */ /* bounding rectangle */ put_swf_rect(pb, 0, width, 0, height); /* style info */ avio_w8(pb, 1); /* one fill style */ avio_w8(pb, 0x41); /* clipped bitmap fill */ avio_wl16(pb, BITMAP_ID); /* bitmap ID */ /* position of the bitmap */ put_swf_matrix(pb, 1 << FRAC_BITS, 0, 0, 1 << FRAC_BITS, 0, 0); avio_w8(pb, 0); /* no line style */ /* shape drawing */ init_put_bits(&p, buf1, sizeof(buf1)); put_bits(&p, 4, 1); /* one fill bit */ put_bits(&p, 4, 0); /* zero line bit */ put_bits(&p, 1, 0); /* not an edge */ put_bits(&p, 5, FLAG_MOVETO | FLAG_SETFILL0); put_bits(&p, 5, 1); /* nbits */ put_bits(&p, 1, 0); /* X */ put_bits(&p, 1, 0); /* Y */ put_bits(&p, 1, 1); /* set fill style 1 */ /* draw the rectangle ! */ put_swf_line_edge(&p, width, 0); put_swf_line_edge(&p, 0, height); put_swf_line_edge(&p, -width, 0); put_swf_line_edge(&p, 0, -height); /* end of shape */ put_bits(&p, 1, 0); /* not an edge */ put_bits(&p, 5, 0); flush_put_bits(&p); avio_write(pb, buf1, put_bits_ptr(&p) - p.buf); put_swf_end_tag(s); if (swf->audio_enc && swf->audio_enc->codec_id == AV_CODEC_ID_MP3) { int v = 0; /* start sound */ put_swf_tag(s, TAG_STREAMHEAD2); switch(swf->audio_enc->sample_rate) { case 11025: v |= 1 << 2; break; case 22050: v |= 2 << 2; break; case 44100: v |= 3 << 2; break; default: /* not supported */ av_log(s, AV_LOG_ERROR, \"swf does not support that sample rate, choose from (44100, 22050, 11025).\\n\"); return -1; v |= 0x02; /* 16 bit playback */ if (swf->audio_enc->channels == 2) v |= 0x01; /* stereo playback */ avio_w8(s->pb, v); v |= 0x20; /* mp3 compressed */ avio_w8(s->pb, v); avio_wl16(s->pb, swf->samples_per_frame); /* avg samples per frame */ avio_wl16(s->pb, 0); put_swf_end_tag(s); avio_flush(s->pb); return 0;"} {"target": 1, "idx": 22043, "func": "static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt, const char *filename) { char buf[1024]; int ret; ret = avcodec_send_packet(dec_ctx, pkt); if (ret < 0) { fprintf(stderr, \"Error sending a packet for decoding\\n\"); exit(1); } while (ret >= 0) { ret = avcodec_receive_frame(dec_ctx, frame); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) return; else if (ret < 0) { fprintf(stderr, \"Error during decoding\\n\"); exit(1); } printf(\"saving frame %3d\\n\", dec_ctx->frame_number); fflush(stdout); /* the picture is allocated by the decoder. 
no need to free it */ snprintf(buf, sizeof(buf), filename, dec_ctx->frame_number); pgm_save(frame->data[0], frame->linesize[0], frame->width, frame->height, buf); } }"} {"target": 1, "idx": 22045, "func": "static TCGv neon_load_scratch(int scratch) { TCGv tmp = new_tmp(); tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch])); return tmp; }"} {"target": 0, "idx": 22049, "func": "static void ripemd128_transform(uint32_t *state, const uint8_t buffer[64], int ext) { uint32_t a, b, c, d, e, f, g, h; uint32_t block[16]; int n; if (ext) { a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; f = state[5]; g = state[6]; h = state[7]; } else { a = e = state[0]; b = f = state[1]; c = g = state[2]; d = h = state[3]; } for (n = 0; n < 16; n++) block[n] = AV_RL32(buffer + 4 * n); for (n = 0; n < 16;) { ROUND128_0_TO_15(a,b,c,d,e,f,g,h); ROUND128_0_TO_15(d,a,b,c,h,e,f,g); ROUND128_0_TO_15(c,d,a,b,g,h,e,f); ROUND128_0_TO_15(b,c,d,a,f,g,h,e); } SWAP(a,e) for (; n < 32;) { ROUND128_16_TO_31(a,b,c,d,e,f,g,h); ROUND128_16_TO_31(d,a,b,c,h,e,f,g); ROUND128_16_TO_31(c,d,a,b,g,h,e,f); ROUND128_16_TO_31(b,c,d,a,f,g,h,e); } SWAP(b,f) for (; n < 48;) { ROUND128_32_TO_47(a,b,c,d,e,f,g,h); ROUND128_32_TO_47(d,a,b,c,h,e,f,g); ROUND128_32_TO_47(c,d,a,b,g,h,e,f); ROUND128_32_TO_47(b,c,d,a,f,g,h,e); } SWAP(c,g) for (; n < 64;) { ROUND128_48_TO_63(a,b,c,d,e,f,g,h); ROUND128_48_TO_63(d,a,b,c,h,e,f,g); ROUND128_48_TO_63(c,d,a,b,g,h,e,f); ROUND128_48_TO_63(b,c,d,a,f,g,h,e); } SWAP(d,h) if (ext) { state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; } else { h += c + state[1]; state[1] = state[2] + d + e; state[2] = state[3] + a + f; state[3] = state[0] + b + g; state[0] = h; } }"} {"target": 1, "idx": 22060, "func": "static int kvm_put_msrs(X86CPU *cpu, int level) { CPUX86State *env = &cpu->env; int i; int ret; kvm_msr_buf_reset(cpu); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); kvm_msr_entry_add(cpu, MSR_PAT, env->pat); if (has_msr_star) { kvm_msr_entry_add(cpu, MSR_STAR, env->star); } if (has_msr_hsave_pa) { kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave); } if (has_msr_tsc_aux) { kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux); } if (has_msr_tsc_adjust) { kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust); } if (has_msr_misc_enable) { kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, env->msr_ia32_misc_enable); } if (has_msr_smbase) { kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase); } if (has_msr_bndcfgs) { kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs); } if (has_msr_xss) { kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss); } #ifdef TARGET_X86_64 if (lm_capable_kernel) { kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar); kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase); kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask); kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar); } #endif /* * The following MSRs have side effects on the guest or are too heavy * for normal writeback. Limit them to reset or full state updates. 
*/ if (level >= KVM_PUT_RESET_STATE) { kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc); kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr); kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr); if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr); } if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr); } if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr); } if (has_msr_architectural_pmu) { /* Stop the counter. */ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); /* Set the counter values. */ for (i = 0; i < MAX_FIXED_COUNTERS; i++) { kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, env->msr_fixed_counters[i]); } for (i = 0; i < num_architectural_pmu_counters; i++) { kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, env->msr_gp_counters[i]); kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, env->msr_gp_evtsel[i]); } kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, env->msr_global_status); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, env->msr_global_ovf_ctrl); /* Now start the PMU. */ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, env->msr_fixed_ctr_ctrl); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, env->msr_global_ctrl); } /* * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add, * only sync them to KVM on the first cpu */ if (current_cpu == first_cpu) { if (has_msr_hv_hypercall) { kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, env->msr_hv_guest_os_id); kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, env->msr_hv_hypercall); } if (cpu->hyperv_time) { kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc); } } if (cpu->hyperv_vapic) { kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, env->msr_hv_vapic); } if (has_msr_hv_crash) { int j; for (j = 0; j < HV_CRASH_PARAMS; j++) kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, env->msr_hv_crash_params[j]); kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY); } if (has_msr_hv_runtime) { kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime); } if (cpu->hyperv_synic) { int j; kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION); kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, env->msr_hv_synic_control); kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, env->msr_hv_synic_evt_page); kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, env->msr_hv_synic_msg_page); for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) { kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j, env->msr_hv_synic_sint[j]); } } if (has_msr_hv_stimer) { int j; for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) { kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2, env->msr_hv_stimer_config[j]); } for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) { kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2, env->msr_hv_stimer_count[j]); } } if (env->features[FEAT_1_EDX] & CPUID_MTRR) { uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits); kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype); kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]); kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]); kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 
env->mtrr_fixed[4]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]); kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]); for (i = 0; i < MSR_MTRRcap_VCNT; i++) { /* The CPU GPs if we write to a bit above the physical limit of * the host CPU (and KVM emulates that) */ uint64_t mask = env->mtrr_var[i].mask; mask &= phys_mask; kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), env->mtrr_var[i].base); kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask); } } /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see * kvm_put_msr_feature_control. */ } if (env->mcg_cap) { int i; kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status); kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl); if (has_msr_mcg_ext_ctl) { kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl); } for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]); } } ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); if (ret < 0) { return ret; } if (ret < cpu->kvm_msr_buf->nmsrs) { struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; error_report(\"error: failed to set MSR 0x%\" PRIx32 \" to 0x%\" PRIx64, (uint32_t)e->index, (uint64_t)e->data); } assert(ret == cpu->kvm_msr_buf->nmsrs); return 0; }"} {"target": 0, "idx": 22080, "func": "void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, Error **errp) { Error *local_err = NULL; assert(!atomic_read(&bs_top->in_flight)); assert(!atomic_read(&bs_new->in_flight)); bdrv_set_backing_hd(bs_new, bs_top, &local_err); if (local_err) { error_propagate(errp, local_err); goto out; } change_parent_backing_link(bs_top, bs_new); /* bs_new is now referenced by its new parents, we don't need the * additional reference any more. 
*/ out: bdrv_unref(bs_new); }"} {"target": 0, "idx": 22081, "func": "static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags, abi_ulong target_addr, socklen_t addrlen) { void *addr; void *host_msg; abi_long ret; if (addrlen < 0 || addrlen > MAX_SOCK_ADDR) return -TARGET_EINVAL; host_msg = lock_user(VERIFY_READ, msg, len, 1); if (!host_msg) return -TARGET_EFAULT; if (target_addr) { addr = alloca(addrlen); target_to_host_sockaddr(addr, target_addr, addrlen); ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen)); } else { ret = get_errno(send(fd, host_msg, len, flags)); } unlock_user(host_msg, msg, 0); return ret; }"} {"target": 0, "idx": 22083, "func": "void qxl_render_resize(PCIQXLDevice *qxl) { QXLSurfaceCreate *sc = &qxl->guest_primary.surface; qxl->guest_primary.stride = sc->stride; qxl->guest_primary.resized++; switch (sc->format) { case SPICE_SURFACE_FMT_16_555: qxl->guest_primary.bytes_pp = 2; qxl->guest_primary.bits_pp = 15; break; case SPICE_SURFACE_FMT_16_565: qxl->guest_primary.bytes_pp = 2; qxl->guest_primary.bits_pp = 16; break; case SPICE_SURFACE_FMT_32_xRGB: case SPICE_SURFACE_FMT_32_ARGB: qxl->guest_primary.bytes_pp = 4; qxl->guest_primary.bits_pp = 32; break; default: fprintf(stderr, \"%s: unhandled format: %x\\n\", __FUNCTION__, qxl->guest_primary.surface.format); qxl->guest_primary.bytes_pp = 4; qxl->guest_primary.bits_pp = 32; break; } }"} {"target": 0, "idx": 22091, "func": "static av_cold int a64multi_init_encoder(AVCodecContext *avctx) { A64Context *c = avctx->priv_data; int a; av_lfg_init(&c->randctx, 1); if (avctx->global_quality < 1) { c->mc_lifetime = 4; } else { c->mc_lifetime = avctx->global_quality /= FF_QP2LAMBDA; } av_log(avctx, AV_LOG_INFO, \"charset lifetime set to %d frame(s)\\n\", c->mc_lifetime); /* precalc luma values for later use */ for (a = 0; a < 5; a++) { c->mc_luma_vals[a]=a64_palette[mc_colors[a]][0] * 0.30 + a64_palette[mc_colors[a]][1] * 0.59 + a64_palette[mc_colors[a]][2] * 0.11; } c->mc_frame_counter = 0; c->mc_use_5col = avctx->codec->id == CODEC_ID_A64_MULTI5; c->mc_meta_charset = av_malloc (32000 * c->mc_lifetime * sizeof(int)); c->mc_best_cb = av_malloc (CHARSET_CHARS * 32 * sizeof(int)); c->mc_charmap = av_mallocz(1000 * c->mc_lifetime * sizeof(int)); c->mc_colram = av_mallocz(CHARSET_CHARS * sizeof(uint8_t)); c->mc_charset = av_malloc (0x800 * (INTERLACED+1) * sizeof(uint8_t)); /* set up extradata */ avctx->extradata = av_mallocz(8 * 4 + FF_INPUT_BUFFER_PADDING_SIZE); avctx->extradata_size = 8 * 4; AV_WB32(avctx->extradata, c->mc_lifetime); AV_WB32(avctx->extradata+16, INTERLACED); avcodec_get_frame_defaults(&c->picture); avctx->coded_frame = &c->picture; avctx->coded_frame->pict_type = FF_I_TYPE; avctx->coded_frame->key_frame = 1; if (!avctx->codec_tag) avctx->codec_tag = AV_RL32(\"a64m\"); return 0; }"} {"target": 0, "idx": 22097, "func": "static bool insn_crosses_page(CPUARMState *env, DisasContext *s) { /* Return true if the insn at dc->pc might cross a page boundary. * (False positives are OK, false negatives are not.) */ uint16_t insn; if ((s->pc & 3) == 0) { /* At a 4-aligned address we can't be crossing a page */ return false; } /* This must be a Thumb insn */ insn = arm_lduw_code(env, s->pc, s->sctlr_b); if ((insn >> 11) >= 0x1d) { /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the * First half of a 32-bit Thumb insn. 
Thumb-1 cores might * end up actually treating this as two 16-bit insns (see the * code at the start of disas_thumb2_insn()) but we don't bother * to check for that as it is unlikely, and false positives here * are harmless. */ return true; } /* Definitely a 16-bit insn, can't be crossing a page. */ return false; }"} {"target": 0, "idx": 22104, "func": "static int vmdk_is_cid_valid(BlockDriverState *bs) { #ifdef CHECK_CID BDRVVmdkState *s = bs->opaque; BlockDriverState *p_bs = bs->backing_hd; uint32_t cur_pcid; if (p_bs) { cur_pcid = vmdk_read_cid(p_bs, 0); if (s->parent_cid != cur_pcid) { /* CID not valid */ return 0; } } #endif /* CID valid */ return 1; }"} {"target": 0, "idx": 22105, "func": "static uint32_t slavio_timer_mem_readl(void *opaque, target_phys_addr_t addr) { SLAVIO_TIMERState *s = opaque; uint32_t saddr, ret; saddr = (addr & TIMER_MAXADDR) >> 2; switch (saddr) { case 0: // read limit (system counter mode) or read most signifying // part of counter (user mode) if (slavio_timer_is_user(s)) { // read user timer MSW slavio_timer_get_out(s); ret = s->counthigh; } else { // read limit // clear irq qemu_irq_lower(s->irq); s->reached = 0; ret = s->limit & 0x7fffffff; } break; case 1: // read counter and reached bit (system mode) or read lsbits // of counter (user mode) slavio_timer_get_out(s); if (slavio_timer_is_user(s)) // read user timer LSW ret = s->count & 0xffffffe00; else // read limit ret = (s->count & 0x7ffffe00) | s->reached; break; case 3: // only available in processor counter/timer // read start/stop status ret = s->running; break; case 4: // only available in system counter // read user/system mode ret = s->slave_mode; break; default: DPRINTF(\"invalid read address \" TARGET_FMT_plx \"\\n\", addr); ret = 0; break; } DPRINTF(\"read \" TARGET_FMT_plx \" = %08x\\n\", addr, ret); return ret; }"} {"target": 0, "idx": 22109, "func": "void qemu_system_suspend_request(void) { if (is_suspended) { return; } suspend_requested = 1; cpu_stop_current(); qemu_notify_event(); }"} {"target": 0, "idx": 22119, "func": "static void ssh_parse_filename(const char *filename, QDict *options, Error **errp) { if (qdict_haskey(options, \"user\") || qdict_haskey(options, \"host\") || qdict_haskey(options, \"port\") || qdict_haskey(options, \"path\") || qdict_haskey(options, \"host_key_check\")) { error_setg(errp, \"user, host, port, path, host_key_check cannot be used at the same time as a file option\"); return; } parse_uri(filename, options, errp); }"} {"target": 1, "idx": 22124, "func": "static int webm_dash_manifest_write_header(AVFormatContext *s) { int i; double start = 0.0; WebMDashMuxContext *w = s->priv_data; parse_adaptation_sets(s); write_header(s); avio_printf(s->pb, \"<Period id=\\\"0\\\"\"); avio_printf(s->pb, \" start=\\\"PT%gS\\\"\", start); if (!w->is_live) { avio_printf(s->pb, \" duration=\\\"PT%gS\\\"\", get_duration(s)); } avio_printf(s->pb, \" >\\n\"); for (i = 0; i < w->nb_as; i++) { if (write_adaptation_set(s, i) < 0) return -1; } avio_printf(s->pb, \"</Period>\\n\"); write_footer(s); return 0; }"} {"target": 0, "idx": 22142, "func": "static void do_acl_show(Monitor *mon, const QDict *qdict) { const char *aclname = qdict_get_str(qdict, \"aclname\"); qemu_acl *acl = find_acl(mon, aclname); qemu_acl_entry *entry; int i = 0; if (acl) { monitor_printf(mon, \"policy: %s\\n\", acl->defaultDeny ? \"deny\" : \"allow\"); TAILQ_FOREACH(entry, &acl->entries, next) { i++; monitor_printf(mon, \"%d: %s %s\\n\", i, entry->deny ? 
\"deny\" : \"allow\", entry->match); } } }"} {"target": 0, "idx": 22143, "func": "MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, target_phys_addr_t index) { PhysPageEntry lp = d->phys_map; PhysPageEntry *p; int i; uint16_t s_index = phys_section_unassigned; for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) { if (lp.ptr == PHYS_MAP_NODE_NIL) { goto not_found; } p = phys_map_nodes[lp.ptr]; lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)]; } s_index = lp.ptr; not_found: return &phys_sections[s_index]; }"} {"target": 0, "idx": 22184, "func": "int drive_get_max_bus(BlockInterfaceType type) { int max_bus; DriveInfo *dinfo; max_bus = -1; TAILQ_FOREACH(dinfo, &drives, next) { if(dinfo->type == type && dinfo->bus > max_bus) max_bus = dinfo->bus; } return max_bus; }"} {"target": 0, "idx": 22187, "func": "PCIBus *pci_find_bus(PCIBus *bus, int bus_num) { PCIBus *sec; if (!bus) return NULL; if (pci_bus_num(bus) == bus_num) { return bus; } /* try child bus */ QLIST_FOREACH(sec, &bus->child, sibling) { if (!bus->parent_dev /* pci host bridge */ || (pci_bus_num(sec) <= bus_num && bus->parent_dev->config[PCI_SUBORDINATE_BUS])) { return pci_find_bus(sec, bus_num); } } return NULL; }"} {"target": 0, "idx": 22189, "func": "ser_write(void *opaque, target_phys_addr_t addr, uint64_t val64, unsigned int size) { struct etrax_serial *s = opaque; uint32_t value = val64; unsigned char ch = val64; D(CPUCRISState *env = s->env); D(qemu_log(\"%s \" TARGET_FMT_plx \"=%x\\n\", __func__, addr, value)); addr >>= 2; switch (addr) { case RW_DOUT: qemu_chr_fe_write(s->chr, &ch, 1); s->regs[R_INTR] |= 3; s->pending_tx = 1; s->regs[addr] = value; break; case RW_ACK_INTR: if (s->pending_tx) { value &= ~1; s->pending_tx = 0; D(qemu_log(\"fixedup value=%x r_intr=%x\\n\", value, s->regs[R_INTR])); } s->regs[addr] = value; s->regs[R_INTR] &= ~value; D(printf(\"r_intr=%x\\n\", s->regs[R_INTR])); break; default: s->regs[addr] = value; break; } ser_update_irq(s); }"} {"target": 0, "idx": 22190, "func": "static void gic_dist_writeb(void *opaque, target_phys_addr_t offset, uint32_t value) { GICState *s = (GICState *)opaque; int irq; int i; int cpu; cpu = gic_get_current_cpu(s); if (offset < 0x100) { if (offset == 0) { s->enabled = (value & 1); DPRINTF(\"Distribution %sabled\\n\", s->enabled ? \"En\" : \"Dis\"); } else if (offset < 4) { /* ignored. */ } else if (offset >= 0x80) { /* Interrupt Security Registers, RAZ/WI */ } else { goto bad_reg; } } else if (offset < 0x180) { /* Interrupt Set Enable. */ irq = (offset - 0x100) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; if (irq < 16) value = 0xff; for (i = 0; i < 8; i++) { if (value & (1 << i)) { int mask = (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq); int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK; if (!GIC_TEST_ENABLED(irq + i, cm)) { DPRINTF(\"Enabled IRQ %d\\n\", irq + i); } GIC_SET_ENABLED(irq + i, cm); /* If a raised level triggered IRQ enabled then mark is as pending. */ if (GIC_TEST_LEVEL(irq + i, mask) && !GIC_TEST_TRIGGER(irq + i)) { DPRINTF(\"Set %d pending mask %x\\n\", irq + i, mask); GIC_SET_PENDING(irq + i, mask); } } } } else if (offset < 0x200) { /* Interrupt Clear Enable. */ irq = (offset - 0x180) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; if (irq < 16) value = 0; for (i = 0; i < 8; i++) { if (value & (1 << i)) { int cm = (irq < GIC_INTERNAL) ? 
(1 << cpu) : ALL_CPU_MASK; if (GIC_TEST_ENABLED(irq + i, cm)) { DPRINTF(\"Disabled IRQ %d\\n\", irq + i); } GIC_CLEAR_ENABLED(irq + i, cm); } } } else if (offset < 0x280) { /* Interrupt Set Pending. */ irq = (offset - 0x200) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; if (irq < 16) irq = 0; for (i = 0; i < 8; i++) { if (value & (1 << i)) { GIC_SET_PENDING(irq + i, GIC_TARGET(irq)); } } } else if (offset < 0x300) { /* Interrupt Clear Pending. */ irq = (offset - 0x280) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; for (i = 0; i < 8; i++) { /* ??? This currently clears the pending bit for all CPUs, even for per-CPU interrupts. It's unclear whether this is the corect behavior. */ if (value & (1 << i)) { GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK); } } } else if (offset < 0x400) { /* Interrupt Active. */ goto bad_reg; } else if (offset < 0x800) { /* Interrupt Priority. */ irq = (offset - 0x400) + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; if (irq < GIC_INTERNAL) { s->priority1[irq][cpu] = value; } else { s->priority2[irq - GIC_INTERNAL] = value; } } else if (offset < 0xc00) { /* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the * annoying exception of the 11MPCore's GIC. */ if (s->num_cpu != 1 || s->revision == REV_11MPCORE) { irq = (offset - 0x800) + GIC_BASE_IRQ; if (irq >= s->num_irq) { goto bad_reg; } if (irq < 29) { value = 0; } else if (irq < GIC_INTERNAL) { value = ALL_CPU_MASK; } s->irq_target[irq] = value & ALL_CPU_MASK; } } else if (offset < 0xf00) { /* Interrupt Configuration. */ irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; if (irq < GIC_INTERNAL) value |= 0xaa; for (i = 0; i < 4; i++) { if (value & (1 << (i * 2))) { GIC_SET_MODEL(irq + i); } else { GIC_CLEAR_MODEL(irq + i); } if (value & (2 << (i * 2))) { GIC_SET_TRIGGER(irq + i); } else { GIC_CLEAR_TRIGGER(irq + i); } } } else { /* 0xf00 is only handled for 32-bit writes. */ goto bad_reg; } gic_update(s); return; bad_reg: hw_error(\"gic_dist_writeb: Bad offset %x\\n\", (int)offset); }"} {"target": 0, "idx": 22192, "func": "static void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) { uint32_t type; struct iovec *iov = req->elem->out_sg; unsigned out_num = req->elem->out_num; if (req->elem->out_num < 1 || req->elem->in_num < 1) { error_report(\"virtio-blk missing headers\"); exit(1); } if (req->elem->out_sg[0].iov_len < sizeof(req->out) || req->elem->in_sg[req->elem->in_num - 1].iov_len < sizeof(*req->in)) { error_report(\"virtio-blk header not in correct element\"); exit(1); } if (unlikely(iov_to_buf(iov, out_num, 0, &req->out, sizeof(req->out)) != sizeof(req->out))) { error_report(\"virtio-blk request outhdr too short\"); exit(1); } iov_discard_front(&iov, &out_num, sizeof(req->out)); req->in = (void *)req->elem->in_sg[req->elem->in_num - 1].iov_base; type = ldl_p(&req->out.type); if (type & VIRTIO_BLK_T_FLUSH) { virtio_blk_handle_flush(req, mrb); } else if (type & VIRTIO_BLK_T_SCSI_CMD) { virtio_blk_handle_scsi(req); } else if (type & VIRTIO_BLK_T_GET_ID) { VirtIOBlock *s = req->dev; /* * NB: per existing s/n string convention the string is * terminated by '\\0' only when shorter than buffer. */ strncpy(req->elem->in_sg[0].iov_base, s->blk.serial ? 
s->blk.serial : \"\", MIN(req->elem->in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES)); virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); virtio_blk_free_request(req); } else if (type & VIRTIO_BLK_T_OUT) { qemu_iovec_init_external(&req->qiov, &req->elem->out_sg[1], req->elem->out_num - 1); virtio_blk_handle_write(req, mrb); } else if (type == VIRTIO_BLK_T_IN || type == VIRTIO_BLK_T_BARRIER) { /* VIRTIO_BLK_T_IN is 0, so we can't just & it. */ qemu_iovec_init_external(&req->qiov, &req->elem->in_sg[0], req->elem->in_num - 1); virtio_blk_handle_read(req); } else { virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); virtio_blk_free_request(req); } }"} {"target": 0, "idx": 22213, "func": "static void gen_addq_lo(DisasContext *s, TCGv val, int rlow) { TCGv tmp; TCGv tmp2; /* Load value and extend to 64 bits. */ tmp = tcg_temp_new(TCG_TYPE_I64); tmp2 = load_reg(s, rlow); tcg_gen_extu_i32_i64(tmp, tmp2); dead_tmp(tmp2); tcg_gen_add_i64(val, val, tmp); }"} {"target": 1, "idx": 22222, "func": "static int analyze_chunk(AVFormatContext *s, const uint8_t *chunk) { TYDemuxContext *ty = s->priv_data; int num_recs, i; TyRecHdr *hdrs; int num_6e0, num_be0, num_9c0, num_3c0; /* skip if it's a Part header */ if (AV_RB32(&chunk[0]) == TIVO_PES_FILEID) return 0; /* number of records in chunk (we ignore high order byte; * rarely are there > 256 chunks & we don't need that many anyway) */ num_recs = chunk[0]; if (num_recs < 5) { /* try again with the next chunk. Sometimes there are dead ones */ return 0; } chunk += 4; /* skip past rec count & SEQ bytes */ ff_dlog(s, \"probe: chunk has %d recs\\n\", num_recs); hdrs = parse_chunk_headers(chunk, num_recs); if (!hdrs) return AVERROR(ENOMEM); /* scan headers. * 1. check video packets. Presence of 0x6e0 means S1. * No 6e0 but have be0 means S2. * 2. probe for audio 0x9c0 vs 0x3c0 (AC3 vs Mpeg) * If AC-3, then we have DTivo. * If MPEG, search for PTS offset. This will determine SA vs. DTivo. */ num_6e0 = num_be0 = num_9c0 = num_3c0 = 0; for (i = 0; i < num_recs; i++) { switch (hdrs[i].subrec_type << 8 | hdrs[i].rec_type) { case 0x6e0: num_6e0++; case 0xbe0: num_be0++; case 0x3c0: num_3c0++; case 0x9c0: num_9c0++; } } ff_dlog(s, \"probe: chunk has %d 0x6e0 recs, %d 0xbe0 recs.\\n\", num_6e0, num_be0); /* set up our variables */ if (num_6e0 > 0) { ff_dlog(s, \"detected Series 1 Tivo\\n\"); ty->tivo_series = TIVO_SERIES1; ty->pes_length = SERIES1_PES_LENGTH; } else if (num_be0 > 0) { ff_dlog(s, \"detected Series 2 Tivo\\n\"); ty->tivo_series = TIVO_SERIES2; ty->pes_length = SERIES2_PES_LENGTH; } if (num_9c0 > 0) { ff_dlog(s, \"detected AC-3 Audio (DTivo)\\n\"); ty->audio_type = TIVO_AUDIO_AC3; ty->tivo_type = TIVO_TYPE_DTIVO; ty->pts_offset = AC3_PTS_OFFSET; ty->pes_length = AC3_PES_LENGTH; } else if (num_3c0 > 0) { ty->audio_type = TIVO_AUDIO_MPEG; ff_dlog(s, \"detected MPEG Audio\\n\"); } /* if tivo_type still unknown, we can check PTS location * in MPEG packets to determine tivo_type */ if (ty->tivo_type == TIVO_TYPE_UNKNOWN) { uint32_t data_offset = 16 * num_recs; for (i = 0; i < num_recs; i++) { if ((hdrs[i].subrec_type << 0x08 | hdrs[i].rec_type) == 0x3c0 && hdrs[i].rec_size > 15) { /* first make sure we're aligned */ int pes_offset = find_es_header(ty_MPEGAudioPacket, &chunk[data_offset], 5); if (pes_offset >= 0) { /* pes found. on SA, PES has hdr data at offset 6, not PTS. 
*/ if ((chunk[data_offset + 6 + pes_offset] & 0x80) == 0x80) { /* S1SA or S2(any) Mpeg Audio (PES hdr, not a PTS start) */ if (ty->tivo_series == TIVO_SERIES1) ff_dlog(s, \"detected Stand-Alone Tivo\\n\"); ty->tivo_type = TIVO_TYPE_SA; ty->pts_offset = SA_PTS_OFFSET; } else { if (ty->tivo_series == TIVO_SERIES1) ff_dlog(s, \"detected DirecTV Tivo\\n\"); ty->tivo_type = TIVO_TYPE_DTIVO; ty->pts_offset = DTIVO_PTS_OFFSET; } } } data_offset += hdrs[i].rec_size; } } av_free(hdrs); return 0; }"} {"target": 1, "idx": 22223, "func": "static void nbd_close(BlockDriverState *bs) { nbd_teardown_connection(bs); }"} {"target": 1, "idx": 22226, "func": "static void vmxnet3_handle_command(VMXNET3State *s, uint64_t cmd) { s->last_command = cmd; switch (cmd) { case VMXNET3_CMD_GET_PERM_MAC_HI: VMW_CBPRN(\"Set: Get upper part of permanent MAC\"); break; case VMXNET3_CMD_GET_PERM_MAC_LO: VMW_CBPRN(\"Set: Get lower part of permanent MAC\"); break; case VMXNET3_CMD_GET_STATS: VMW_CBPRN(\"Set: Get device statistics\"); vmxnet3_fill_stats(s); break; case VMXNET3_CMD_ACTIVATE_DEV: VMW_CBPRN(\"Set: Activating vmxnet3 device\"); vmxnet3_activate_device(s); break; case VMXNET3_CMD_UPDATE_RX_MODE: VMW_CBPRN(\"Set: Update rx mode\"); vmxnet3_update_rx_mode(s); break; case VMXNET3_CMD_UPDATE_VLAN_FILTERS: VMW_CBPRN(\"Set: Update VLAN filters\"); vmxnet3_update_vlan_filters(s); break; case VMXNET3_CMD_UPDATE_MAC_FILTERS: VMW_CBPRN(\"Set: Update MAC filters\"); vmxnet3_update_mcast_filters(s); break; case VMXNET3_CMD_UPDATE_FEATURE: VMW_CBPRN(\"Set: Update features\"); vmxnet3_update_features(s); break; case VMXNET3_CMD_UPDATE_PMCFG: VMW_CBPRN(\"Set: Update power management config\"); vmxnet3_update_pm_state(s); break; case VMXNET3_CMD_GET_LINK: VMW_CBPRN(\"Set: Get link\"); break; case VMXNET3_CMD_RESET_DEV: VMW_CBPRN(\"Set: Reset device\"); vmxnet3_reset(s); break; case VMXNET3_CMD_QUIESCE_DEV: VMW_CBPRN(\"Set: VMXNET3_CMD_QUIESCE_DEV - pause the device\"); vmxnet3_deactivate_device(s); break; case VMXNET3_CMD_GET_CONF_INTR: VMW_CBPRN(\"Set: VMXNET3_CMD_GET_CONF_INTR - interrupt configuration\"); break; case VMXNET3_CMD_GET_ADAPTIVE_RING_INFO: VMW_CBPRN(\"Set: VMXNET3_CMD_GET_ADAPTIVE_RING_INFO - \" \"adaptive ring info flags\"); break; default: VMW_CBPRN(\"Received unknown command: %\" PRIx64, cmd); break; } }"} {"target": 1, "idx": 22236, "func": "static void kvm_s390_flic_realize(DeviceState *dev, Error **errp) { KVMS390FLICState *flic_state = KVM_S390_FLIC(dev); struct kvm_create_device cd = {0}; struct kvm_device_attr test_attr = {0}; int ret; Error *errp_local = NULL; flic_state->fd = -1; if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) { error_setg_errno(&errp_local, errno, \"KVM is missing capability\" \" KVM_CAP_DEVICE_CTRL\"); trace_flic_no_device_api(errno); cd.type = KVM_DEV_TYPE_FLIC; ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd); if (ret < 0) { error_setg_errno(&errp_local, errno, \"Creating the KVM device failed\"); trace_flic_create_device(errno); flic_state->fd = cd.fd; /* Check clear_io_irq support */ test_attr.group = KVM_DEV_FLIC_CLEAR_IO_IRQ; flic_state->clear_io_supported = !ioctl(flic_state->fd, KVM_HAS_DEVICE_ATTR, test_attr); return; fail: error_propagate(errp, errp_local);"} {"target": 1, "idx": 22244, "func": "static void test_visitor_out_empty(TestOutputVisitorData *data, const void *unused) { QObject *arg; arg = qmp_output_get_qobject(data->qov); g_assert(qobject_type(arg) == QTYPE_QNULL); qobject_decref(arg); }"} {"target": 1, "idx": 22261, "func": "uint32_t 
gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs) { int ret, irq, src; int cm = 1 << cpu; /* gic_get_current_pending_irq() will return 1022 or 1023 appropriately * for the case where this GIC supports grouping and the pending interrupt * is in the wrong group. */ irq = gic_get_current_pending_irq(s, cpu, attrs); if (irq >= GIC_MAXIRQ) { DPRINTF(\"ACK, no pending interrupt or it is hidden: %d\\n\", irq); return irq; } if (GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) { DPRINTF(\"ACK, pending interrupt (%d) has insufficient priority\\n\", irq); return 1023; } if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) { /* Clear pending flags for both level and edge triggered interrupts. * Level triggered IRQs will be reasserted once they become inactive. */ GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm); ret = irq; } else { if (irq < GIC_NR_SGIS) { /* Lookup the source CPU for the SGI and clear this in the * sgi_pending map. Return the src and clear the overall pending * state on this CPU if the SGI is not pending from any CPUs. */ assert(s->sgi_pending[irq][cpu] != 0); src = ctz32(s->sgi_pending[irq][cpu]); s->sgi_pending[irq][cpu] &= ~(1 << src); if (s->sgi_pending[irq][cpu] == 0) { GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm); } ret = irq | ((src & 0x7) << 10); } else { /* Clear pending state for both level and edge triggered * interrupts. (level triggered interrupts with an active line * remain pending, see gic_test_pending) */ GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm); ret = irq; } } gic_activate_irq(s, cpu, irq); gic_update(s); DPRINTF(\"ACK %d\\n\", irq); return ret; }"} {"target": 0, "idx": 22269, "func": "static void virtio_net_receive(void *opaque, const uint8_t *buf, size_t size) { VirtIONet *n = opaque; struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL; size_t hdr_len, offset, i; if (!do_virtio_net_can_receive(n, size)) return; if (!receive_filter(n, buf, size)) return; /* hdr_len refers to the header we supply to the guest */ hdr_len = n->mergeable_rx_bufs ? sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr); offset = i = 0; while (offset < size) { VirtQueueElement elem; int len, total; struct iovec sg[VIRTQUEUE_MAX_SIZE]; len = total = 0; if ((i != 0 && !n->mergeable_rx_bufs) || virtqueue_pop(n->rx_vq, &elem) == 0) { if (i == 0) return; fprintf(stderr, \"virtio-net truncating packet\\n\"); exit(1); } if (elem.in_num < 1) { fprintf(stderr, \"virtio-net receive queue contains no in buffers\\n\"); exit(1); } if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) { fprintf(stderr, \"virtio-net header not in first element\\n\"); exit(1); } memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num); if (i == 0) { if (n->mergeable_rx_bufs) mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base; offset += receive_header(n, sg, elem.in_num, buf + offset, size - offset, hdr_len); total += hdr_len; } /* copy in packet. 
ugh */ len = iov_fill(sg, elem.in_num, buf + offset, size - offset); total += len; /* signal other side */ virtqueue_fill(n->rx_vq, &elem, total, i++); offset += len; } if (mhdr) mhdr->num_buffers = i; virtqueue_flush(n->rx_vq, i); virtio_notify(&n->vdev, n->rx_vq); }"} {"target": 0, "idx": 22273, "func": "static inline int writer_print_string(WriterContext *wctx, const char *key, const char *val, int opt) { const struct section *section = wctx->section[wctx->level]; int ret = 0; if (opt && !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS)) return 0; if (section->show_all_entries || av_dict_get(section->entries_to_show, key, NULL, 0)) { wctx->writer->print_string(wctx, key, val); wctx->nb_item[wctx->level]++; } return ret; }"} {"target": 0, "idx": 22291, "func": "static void aarch64_any_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V8); set_feature(&cpu->env, ARM_FEATURE_VFP4); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_AARCH64); set_feature(&cpu->env, ARM_FEATURE_V8_AES); set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); set_feature(&cpu->env, ARM_FEATURE_CRC); cpu->ctr = 0x80030003; /* 32 byte I and D cacheline size, VIPT icache */ cpu->dcz_blocksize = 7; /* 512 bytes */ }"} {"target": 1, "idx": 22313, "func": "static av_always_inline float quantize_and_encode_band_cost_template( struct AACEncContext *s, PutBitContext *pb, const float *in, const float *scaled, int size, int scale_idx, int cb, const float lambda, const float uplim, int *bits, int BT_ZERO, int BT_UNSIGNED, int BT_PAIR, int BT_ESC) { const int q_idx = POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512; const float Q = ff_aac_pow2sf_tab [q_idx]; const float Q34 = ff_aac_pow34sf_tab[q_idx]; const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; const float CLIPPED_ESCAPE = 165140.0f*IQ; int i, j; float cost = 0; const int dim = BT_PAIR ? 
2 : 4; int resbits = 0; const int range = aac_cb_range[cb]; const int maxval = aac_cb_maxval[cb]; int off; if (BT_ZERO) { for (i = 0; i < size; i++) cost += in[i]*in[i]; if (bits) *bits = 0; return cost * lambda; } if (!scaled) { abs_pow34_v(s->scoefs, in, size); scaled = s->scoefs; } quantize_bands(s->qcoefs, in, scaled, size, Q34, !BT_UNSIGNED, maxval); if (BT_UNSIGNED) { off = 0; } else { off = maxval; } for (i = 0; i < size; i += dim) { const float *vec; int *quants = s->qcoefs + i; int curidx = 0; int curbits; float rd = 0.0f; for (j = 0; j < dim; j++) { curidx *= range; curidx += quants[j] + off; } curbits = ff_aac_spectral_bits[cb-1][curidx]; vec = &ff_aac_codebook_vectors[cb-1][curidx*dim]; if (BT_UNSIGNED) { for (j = 0; j < dim; j++) { float t = fabsf(in[i+j]); float di; if (BT_ESC && vec[j] == 64.0f) { //FIXME: slow if (t >= CLIPPED_ESCAPE) { di = t - CLIPPED_ESCAPE; curbits += 21; } else { int c = av_clip_uintp2(quant(t, Q), 13); di = t - c*cbrtf(c)*IQ; curbits += av_log2(c)*2 - 4 + 1; } } else { di = t - vec[j]*IQ; } if (vec[j] != 0.0f) curbits++; rd += di*di; } } else { for (j = 0; j < dim; j++) { float di = in[i+j] - vec[j]*IQ; rd += di*di; } } cost += rd * lambda + curbits; resbits += curbits; if (cost >= uplim) return uplim; if (pb) { put_bits(pb, ff_aac_spectral_bits[cb-1][curidx], ff_aac_spectral_codes[cb-1][curidx]); if (BT_UNSIGNED) for (j = 0; j < dim; j++) if (ff_aac_codebook_vectors[cb-1][curidx*dim+j] != 0.0f) put_bits(pb, 1, in[i+j] < 0.0f); if (BT_ESC) { for (j = 0; j < 2; j++) { if (ff_aac_codebook_vectors[cb-1][curidx*2+j] == 64.0f) { int coef = av_clip_uintp2(quant(fabsf(in[i+j]), Q), 13); int len = av_log2(coef); put_bits(pb, len - 4 + 1, (1 << (len - 4 + 1)) - 2); put_bits(pb, len, coef & ((1 << len) - 1)); } } } } } if (bits) *bits = resbits; return cost; }"} {"target": 1, "idx": 22315, "func": "e1000e_set_icr(E1000ECore *core, int index, uint32_t val) { if ((core->mac[ICR] & E1000_ICR_ASSERTED) && (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { trace_e1000e_irq_icr_process_iame(); e1000e_clear_ims_bits(core, core->mac[IAM]); } trace_e1000e_irq_icr_write(val, core->mac[ICR], core->mac[ICR] & ~val); core->mac[ICR] &= ~val; e1000e_update_interrupt_state(core); }"} {"target": 0, "idx": 22331, "func": "MemoryRegionSection *phys_page_find(target_phys_addr_t index) { PhysPageEntry lp = phys_map; PhysPageEntry *p; int i; uint16_t s_index = phys_section_unassigned; for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) { if (lp.ptr == PHYS_MAP_NODE_NIL) { goto not_found; } p = phys_map_nodes[lp.ptr]; lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)]; } s_index = lp.ptr; not_found: return &phys_sections[s_index]; }"} {"target": 0, "idx": 22333, "func": "static int encode_init(AVCodecContext * avctx){ WMACodecContext *s = avctx->priv_data; int i, flags1, flags2; uint8_t *extradata; s->avctx = avctx; if(avctx->channels > MAX_CHANNELS) { av_log(avctx, AV_LOG_ERROR, \"too many channels: got %i, need %i or fewer\", avctx->channels, MAX_CHANNELS); return AVERROR(EINVAL); } if (avctx->sample_rate > 48000) { av_log(avctx, AV_LOG_ERROR, \"sample rate is too high: %d > 48kHz\", avctx->sample_rate); return AVERROR(EINVAL); } if(avctx->bit_rate < 24*1000) { av_log(avctx, AV_LOG_ERROR, \"bitrate too low: got %i, need 24000 or higher\\n\", avctx->bit_rate); return AVERROR(EINVAL); } /* extract flag infos */ flags1 = 0; flags2 = 1; if (avctx->codec->id == CODEC_ID_WMAV1) { extradata= av_malloc(4); avctx->extradata_size= 4; AV_WL16(extradata, flags1); AV_WL16(extradata+2, flags2); } 
else if (avctx->codec->id == CODEC_ID_WMAV2) { extradata= av_mallocz(10); avctx->extradata_size= 10; AV_WL32(extradata, flags1); AV_WL16(extradata+4, flags2); }else av_assert0(0); avctx->extradata= extradata; s->use_exp_vlc = flags2 & 0x0001; s->use_bit_reservoir = flags2 & 0x0002; s->use_variable_block_len = flags2 & 0x0004; if (avctx->channels == 2) s->ms_stereo = 1; ff_wma_init(avctx, flags2); /* init MDCT */ for(i = 0; i < s->nb_block_sizes; i++) ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 0, 1.0); s->block_align = avctx->bit_rate * (int64_t)s->frame_len / (avctx->sample_rate * 8); s->block_align = FFMIN(s->block_align, MAX_CODED_SUPERFRAME_SIZE); avctx->block_align = s->block_align; avctx->bit_rate = avctx->block_align * 8LL * avctx->sample_rate / s->frame_len; //av_log(NULL, AV_LOG_ERROR, \"%d %d %d %d\\n\", s->block_align, avctx->bit_rate, s->frame_len, avctx->sample_rate); avctx->frame_size = avctx->delay = s->frame_len; #if FF_API_OLD_ENCODE_AUDIO avctx->coded_frame = &s->frame; avcodec_get_frame_defaults(avctx->coded_frame); #endif return 0; }"} {"target": 0, "idx": 22334, "func": "START_TEST(float_number) { int i; struct { const char *encoded; double decoded; int skip; } test_cases[] = { { \"32.43\", 32.43 }, { \"0.222\", 0.222 }, { \"-32.12313\", -32.12313 }, { \"-32.20e-10\", -32.20e-10, .skip = 1 }, { }, }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QFloat *qfloat; obj = qobject_from_json(test_cases[i].encoded); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QFLOAT); qfloat = qobject_to_qfloat(obj); fail_unless(qfloat_get_double(qfloat) == test_cases[i].decoded); if (test_cases[i].skip == 0) { QString *str; str = qobject_to_json(obj); fail_unless(strcmp(qstring_get_str(str), test_cases[i].encoded) == 0); QDECREF(str); } QDECREF(qfloat); } }"} {"target": 1, "idx": 22350, "func": "static void check_watchpoint(int offset, int len_mask, int flags) { CPUState *env = cpu_single_env; target_ulong pc, cs_base; TranslationBlock *tb; target_ulong vaddr; CPUWatchpoint *wp; int cpu_flags; if (env->watchpoint_hit) { /* We re-entered the check after replacing the TB. Now raise * the debug interrupt so that is will trigger after the * current instruction. 
*/ cpu_interrupt(env, CPU_INTERRUPT_DEBUG); return; } vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset; QTAILQ_FOREACH(wp, &env->watchpoints, entry) { if ((vaddr == (wp->vaddr & len_mask) || (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) { wp->flags |= BP_WATCHPOINT_HIT; if (!env->watchpoint_hit) { env->watchpoint_hit = wp; tb = tb_find_pc(env->mem_io_pc); if (!tb) { cpu_abort(env, \"check_watchpoint: could not find TB for \" \"pc=%p\", (void *)env->mem_io_pc); } cpu_restore_state(tb, env, env->mem_io_pc); tb_phys_invalidate(tb, -1); if (wp->flags & BP_STOP_BEFORE_ACCESS) { env->exception_index = EXCP_DEBUG; } else { cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); tb_gen_code(env, pc, cs_base, cpu_flags, 1); } cpu_resume_from_signal(env, NULL); } } else { wp->flags &= ~BP_WATCHPOINT_HIT; } } }"} {"target": 1, "idx": 22368, "func": "static gboolean gd_vc_in(GIOChannel *chan, GIOCondition cond, void *opaque) { VirtualConsole *vc = opaque; uint8_t buffer[1024]; ssize_t len; len = read(vc->fd, buffer, sizeof(buffer)); if (len <= 0) { return FALSE; } qemu_chr_be_write(vc->chr, buffer, len); return TRUE; }"} {"target": 0, "idx": 22405, "func": "void ff_aac_search_for_is(AACEncContext *s, AVCodecContext *avctx, ChannelElement *cpe) { SingleChannelElement *sce0 = &cpe->ch[0]; SingleChannelElement *sce1 = &cpe->ch[1]; int start = 0, count = 0, w, w2, g, i, prev_sf1 = -1; const float freq_mult = avctx->sample_rate/(1024.0f/sce0->ics.num_windows)/2.0f; uint8_t nextband1[128]; if (!cpe->common_window) return; /** Scout out next nonzero bands */ ff_init_nextband_map(sce1, nextband1); for (w = 0; w < sce0->ics.num_windows; w += sce0->ics.group_len[w]) { start = 0; for (g = 0; g < sce0->ics.num_swb; g++) { if (start*freq_mult > INT_STEREO_LOW_LIMIT*(s->lambda/170.0f) && cpe->ch[0].band_type[w*16+g] != NOISE_BT && !cpe->ch[0].zeroes[w*16+g] && cpe->ch[1].band_type[w*16+g] != NOISE_BT && !cpe->ch[1].zeroes[w*16+g] && ff_sfdelta_can_remove_band(sce1, nextband1, prev_sf1, w*16+g)) { float ener0 = 0.0f, ener1 = 0.0f, ener01 = 0.0f, ener01p = 0.0f; struct AACISError ph_err1, ph_err2, *best; if (sce0->band_type[w*16+g] == NOISE_BT || sce1->band_type[w*16+g] == NOISE_BT) { start += sce0->ics.swb_sizes[g]; continue; } for (w2 = 0; w2 < sce0->ics.group_len[w]; w2++) { for (i = 0; i < sce0->ics.swb_sizes[g]; i++) { float coef0 = fabsf(sce0->coeffs[start+(w+w2)*128+i]); float coef1 = fabsf(sce1->coeffs[start+(w+w2)*128+i]); ener0 += coef0*coef0; ener1 += coef1*coef1; ener01 += (coef0 + coef1)*(coef0 + coef1); ener01p += (coef0 - coef1)*(coef0 - coef1); } } ph_err1 = ff_aac_is_encoding_err(s, cpe, start, w, g, ener0, ener1, ener01p, 0, -1); ph_err2 = ff_aac_is_encoding_err(s, cpe, start, w, g, ener0, ener1, ener01, 0, +1); best = (ph_err1.pass && ph_err1.error < ph_err2.error) ? &ph_err1 : &ph_err2; if (best->pass) { cpe->is_mask[w*16+g] = 1; cpe->ms_mask[w*16+g] = 0; cpe->ch[0].is_ener[w*16+g] = sqrt(ener0 / best->ener01); cpe->ch[1].is_ener[w*16+g] = ener0/ener1; cpe->ch[1].band_type[w*16+g] = (best->phase > 0) ? 
INTENSITY_BT : INTENSITY_BT2; count++; } } if (!sce1->zeroes[w*16+g] && sce1->band_type[w*16+g] < RESERVED_BT) prev_sf1 = sce1->sf_idx[w*16+g]; start += sce0->ics.swb_sizes[g]; } } cpe->is_mode = !!count; }"} {"target": 0, "idx": 22415, "func": "static void vnc_write_s32(VncState *vs, int32_t value) { vnc_write_u32(vs, *(uint32_t *)&value); }"} {"target": 0, "idx": 22423, "func": "static int vnc_zlib_stop(VncState *vs) { z_streamp zstream = &vs->zlib_stream; int previous_out; // switch back to normal output/zlib buffers vs->zlib = vs->output; vs->output = vs->zlib_tmp; // compress the zlib buffer // initialize the stream // XXX need one stream per session if (zstream->opaque != vs) { int err; VNC_DEBUG(\"VNC: initializing zlib stream\\n\"); VNC_DEBUG(\"VNC: opaque = %p | vs = %p\\n\", zstream->opaque, vs); zstream->zalloc = vnc_zlib_zalloc; zstream->zfree = vnc_zlib_zfree; err = deflateInit2(zstream, vs->tight_compression, Z_DEFLATED, MAX_WBITS, MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY); if (err != Z_OK) { fprintf(stderr, \"VNC: error initializing zlib\\n\"); return -1; } vs->zlib_level = vs->tight_compression; zstream->opaque = vs; } if (vs->tight_compression != vs->zlib_level) { if (deflateParams(zstream, vs->tight_compression, Z_DEFAULT_STRATEGY) != Z_OK) { return -1; } vs->zlib_level = vs->tight_compression; } // reserve memory in output buffer buffer_reserve(&vs->output, vs->zlib.offset + 64); // set pointers zstream->next_in = vs->zlib.buffer; zstream->avail_in = vs->zlib.offset; zstream->next_out = vs->output.buffer + vs->output.offset; zstream->avail_out = vs->output.capacity - vs->output.offset; zstream->data_type = Z_BINARY; previous_out = zstream->total_out; // start encoding if (deflate(zstream, Z_SYNC_FLUSH) != Z_OK) { fprintf(stderr, \"VNC: error during zlib compression\\n\"); return -1; } vs->output.offset = vs->output.capacity - zstream->avail_out; return zstream->total_out - previous_out; }"} {"target": 1, "idx": 22437, "func": "static int ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile, AVCodecContext *avctx) { int mbn, blk, num_blocks, num_coeffs, blk_size, scan_pos, run, val, pos, is_intra, mc_type = 0, mv_x, mv_y, col_mask; uint8_t col_flags[8]; int32_t prev_dc, trvec[64]; uint32_t cbp, sym, lo, hi, quant, buf_offs, q; IVIMbInfo *mb; RVMapDesc *rvmap = band->rv_map; void (*mc_with_delta_func)(int16_t *buf, const int16_t *ref_buf, uint32_t pitch, int mc_type); void (*mc_no_delta_func) (int16_t *buf, const int16_t *ref_buf, uint32_t pitch, int mc_type); const uint16_t *base_tab; const uint8_t *scale_tab; prev_dc = 0; /* init intra prediction for the DC coefficient */ blk_size = band->blk_size; col_mask = blk_size - 1; /* column mask for tracking non-zero coeffs */ num_blocks = (band->mb_size != blk_size) ? 4 : 1; /* number of blocks per mb */ num_coeffs = blk_size * blk_size; if (blk_size == 8) { mc_with_delta_func = ff_ivi_mc_8x8_delta; mc_no_delta_func = ff_ivi_mc_8x8_no_delta; } else { mc_with_delta_func = ff_ivi_mc_4x4_delta; mc_no_delta_func = ff_ivi_mc_4x4_no_delta; for (mbn = 0, mb = tile->mbs; mbn < tile->num_MBs; mb++, mbn++) { is_intra = !mb->type; cbp = mb->cbp; buf_offs = mb->buf_offs; quant = av_clip(band->glob_quant + mb->q_delta, 0, 23); base_tab = is_intra ? band->intra_base : band->inter_base; scale_tab = is_intra ? 
band->intra_scale : band->inter_scale; if (scale_tab) quant = scale_tab[quant]; if (!is_intra) { mv_x = mb->mv_x; mv_y = mb->mv_y; if (band->is_halfpel) { mc_type = ((mv_y & 1) << 1) | (mv_x & 1); mv_x >>= 1; mv_y >>= 1; /* convert halfpel vectors into fullpel ones */ if (mb->type) { int dmv_x, dmv_y, cx, cy; dmv_x = mb->mv_x >> band->is_halfpel; dmv_y = mb->mv_y >> band->is_halfpel; cx = mb->mv_x & band->is_halfpel; cy = mb->mv_y & band->is_halfpel; if ( mb->xpos + dmv_x < 0 || mb->xpos + dmv_x + band->mb_size + cx > band->pitch || mb->ypos + dmv_y < 0 || mb->ypos + dmv_y + band->mb_size + cy > band->aheight) { for (blk = 0; blk < num_blocks; blk++) { /* adjust block position in the buffer according to its number */ if (blk & 1) { buf_offs += blk_size; } else if (blk == 2) { buf_offs -= blk_size; buf_offs += blk_size * band->pitch; if (cbp & 1) { /* block coded ? */ scan_pos = -1; memset(trvec, 0, num_coeffs*sizeof(trvec[0])); /* zero transform vector */ memset(col_flags, 0, sizeof(col_flags)); /* zero column flags */ while (scan_pos <= num_coeffs) { sym = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1); if (sym == rvmap->eob_sym) break; /* End of block */ if (sym == rvmap->esc_sym) { /* Escape - run/val explicitly coded using 3 vlc codes */ run = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1) + 1; lo = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1); hi = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1); val = IVI_TOSIGNED((hi << 6) | lo); /* merge them and convert into signed val */ } else { if (sym >= 256U) { av_log(avctx, AV_LOG_ERROR, \"Invalid sym encountered: %d.\\n\", sym); return -1; run = rvmap->runtab[sym]; val = rvmap->valtab[sym]; /* de-zigzag and dequantize */ scan_pos += run; if (scan_pos >= num_coeffs) break; pos = band->scan[scan_pos]; if (!val) av_dlog(avctx, \"Val = 0 encountered!\\n\"); q = (base_tab[pos] * quant) >> 9; if (q > 1) val = val * q + FFSIGN(val) * (((q ^ 1) - 1) >> 1); trvec[pos] = val; col_flags[pos & col_mask] |= !!val; /* track columns containing non-zero coeffs */ }// while if (scan_pos >= num_coeffs && sym != rvmap->eob_sym) return -1; /* corrupt block data */ /* undoing DC coeff prediction for intra-blocks */ if (is_intra && band->is_2d_trans) { prev_dc += trvec[0]; trvec[0] = prev_dc; col_flags[0] |= !!prev_dc; /* apply inverse transform */ band->inv_transform(trvec, band->buf + buf_offs, band->pitch, col_flags); /* apply motion compensation */ if (!is_intra) mc_with_delta_func(band->buf + buf_offs, band->ref_buf + buf_offs + mv_y * band->pitch + mv_x, band->pitch, mc_type); } else { /* block not coded */ /* for intra blocks apply the dc slant transform */ /* for inter - perform the motion compensation without delta */ if (is_intra && band->dc_transform) { band->dc_transform(&prev_dc, band->buf + buf_offs, band->pitch, blk_size); } else mc_no_delta_func(band->buf + buf_offs, band->ref_buf + buf_offs + mv_y * band->pitch + mv_x, band->pitch, mc_type); cbp >>= 1; }// for blk }// for mbn align_get_bits(gb); return 0;"} {"target": 1, "idx": 22442, "func": "static int skip_data_stream_element(AACContext *ac, GetBitContext *gb) { int byte_align = get_bits1(gb); int count = get_bits(gb, 8); if (count == 255) count += get_bits(gb, 8); if (byte_align) align_get_bits(gb); if (get_bits_left(gb) < 8 * count) { av_log(ac->avctx, AV_LOG_ERROR, overread_err); return -1; } skip_bits_long(gb, 8 * count); return 0; }"} {"target": 1, "idx": 22445, "func": "static int filter_frame(AVFilterLink *inlink, AVFrame *inbuf) { 
AudioPhaserContext *s = inlink->dst->priv; AVFilterLink *outlink = inlink->dst->outputs[0]; AVFrame *outbuf; if (av_frame_is_writable(inbuf)) { outbuf = inbuf; } else { outbuf = ff_get_audio_buffer(inlink, inbuf->nb_samples); if (!outbuf) return AVERROR(ENOMEM); av_frame_copy_props(outbuf, inbuf); } s->phaser(s, inbuf->extended_data, outbuf->extended_data, outbuf->nb_samples, outbuf->channels); if (inbuf != outbuf) av_frame_free(&inbuf); return ff_filter_frame(outlink, outbuf); }"} {"target": 0, "idx": 22453, "func": "static AVStream *find_stream(void *log, AVFormatContext *avf, const char *spec) { int i, ret, already = 0, stream_id = -1; char type_char, dummy; AVStream *found = NULL; enum AVMediaType type; ret = sscanf(spec, \"d%[av]%d%c\", &type_char, &stream_id, &dummy); if (ret >= 1 && ret <= 2) { type = type_char == 'v' ? AVMEDIA_TYPE_VIDEO : AVMEDIA_TYPE_AUDIO; ret = av_find_best_stream(avf, type, stream_id, -1, NULL, 0); if (ret < 0) { av_log(log, AV_LOG_ERROR, \"No %s stream with index '%d' found\\n\", av_get_media_type_string(type), stream_id); return NULL; } return avf->streams[ret]; } for (i = 0; i < avf->nb_streams; i++) { ret = avformat_match_stream_specifier(avf, avf->streams[i], spec); if (ret < 0) { av_log(log, AV_LOG_ERROR, \"Invalid stream specifier \\\"%s\\\"\\n\", spec); return NULL; } if (!ret) continue; if (avf->streams[i]->discard != AVDISCARD_ALL) { already++; continue; } if (found) { av_log(log, AV_LOG_WARNING, \"Ambiguous stream specifier \\\"%s\\\", using #%d\\n\", spec, i); break; } found = avf->streams[i]; } if (!found) { av_log(log, AV_LOG_WARNING, \"Stream specifier \\\"%s\\\" %s\\n\", spec, already ? \"matched only already used streams\" : \"did not match any stream\"); return NULL; } if (found->codec->codec_type != AVMEDIA_TYPE_VIDEO && found->codec->codec_type != AVMEDIA_TYPE_AUDIO) { av_log(log, AV_LOG_ERROR, \"Stream specifier \\\"%s\\\" matched a %s stream,\" \"currently unsupported by libavfilter\\n\", spec, av_get_media_type_string(found->codec->codec_type)); return NULL; } return found; }"} {"target": 0, "idx": 22456, "func": "static void check_rgb2yuv(void) { declare_func(void, uint8_t *dst[3], ptrdiff_t dst_stride[3], int16_t *src[3], ptrdiff_t src_stride, int w, int h, const int16_t coeff[3][3][8], const int16_t off[8]); ColorSpaceDSPContext dsp; int odepth, fmt, n; LOCAL_ALIGNED_32(int16_t, src_y, [W * H * 2]); LOCAL_ALIGNED_32(int16_t, src_u, [W * H * 2]); LOCAL_ALIGNED_32(int16_t, src_v, [W * H * 2]); int16_t *src[3] = { src_y, src_u, src_v }; LOCAL_ALIGNED_32(uint8_t, dst0_y, [W * H]); LOCAL_ALIGNED_32(uint8_t, dst0_u, [W * H]); LOCAL_ALIGNED_32(uint8_t, dst0_v, [W * H]); LOCAL_ALIGNED_32(uint8_t, dst1_y, [W * H]); LOCAL_ALIGNED_32(uint8_t, dst1_u, [W * H]); LOCAL_ALIGNED_32(uint8_t, dst1_v, [W * H]); uint8_t *dst0[3] = { dst0_y, dst0_u, dst0_v }, *dst1[3] = { dst1_y, dst1_u, dst1_v }; LOCAL_ALIGNED_32(int16_t, offset, [8]); LOCAL_ALIGNED_32(int16_t, coeff_buf, [3 * 3 * 8]); int16_t (*coeff)[3][8] = (int16_t(*)[3][8]) coeff_buf; ff_colorspacedsp_init(&dsp); for (n = 0; n < 8; n++) { offset[n] = 16; // these somewhat resemble bt601/smpte170m coefficients coeff[0][0][n] = lrint(0.3 * (1 << 14)); coeff[0][1][n] = lrint(0.6 * (1 << 14)); coeff[0][2][n] = lrint(0.1 * (1 << 14)); coeff[1][0][n] = lrint(-0.15 * (1 << 14)); coeff[1][1][n] = lrint(-0.35 * (1 << 14)); coeff[1][2][n] = lrint(0.5 * (1 << 14)); coeff[2][0][n] = lrint(0.5 * (1 << 14)); coeff[2][1][n] = lrint(-0.42 * (1 << 14)); coeff[2][2][n] = lrint(-0.08 * (1 << 14)); } for (odepth = 0; 
odepth < 3; odepth++) { for (fmt = 0; fmt < 3; fmt++) { if (check_func(dsp.rgb2yuv[odepth][fmt], \"ff_colorspacedsp_rgb2yuv_%sp%d\", format_string[fmt], odepth * 2 + 8)) { int ss_w = !!fmt, ss_h = fmt == 2; int y_dst_stride = W << !!odepth; int uv_dst_stride = y_dst_stride >> ss_w; randomize_buffers(); call_ref(dst0, (ptrdiff_t[3]) { y_dst_stride, uv_dst_stride, uv_dst_stride }, src, W, W, H, coeff, offset); call_new(dst1, (ptrdiff_t[3]) { y_dst_stride, uv_dst_stride, uv_dst_stride }, src, W, W, H, coeff, offset); if (memcmp(dst0[0], dst1[0], H * y_dst_stride) || memcmp(dst0[1], dst1[1], H * uv_dst_stride >> ss_h) || memcmp(dst0[2], dst1[2], H * uv_dst_stride >> ss_h)) { fail(); } } } } report(\"rgb2yuv\"); }"} {"target": 0, "idx": 22458, "func": "flac_header (AVFormatContext * s, int idx) { struct ogg *ogg = s->priv_data; struct ogg_stream *os = ogg->streams + idx; AVStream *st = s->streams[idx]; GetBitContext gb; FLACStreaminfo si; int mdt; if (os->buf[os->pstart] == 0xff) return 0; init_get_bits(&gb, os->buf + os->pstart, os->psize*8); skip_bits1(&gb); /* metadata_last */ mdt = get_bits(&gb, 7); if (mdt == OGG_FLAC_METADATA_TYPE_STREAMINFO) { uint8_t *streaminfo_start = os->buf + os->pstart + 5 + 4 + 4 + 4; skip_bits_long(&gb, 4*8); /* \"FLAC\" */ if(get_bits(&gb, 8) != 1) /* unsupported major version */ return -1; skip_bits_long(&gb, 8 + 16); /* minor version + header count */ skip_bits_long(&gb, 4*8); /* \"fLaC\" */ /* METADATA_BLOCK_HEADER */ if (get_bits_long(&gb, 32) != FLAC_STREAMINFO_SIZE) return -1; avpriv_flac_parse_streaminfo(st->codec, &si, streaminfo_start); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = AV_CODEC_ID_FLAC; st->need_parsing = AVSTREAM_PARSE_HEADERS; ff_alloc_extradata(st->codec, FLAC_STREAMINFO_SIZE); memcpy(st->codec->extradata, streaminfo_start, st->codec->extradata_size); avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate); } else if (mdt == FLAC_METADATA_TYPE_VORBIS_COMMENT) { ff_vorbis_comment (s, &st->metadata, os->buf + os->pstart + 4, os->psize - 4); } return 1; }"} {"target": 0, "idx": 22477, "func": "void acpi_memory_plug_cb(ACPIREGS *ar, qemu_irq irq, MemHotplugState *mem_st, DeviceState *dev, Error **errp) { MemStatus *mdev; mdev = acpi_memory_slot_status(mem_st, dev, errp); if (!mdev) { return; } mdev->dimm = dev; mdev->is_enabled = true; mdev->is_inserting = true; /* do ACPI magic */ acpi_send_gpe_event(ar, irq, ACPI_MEMORY_HOTPLUG_STATUS); return; }"} {"target": 0, "idx": 22479, "func": "static int decode_i_picture_header(VC9Context *v) { int pqindex, status = 0, ac_pred, condover; /* Prolog common to all frametypes should be done in caller */ //BF = Buffer Fullness if (v->profile <= PROFILE_MAIN && get_bits(&v->gb, 7)) { av_log(v, AV_LOG_DEBUG, \"I BufferFullness not 0\\n\"); } /* Quantizer stuff */ pqindex = get_bits(&v->gb, 5); if (v->quantizer_mode == QUANT_FRAME_IMPLICIT) v->pq = pquant_table[0][pqindex]; else { v->pq = pquant_table[v->quantizer_mode-1][pqindex]; } if (pqindex < 9) v->halfpq = get_bits(&v->gb, 1); if (v->quantizer_mode == QUANT_FRAME_EXPLICIT) v->pquantizer = get_bits(&v->gb, 1); av_log(v->avctx, AV_LOG_DEBUG, \"I frame: QP=%i (+%i/2)\\n\", v->pq, v->halfpq); #if HAS_ADVANCED_PROFILE if (v->profile <= PROFILE_MAIN) #endif { if (v->extended_mv) v->mvrange = get_prefix(&v->gb, 0, 3); if (v->multires) v->respic = get_bits(&v->gb, 2); } #if HAS_ADVANCED_PROFILE else { ac_pred = get_bits(&v->gb, 1); if (v->postprocflag) v->postproc = get_bits(&v->gb, 1); /* 7.1.1.34 + 8.5.2 */ if (v->overlap && v->pq<9) { 
condover = get_bits(&v->gb, 1); if (condover) { condover = 2+get_bits(&v->gb, 1); if (condover == 3) status = bitplane_decoding(v->over_flags_plane, v->width_mb, v->height_mb, v); } } } #endif /* Epilog should be done in caller */ return status; }"} {"target": 0, "idx": 22482, "func": "static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end, vtd_page_walk_hook hook_fn, void *private) { dma_addr_t addr = vtd_get_slpt_base_from_context(ce); uint32_t level = vtd_get_level_from_context_entry(ce); if (!vtd_iova_range_check(start, ce)) { return -VTD_FR_ADDR_BEYOND_MGAW; } if (!vtd_iova_range_check(end, ce)) { /* Fix end so that it reaches the maximum */ end = vtd_iova_limit(ce); } return vtd_page_walk_level(addr, start, end, hook_fn, private, level, true, true, false); }"} {"target": 0, "idx": 22483, "func": "static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *iov) { return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false); }"} {"target": 1, "idx": 22494, "func": "static int32_t scsi_send_command(SCSIRequest *req, uint8_t *buf) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); int32_t len; uint8_t command; uint8_t *outbuf; int rc; command = buf[0]; outbuf = (uint8_t *)r->iov.iov_base; DPRINTF(\"Command: lun=%d tag=0x%x data=0x%02x\", req->lun, req->tag, buf[0]); if (scsi_req_parse(&r->req, buf) != 0) { BADF(\"Unsupported command length, command %x\\n\", command); scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(INVALID_OPCODE)); return 0; } #ifdef DEBUG_SCSI { int i; for (i = 1; i < r->req.cmd.len; i++) { printf(\" 0x%02x\", buf[i]); } printf(\"\\n\"); } #endif if (req->lun) { /* Only LUN 0 supported. */ DPRINTF(\"Unimplemented LUN %d\\n\", req->lun); if (command != REQUEST_SENSE && command != INQUIRY) { scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(LUN_NOT_SUPPORTED)); return 0; } } switch (command) { case TEST_UNIT_READY: case REQUEST_SENSE: case INQUIRY: case MODE_SENSE: case MODE_SENSE_10: case RESERVE: case RESERVE_10: case RELEASE: case RELEASE_10: case START_STOP: case ALLOW_MEDIUM_REMOVAL: case READ_CAPACITY: case SYNCHRONIZE_CACHE: case READ_TOC: case GET_CONFIGURATION: case SERVICE_ACTION_IN: case REPORT_LUNS: case VERIFY: rc = scsi_disk_emulate_command(r, outbuf); if (rc < 0) { return 0; } r->iov.iov_len = rc; break; case READ_6: case READ_10: case READ_12: case READ_16: len = r->req.cmd.xfer / s->qdev.blocksize; DPRINTF(\"Read (sector %\" PRId64 \", count %d)\\n\", r->req.cmd.lba, len); if (r->req.cmd.lba > s->max_lba) goto illegal_lba; r->sector = r->req.cmd.lba * s->cluster_size; r->sector_count = len * s->cluster_size; break; case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_16: case WRITE_VERIFY: case WRITE_VERIFY_12: case WRITE_VERIFY_16: len = r->req.cmd.xfer / s->qdev.blocksize; DPRINTF(\"Write %s(sector %\" PRId64 \", count %d)\\n\", (command & 0xe) == 0xe ? \"And Verify \" : \"\", r->req.cmd.lba, len); if (r->req.cmd.lba > s->max_lba) goto illegal_lba; r->sector = r->req.cmd.lba * s->cluster_size; r->sector_count = len * s->cluster_size; break; case MODE_SELECT: DPRINTF(\"Mode Select(6) (len %lu)\\n\", (long)r->req.cmd.xfer); /* We don't support mode parameter changes. Allow the mode parameter header + block descriptors only. */ if (r->req.cmd.xfer > 12) { goto fail; } break; case MODE_SELECT_10: DPRINTF(\"Mode Select(10) (len %lu)\\n\", (long)r->req.cmd.xfer); /* We don't support mode parameter changes. 
Allow the mode parameter header + block descriptors only. */ if (r->req.cmd.xfer > 16) { goto fail; } break; case SEEK_6: case SEEK_10: DPRINTF(\"Seek(%d) (sector %\" PRId64 \")\\n\", command == SEEK_6 ? 6 : 10, r->req.cmd.lba); if (r->req.cmd.lba > s->max_lba) { goto illegal_lba; } break; case WRITE_SAME_16: len = r->req.cmd.xfer / s->qdev.blocksize; DPRINTF(\"WRITE SAME(16) (sector %\" PRId64 \", count %d)\\n\", r->req.cmd.lba, len); if (r->req.cmd.lba > s->max_lba) { goto illegal_lba; } /* * We only support WRITE SAME with the unmap bit set for now. */ if (!(buf[1] & 0x8)) { goto fail; } rc = bdrv_discard(s->bs, r->req.cmd.lba * s->cluster_size, len * s->cluster_size); if (rc < 0) { /* XXX: better error code ?*/ goto fail; } break; default: DPRINTF(\"Unknown SCSI command (%2.2x)\\n\", buf[0]); scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(INVALID_OPCODE)); return 0; fail: scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(INVALID_FIELD)); return 0; illegal_lba: scsi_command_complete(r, CHECK_CONDITION, SENSE_CODE(LBA_OUT_OF_RANGE)); return 0; } if (r->sector_count == 0 && r->iov.iov_len == 0) { scsi_command_complete(r, GOOD, SENSE_CODE(NO_SENSE)); } len = r->sector_count * 512 + r->iov.iov_len; if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { return -len; } else { if (!r->sector_count) r->sector_count = -1; return len; } }"} {"target": 1, "idx": 22507, "func": "static int wav_init_out (HWVoiceOut *hw, struct audsettings *as) { WAVVoiceOut *wav = (WAVVoiceOut *) hw; int bits16 = 0, stereo = 0; uint8_t hdr[] = { 0x52, 0x49, 0x46, 0x46, 0x00, 0x00, 0x00, 0x00, 0x57, 0x41, 0x56, 0x45, 0x66, 0x6d, 0x74, 0x20, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x44, 0xac, 0x00, 0x00, 0x10, 0xb1, 0x02, 0x00, 0x04, 0x00, 0x10, 0x00, 0x64, 0x61, 0x74, 0x61, 0x00, 0x00, 0x00, 0x00 }; struct audsettings wav_as = conf.settings; (void) as; stereo = wav_as.nchannels == 2; switch (wav_as.fmt) { case AUD_FMT_S8: case AUD_FMT_U8: bits16 = 0; break; case AUD_FMT_S16: case AUD_FMT_U16: bits16 = 1; break; case AUD_FMT_S32: case AUD_FMT_U32: dolog (\"WAVE files can not handle 32bit formats\\n\"); return -1; } hdr[34] = bits16 ? 0x10 : 0x08; wav_as.endianness = 0; audio_pcm_init_info (&hw->info, &wav_as); hw->samples = 1024; wav->pcm_buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift); if (!wav->pcm_buf) { dolog (\"Could not allocate buffer (%d bytes)\\n\", hw->samples << hw->info.shift); return -1; } le_store (hdr + 22, hw->info.nchannels, 2); le_store (hdr + 24, hw->info.freq, 4); le_store (hdr + 28, hw->info.freq << (bits16 + stereo), 4); le_store (hdr + 32, 1 << (bits16 + stereo), 2); wav->f = fopen (conf.wav_path, \"wb\"); if (!wav->f) { dolog (\"Failed to open wave file `%s'\\nReason: %s\\n\", conf.wav_path, strerror (errno)); g_free (wav->pcm_buf); wav->pcm_buf = NULL; return -1; } if (fwrite (hdr, sizeof (hdr), 1, wav->f) != 1) { dolog (\"wav_init_out: failed to write header\\nReason: %s\\n\", strerror(errno)); return -1; } return 0; }"} {"target": 0, "idx": 22511, "func": "static char *time_value_string(char *buf, int buf_size, int64_t val, const AVRational *time_base) { if (val == AV_NOPTS_VALUE) { snprintf(buf, buf_size, \"N/A\"); } else { double d = val * av_q2d(*time_base); value_string(buf, buf_size, (struct unit_value){.val.d=d, .unit=unit_second_str}); } return buf; }"} {"target": 0, "idx": 22546, "func": "static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false) { TCGv btaken; int l1; l1 = gen_new_label(); btaken = tcg_temp_new(TCG_TYPE_TL); /* Conditional jmp. 
*/ tcg_gen_mov_tl(btaken, env_btaken); tcg_gen_mov_tl(env_pc, pc_false); tcg_gen_brcondi_tl(TCG_COND_EQ, btaken, 0, l1); tcg_gen_mov_tl(env_pc, pc_true); gen_set_label(l1); tcg_temp_free(btaken); }"} {"target": 0, "idx": 22547, "func": "NBDClient *nbd_client_new(NBDExport *exp, int csock, void (*close)(NBDClient *)) { NBDClient *client; client = g_malloc0(sizeof(NBDClient)); client->refcount = 1; client->exp = exp; client->sock = csock; if (nbd_send_negotiate(client)) { g_free(client); return NULL; } client->close = close; qemu_co_mutex_init(&client->send_lock); qemu_set_fd_handler2(csock, nbd_can_read, nbd_read, NULL, client); if (exp) { QTAILQ_INSERT_TAIL(&exp->clients, client, next); nbd_export_get(exp); } return client; }"} {"target": 0, "idx": 22553, "func": "void helper_ljmp_protected_T0_T1(void) { int new_cs, new_eip; uint32_t e1, e2, cpl, dpl, rpl, limit; new_cs = T0; new_eip = T1; if ((new_cs & 0xfffc) == 0) raise_exception_err(EXCP0D_GPF, 0); if (load_segment(&e1, &e2, new_cs) != 0) raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); cpl = env->hflags & HF_CPL_MASK; if (e2 & DESC_S_MASK) { if (!(e2 & DESC_CS_MASK)) raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (e2 & DESC_CS_MASK) { /* conforming code segment */ if (dpl > cpl) raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); } else { /* non conforming code segment */ rpl = new_cs & 3; if (rpl > cpl) raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); if (dpl != cpl) raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); } if (!(e2 & DESC_P_MASK)) raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc); limit = get_seg_limit(e1, e2); if (new_eip > limit) raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc); cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, get_seg_base(e1, e2), limit, e2); EIP = new_eip; } else { cpu_abort(env, \"jmp to call/task gate not supported 0x%04x:0x%08x\", new_cs, new_eip); } }"} {"target": 0, "idx": 22565, "func": "static int read_sbr_single_channel_element(AACContext *ac, SpectralBandReplication *sbr, GetBitContext *gb) { int ret; if (get_bits1(gb)) // bs_data_extra skip_bits(gb, 4); // bs_reserved if (read_sbr_grid(ac, sbr, gb, &sbr->data[0])) return -1; read_sbr_dtdf(sbr, gb, &sbr->data[0]); read_sbr_invf(sbr, gb, &sbr->data[0]); read_sbr_envelope(sbr, gb, &sbr->data[0], 0); if((ret = read_sbr_noise(ac, sbr, gb, &sbr->data[0], 0)) < 0) return ret; if ((sbr->data[0].bs_add_harmonic_flag = get_bits1(gb))) get_bits1_vector(gb, sbr->data[0].bs_add_harmonic, sbr->n[1]); return 0; }"} {"target": 0, "idx": 22573, "func": "const char *swscale_configuration(void) { return FFMPEG_CONFIGURATION; }"} {"target": 1, "idx": 22591, "func": "static inline void RENAME(duplicate)(uint8_t src[], int stride) { #if TEMPLATE_PP_MMX __asm__ volatile( \"movq (%0), %%mm0 \\n\\t\" \"add %1, %0 \\n\\t\" \"movq %%mm0, (%0) \\n\\t\" \"movq %%mm0, (%0, %1) \\n\\t\" \"movq %%mm0, (%0, %1, 2) \\n\\t\" : \"+r\" (src) : \"r\" ((x86_reg)-stride) ); #else int i; uint8_t *p=src; for(i=0; i<3; i++){ p-= stride; memcpy(p, src, 8); } #endif }"} {"target": 0, "idx": 22600, "func": "static void RENAME(extract_even2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count) { dst0+= count; dst1+= count; src += 4*count; count= - count; #if COMPILE_TEMPLATE_MMX if(count <= -8) { count += 7; __asm__ volatile( \"pcmpeqw %%mm7, %%mm7 \\n\\t\" \"psrlw $8, %%mm7 \\n\\t\" \"1: \\n\\t\" \"movq -28(%1, %0, 4), %%mm0 \\n\\t\" \"movq -20(%1, %0, 4), %%mm1 \\n\\t\" \"movq -12(%1, %0, 4), %%mm2 \\n\\t\" \"movq -4(%1, 
%0, 4), %%mm3 \\n\\t\" \"pand %%mm7, %%mm0 \\n\\t\" \"pand %%mm7, %%mm1 \\n\\t\" \"pand %%mm7, %%mm2 \\n\\t\" \"pand %%mm7, %%mm3 \\n\\t\" \"packuswb %%mm1, %%mm0 \\n\\t\" \"packuswb %%mm3, %%mm2 \\n\\t\" \"movq %%mm0, %%mm1 \\n\\t\" \"movq %%mm2, %%mm3 \\n\\t\" \"psrlw $8, %%mm0 \\n\\t\" \"psrlw $8, %%mm2 \\n\\t\" \"pand %%mm7, %%mm1 \\n\\t\" \"pand %%mm7, %%mm3 \\n\\t\" \"packuswb %%mm2, %%mm0 \\n\\t\" \"packuswb %%mm3, %%mm1 \\n\\t\" MOVNTQ\" %%mm0,- 7(%3, %0) \\n\\t\" MOVNTQ\" %%mm1,- 7(%2, %0) \\n\\t\" \"add $8, %0 \\n\\t\" \" js 1b \\n\\t\" : \"+r\"(count) : \"r\"(src), \"r\"(dst0), \"r\"(dst1) ); count -= 7; } #endif while(count<0) { dst0[count]= src[4*count+0]; dst1[count]= src[4*count+2]; count++; } }"} {"target": 1, "idx": 22619, "func": "static void usb_xhci_exit(PCIDevice *dev) { int i; XHCIState *xhci = XHCI(dev); trace_usb_xhci_exit(); for (i = 0; i < xhci->numslots; i++) { xhci_disable_slot(xhci, i + 1); } if (xhci->mfwrap_timer) { timer_del(xhci->mfwrap_timer); timer_free(xhci->mfwrap_timer); xhci->mfwrap_timer = NULL; } memory_region_del_subregion(&xhci->mem, &xhci->mem_cap); memory_region_del_subregion(&xhci->mem, &xhci->mem_oper); memory_region_del_subregion(&xhci->mem, &xhci->mem_runtime); memory_region_del_subregion(&xhci->mem, &xhci->mem_doorbell); for (i = 0; i < xhci->numports; i++) { XHCIPort *port = &xhci->ports[i]; memory_region_del_subregion(&xhci->mem, &port->mem); } /* destroy msix memory region */ if (dev->msix_table && dev->msix_pba && dev->msix_entry_used) { memory_region_del_subregion(&xhci->mem, &dev->msix_table_mmio); memory_region_del_subregion(&xhci->mem, &dev->msix_pba_mmio); } usb_bus_release(&xhci->bus); }"} {"target": 1, "idx": 22626, "func": "static void evaluate_utility_inc(elbg_data *elbg) { int i, inc=0; for (i=0; i < elbg->numCB; i++) { if (elbg->numCB*elbg->utility[i] > elbg->error) inc += elbg->utility[i]; elbg->utility_inc[i] = inc; } }"} {"target": 1, "idx": 22640, "func": "static void init_excp_POWER7 (CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_DSEG] = 0x00000380; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_ISEG] = 0x00000480; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_HDECR] = 0x00000980; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00; env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00; env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20; env->excp_vectors[POWERPC_EXCP_VSXU] = 0x00000F40; env->excp_vectors[POWERPC_EXCP_FU] = 0x00000F60; env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300; env->excp_vectors[POWERPC_EXCP_MAINT] = 0x00001600; env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001700; env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001800; /* Hardware reset vector */ env->hreset_vector = 0x0000000000000100ULL; #endif }"} {"target": 0, "idx": 22664, "func": "static void sdt_cb(MpegTSFilter *filter, const uint8_t *section, int section_len) { MpegTSContext *ts = filter->u.section_filter.opaque; SectionHeader h1, *h = &h1; const uint8_t *p, *p_end, *desc_list_end, *desc_end; int onid, val, sid, desc_list_len, 
desc_tag, desc_len, service_type; char *name, *provider_name; av_dlog(ts->stream, \"SDT:\\n\"); hex_dump_debug(ts->stream, section, section_len); p_end = section + section_len - 4; p = section; if (parse_section_header(h, &p, p_end) < 0) return; if (h->tid != SDT_TID) return; if (ts->skip_changes) return; onid = get16(&p, p_end); if (onid < 0) return; val = get8(&p, p_end); if (val < 0) return; for (;;) { sid = get16(&p, p_end); if (sid < 0) break; val = get8(&p, p_end); if (val < 0) break; desc_list_len = get16(&p, p_end); if (desc_list_len < 0) break; desc_list_len &= 0xfff; desc_list_end = p + desc_list_len; if (desc_list_end > p_end) break; for (;;) { desc_tag = get8(&p, desc_list_end); if (desc_tag < 0) break; desc_len = get8(&p, desc_list_end); desc_end = p + desc_len; if (desc_end > desc_list_end) break; av_dlog(ts->stream, \"tag: 0x%02x len=%d\\n\", desc_tag, desc_len); switch (desc_tag) { case 0x48: service_type = get8(&p, p_end); if (service_type < 0) break; provider_name = getstr8(&p, p_end); if (!provider_name) break; name = getstr8(&p, p_end); if (name) { AVProgram *program = av_new_program(ts->stream, sid); if (program) { av_dict_set(&program->metadata, \"service_name\", name, 0); av_dict_set(&program->metadata, \"service_provider\", provider_name, 0); } } av_free(name); av_free(provider_name); break; default: break; } p = desc_end; } p = desc_list_end; } }"} {"target": 1, "idx": 22666, "func": "static int socket_get_fd(void *opaque) { QEMUFileSocket *s = opaque; return s->fd; }"} {"target": 1, "idx": 22688, "func": "void do_addmeo_64 (void) { T1 = T0; T0 += xer_ca + (-1); if (likely(!((uint64_t)T1 & ((uint64_t)T1 ^ (uint64_t)T0) & (1ULL << 63)))) { xer_ov = 0; } else { xer_so = 1; xer_ov = 1; } if (likely(T1 != 0)) xer_ca = 1; }"} {"target": 0, "idx": 22697, "func": "static int nppscale_query_formats(AVFilterContext *ctx) { static const enum AVPixelFormat pixel_formats[] = { AV_PIX_FMT_CUDA, AV_PIX_FMT_NONE, }; AVFilterFormats *pix_fmts = ff_make_format_list(pixel_formats); ff_set_common_formats(ctx, pix_fmts); return 0; }"} {"target": 0, "idx": 22699, "func": "static void delete_next_frame(AudioFrameQueue *afq) { AudioFrame *f = afq->frame_queue; if (f) { afq->frame_queue = f->next; f->next = NULL; av_freep(&f); } }"} {"target": 1, "idx": 22710, "func": "int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags) { f->owner = avctx; return ff_get_buffer(avctx, f->f, flags); }"} {"target": 0, "idx": 22724, "func": "size_t qcrypto_cipher_get_key_len(QCryptoCipherAlgorithm alg) { if (alg >= G_N_ELEMENTS(alg_key_len)) { return 0; } return alg_key_len[alg]; }"} {"target": 0, "idx": 22739, "func": "static int img_amend(int argc, char **argv) { int c, ret = 0; char *options = NULL; QemuOptsList *create_opts = NULL; QemuOpts *opts = NULL; const char *fmt = NULL, *filename, *cache; int flags; bool quiet = false, progress = false; BlockBackend *blk = NULL; BlockDriverState *bs = NULL; cache = BDRV_DEFAULT_CACHE; for (;;) { c = getopt(argc, argv, \"ho:f:t:pq\"); if (c == -1) { break; } switch (c) { case 'h': case '?': help(); break; case 'o': if (!is_valid_option_list(optarg)) { error_report(\"Invalid option list: %s\", optarg); ret = -1; goto out; } if (!options) { options = g_strdup(optarg); } else { char *old_options = options; options = g_strdup_printf(\"%s,%s\", options, optarg); g_free(old_options); } break; case 'f': fmt = optarg; break; case 't': cache = optarg; break; case 'p': progress = true; break; case 'q': quiet = true; break; } } if (!options) { 
error_exit(\"Must specify options (-o)\"); } if (quiet) { progress = false; } qemu_progress_init(progress, 1.0); filename = (optind == argc - 1) ? argv[argc - 1] : NULL; if (fmt && has_help_option(options)) { /* If a format is explicitly specified (and possibly no filename is * given), print option help here */ ret = print_block_option_help(filename, fmt); goto out; } if (optind != argc - 1) { error_report(\"Expecting one image file name\"); ret = -1; goto out; } flags = BDRV_O_FLAGS | BDRV_O_RDWR; ret = bdrv_parse_cache_flags(cache, &flags); if (ret < 0) { error_report(\"Invalid cache option: %s\", cache); goto out; } blk = img_open(\"image\", filename, fmt, flags, true, quiet); if (!blk) { ret = -1; goto out; } bs = blk_bs(blk); fmt = bs->drv->format_name; if (has_help_option(options)) { /* If the format was auto-detected, print option help here */ ret = print_block_option_help(filename, fmt); goto out; } if (!bs->drv->create_opts) { error_report(\"Format driver '%s' does not support any options to amend\", fmt); ret = -1; goto out; } create_opts = qemu_opts_append(create_opts, bs->drv->create_opts); opts = qemu_opts_create(create_opts, NULL, 0, &error_abort); if (options && qemu_opts_do_parse(opts, options, NULL)) { error_report(\"Invalid options for file format '%s'\", fmt); ret = -1; goto out; } /* In case the driver does not call amend_status_cb() */ qemu_progress_print(0.f, 0); ret = bdrv_amend_options(bs, opts, &amend_status_cb); qemu_progress_print(100.f, 0); if (ret < 0) { error_report(\"Error while amending options: %s\", strerror(-ret)); goto out; } out: qemu_progress_end(); blk_unref(blk); qemu_opts_del(opts); qemu_opts_free(create_opts); g_free(options); if (ret) { return 1; } return 0; }"} {"target": 0, "idx": 22753, "func": "static void channel_load_g(struct fs_dma_ctrl *ctrl, int c) { target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP); /* Load and decode. FIXME: handle endianness. 
*/ cpu_physical_memory_read (addr, (void *) &ctrl->channels[c].current_g, sizeof ctrl->channels[c].current_g); }"} {"target": 0, "idx": 22774, "func": "uint32_t HELPER(rer)(CPUXtensaState *env, uint32_t addr) { return address_space_ldl(env->address_space_er, addr, (MemTxAttrs){0}, NULL); }"} {"target": 0, "idx": 22775, "func": "static void sun4uv_init(MemoryRegion *address_space_mem, MachineState *machine, const struct hwdef *hwdef) { SPARCCPU *cpu; Nvram *nvram; unsigned int i; uint64_t initrd_addr, initrd_size, kernel_addr, kernel_size, kernel_entry; PCIBus *pci_bus, *pci_bus2, *pci_bus3; ISABus *isa_bus; SysBusDevice *s; qemu_irq *ivec_irqs, *pbm_irqs; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; DriveInfo *fd[MAX_FD]; FWCfgState *fw_cfg; /* init CPUs */ cpu = cpu_devinit(machine->cpu_model, hwdef); /* set up devices */ ram_init(0, machine->ram_size); prom_init(hwdef->prom_addr, bios_name); ivec_irqs = qemu_allocate_irqs(cpu_set_ivec_irq, cpu, IVEC_MAX); pci_bus = pci_apb_init(APB_SPECIAL_BASE, APB_MEM_BASE, ivec_irqs, &pci_bus2, &pci_bus3, &pbm_irqs); pci_vga_init(pci_bus); // XXX Should be pci_bus3 isa_bus = pci_ebus_init(pci_bus, -1, pbm_irqs); i = 0; if (hwdef->console_serial_base) { serial_mm_init(address_space_mem, hwdef->console_serial_base, 0, NULL, 115200, serial_hds[i], DEVICE_BIG_ENDIAN); i++; } serial_hds_isa_init(isa_bus, MAX_SERIAL_PORTS); parallel_hds_isa_init(isa_bus, MAX_PARALLEL_PORTS); for(i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], pci_bus, \"ne2k_pci\", NULL); ide_drive_get(hd, ARRAY_SIZE(hd)); pci_cmd646_ide_init(pci_bus, hd, 1); isa_create_simple(isa_bus, \"i8042\"); for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); } fdctrl_init_isa(isa_bus, fd); /* Map NVRAM into I/O (ebus) space */ nvram = m48t59_init(NULL, 0, 0, NVRAM_SIZE, 1968, 59); s = SYS_BUS_DEVICE(nvram); memory_region_add_subregion(get_system_io(), 0x2000, sysbus_mmio_get_region(s, 0)); initrd_size = 0; initrd_addr = 0; kernel_size = sun4u_load_kernel(machine->kernel_filename, machine->initrd_filename, ram_size, &initrd_size, &initrd_addr, &kernel_addr, &kernel_entry); sun4u_NVRAM_set_params(nvram, NVRAM_SIZE, \"Sun4u\", machine->ram_size, machine->boot_order, kernel_addr, kernel_size, machine->kernel_cmdline, initrd_addr, initrd_size, /* XXX: need an option to load a NVRAM image */ 0, graphic_width, graphic_height, graphic_depth, (uint8_t *)&nd_table[0].macaddr); fw_cfg = fw_cfg_init_io(BIOS_CFG_IOPORT); fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id); fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_entry); fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); if (machine->kernel_cmdline) { fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(machine->kernel_cmdline) + 1); fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, machine->kernel_cmdline); } else { fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, 0); } fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, machine->boot_order[0]); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_WIDTH, graphic_width); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_HEIGHT, graphic_height); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_DEPTH, graphic_depth); qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); }"} {"target": 0, "idx": 22787, "func": "static void fw_cfg_data_mem_write(void *opaque, 
target_phys_addr_t addr, uint64_t value, unsigned size) { fw_cfg_write(opaque, (uint8_t)value); }"} {"target": 0, "idx": 22794, "func": "static void do_cpu_reset(void *opaque) { ARMCPU *cpu = opaque; CPUARMState *env = &cpu->env; const struct arm_boot_info *info = env->boot_info; cpu_reset(CPU(cpu)); if (info) { if (!info->is_linux) { /* Jump to the entry point. */ env->regs[15] = info->entry & 0xfffffffe; env->thumb = info->entry & 1; } else { if (CPU(cpu) == first_cpu) { env->regs[15] = info->loader_start; if (!info->dtb_filename) { if (old_param) { set_kernel_args_old(info); } else { set_kernel_args(info); } } } else { info->secondary_cpu_reset_hook(cpu, info); } } } }"} {"target": 1, "idx": 22811, "func": "void ff_msmpeg4_encode_init(MpegEncContext *s) { static int init_done=0; int i; common_init(s); if(s->msmpeg4_version>=4){ s->min_qcoeff= -255; s->max_qcoeff= 255; } if (!init_done) { /* init various encoding tables */ init_done = 1; init_mv_table(&mv_tables[0]); init_mv_table(&mv_tables[1]); for(i=0;i= src_end) { av_log(ctx->avctx, AV_LOG_ERROR, \"Data overrun\\n\"); return AVERROR_INVALIDDATA; } val = *src++; for (i = 0; i < 64; i++) BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) = val; } else { // copy block from previous frame for (i = 0; i < 64; i++) BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) = BLK(ctx->prev, bx + (i & 0x7), by + (i >> 3)); } } else { // handle four 4x4 subblocks for (i = 0; i < 4; i++) { l0x = bx + (i & 1) * 4; l0y = by + (i & 2) * 2; kmvc_getbit(bb, src, src_end, res); if (!res) { kmvc_getbit(bb, src, src_end, res); if (!res) { // fill whole 4x4 block if (src >= src_end) { av_log(ctx->avctx, AV_LOG_ERROR, \"Data overrun\\n\"); return AVERROR_INVALIDDATA; } val = *src++; for (j = 0; j < 16; j++) BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) = val; } else { // copy block if (src >= src_end) { av_log(ctx->avctx, AV_LOG_ERROR, \"Data overrun\\n\"); return AVERROR_INVALIDDATA; } val = *src++; mx = (val & 0xF) - 8; my = (val >> 4) - 8; for (j = 0; j < 16; j++) BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) = BLK(ctx->prev, l0x + (j & 3) + mx, l0y + (j >> 2) + my); } } else { // descend to 2x2 sub-sub-blocks for (j = 0; j < 4; j++) { l1x = l0x + (j & 1) * 2; l1y = l0y + (j & 2); kmvc_getbit(bb, src, src_end, res); if (!res) { kmvc_getbit(bb, src, src_end, res); if (!res) { // fill whole 2x2 block if (src >= src_end) { av_log(ctx->avctx, AV_LOG_ERROR, \"Data overrun\\n\"); return AVERROR_INVALIDDATA; } val = *src++; BLK(ctx->cur, l1x, l1y) = val; BLK(ctx->cur, l1x + 1, l1y) = val; BLK(ctx->cur, l1x, l1y + 1) = val; BLK(ctx->cur, l1x + 1, l1y + 1) = val; } else { // copy block if (src >= src_end) { av_log(ctx->avctx, AV_LOG_ERROR, \"Data overrun\\n\"); return AVERROR_INVALIDDATA; } val = *src++; mx = (val & 0xF) - 8; my = (val >> 4) - 8; BLK(ctx->cur, l1x, l1y) = BLK(ctx->prev, l1x + mx, l1y + my); BLK(ctx->cur, l1x + 1, l1y) = BLK(ctx->prev, l1x + 1 + mx, l1y + my); BLK(ctx->cur, l1x, l1y + 1) = BLK(ctx->prev, l1x + mx, l1y + 1 + my); BLK(ctx->cur, l1x + 1, l1y + 1) = BLK(ctx->prev, l1x + 1 + mx, l1y + 1 + my); } } else { // read values for block BLK(ctx->cur, l1x, l1y) = *src++; BLK(ctx->cur, l1x + 1, l1y) = *src++; BLK(ctx->cur, l1x, l1y + 1) = *src++; BLK(ctx->cur, l1x + 1, l1y + 1) = *src++; } } } } } } return 0; }"} {"target": 0, "idx": 22822, "func": "static int amr_nb_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AMRContext *s = avctx->priv_data; static const uint8_t 
block_size[16] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0 }; enum Mode dec_mode; int packet_size; av_dlog(avctx, \"amr_decode_frame buf=%p buf_size=%d frame_count=%d!!\\n\", buf, buf_size, avctx->frame_number); dec_mode = (buf[0] >> 3) & 0x000F; packet_size = block_size[dec_mode] + 1; if (packet_size > buf_size) { av_log(avctx, AV_LOG_ERROR, \"amr frame too short (%u, should be %u)\\n\", buf_size, packet_size); return AVERROR_INVALIDDATA; } av_dlog(avctx, \"packet_size=%d buf= 0x%X %X %X %X\\n\", packet_size, buf[0], buf[1], buf[2], buf[3]); /* call decoder */ Decoder_Interface_Decode(s->dec_state, buf, data, 0); *data_size = 160 * 2; return packet_size; }"} {"target": 0, "idx": 22825, "func": "static void avc_h_loop_filter_chroma422_mbaff_msa(uint8_t *src, int32_t stride, int32_t alpha_in, int32_t beta_in, int8_t *tc0) { int32_t col, tc_val; int16_t out0, out1; v16u8 alpha, beta, res; alpha = (v16u8) __msa_fill_b(alpha_in); beta = (v16u8) __msa_fill_b(beta_in); for (col = 0; col < 4; col++) { tc_val = (tc0[col] - 1) + 1; if (tc_val <= 0) { src += 4 * stride; continue; } AVC_LPF_H_2BYTE_CHROMA_422(src, stride, tc_val, alpha, beta, res); out0 = __msa_copy_s_h((v8i16) res, 0); out1 = __msa_copy_s_h((v8i16) res, 1); STORE_HWORD((src - 1), out0); src += stride; STORE_HWORD((src - 1), out1); src += stride; } }"} {"target": 0, "idx": 22828, "func": "static void copy_cell(Indeo3DecodeContext *ctx, Plane *plane, Cell *cell) { int h, w, mv_x, mv_y, offset, offset_dst; uint8_t *src, *dst; /* setup output and reference pointers */ offset_dst = (cell->ypos << 2) * plane->pitch + (cell->xpos << 2); dst = plane->pixels[ctx->buf_sel] + offset_dst; mv_y = cell->mv_ptr[0]; mv_x = cell->mv_ptr[1]; offset = offset_dst + mv_y * plane->pitch + mv_x; src = plane->pixels[ctx->buf_sel ^ 1] + offset; h = cell->height << 2; for (w = cell->width; w > 0;) { /* copy using 16xH blocks */ if (!((cell->xpos << 2) & 15) && w >= 4) { for (; w >= 4; src += 16, dst += 16, w -= 4) ctx->dsp.put_no_rnd_pixels_tab[0][0](dst, src, plane->pitch, h); } /* copy using 8xH blocks */ if (!((cell->xpos << 2) & 7) && w >= 2) { ctx->dsp.put_no_rnd_pixels_tab[1][0](dst, src, plane->pitch, h); w -= 2; src += 8; dst += 8; } if (w >= 1) { ctx->dsp.put_no_rnd_pixels_tab[2][0](dst, src, plane->pitch, h); w--; src += 4; dst += 4; } } }"} {"target": 1, "idx": 22837, "func": "static int tmp105_tx(I2CSlave *i2c, uint8_t data) { TMP105State *s = (TMP105State *) i2c; if (!s->len ++) s->pointer = data; else { if (s->len <= 2) s->buf[s->len - 1] = data; tmp105_write(s); } return 0; }"} {"target": 1, "idx": 22850, "func": "static void flac_lpc_16_c(int32_t *decoded, const int coeffs[32], int pred_order, int qlevel, int len) { int i, j; for (i = pred_order; i < len - 1; i += 2, decoded += 2) { int c = coeffs[0]; int d = decoded[0]; int s0 = 0, s1 = 0; for (j = 1; j < pred_order; j++) { s0 += c*d; d = decoded[j]; s1 += c*d; c = coeffs[j]; } s0 += c*d; d = decoded[j] += s0 >> qlevel; s1 += c*d; decoded[j + 1] += s1 >> qlevel; } if (i < len) { int sum = 0; for (j = 0; j < pred_order; j++) sum += coeffs[j] * decoded[j]; decoded[j] += sum >> qlevel; } }"} {"target": 0, "idx": 22855, "func": "static bool cpu_thread_is_idle(CPUState *cpu) { if (cpu->stop || cpu->queued_work_first) { return false; } if (cpu->stopped || !runstate_is_running()) { return true; } if (!cpu->halted || qemu_cpu_has_work(cpu) || kvm_async_interrupts_enabled()) { return false; } return true; }"} {"target": 0, "idx": 22867, "func": "static inline QEMUClock 
*qemu_clock_ptr(QEMUClockType type) { return &qemu_clocks[type]; }"} {"target": 1, "idx": 22890, "func": "int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb, int first_slice) { MpegEncContext * const s = &h->s; int i, ret; MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp; int mmco_index = 0; if (h->nal_unit_type == NAL_IDR_SLICE){ // FIXME fields s->broken_link = get_bits1(gb) - 1; if (get_bits1(gb)){ mmco[0].opcode = MMCO_LONG; mmco[0].long_arg = 0; mmco_index = 1; } } else { if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag for (i = 0; i < MAX_MMCO_COUNT; i++) { MMCOOpcode opcode = get_ue_golomb_31(gb); mmco[i].opcode = opcode; if (opcode == MMCO_SHORT2UNUSED || opcode == MMCO_SHORT2LONG){ mmco[i].short_pic_num = (h->curr_pic_num - get_ue_golomb(gb) - 1) & (h->max_pic_num - 1); #if 0 if (mmco[i].short_pic_num >= h->short_ref_count || h->short_ref[ mmco[i].short_pic_num ] == NULL){ av_log(s->avctx, AV_LOG_ERROR, \"illegal short ref in memory management control \" \"operation %d\\n\", mmco); return -1; } #endif } if (opcode == MMCO_SHORT2LONG || opcode == MMCO_LONG2UNUSED || opcode == MMCO_LONG || opcode == MMCO_SET_MAX_LONG) { unsigned int long_arg = get_ue_golomb_31(gb); if (long_arg >= 32 || (long_arg >= 16 && !(opcode == MMCO_SET_MAX_LONG && long_arg == 16) && !(opcode == MMCO_LONG2UNUSED && FIELD_PICTURE))){ av_log(h->s.avctx, AV_LOG_ERROR, \"illegal long ref in memory management control \" \"operation %d\\n\", opcode); return -1; } mmco[i].long_arg = long_arg; } if (opcode > (unsigned) MMCO_LONG){ av_log(h->s.avctx, AV_LOG_ERROR, \"illegal memory management control operation %d\\n\", opcode); return -1; } if (opcode == MMCO_END) break; } mmco_index = i; } else { if (first_slice) { ret = ff_generate_sliding_window_mmcos(h, first_slice); if (ret < 0 && s->avctx->err_recognition & AV_EF_EXPLODE) return ret; } mmco_index = -1; } } if (first_slice && mmco_index != -1) { h->mmco_index = mmco_index; } else if (!first_slice && mmco_index >= 0 && (mmco_index != h->mmco_index || (i = check_opcodes(h->mmco, mmco_temp, mmco_index)))) { av_log(h->s.avctx, AV_LOG_ERROR, \"Inconsistent MMCO state between slices [%d, %d, %d]\\n\", mmco_index, h->mmco_index, i); return AVERROR_INVALIDDATA; } return 0; }"} {"target": 1, "idx": 22895, "func": "static int rprobe(AVFormatContext *s, uint8_t *enc_header, const uint8_t *r_val) { OMAContext *oc = s->priv_data; unsigned int pos; struct AVDES av_des; if (!enc_header || !r_val) return -1; /* m_val */ av_des_init(&av_des, r_val, 192, 1); av_des_crypt(&av_des, oc->m_val, &enc_header[48], 1, NULL, 1); /* s_val */ av_des_init(&av_des, oc->m_val, 64, 0); av_des_crypt(&av_des, oc->s_val, NULL, 1, NULL, 0); /* sm_val */ pos = OMA_ENC_HEADER_SIZE + oc->k_size + oc->e_size; av_des_init(&av_des, oc->s_val, 64, 0); av_des_mac(&av_des, oc->sm_val, &enc_header[pos], (oc->i_size >> 3)); pos += oc->i_size; return memcmp(&enc_header[pos], oc->sm_val, 8) ? 
-1 : 0; }"} {"target": 0, "idx": 22902, "func": "static int check_opcodes(MMCO *mmco1, MMCO *mmco2, int n_mmcos) { int i; for (i = 0; i < n_mmcos; i++) { if (mmco1[i].opcode != mmco2[i].opcode) return -1; } return 0; }"} {"target": 0, "idx": 22937, "func": "void ff_put_h264_qpel4_mc03_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_vt_qrt_4w_msa(src - (stride * 2), stride, dst, stride, 4, 1); }"} {"target": 1, "idx": 22954, "func": "static VncJobQueue *vnc_queue_init(void) { VncJobQueue *queue = g_malloc0(sizeof(VncJobQueue)); qemu_cond_init(&queue->cond); qemu_mutex_init(&queue->mutex); QTAILQ_INIT(&queue->jobs); return queue; }"} {"target": 1, "idx": 22957, "func": "void test_tls_write_cert_chain(const char *filename, gnutls_x509_crt_t *certs, size_t ncerts) { size_t i; size_t capacity = 1024, offset = 0; char *buffer = g_new0(char, capacity); int err; for (i = 0; i < ncerts; i++) { size_t len = capacity - offset; retry: err = gnutls_x509_crt_export(certs[i], GNUTLS_X509_FMT_PEM, buffer + offset, &len); if (err < 0) { if (err == GNUTLS_E_SHORT_MEMORY_BUFFER) { buffer = g_renew(char, buffer, offset + len); capacity = offset + len; goto retry; } g_critical(\"Failed to export certificate chain %s: %d\", gnutls_strerror(err), err); abort(); } offset += len; } if (!g_file_set_contents(filename, buffer, offset, NULL)) { abort(); } }"} {"target": 0, "idx": 22958, "func": "static int tak_parse(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { TAKParseContext *t = s->priv_data; ParseContext *pc = &t->pc; int next = END_NOT_FOUND; GetBitContext gb; int consumed = 0; int needed = buf_size ? TAK_MAX_FRAME_HEADER_BYTES : 8; if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) { TAKStreamInfo ti; init_get_bits(&gb, buf, buf_size); if (!ff_tak_decode_frame_header(avctx, &gb, &ti, 127)) s->duration = t->ti.last_frame_samples ? t->ti.last_frame_samples : t->ti.frame_samples; *poutbuf = buf; *poutbuf_size = buf_size; return buf_size; } while (buf_size || t->index + needed <= pc->index) { if (buf_size && t->index + TAK_MAX_FRAME_HEADER_BYTES > pc->index) { int tmp_buf_size = FFMIN(2 * TAK_MAX_FRAME_HEADER_BYTES, buf_size); const uint8_t *tmp_buf = buf; ff_combine_frame(pc, END_NOT_FOUND, &tmp_buf, &tmp_buf_size); consumed += tmp_buf_size; buf += tmp_buf_size; buf_size -= tmp_buf_size; } for (; t->index + needed <= pc->index; t->index++) { if (pc->buffer[ t->index ] == 0xFF && pc->buffer[ t->index + 1 ] == 0xA0) { TAKStreamInfo ti; init_get_bits(&gb, pc->buffer + t->index, 8 * (pc->index - t->index)); if (!ff_tak_decode_frame_header(avctx, &gb, pc->frame_start_found ? &ti : &t->ti, 127) && !ff_tak_check_crc(pc->buffer + t->index, get_bits_count(&gb) / 8)) { if (!pc->frame_start_found) { pc->frame_start_found = 1; s->duration = t->ti.last_frame_samples ? 
t->ti.last_frame_samples : t->ti.frame_samples; } else { pc->frame_start_found = 0; next = t->index - pc->index; t->index = 0; goto found; } } } } } found: if (consumed && !buf_size && next == END_NOT_FOUND || ff_combine_frame(pc, next, &buf, &buf_size) < 0) { *poutbuf = NULL; *poutbuf_size = 0; return buf_size + consumed; } if (next != END_NOT_FOUND) { next += consumed; pc->overread = FFMAX(0, -next); } *poutbuf = buf; *poutbuf_size = buf_size; return next; }"} {"target": 1, "idx": 22978, "func": "static int gif_read_header(AVFormatContext * s1, AVFormatParameters * ap) { GifState *s = s1->priv_data; ByteIOContext *f = s1->pb; AVStream *st; s->f = f; if (gif_read_header1(s) < 0) return -1; /* allocate image buffer */ s->image_linesize = s->screen_width * 3; s->image_buf = av_malloc(s->screen_height * s->image_linesize); if (!s->image_buf) return AVERROR(ENOMEM); s->pix_fmt = PIX_FMT_RGB24; /* now we are ready: build format streams */ st = av_new_stream(s1, 0); if (!st) return -1; st->codec->codec_type = CODEC_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_RAWVIDEO; st->codec->time_base.den = 5; st->codec->time_base.num = 1; /* XXX: check if screen size is always valid */ st->codec->width = s->screen_width; st->codec->height = s->screen_height; st->codec->pix_fmt = PIX_FMT_RGB24; return 0; }"} {"target": 0, "idx": 23011, "func": "int64_t strtosz_suffix_unit(const char *nptr, char **end, const char default_suffix, int64_t unit) { int64_t retval = -1; char *endptr; unsigned char c, d; int mul_required = 0; double val, mul, integral, fraction; errno = 0; val = strtod(nptr, &endptr); if (isnan(val) || endptr == nptr || errno != 0) { goto fail; } fraction = modf(val, &integral); if (fraction != 0) { mul_required = 1; } /* * Any whitespace character is fine for terminating the number, * in addition we accept ',' to handle strings where the size is * part of a multi token argument. */ c = *endptr; d = c; if (qemu_isspace(c) || c == '\\0' || c == ',') { c = 0; d = default_suffix; } switch (qemu_toupper(d)) { case STRTOSZ_DEFSUFFIX_B: mul = 1; if (mul_required) { goto fail; } break; case STRTOSZ_DEFSUFFIX_KB: mul = unit; break; case STRTOSZ_DEFSUFFIX_MB: mul = unit * unit; break; case STRTOSZ_DEFSUFFIX_GB: mul = unit * unit * unit; break; case STRTOSZ_DEFSUFFIX_TB: mul = unit * unit * unit * unit; break; default: goto fail; } /* * If not terminated by whitespace, ',', or \\0, increment endptr * to point to next character, then check that we are terminated * by an appropriate separating character, ie. whitespace, ',', or * \\0. If not, we are seeing trailing garbage, thus fail. 
*/ if (c != 0) { endptr++; if (!qemu_isspace(*endptr) && *endptr != ',' && *endptr != 0) { goto fail; } } if ((val * mul >= INT64_MAX) || val < 0) { goto fail; } retval = val * mul; fail: if (end) { *end = endptr; } return retval; }"} {"target": 0, "idx": 23015, "func": "static void qmp_output_push_obj(QmpOutputVisitor *qov, QObject *value, void *qapi) { QStackEntry *e = g_malloc0(sizeof(*e)); assert(qov->root); assert(value); e->value = value; e->qapi = qapi; QSLIST_INSERT_HEAD(&qov->stack, e, node); }"} {"target": 0, "idx": 23017, "func": "static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p, AVPacket *avpkt) { AVDictionary **metadatap = NULL; uint32_t tag, length; int decode_next_dat = 0; int ret; for (;;) { length = bytestream2_get_bytes_left(&s->gb); if (length <= 0) { if (avctx->codec_id == AV_CODEC_ID_PNG && avctx->skip_frame == AVDISCARD_ALL) { return 0; } if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && length == 0) { if (!(s->pic_state & PNG_IDAT)) return 0; else goto exit_loop; } av_log(avctx, AV_LOG_ERROR, \"%d bytes left\\n\", length); if ( s->pic_state & PNG_ALLIMAGE && avctx->strict_std_compliance <= FF_COMPLIANCE_NORMAL) goto exit_loop; ret = AVERROR_INVALIDDATA; goto fail; } length = bytestream2_get_be32(&s->gb); if (length > 0x7fffffff || length > bytestream2_get_bytes_left(&s->gb)) { av_log(avctx, AV_LOG_ERROR, \"chunk too big\\n\"); ret = AVERROR_INVALIDDATA; goto fail; } tag = bytestream2_get_le32(&s->gb); if (avctx->debug & FF_DEBUG_STARTCODE) av_log(avctx, AV_LOG_DEBUG, \"png: tag=%s length=%u\\n\", av_fourcc2str(tag), length); if (avctx->codec_id == AV_CODEC_ID_PNG && avctx->skip_frame == AVDISCARD_ALL) { switch(tag) { case MKTAG('I', 'H', 'D', 'R'): case MKTAG('p', 'H', 'Y', 's'): case MKTAG('t', 'E', 'X', 't'): case MKTAG('I', 'D', 'A', 'T'): case MKTAG('t', 'R', 'N', 'S'): break; default: goto skip_tag; } } metadatap = &p->metadata; switch (tag) { case MKTAG('I', 'H', 'D', 'R'): if ((ret = decode_ihdr_chunk(avctx, s, length)) < 0) goto fail; break; case MKTAG('p', 'H', 'Y', 's'): if ((ret = decode_phys_chunk(avctx, s)) < 0) goto fail; break; case MKTAG('f', 'c', 'T', 'L'): if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG) goto skip_tag; if ((ret = decode_fctl_chunk(avctx, s, length)) < 0) goto fail; decode_next_dat = 1; break; case MKTAG('f', 'd', 'A', 'T'): if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG) goto skip_tag; if (!decode_next_dat) { ret = AVERROR_INVALIDDATA; goto fail; } bytestream2_get_be32(&s->gb); length -= 4; /* fallthrough */ case MKTAG('I', 'D', 'A', 'T'): if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && !decode_next_dat) goto skip_tag; if ((ret = decode_idat_chunk(avctx, s, length, p)) < 0) goto fail; break; case MKTAG('P', 'L', 'T', 'E'): if (decode_plte_chunk(avctx, s, length) < 0) goto skip_tag; break; case MKTAG('t', 'R', 'N', 'S'): if (decode_trns_chunk(avctx, s, length) < 0) goto skip_tag; break; case MKTAG('t', 'E', 'X', 't'): if (decode_text_chunk(s, length, 0, metadatap) < 0) av_log(avctx, AV_LOG_WARNING, \"Broken tEXt chunk\\n\"); bytestream2_skip(&s->gb, length + 4); break; case MKTAG('z', 'T', 'X', 't'): if (decode_text_chunk(s, length, 1, metadatap) < 0) av_log(avctx, AV_LOG_WARNING, \"Broken zTXt chunk\\n\"); bytestream2_skip(&s->gb, length + 4); break; case MKTAG('s', 'T', 'E', 'R'): { int mode = bytestream2_get_byte(&s->gb); AVStereo3D *stereo3d = av_stereo3d_create_side_data(p); if (!stereo3d) goto fail; if (mode == 0 || mode == 1) { 
stereo3d->type = AV_STEREO3D_SIDEBYSIDE; stereo3d->flags = mode ? 0 : AV_STEREO3D_FLAG_INVERT; } else { av_log(avctx, AV_LOG_WARNING, \"Unknown value in sTER chunk (%d)\\n\", mode); } bytestream2_skip(&s->gb, 4); /* crc */ break; } case MKTAG('i', 'C', 'C', 'P'): { if (decode_iccp_chunk(s, length, p) < 0) goto fail; break; } case MKTAG('I', 'E', 'N', 'D'): if (!(s->pic_state & PNG_ALLIMAGE)) av_log(avctx, AV_LOG_ERROR, \"IEND without all image\\n\"); if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) { ret = AVERROR_INVALIDDATA; goto fail; } bytestream2_skip(&s->gb, 4); /* crc */ goto exit_loop; default: /* skip tag */ skip_tag: bytestream2_skip(&s->gb, length + 4); break; } } exit_loop: if (avctx->codec_id == AV_CODEC_ID_PNG && avctx->skip_frame == AVDISCARD_ALL) { return 0; } if (s->bits_per_pixel <= 4) handle_small_bpp(s, p); /* apply transparency if needed */ if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) { size_t byte_depth = s->bit_depth > 8 ? 2 : 1; size_t raw_bpp = s->bpp - byte_depth; unsigned x, y; av_assert0(s->bit_depth > 1); for (y = 0; y < s->height; ++y) { uint8_t *row = &s->image_buf[s->image_linesize * y]; /* since we're updating in-place, we have to go from right to left */ for (x = s->width; x > 0; --x) { uint8_t *pixel = &row[s->bpp * (x - 1)]; memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp); if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) { memset(&pixel[raw_bpp], 0, byte_depth); } else { memset(&pixel[raw_bpp], 0xff, byte_depth); } } } } /* handle P-frames only if a predecessor frame is available */ if (s->last_picture.f->data[0]) { if ( !(avpkt->flags & AV_PKT_FLAG_KEY) && avctx->codec_tag != AV_RL32(\"MPNG\") && s->last_picture.f->width == p->width && s->last_picture.f->height== p->height && s->last_picture.f->format== p->format ) { if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG) handle_p_frame_png(s, p); else if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && (ret = handle_p_frame_apng(avctx, s, p)) < 0) goto fail; } } ff_thread_report_progress(&s->picture, INT_MAX, 0); ff_thread_report_progress(&s->previous_picture, INT_MAX, 0); return 0; fail: ff_thread_report_progress(&s->picture, INT_MAX, 0); ff_thread_report_progress(&s->previous_picture, INT_MAX, 0); return ret; }"} {"target": 0, "idx": 23019, "func": "static int sync(AVFormatContext *s, int64_t *timestamp, int *flags, int *stream_index, int64_t *pos){ RMDemuxContext *rm = s->priv_data; ByteIOContext *pb = s->pb; int len, num, res, i; AVStream *st; uint32_t state=0xFFFFFFFF; while(!url_feof(pb)){ *pos= url_ftell(pb) - 3; if(rm->remaining_len > 0){ num= rm->current_stream; len= rm->remaining_len; *timestamp = AV_NOPTS_VALUE; *flags= 0; }else{ state= (state<<8) + get_byte(pb); if(state == MKBETAG('I', 'N', 'D', 'X')){ int n_pkts, expected_len; len = get_be32(pb); url_fskip(pb, 2); n_pkts = get_be32(pb); expected_len = 20 + n_pkts * 14; if (len == 20) /* some files don't add index entries to chunk size... 
*/ len = expected_len; else if (len != expected_len) av_log(s, AV_LOG_WARNING, \"Index size %d (%d pkts) is wrong, should be %d.\\n\", len, n_pkts, expected_len); len -= 14; // we already read part of the index header if(len<0) continue; goto skip; } if(state > (unsigned)0xFFFF || state < 12) continue; len=state; state= 0xFFFFFFFF; num = get_be16(pb); *timestamp = get_be32(pb); res= get_byte(pb); /* reserved */ *flags = get_byte(pb); /* flags */ len -= 12; } for(i=0;inb_streams;i++) { st = s->streams[i]; if (num == st->id) break; } if (i == s->nb_streams) { skip: /* skip packet if unknown number */ url_fskip(pb, len); rm->remaining_len = 0; continue; } *stream_index= i; return len; } return -1; }"} {"target": 0, "idx": 23022, "func": "static int config_props(AVFilterLink *link) { UnsharpContext *unsharp = link->dst->priv; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format); unsharp->hsub = desc->log2_chroma_w; unsharp->vsub = desc->log2_chroma_h; init_filter_param(link->dst, &unsharp->luma, \"luma\", link->w); init_filter_param(link->dst, &unsharp->chroma, \"chroma\", SHIFTUP(link->w, unsharp->hsub)); return 0; }"} {"target": 0, "idx": 23027, "func": "static int64_t wav_seek_tag(AVIOContext *s, int64_t offset, int whence) { offset += offset < INT64_MAX && offset & 1; return avio_seek(s, offset, whence); }"} {"target": 1, "idx": 23033, "func": "int qemu_global_option(const char *str) { char driver[64], property[64]; QemuOpts *opts; int rc, offset; rc = sscanf(str, \"%63[^.].%63[^=]%n\", driver, property, &offset); if (rc < 2 || str[offset] != '=') { error_report(\"can't parse: \\\"%s\\\"\", str); return -1; } opts = qemu_opts_create(&qemu_global_opts, NULL, 0); qemu_opt_set(opts, \"driver\", driver); qemu_opt_set(opts, \"property\", property); qemu_opt_set(opts, \"value\", str+offset+1); return 0; }"} {"target": 1, "idx": 23034, "func": "QEMUFile *qemu_fdopen(int fd, const char *mode) { QEMUFileSocket *s; if (mode == NULL || (mode[0] != 'r' && mode[0] != 'w') || mode[1] != 'b' || mode[2] != 0) { fprintf(stderr, \"qemu_fdopen: Argument validity check failed\\n\"); return NULL; } s = g_malloc0(sizeof(QEMUFileSocket)); s->fd = fd; if (mode[0] == 'r') { s->file = qemu_fopen_ops(s, &unix_read_ops); } else { s->file = qemu_fopen_ops(s, &unix_write_ops); } return s->file; }"} {"target": 0, "idx": 23070, "func": "static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu, MSIMessage *origin, MSIMessage *translated) { int ret = 0; VTD_IR_MSIAddress addr; uint16_t index; VTDIrq irq = {}; assert(origin && translated); if (!iommu || !iommu->intr_enabled) { goto do_not_translate; } if (origin->address & VTD_MSI_ADDR_HI_MASK) { VTD_DPRINTF(GENERAL, \"error: MSI addr high 32 bits nonzero\" \" during interrupt remapping: 0x%\"PRIx32, (uint32_t)((origin->address & VTD_MSI_ADDR_HI_MASK) >> \\ VTD_MSI_ADDR_HI_SHIFT)); return -VTD_FR_IR_REQ_RSVD; } addr.data = origin->address & VTD_MSI_ADDR_LO_MASK; if (le16_to_cpu(addr.__head) != 0xfee) { VTD_DPRINTF(GENERAL, \"error: MSI addr low 32 bits invalid: \" \"0x%\"PRIx32, addr.data); return -VTD_FR_IR_REQ_RSVD; } /* This is compatible mode. 
*/ if (addr.int_mode != VTD_IR_INT_FORMAT_REMAP) { goto do_not_translate; } index = addr.index_h << 15 | le16_to_cpu(addr.index_l); #define VTD_IR_MSI_DATA_SUBHANDLE (0x0000ffff) #define VTD_IR_MSI_DATA_RESERVED (0xffff0000) if (addr.sub_valid) { /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */ index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE; } ret = vtd_remap_irq_get(iommu, index, &irq); if (ret) { return ret; } if (addr.sub_valid) { VTD_DPRINTF(IR, \"received MSI interrupt\"); if (origin->data & VTD_IR_MSI_DATA_RESERVED) { VTD_DPRINTF(GENERAL, \"error: MSI data bits non-zero for \" \"interrupt remappable entry: 0x%\"PRIx32, origin->data); return -VTD_FR_IR_REQ_RSVD; } } else { uint8_t vector = origin->data & 0xff; VTD_DPRINTF(IR, \"received IOAPIC interrupt\"); /* IOAPIC entry vector should be aligned with IRTE vector * (see vt-d spec 5.1.5.1). */ if (vector != irq.vector) { VTD_DPRINTF(GENERAL, \"IOAPIC vector inconsistent: \" \"entry: %d, IRTE: %d, index: %d\", vector, irq.vector, index); } } /* * We'd better keep the last two bits, assuming that guest OS * might modify it. Keep it does not hurt after all. */ irq.msi_addr_last_bits = addr.__not_care; /* Translate VTDIrq to MSI message */ vtd_generate_msi_message(&irq, translated); VTD_DPRINTF(IR, \"mapping MSI 0x%\"PRIx64\":0x%\"PRIx32 \" -> \" \"0x%\"PRIx64\":0x%\"PRIx32, origin->address, origin->data, translated->address, translated->data); return 0; do_not_translate: memcpy(translated, origin, sizeof(*origin)); return 0; }"} {"target": 0, "idx": 23071, "func": "static bool vtd_process_inv_desc(IntelIOMMUState *s) { VTDInvDesc inv_desc; uint8_t desc_type; VTD_DPRINTF(INV, \"iq head %\"PRIu16, s->iq_head); if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) { s->iq_last_desc_type = VTD_INV_DESC_NONE; return false; } desc_type = inv_desc.lo & VTD_INV_DESC_TYPE; /* FIXME: should update at first or at last? */ s->iq_last_desc_type = desc_type; switch (desc_type) { case VTD_INV_DESC_CC: VTD_DPRINTF(INV, \"Context-cache Invalidate Descriptor hi 0x%\"PRIx64 \" lo 0x%\"PRIx64, inv_desc.hi, inv_desc.lo); if (!vtd_process_context_cache_desc(s, &inv_desc)) { return false; } break; case VTD_INV_DESC_IOTLB: VTD_DPRINTF(INV, \"IOTLB Invalidate Descriptor hi 0x%\"PRIx64 \" lo 0x%\"PRIx64, inv_desc.hi, inv_desc.lo); if (!vtd_process_iotlb_desc(s, &inv_desc)) { return false; } break; case VTD_INV_DESC_WAIT: VTD_DPRINTF(INV, \"Invalidation Wait Descriptor hi 0x%\"PRIx64 \" lo 0x%\"PRIx64, inv_desc.hi, inv_desc.lo); if (!vtd_process_wait_desc(s, &inv_desc)) { return false; } break; case VTD_INV_DESC_IEC: VTD_DPRINTF(INV, \"Interrupt Entry Cache Invalidation \" \"not implemented yet\"); /* * Since currently we do not cache interrupt entries, we can * just mark this descriptor as \"good\" and move on. 
*/ break; default: VTD_DPRINTF(GENERAL, \"error: unkonw Invalidation Descriptor type \" \"hi 0x%\"PRIx64 \" lo 0x%\"PRIx64 \" type %\"PRIu8, inv_desc.hi, inv_desc.lo, desc_type); return false; } s->iq_head++; if (s->iq_head == s->iq_size) { s->iq_head = 0; } return true; }"} {"target": 0, "idx": 23075, "func": "static bool cmd_read_pio(IDEState *s, uint8_t cmd) { bool lba48 = (cmd == WIN_READ_EXT); if (s->drive_kind == IDE_CD) { ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */ ide_abort_command(s); return true; } if (!s->bs) { ide_abort_command(s); return true; } ide_cmd_lba48_transform(s, lba48); s->req_nb_sectors = 1; ide_sector_read(s); return false; }"} {"target": 1, "idx": 23093, "func": "void os_setup_post(void) { int fd = 0; if (daemonize) { uint8_t status = 0; ssize_t len; do { len = write(daemon_pipe, &status, 1); } while (len < 0 && errno == EINTR); if (len != 1) { exit(1); } if (chdir(\"/\")) { perror(\"not able to chdir to /\"); exit(1); } TFR(fd = qemu_open(\"/dev/null\", O_RDWR)); if (fd == -1) { exit(1); } } change_root(); change_process_uid(); if (daemonize) { dup2(fd, 0); dup2(fd, 1); dup2(fd, 2); close(fd); } }"} {"target": 0, "idx": 23102, "func": "int qcow2_check_metadata_overlap(BlockDriverState *bs, int chk, int64_t offset, int64_t size) { BDRVQcowState *s = bs->opaque; int i, j; if (!size) { return 0; } if (chk & QCOW2_OL_MAIN_HEADER) { if (offset < s->cluster_size) { return QCOW2_OL_MAIN_HEADER; } } /* align range to test to cluster boundaries */ size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size); offset = start_of_cluster(s, offset); if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) { if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) { return QCOW2_OL_ACTIVE_L1; } } if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) { if (overlaps_with(s->refcount_table_offset, s->refcount_table_size * sizeof(uint64_t))) { return QCOW2_OL_REFCOUNT_TABLE; } } if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) { if (overlaps_with(s->snapshots_offset, s->snapshots_size)) { return QCOW2_OL_SNAPSHOT_TABLE; } } if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) { for (i = 0; i < s->nb_snapshots; i++) { if (s->snapshots[i].l1_size && overlaps_with(s->snapshots[i].l1_table_offset, s->snapshots[i].l1_size * sizeof(uint64_t))) { return QCOW2_OL_INACTIVE_L1; } } } if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) { for (i = 0; i < s->l1_size; i++) { if ((s->l1_table[i] & L1E_OFFSET_MASK) && overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK, s->cluster_size)) { return QCOW2_OL_ACTIVE_L2; } } } if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) { for (i = 0; i < s->refcount_table_size; i++) { if ((s->refcount_table[i] & REFT_OFFSET_MASK) && overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK, s->cluster_size)) { return QCOW2_OL_REFCOUNT_BLOCK; } } } if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) { for (i = 0; i < s->nb_snapshots; i++) { uint64_t l1_ofs = s->snapshots[i].l1_table_offset; uint32_t l1_sz = s->snapshots[i].l1_size; uint64_t *l1 = g_malloc(l1_sz * sizeof(uint64_t)); int ret; ret = bdrv_read(bs->file, l1_ofs / BDRV_SECTOR_SIZE, (uint8_t *)l1, l1_sz * sizeof(uint64_t) / BDRV_SECTOR_SIZE); if (ret < 0) { g_free(l1); return ret; } for (j = 0; j < l1_sz; j++) { if ((l1[j] & L1E_OFFSET_MASK) && overlaps_with(l1[j] & L1E_OFFSET_MASK, s->cluster_size)) { g_free(l1); return QCOW2_OL_INACTIVE_L2; } } g_free(l1); } } return 0; }"} {"target": 0, "idx": 23109, "func": "static int cpu_x86_find_by_name(X86CPU *cpu, 
x86_def_t *x86_cpu_def, const char *name) { x86_def_t *def; int i; if (name == NULL) { return -1; } if (kvm_enabled() && strcmp(name, \"host\") == 0) { kvm_cpu_fill_host(x86_cpu_def); object_property_set_bool(OBJECT(cpu), true, \"pmu\", &error_abort); return 0; } for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { def = &builtin_x86_defs[i]; if (strcmp(name, def->name) == 0) { memcpy(x86_cpu_def, def, sizeof(*def)); /* sysenter isn't supported in compatibility mode on AMD, * syscall isn't supported in compatibility mode on Intel. * Normally we advertise the actual CPU vendor, but you can * override this using the 'vendor' property if you want to use * KVM's sysenter/syscall emulation in compatibility mode and * when doing cross vendor migration */ if (kvm_enabled()) { uint32_t ebx = 0, ecx = 0, edx = 0; host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); x86_cpu_vendor_words2str(x86_cpu_def->vendor, ebx, edx, ecx); } return 0; } } return -1; }"} {"target": 0, "idx": 23112, "func": "static void put_pixels_y2_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h) { #if 0 UINT8 *p; const UINT8 *pix; p = block; pix = pixels; MOVQ_ZERO(mm7); MOVQ_WONE(mm4); JUMPALIGN(); do { __asm __volatile( \"movq %1, %%mm0\\n\\t\" \"movq %2, %%mm1\\n\\t\" \"movq %%mm0, %%mm2\\n\\t\" \"movq %%mm1, %%mm3\\n\\t\" \"punpcklbw %%mm7, %%mm0\\n\\t\" \"punpcklbw %%mm7, %%mm1\\n\\t\" \"punpckhbw %%mm7, %%mm2\\n\\t\" \"punpckhbw %%mm7, %%mm3\\n\\t\" \"paddusw %%mm1, %%mm0\\n\\t\" \"paddusw %%mm3, %%mm2\\n\\t\" \"paddusw %%mm4, %%mm0\\n\\t\" \"paddusw %%mm4, %%mm2\\n\\t\" \"psrlw $1, %%mm0\\n\\t\" \"psrlw $1, %%mm2\\n\\t\" \"packuswb %%mm2, %%mm0\\n\\t\" \"movq %%mm0, %0\\n\\t\" :\"=m\"(*p) :\"m\"(*pix), \"m\"(*(pix+line_size)) :\"memory\"); pix += line_size; p += line_size; } while (--h); #else __asm __volatile( MOVQ_BFE(%%mm7) \"lea (%3, %3), %%eax \\n\\t\" \"movq (%1), %%mm0 \\n\\t\" \".balign 8 \\n\\t\" \"1: \\n\\t\" \"movq (%1, %3), %%mm1 \\n\\t\" \"movq (%1, %%eax),%%mm2 \\n\\t\" PAVG_MMX(%%mm1, %%mm0) \"movq %%mm6, (%2) \\n\\t\" PAVG_MMX(%%mm2, %%mm1) \"movq %%mm6, (%2, %3) \\n\\t\" \"addl %%eax, %1 \\n\\t\" \"addl %%eax, %2 \\n\\t\" #ifdef LONG_UNROLL \"movq (%1, %3), %%mm1 \\n\\t\" \"movq (%1, %%eax),%%mm0 \\n\\t\" PAVG_MMX(%%mm1, %%mm2) \"movq %%mm6, (%2) \\n\\t\" PAVG_MMX(%%mm0, %%mm1) \"movq %%mm6, (%2, %3) \\n\\t\" \"addl %%eax, %1 \\n\\t\" \"addl %%eax, %2 \\n\\t\" \"subl $4, %0 \\n\\t\" #else \"subl $2, %0 \\n\\t\" #endif \"jnz 1b \\n\\t\" :\"+g\"(h), \"+S\"(pixels), \"+D\"(block) :\"r\"(line_size) :\"eax\", \"memory\"); #endif }"} {"target": 0, "idx": 23135, "func": "static int pty_chr_write(CharDriverState *chr, const uint8_t *buf, int len) { PtyCharDriver *s = chr->opaque; if (!s->connected) { /* guest sends data, check for (re-)connect */ pty_chr_update_read_handler(chr); return 0; } return io_channel_send(s->fd, buf, len); }"} {"target": 0, "idx": 23146, "func": "static void gen_rlwimi(DisasContext *ctx) { uint32_t mb, me, sh; mb = MB(ctx->opcode); me = ME(ctx->opcode); sh = SH(ctx->opcode); if (likely(sh == (31-me) && mb <= me)) { tcg_gen_deposit_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh, me - mb + 1); } else { target_ulong mask; TCGv t1; TCGv t0 = tcg_temp_new(); #if defined(TARGET_PPC64) tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 32, 32); tcg_gen_rotli_i64(t0, t0, sh); #else tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh); #endif #if defined(TARGET_PPC64) mb += 32; me += 32; #endif mask = MASK(mb, me); t1 = 
tcg_temp_new(); tcg_gen_andi_tl(t0, t0, mask); tcg_gen_andi_tl(t1, cpu_gpr[rA(ctx->opcode)], ~mask); tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_temp_free(t0); tcg_temp_free(t1); } if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); }"} {"target": 0, "idx": 23156, "func": "static int av_seek_frame_generic(AVFormatContext *s, int stream_index, int64_t timestamp) { int index; AVStream *st; AVIndexEntry *ie; if (!s->index_built) { if (is_raw_stream(s)) { av_build_index_raw(s); } else { return -1; } s->index_built = 1; } if (stream_index < 0) stream_index = 0; st = s->streams[stream_index]; index = index_search_timestamp(st->index_entries, st->nb_index_entries, timestamp); if (index < 0) return -1; /* now we have found the index, we can seek */ ie = &st->index_entries[index]; av_read_frame_flush(s); url_fseek(&s->pb, ie->pos, SEEK_SET); st->cur_dts = ie->timestamp; return 0; }"} {"target": 1, "idx": 23160, "func": "static int mxf_read_index_table_segment(void *arg, AVIOContext *pb, int tag, int size, UID uid) { MXFIndexTableSegment *segment = arg; switch(tag) { case 0x3F05: segment->edit_unit_byte_count = avio_rb32(pb); av_dlog(NULL, \"EditUnitByteCount %d\\n\", segment->edit_unit_byte_count); break; case 0x3F06: segment->index_sid = avio_rb32(pb); av_dlog(NULL, \"IndexSID %d\\n\", segment->index_sid); break; case 0x3F07: segment->body_sid = avio_rb32(pb); av_dlog(NULL, \"BodySID %d\\n\", segment->body_sid); break; case 0x3F08: segment->slice_count = avio_r8(pb); av_dlog(NULL, \"SliceCount %d\\n\", segment->slice_count); break; case 0x3F09: av_dlog(NULL, \"DeltaEntryArray found\\n\"); return mxf_read_delta_entry_array(pb, segment); case 0x3F0A: av_dlog(NULL, \"IndexEntryArray found\\n\"); return mxf_read_index_entry_array(pb, segment); case 0x3F0B: segment->index_edit_rate.num = avio_rb32(pb); segment->index_edit_rate.den = avio_rb32(pb); av_dlog(NULL, \"IndexEditRate %d/%d\\n\", segment->index_edit_rate.num, segment->index_edit_rate.den); break; case 0x3F0C: segment->index_start_position = avio_rb64(pb); av_dlog(NULL, \"IndexStartPosition %\"PRId64\"\\n\", segment->index_start_position); break; case 0x3F0D: segment->index_duration = avio_rb64(pb); av_dlog(NULL, \"IndexDuration %\"PRId64\"\\n\", segment->index_duration); break; } return 0; }"} {"target": 0, "idx": 23167, "func": "static ssize_t test_block_write_func(QCryptoBlock *block, void *opaque, size_t offset, const uint8_t *buf, size_t buflen, Error **errp) { Buffer *header = opaque; g_assert_cmpint(buflen + offset, <=, header->capacity); memcpy(header->buffer + offset, buf, buflen); header->offset = offset + buflen; return buflen; }"} {"target": 0, "idx": 23173, "func": "static ssize_t block_crypto_read_func(QCryptoBlock *block, size_t offset, uint8_t *buf, size_t buflen, Error **errp, void *opaque) { BlockDriverState *bs = opaque; ssize_t ret; ret = bdrv_pread(bs->file, offset, buf, buflen); if (ret < 0) { error_setg_errno(errp, -ret, \"Could not read encryption header\"); return ret; } return ret; }"} {"target": 1, "idx": 23187, "func": "static int mov_write_video_tag(AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track) { int64_t pos = avio_tell(pb); char compressor_name[32] = { 0 }; avio_wb32(pb, 0); /* size */ avio_wl32(pb, track->tag); // store it byteswapped avio_wb32(pb, 0); /* Reserved */ avio_wb16(pb, 0); /* Reserved */ avio_wb16(pb, 1); /* Data-reference index */ avio_wb16(pb, 0); /* Codec stream version */ avio_wb16(pb, 0); /* Codec stream revision (=0) */ if (track->mode == MODE_MOV) { 
ffio_wfourcc(pb, \"FFMP\"); /* Vendor */ if (track->enc->codec_id == AV_CODEC_ID_RAWVIDEO) { avio_wb32(pb, 0); /* Temporal Quality */ avio_wb32(pb, 0x400); /* Spatial Quality = lossless*/ } else { avio_wb32(pb, 0x200); /* Temporal Quality = normal */ avio_wb32(pb, 0x200); /* Spatial Quality = normal */ } } else { avio_wb32(pb, 0); /* Reserved */ avio_wb32(pb, 0); /* Reserved */ avio_wb32(pb, 0); /* Reserved */ } avio_wb16(pb, track->enc->width); /* Video width */ avio_wb16(pb, track->height); /* Video height */ avio_wb32(pb, 0x00480000); /* Horizontal resolution 72dpi */ avio_wb32(pb, 0x00480000); /* Vertical resolution 72dpi */ avio_wb32(pb, 0); /* Data size (= 0) */ avio_wb16(pb, 1); /* Frame count (= 1) */ /* FIXME not sure, ISO 14496-1 draft where it shall be set to 0 */ find_compressor(compressor_name, 32, track); avio_w8(pb, strlen(compressor_name)); avio_write(pb, compressor_name, 31); if (track->mode == MODE_MOV && track->enc->bits_per_coded_sample) avio_wb16(pb, track->enc->bits_per_coded_sample); else avio_wb16(pb, 0x18); /* Reserved */ avio_wb16(pb, 0xffff); /* Reserved */ if (track->tag == MKTAG('m','p','4','v')) mov_write_esds_tag(pb, track); else if (track->enc->codec_id == AV_CODEC_ID_H263) mov_write_d263_tag(pb); else if (track->enc->codec_id == AV_CODEC_ID_AVUI || track->enc->codec_id == AV_CODEC_ID_SVQ3) { mov_write_extradata_tag(pb, track); avio_wb32(pb, 0); } else if (track->enc->codec_id == AV_CODEC_ID_DNXHD) mov_write_avid_tag(pb, track); else if (track->enc->codec_id == AV_CODEC_ID_HEVC) mov_write_hvcc_tag(pb, track); else if (track->enc->codec_id == AV_CODEC_ID_H264 && !TAG_IS_AVCI(track->tag)) { mov_write_avcc_tag(pb, track); if (track->mode == MODE_IPOD) mov_write_uuid_tag_ipod(pb); } else if (track->enc->codec_id == AV_CODEC_ID_VC1 && track->vos_len > 0) mov_write_dvc1_tag(pb, track); else if (track->enc->codec_id == AV_CODEC_ID_VP6F || track->enc->codec_id == AV_CODEC_ID_VP6A) { /* Don't write any potential extradata here - the cropping * is signalled via the normal width/height fields. */ } else if (track->enc->codec_id == AV_CODEC_ID_R10K) { if (track->enc->codec_tag == MKTAG('R','1','0','k')) mov_write_dpxe_tag(pb, track); } else if (track->vos_len > 0) mov_write_glbl_tag(pb, track); if (track->enc->codec_id != AV_CODEC_ID_H264 && track->enc->codec_id != AV_CODEC_ID_MPEG4 && track->enc->codec_id != AV_CODEC_ID_DNXHD) if (track->enc->field_order != AV_FIELD_UNKNOWN) mov_write_fiel_tag(pb, track); if (mov->flags & FF_MOV_FLAG_WRITE_COLR) mov_write_colr_tag(pb, track); if (track->enc->sample_aspect_ratio.den && track->enc->sample_aspect_ratio.num && track->enc->sample_aspect_ratio.den != track->enc->sample_aspect_ratio.num) { mov_write_pasp_tag(pb, track); } return update_size(pb, pos); }"} {"target": 1, "idx": 23204, "func": "int load_multiboot(FWCfgState *fw_cfg, FILE *f, const char *kernel_filename, const char *initrd_filename, const char *kernel_cmdline, int kernel_file_size, uint8_t *header) { int i, is_multiboot = 0; uint32_t flags = 0; uint32_t mh_entry_addr; uint32_t mh_load_addr; uint32_t mb_kernel_size; MultibootState mbs; uint8_t bootinfo[MBI_SIZE]; uint8_t *mb_bootinfo_data; uint32_t cmdline_len; /* Ok, let's see if it is a multiboot image. The header is 12x32bit long, so the latest entry may be 8192 - 48. 
*/ for (i = 0; i < (8192 - 48); i += 4) { if (ldl_p(header+i) == 0x1BADB002) { uint32_t checksum = ldl_p(header+i+8); flags = ldl_p(header+i+4); checksum += flags; checksum += (uint32_t)0x1BADB002; if (!checksum) { is_multiboot = 1; break; if (!is_multiboot) return 0; /* no multiboot */ mb_debug(\"qemu: I believe we found a multiboot image!\\n\"); memset(bootinfo, 0, sizeof(bootinfo)); memset(&mbs, 0, sizeof(mbs)); if (flags & 0x00000004) { /* MULTIBOOT_HEADER_HAS_VBE */ fprintf(stderr, \"qemu: multiboot knows VBE. we don't.\\n\"); if (!(flags & 0x00010000)) { /* MULTIBOOT_HEADER_HAS_ADDR */ uint64_t elf_entry; uint64_t elf_low, elf_high; int kernel_size; fclose(f); if (((struct elf64_hdr*)header)->e_machine == EM_X86_64) { fprintf(stderr, \"Cannot load x86-64 image, give a 32bit one.\\n\"); kernel_size = load_elf(kernel_filename, NULL, NULL, &elf_entry, &elf_low, &elf_high, 0, I386_ELF_MACHINE, 0, 0); if (kernel_size < 0) { fprintf(stderr, \"Error while loading elf kernel\\n\"); mh_load_addr = elf_low; mb_kernel_size = elf_high - elf_low; mh_entry_addr = elf_entry; mbs.mb_buf = g_malloc(mb_kernel_size); if (rom_copy(mbs.mb_buf, mh_load_addr, mb_kernel_size) != mb_kernel_size) { fprintf(stderr, \"Error while fetching elf kernel from rom\\n\"); mb_debug(\"qemu: loading multiboot-elf kernel (%#x bytes) with entry %#zx\\n\", mb_kernel_size, (size_t)mh_entry_addr); } else { /* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_ADDR. */ uint32_t mh_header_addr = ldl_p(header+i+12); uint32_t mh_load_end_addr = ldl_p(header+i+20); uint32_t mh_bss_end_addr = ldl_p(header+i+24); mh_load_addr = ldl_p(header+i+16); uint32_t mb_kernel_text_offset = i - (mh_header_addr - mh_load_addr); uint32_t mb_load_size = 0; mh_entry_addr = ldl_p(header+i+28); if (mh_load_end_addr) { mb_kernel_size = mh_bss_end_addr - mh_load_addr; mb_load_size = mh_load_end_addr - mh_load_addr; } else { mb_kernel_size = kernel_file_size - mb_kernel_text_offset; mb_load_size = mb_kernel_size; /* Valid if mh_flags sets MULTIBOOT_HEADER_HAS_VBE. 
uint32_t mh_mode_type = ldl_p(header+i+32); uint32_t mh_width = ldl_p(header+i+36); uint32_t mh_height = ldl_p(header+i+40); uint32_t mh_depth = ldl_p(header+i+44); */ mb_debug(\"multiboot: mh_header_addr = %#x\\n\", mh_header_addr); mb_debug(\"multiboot: mh_load_addr = %#x\\n\", mh_load_addr); mb_debug(\"multiboot: mh_load_end_addr = %#x\\n\", mh_load_end_addr); mb_debug(\"multiboot: mh_bss_end_addr = %#x\\n\", mh_bss_end_addr); mb_debug(\"qemu: loading multiboot kernel (%#x bytes) at %#x\\n\", mb_load_size, mh_load_addr); mbs.mb_buf = g_malloc(mb_kernel_size); fseek(f, mb_kernel_text_offset, SEEK_SET); if (fread(mbs.mb_buf, 1, mb_load_size, f) != mb_load_size) { fprintf(stderr, \"fread() failed\\n\"); memset(mbs.mb_buf + mb_load_size, 0, mb_kernel_size - mb_load_size); fclose(f); mbs.mb_buf_phys = mh_load_addr; mbs.mb_buf_size = TARGET_PAGE_ALIGN(mb_kernel_size); mbs.offset_mbinfo = mbs.mb_buf_size; /* Calculate space for cmdlines, bootloader name, and mb_mods */ cmdline_len = strlen(kernel_filename) + 1; cmdline_len += strlen(kernel_cmdline) + 1; if (initrd_filename) { const char *r = initrd_filename; cmdline_len += strlen(r) + 1; mbs.mb_mods_avail = 1; while (*(r = get_opt_value(NULL, 0, r))) { mbs.mb_mods_avail++; r++; mbs.mb_buf_size += cmdline_len; mbs.mb_buf_size += MB_MOD_SIZE * mbs.mb_mods_avail; mbs.mb_buf_size += strlen(bootloader_name) + 1; mbs.mb_buf_size = TARGET_PAGE_ALIGN(mbs.mb_buf_size); /* enlarge mb_buf to hold cmdlines, bootloader, mb-info structs */ mbs.mb_buf = g_realloc(mbs.mb_buf, mbs.mb_buf_size); mbs.offset_cmdlines = mbs.offset_mbinfo + mbs.mb_mods_avail * MB_MOD_SIZE; mbs.offset_bootloader = mbs.offset_cmdlines + cmdline_len; if (initrd_filename) { const char *next_initrd; char not_last, tmpbuf[strlen(initrd_filename) + 1]; mbs.offset_mods = mbs.mb_buf_size; do { char *next_space; int mb_mod_length; uint32_t offs = mbs.mb_buf_size; next_initrd = get_opt_value(tmpbuf, sizeof(tmpbuf), initrd_filename); not_last = *next_initrd; /* if a space comes after the module filename, treat everything after that as parameters */ hwaddr c = mb_add_cmdline(&mbs, tmpbuf); if ((next_space = strchr(tmpbuf, ' '))) *next_space = '\\0'; mb_debug(\"multiboot loading module: %s\\n\", tmpbuf); mb_mod_length = get_image_size(tmpbuf); if (mb_mod_length < 0) { fprintf(stderr, \"Failed to open file '%s'\\n\", tmpbuf); mbs.mb_buf_size = TARGET_PAGE_ALIGN(mb_mod_length + mbs.mb_buf_size); mbs.mb_buf = g_realloc(mbs.mb_buf, mbs.mb_buf_size); load_image(tmpbuf, (unsigned char *)mbs.mb_buf + offs); mb_add_mod(&mbs, mbs.mb_buf_phys + offs, mbs.mb_buf_phys + offs + mb_mod_length, c); mb_debug(\"mod_start: %p\\nmod_end: %p\\n cmdline: \"TARGET_FMT_plx\"\\n\", (char *)mbs.mb_buf + offs, (char *)mbs.mb_buf + offs + mb_mod_length, c); initrd_filename = next_initrd+1; } while (not_last); /* Commandline support */ char kcmdline[strlen(kernel_filename) + strlen(kernel_cmdline) + 2]; snprintf(kcmdline, sizeof(kcmdline), \"%s %s\", kernel_filename, kernel_cmdline); stl_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline)); stl_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs, bootloader_name)); stl_p(bootinfo + MBI_MODS_ADDR, mbs.mb_buf_phys + mbs.offset_mbinfo); stl_p(bootinfo + MBI_MODS_COUNT, mbs.mb_mods_count); /* mods_count */ /* the kernel is where we want it to be now */ stl_p(bootinfo + MBI_FLAGS, MULTIBOOT_FLAGS_MEMORY | MULTIBOOT_FLAGS_BOOT_DEVICE | MULTIBOOT_FLAGS_CMDLINE | MULTIBOOT_FLAGS_MODULES | MULTIBOOT_FLAGS_MMAP | MULTIBOOT_FLAGS_BOOTLOADER); stl_p(bootinfo + MBI_BOOT_DEVICE, 
0x8000ffff); /* XXX: use the -boot switch? */ stl_p(bootinfo + MBI_MMAP_ADDR, ADDR_E820_MAP); mb_debug(\"multiboot: mh_entry_addr = %#x\\n\", mh_entry_addr); mb_debug(\" mb_buf_phys = \"TARGET_FMT_plx\"\\n\", mbs.mb_buf_phys); mb_debug(\" mod_start = \"TARGET_FMT_plx\"\\n\", mbs.mb_buf_phys + mbs.offset_mods); mb_debug(\" mb_mods_count = %d\\n\", mbs.mb_mods_count); /* save bootinfo off the stack */ mb_bootinfo_data = g_memdup(bootinfo, sizeof(bootinfo)); /* Pass variables to option rom */ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ENTRY, mh_entry_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, mh_load_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, mbs.mb_buf_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, mbs.mb_buf, mbs.mb_buf_size); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, ADDR_MBI); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, sizeof(bootinfo)); fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, mb_bootinfo_data, sizeof(bootinfo)); option_rom[nb_option_roms].name = \"multiboot.bin\"; option_rom[nb_option_roms].bootindex = 0; nb_option_roms++; return 1; /* yes, we are multiboot */"} {"target": 0, "idx": 23227, "func": "static void lm32_cpu_reset(CPUState *s) { LM32CPU *cpu = LM32_CPU(s); LM32CPUClass *lcc = LM32_CPU_GET_CLASS(cpu); CPULM32State *env = &cpu->env; if (qemu_loglevel_mask(CPU_LOG_RESET)) { qemu_log(\"CPU Reset (CPU %d)\\n\", s->cpu_index); log_cpu_state(env, 0); } lcc->parent_reset(s); tlb_flush(env, 1); /* reset cpu state */ memset(env, 0, offsetof(CPULM32State, breakpoints)); }"} {"target": 0, "idx": 23249, "func": "static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { return 0; }"} {"target": 0, "idx": 23251, "func": "D(float, sse) D(float, avx) D(int16, mmx) D(int16, sse2) av_cold int swri_rematrix_init_x86(struct SwrContext *s){ #if HAVE_YASM int mm_flags = av_get_cpu_flags(); int nb_in = av_get_channel_layout_nb_channels(s->in_ch_layout); int nb_out = av_get_channel_layout_nb_channels(s->out_ch_layout); int num = nb_in * nb_out; int i,j; s->mix_1_1_simd = NULL; s->mix_2_1_simd = NULL; if (s->midbuf.fmt == AV_SAMPLE_FMT_S16P){ if(EXTERNAL_MMX(mm_flags)) { s->mix_1_1_simd = ff_mix_1_1_a_int16_mmx; s->mix_2_1_simd = ff_mix_2_1_a_int16_mmx; } if(EXTERNAL_SSE2(mm_flags)) { s->mix_1_1_simd = ff_mix_1_1_a_int16_sse2; s->mix_2_1_simd = ff_mix_2_1_a_int16_sse2; } s->native_simd_matrix = av_mallocz_array(num, 2 * sizeof(int16_t)); s->native_simd_one = av_mallocz(2 * sizeof(int16_t)); if (!s->native_simd_matrix || !s->native_simd_one) return AVERROR(ENOMEM); for(i=0; inative_matrix)[i * nb_in + j])); sh = FFMAX(av_log2(sh) - 14, 0); for(j=0; jnative_simd_matrix)[2*(i * nb_in + j)+1] = 15 - sh; ((int16_t*)s->native_simd_matrix)[2*(i * nb_in + j)] = ((((int*)s->native_matrix)[i * nb_in + j]) + (1<>1)) >> sh; } } ((int16_t*)s->native_simd_one)[1] = 14; ((int16_t*)s->native_simd_one)[0] = 16384; } else if(s->midbuf.fmt == AV_SAMPLE_FMT_FLTP){ if(EXTERNAL_SSE(mm_flags)) { s->mix_1_1_simd = ff_mix_1_1_a_float_sse; s->mix_2_1_simd = ff_mix_2_1_a_float_sse; } if(EXTERNAL_AVX(mm_flags)) { s->mix_1_1_simd = ff_mix_1_1_a_float_avx; s->mix_2_1_simd = ff_mix_2_1_a_float_avx; } s->native_simd_matrix = av_mallocz_array(num, sizeof(float)); s->native_simd_one = av_mallocz(sizeof(float)); if (!s->native_simd_matrix || !s->native_simd_one) return AVERROR(ENOMEM); memcpy(s->native_simd_matrix, s->native_matrix, num * sizeof(float)); memcpy(s->native_simd_one, s->native_one, sizeof(float)); } #endif return 0; }"} {"target": 0, "idx": 23260, "func": "static int 
av_transcode(AVFormatContext **output_files, int nb_output_files, AVFormatContext **input_files, int nb_input_files, AVStreamMap *stream_maps, int nb_stream_maps) { int ret = 0, i, j, k, n, nb_istreams = 0, nb_ostreams = 0; AVFormatContext *is, *os; AVCodecContext *codec, *icodec; AVOutputStream *ost, **ost_table = NULL; AVInputStream *ist, **ist_table = NULL; AVInputFile *file_table; char error[1024]; int key; int want_sdp = 1; uint8_t no_packet[MAX_FILES]={0}; int no_packet_count=0; file_table= av_mallocz(nb_input_files * sizeof(AVInputFile)); if (!file_table) goto fail; /* input stream init */ j = 0; for(i=0;inb_streams; j += is->nb_streams; } nb_istreams = j; ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *)); if (!ist_table) goto fail; for(i=0;inb_streams;k++) { ist = ist_table[j++]; ist->st = is->streams[k]; ist->file_index = i; ist->index = k; ist->discard = 1; /* the stream is discarded by default (changed later) */ if (rate_emu) { ist->start = av_gettime(); } } } /* output stream init */ nb_ostreams = 0; for(i=0;inb_streams) { dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, \"Output file #%d does not contain any stream\\n\", i); av_exit(1); } nb_ostreams += os->nb_streams; } if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) { fprintf(stderr, \"Number of stream maps must match number of output streams\\n\"); av_exit(1); } /* Sanity check the mapping args -- do the input files & streams exist? */ for(i=0;i nb_input_files - 1 || si < 0 || si > file_table[fi].nb_streams - 1) { fprintf(stderr,\"Could not find input stream #%d.%d\\n\", fi, si); av_exit(1); } fi = stream_maps[i].sync_file_index; si = stream_maps[i].sync_stream_index; if (fi < 0 || fi > nb_input_files - 1 || si < 0 || si > file_table[fi].nb_streams - 1) { fprintf(stderr,\"Could not find sync stream #%d.%d\\n\", fi, si); av_exit(1); } } ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams); if (!ost_table) goto fail; for(i=0;inb_streams;i++,n++) { int found; ost = ost_table[n]; ost->file_index = k; ost->index = i; ost->st = os->streams[i]; if (nb_stream_maps > 0) { ost->source_index = file_table[stream_maps[n].file_index].ist_index + stream_maps[n].stream_index; /* Sanity check that the stream types match */ if (ist_table[ost->source_index]->st->codec->codec_type != ost->st->codec->codec_type) { int i= ost->file_index; dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, \"Codec type mismatch for mapping #%d.%d -> #%d.%d\\n\", stream_maps[n].file_index, stream_maps[n].stream_index, ost->file_index, ost->index); av_exit(1); } } else { int best_nb_frames=-1; /* get corresponding input stream index : we select the first one with the right type */ found = 0; for(j=0;jfile_index ]; skip=1; for(pi=0; pinb_programs; pi++){ AVProgram *p= f->programs[pi]; if(p->id == opt_programid) for(si=0; sinb_stream_indexes; si++){ if(f->streams[ p->stream_index[si] ] == ist->st) skip=0; } } } if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && ist->st->codec->codec_type == ost->st->codec->codec_type) { if(best_nb_frames < ist->st->codec_info_nb_frames){ best_nb_frames= ist->st->codec_info_nb_frames; ost->source_index = j; found = 1; } } } if (!found) { if(! 
opt_programid) { /* try again and reuse existing stream */ for(j=0;jst->codec->codec_type == ost->st->codec->codec_type && ist->st->discard != AVDISCARD_ALL) { ost->source_index = j; found = 1; } } } if (!found) { int i= ost->file_index; dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, \"Could not find input stream matching output stream #%d.%d\\n\", ost->file_index, ost->index); av_exit(1); } } } ist = ist_table[ost->source_index]; ist->discard = 0; ost->sync_ist = (nb_stream_maps > 0) ? ist_table[file_table[stream_maps[n].sync_file_index].ist_index + stream_maps[n].sync_stream_index] : ist; } } /* for each output stream, we compute the right encoding parameters */ for(i=0;ifile_index]; ist = ist_table[ost->source_index]; codec = ost->st->codec; icodec = ist->st->codec; while ((t = av_metadata_get(ist->st->metadata, \"\", t, AV_METADATA_IGNORE_SUFFIX))) { av_metadata_set2(&ost->st->metadata, t->key, t->value, AV_METADATA_DONT_OVERWRITE); } ost->st->disposition = ist->st->disposition; codec->bits_per_raw_sample= icodec->bits_per_raw_sample; codec->chroma_sample_location = icodec->chroma_sample_location; if (ost->st->stream_copy) { /* if stream_copy is selected, no need to decode or encode */ codec->codec_id = icodec->codec_id; codec->codec_type = icodec->codec_type; if(!codec->codec_tag){ if( !os->oformat->codec_tag || av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id || av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0) codec->codec_tag = icodec->codec_tag; } codec->bit_rate = icodec->bit_rate; codec->extradata= icodec->extradata; codec->extradata_size= icodec->extradata_size; if(av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/1000){ codec->time_base = icodec->time_base; codec->time_base.num *= icodec->ticks_per_frame; }else codec->time_base = ist->st->time_base; switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if(audio_volume != 256) { fprintf(stderr,\"-acodec copy and -vol are incompatible (frames are not decoded)\\n\"); av_exit(1); } codec->channel_layout = icodec->channel_layout; codec->sample_rate = icodec->sample_rate; codec->channels = icodec->channels; codec->frame_size = icodec->frame_size; codec->block_align= icodec->block_align; if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3) codec->block_align= 0; if(codec->codec_id == CODEC_ID_AC3) codec->block_align= 0; break; case AVMEDIA_TYPE_VIDEO: codec->pix_fmt = icodec->pix_fmt; codec->width = icodec->width; codec->height = icodec->height; codec->has_b_frames = icodec->has_b_frames; break; case AVMEDIA_TYPE_SUBTITLE: codec->width = icodec->width; codec->height = icodec->height; break; default: abort(); } } else { switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ost->fifo= av_fifo_alloc(1024); if(!ost->fifo) goto fail; ost->reformat_pair = MAKE_SFMT_PAIR(SAMPLE_FMT_NONE,SAMPLE_FMT_NONE); ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1; icodec->request_channels = codec->channels; ist->decoding_needed = 1; ost->encoding_needed = 1; break; case AVMEDIA_TYPE_VIDEO: if (ost->st->codec->pix_fmt == PIX_FMT_NONE) { fprintf(stderr, \"Video pixel format is unknown, stream cannot be encoded\\n\"); av_exit(1); } ost->video_crop = ((frame_leftBand + frame_rightBand + frame_topBand + frame_bottomBand) != 0); ost->video_pad = ((frame_padleft + frame_padright + frame_padtop + frame_padbottom) != 0); ost->video_resample = ((codec->width != icodec->width 
- (frame_leftBand + frame_rightBand) + (frame_padleft + frame_padright)) || (codec->height != icodec->height - (frame_topBand + frame_bottomBand) + (frame_padtop + frame_padbottom)) || (codec->pix_fmt != icodec->pix_fmt)); if (ost->video_crop) { ost->topBand = ost->original_topBand = frame_topBand; ost->bottomBand = ost->original_bottomBand = frame_bottomBand; ost->leftBand = ost->original_leftBand = frame_leftBand; ost->rightBand = ost->original_rightBand = frame_rightBand; } if (ost->video_pad) { ost->padtop = frame_padtop; ost->padleft = frame_padleft; ost->padbottom = frame_padbottom; ost->padright = frame_padright; if (!ost->video_resample) { avcodec_get_frame_defaults(&ost->pict_tmp); if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt, codec->width, codec->height)) goto fail; } } if (ost->video_resample) { avcodec_get_frame_defaults(&ost->pict_tmp); if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt, codec->width, codec->height)) { fprintf(stderr, \"Cannot allocate temp picture, check pix fmt\\n\"); av_exit(1); } sws_flags = av_get_int(sws_opts, \"sws_flags\", NULL); ost->img_resample_ctx = sws_getContext( icodec->width - (frame_leftBand + frame_rightBand), icodec->height - (frame_topBand + frame_bottomBand), icodec->pix_fmt, codec->width - (frame_padleft + frame_padright), codec->height - (frame_padtop + frame_padbottom), codec->pix_fmt, sws_flags, NULL, NULL, NULL); if (ost->img_resample_ctx == NULL) { fprintf(stderr, \"Cannot get resampling context\\n\"); av_exit(1); } #if !CONFIG_AVFILTER ost->original_height = icodec->height; ost->original_width = icodec->width; #endif codec->bits_per_raw_sample= 0; } ost->resample_height = icodec->height - (frame_topBand + frame_bottomBand); ost->resample_width = icodec->width - (frame_leftBand + frame_rightBand); ost->resample_pix_fmt= icodec->pix_fmt; ost->encoding_needed = 1; ist->decoding_needed = 1; #if CONFIG_AVFILTER if (configure_filters(ist, ost)) { fprintf(stderr, \"Error opening filters!\\n\"); exit(1); } #endif break; case AVMEDIA_TYPE_SUBTITLE: ost->encoding_needed = 1; ist->decoding_needed = 1; break; default: abort(); break; } /* two pass mode */ if (ost->encoding_needed && (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { char logfilename[1024]; FILE *f; snprintf(logfilename, sizeof(logfilename), \"%s-%d.log\", pass_logfilename_prefix ? 
pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX, i); if (codec->flags & CODEC_FLAG_PASS1) { f = fopen(logfilename, \"w\"); if (!f) { fprintf(stderr, \"Cannot write log file '%s' for pass-1 encoding: %s\\n\", logfilename, strerror(errno)); av_exit(1); } ost->logfile = f; } else { char *logbuffer; size_t logbuffer_size; if (read_file(logfilename, &logbuffer, &logbuffer_size) < 0) { fprintf(stderr, \"Error reading log file '%s' for pass-2 encoding\\n\", logfilename); av_exit(1); } codec->stats_in = logbuffer; } } } if(codec->codec_type == AVMEDIA_TYPE_VIDEO){ int size= codec->width * codec->height; bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 200); } } if (!bit_buffer) bit_buffer = av_malloc(bit_buffer_size); if (!bit_buffer) { fprintf(stderr, \"Cannot allocate %d bytes output buffer\\n\", bit_buffer_size); ret = AVERROR(ENOMEM); goto fail; } /* open each encoder */ for(i=0;iencoding_needed) { AVCodec *codec = output_codecs[i]; if (!codec) codec = avcodec_find_encoder(ost->st->codec->codec_id); if (!codec) { snprintf(error, sizeof(error), \"Encoder (codec id %d) not found for output stream #%d.%d\", ost->st->codec->codec_id, ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } if (avcodec_open(ost->st->codec, codec) < 0) { snprintf(error, sizeof(error), \"Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height\", ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } extra_size += ost->st->codec->extradata_size; } } /* open each decoder */ for(i=0;idecoding_needed) { AVCodec *codec = input_codecs[i]; if (!codec) codec = avcodec_find_decoder(ist->st->codec->codec_id); if (!codec) { snprintf(error, sizeof(error), \"Decoder (codec id %d) not found for input stream #%d.%d\", ist->st->codec->codec_id, ist->file_index, ist->index); ret = AVERROR(EINVAL); goto dump_format; } if (avcodec_open(ist->st->codec, codec) < 0) { snprintf(error, sizeof(error), \"Error while opening decoder for input stream #%d.%d\", ist->file_index, ist->index); ret = AVERROR(EINVAL); goto dump_format; } //if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) // ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD; } } /* init pts */ for(i=0;ist; ist->pts = st->avg_frame_rate.num ? 
- st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0; ist->next_pts = AV_NOPTS_VALUE; ist->is_start = 1; } /* set meta data information from input file if required */ for (i=0;i= nb_output_files) { snprintf(error, sizeof(error), \"Invalid output file index %d map_meta_data(%d,%d)\", out_file_index, out_file_index, in_file_index); ret = AVERROR(EINVAL); goto dump_format; } if (in_file_index < 0 || in_file_index >= nb_input_files) { snprintf(error, sizeof(error), \"Invalid input file index %d map_meta_data(%d,%d)\", in_file_index, out_file_index, in_file_index); ret = AVERROR(EINVAL); goto dump_format; } out_file = output_files[out_file_index]; in_file = input_files[in_file_index]; mtag=NULL; while((mtag=av_metadata_get(in_file->metadata, \"\", mtag, AV_METADATA_IGNORE_SUFFIX))) av_metadata_set2(&out_file->metadata, mtag->key, mtag->value, AV_METADATA_DONT_OVERWRITE); av_metadata_conv(out_file, out_file->oformat->metadata_conv, in_file->iformat->metadata_conv); } /* copy chapters from the first input file that has them*/ for (i = 0; i < nb_input_files; i++) { if (!input_files[i]->nb_chapters) continue; for (j = 0; j < nb_output_files; j++) if ((ret = copy_chapters(i, j)) < 0) goto dump_format; } /* open files and write file headers */ for(i=0;ioformat->name, \"rtp\")) { want_sdp = 0; } } dump_format: /* dump the file output parameters - cannot be done before in case of stream copy */ for(i=0;ifilename, 1); } /* dump the stream mapping */ if (verbose >= 0) { fprintf(stderr, \"Stream mapping:\\n\"); for(i=0;i #%d.%d\", ist_table[ost->source_index]->file_index, ist_table[ost->source_index]->index, ost->file_index, ost->index); if (ost->sync_ist != ist_table[ost->source_index]) fprintf(stderr, \" [sync #%d.%d]\", ost->sync_ist->file_index, ost->sync_ist->index); fprintf(stderr, \"\\n\"); } } if (ret) { fprintf(stderr, \"%s\\n\", error); goto fail; } if (want_sdp) { print_sdp(output_files, nb_output_files); } if (!using_stdin && verbose >= 0) { fprintf(stderr, \"Press [q] to stop encoding\\n\"); url_set_interrupt_cb(decode_interrupt_cb); } term_init(); timer_start = av_gettime(); for(; received_sigterm == 0;) { int file_index, ist_index; AVPacket pkt; double ipts_min; double opts_min; redo: ipts_min= 1e100; opts_min= 1e100; /* if 'q' pressed, exits */ if (!using_stdin) { if (q_pressed) break; /* read_key() returns 0 on EOF */ key = read_key(); if (key == 'q') break; } /* select the stream that we must read now by looking at the smallest output pts */ file_index = -1; for(i=0;ifile_index]; ist = ist_table[ost->source_index]; if(ist->is_past_recording_time || no_packet[ist->file_index]) continue; opts = ost->st->pts.val * av_q2d(ost->st->time_base); ipts = (double)ist->pts; if (!file_table[ist->file_index].eof_reached){ if(ipts < ipts_min) { ipts_min = ipts; if(input_sync ) file_index = ist->file_index; } if(opts < opts_min) { opts_min = opts; if(!input_sync) file_index = ist->file_index; } } if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){ file_index= -1; break; } } /* if none, if is finished */ if (file_index < 0) { if(no_packet_count){ no_packet_count=0; memset(no_packet, 0, sizeof(no_packet)); usleep(10000); continue; } break; } /* finish if limit size exhausted */ if (limit_filesize != 0 && limit_filesize < url_ftell(output_files[0]->pb)) break; /* read a frame from it and output it in the fifo */ is = input_files[file_index]; ret= av_read_frame(is, &pkt); if(ret == AVERROR(EAGAIN)){ no_packet[file_index]=1; no_packet_count++; continue; } if (ret < 0) { 
file_table[file_index].eof_reached = 1; if (opt_shortest) break; else continue; } no_packet_count=0; memset(no_packet, 0, sizeof(no_packet)); if (do_pkt_dump) { av_pkt_dump_log(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump); } /* the following test is needed in case new streams appear dynamically in stream : we ignore them */ if (pkt.stream_index >= file_table[file_index].nb_streams) goto discard_packet; ist_index = file_table[file_index].ist_index + pkt.stream_index; ist = ist_table[ist_index]; if (ist->discard) goto discard_packet; if (pkt.dts != AV_NOPTS_VALUE) pkt.dts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base); if (pkt.pts != AV_NOPTS_VALUE) pkt.pts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base); if(input_files_ts_scale[file_index][pkt.stream_index]){ if(pkt.pts != AV_NOPTS_VALUE) pkt.pts *= input_files_ts_scale[file_index][pkt.stream_index]; if(pkt.dts != AV_NOPTS_VALUE) pkt.dts *= input_files_ts_scale[file_index][pkt.stream_index]; } // fprintf(stderr, \"next:%\"PRId64\" dts:%\"PRId64\" off:%\"PRId64\" %d\\n\", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec->codec_type); if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE && (is->iformat->flags & AVFMT_TS_DISCONT)) { int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q); int64_t delta= pkt_dts - ist->next_pts; if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1pts)&& !copy_ts){ input_files_ts_offset[ist->file_index]-= delta; if (verbose > 2) fprintf(stderr, \"timestamp discontinuity %\"PRId64\", new offset= %\"PRId64\"\\n\", delta, input_files_ts_offset[ist->file_index]); pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); if(pkt.pts != AV_NOPTS_VALUE) pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); } } /* finish if recording time exhausted */ if (recording_time != INT64_MAX && av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) >= 0) { ist->is_past_recording_time = 1; goto discard_packet; } //fprintf(stderr,\"read #%d.%d size=%d\\n\", ist->file_index, ist->index, pkt.size); if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) { if (verbose >= 0) fprintf(stderr, \"Error while decoding stream #%d.%d\\n\", ist->file_index, ist->index); if (exit_on_error) av_exit(1); av_free_packet(&pkt); goto redo; } discard_packet: av_free_packet(&pkt); /* dump report by using the output first video and audio streams */ print_report(output_files, ost_table, nb_ostreams, 0); } /* at the end of stream, we must flush the decoder buffers */ for(i=0;idecoding_needed) { output_packet(ist, i, ost_table, nb_ostreams, NULL); } } term_exit(); /* write the trailer if needed and close file */ for(i=0;iencoding_needed) { av_freep(&ost->st->codec->stats_in); avcodec_close(ost->st->codec); } } /* close each decoder */ for(i=0;idecoding_needed) { avcodec_close(ist->st->codec); } } #if CONFIG_AVFILTER if (filt_graph_all) { avfilter_graph_destroy(filt_graph_all); av_freep(&filt_graph_all); } #endif /* finished ! 
*/ ret = 0; fail: av_freep(&bit_buffer); av_free(file_table); if (ist_table) { for(i=0;ilogfile) { fclose(ost->logfile); ost->logfile = NULL; } av_fifo_free(ost->fifo); /* works even if fifo is not initialized but set to zero */ av_free(ost->pict_tmp.data[0]); if (ost->video_resample) sws_freeContext(ost->img_resample_ctx); if (ost->resample) audio_resample_close(ost->resample); if (ost->reformat_ctx) av_audio_convert_free(ost->reformat_ctx); av_free(ost); } } av_free(ost_table); } return ret; }"} {"target": 0, "idx": 23263, "func": "static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt) { AVStream *st = s->streams[pkt->stream_index]; int size= pkt->size; uint8_t *buf= pkt->data; MpegTSWriteStream *ts_st = st->priv_data; int len, max_payload_size; const uint8_t *access_unit_index = NULL; if (st->codec->codec_type == CODEC_TYPE_SUBTITLE) { /* for subtitle, a single PES packet must be generated */ mpegts_write_pes(s, st, buf, size, pkt->pts, AV_NOPTS_VALUE); return 0; } if (st->codec->codec_id == CODEC_ID_DIRAC) { /* for Dirac, a single PES packet must be generated */ mpegts_write_pes(s, st, buf, size, pkt->pts, pkt->dts); return 0; } max_payload_size = DEFAULT_PES_PAYLOAD_SIZE; if (st->codec->codec_id == CODEC_ID_MPEG2VIDEO || st->codec->codec_id == CODEC_ID_MPEG1VIDEO) { const uint8_t *p = pkt->data; const uint8_t *end = pkt->data+pkt->size; uint32_t state = -1; while (p < end) { p = ff_find_start_code(p, end, &state); if (state == PICTURE_START_CODE) { access_unit_index = p - 4; break; } } } else if (st->codec->codec_type == CODEC_TYPE_AUDIO) { access_unit_index = pkt->data; } if (!access_unit_index) { av_log(s, AV_LOG_ERROR, \"error, could not find access unit start\\n\"); return -1; } while (size > 0) { len = max_payload_size - ts_st->payload_index; if (len > size) len = size; memcpy(ts_st->payload + ts_st->payload_index, buf, len); buf += len; size -= len; ts_st->payload_index += len; if (access_unit_index && access_unit_index < buf && ts_st->payload_pts == AV_NOPTS_VALUE && ts_st->payload_dts == AV_NOPTS_VALUE) { ts_st->payload_dts = pkt->dts; ts_st->payload_pts = pkt->pts; } if (ts_st->payload_index >= max_payload_size) { mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index, ts_st->payload_pts, ts_st->payload_dts); ts_st->payload_pts = AV_NOPTS_VALUE; ts_st->payload_dts = AV_NOPTS_VALUE; ts_st->payload_index = 0; access_unit_index = NULL; // unset access unit to avoid setting pts/dts again } } return 0; }"} {"target": 1, "idx": 23279, "func": "static inline void RENAME(yuv2packedX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int dstY) { #if COMPILE_TEMPLATE_MMX x86_reg dummy=0; if(!(c->flags & SWS_BITEXACT)) { if (c->flags & SWS_ACCURATE_RND) { switch(c->dstFormat) { case PIX_FMT_RGB32: if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { YSCALEYUV2PACKEDX_ACCURATE YSCALEYUV2RGBX \"movq %%mm2, \"U_TEMP\"(%0) \\n\\t\" \"movq %%mm4, \"V_TEMP\"(%0) \\n\\t\" \"movq %%mm5, \"Y_TEMP\"(%0) \\n\\t\" YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET) \"movq \"Y_TEMP\"(%0), %%mm5 \\n\\t\" \"psraw $3, %%mm1 \\n\\t\" \"psraw $3, %%mm7 \\n\\t\" \"packuswb %%mm7, %%mm1 \\n\\t\" WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6) YSCALEYUV2PACKEDX_END } else { YSCALEYUV2PACKEDX_ACCURATE YSCALEYUV2RGBX \"pcmpeqd %%mm7, %%mm7 \\n\\t\" WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, 
%%mm3, %%mm6) YSCALEYUV2PACKEDX_END } return; case PIX_FMT_BGR24: YSCALEYUV2PACKEDX_ACCURATE YSCALEYUV2RGBX \"pxor %%mm7, %%mm7 \\n\\t\" \"lea (%%\"REG_a\", %%\"REG_a\", 2), %%\"REG_c\"\\n\\t\" //FIXME optimize \"add %4, %%\"REG_c\" \\n\\t\" WRITEBGR24(%%REGc, %5, %%REGa) :: \"r\" (&c->redDither), \"m\" (dummy), \"m\" (dummy), \"m\" (dummy), \"r\" (dest), \"m\" (dstW) : \"%\"REG_a, \"%\"REG_c, \"%\"REG_d, \"%\"REG_S ); return; case PIX_FMT_RGB555: YSCALEYUV2PACKEDX_ACCURATE YSCALEYUV2RGBX \"pxor %%mm7, %%mm7 \\n\\t\" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP \"paddusb \"BLUE_DITHER\"(%0), %%mm2\\n\\t\" \"paddusb \"GREEN_DITHER\"(%0), %%mm4\\n\\t\" \"paddusb \"RED_DITHER\"(%0), %%mm5\\n\\t\" #endif WRITERGB15(%4, %5, %%REGa) YSCALEYUV2PACKEDX_END return; case PIX_FMT_RGB565: YSCALEYUV2PACKEDX_ACCURATE YSCALEYUV2RGBX \"pxor %%mm7, %%mm7 \\n\\t\" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP \"paddusb \"BLUE_DITHER\"(%0), %%mm2\\n\\t\" \"paddusb \"GREEN_DITHER\"(%0), %%mm4\\n\\t\" \"paddusb \"RED_DITHER\"(%0), %%mm5\\n\\t\" #endif WRITERGB16(%4, %5, %%REGa) YSCALEYUV2PACKEDX_END return; case PIX_FMT_YUYV422: YSCALEYUV2PACKEDX_ACCURATE /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ \"psraw $3, %%mm3 \\n\\t\" \"psraw $3, %%mm4 \\n\\t\" \"psraw $3, %%mm1 \\n\\t\" \"psraw $3, %%mm7 \\n\\t\" WRITEYUY2(%4, %5, %%REGa) YSCALEYUV2PACKEDX_END return; } } else { switch(c->dstFormat) { case PIX_FMT_RGB32: if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { YSCALEYUV2PACKEDX YSCALEYUV2RGBX YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7) \"psraw $3, %%mm1 \\n\\t\" \"psraw $3, %%mm7 \\n\\t\" \"packuswb %%mm7, %%mm1 \\n\\t\" WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6) YSCALEYUV2PACKEDX_END } else { YSCALEYUV2PACKEDX YSCALEYUV2RGBX \"pcmpeqd %%mm7, %%mm7 \\n\\t\" WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) YSCALEYUV2PACKEDX_END } return; case PIX_FMT_BGR24: YSCALEYUV2PACKEDX YSCALEYUV2RGBX \"pxor %%mm7, %%mm7 \\n\\t\" \"lea (%%\"REG_a\", %%\"REG_a\", 2), %%\"REG_c\" \\n\\t\" //FIXME optimize \"add %4, %%\"REG_c\" \\n\\t\" WRITEBGR24(%%REGc, %5, %%REGa) :: \"r\" (&c->redDither), \"m\" (dummy), \"m\" (dummy), \"m\" (dummy), \"r\" (dest), \"m\" (dstW) : \"%\"REG_a, \"%\"REG_c, \"%\"REG_d, \"%\"REG_S ); return; case PIX_FMT_RGB555: YSCALEYUV2PACKEDX YSCALEYUV2RGBX \"pxor %%mm7, %%mm7 \\n\\t\" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP \"paddusb \"BLUE_DITHER\"(%0), %%mm2 \\n\\t\" \"paddusb \"GREEN_DITHER\"(%0), %%mm4 \\n\\t\" \"paddusb \"RED_DITHER\"(%0), %%mm5 \\n\\t\" #endif WRITERGB15(%4, %5, %%REGa) YSCALEYUV2PACKEDX_END return; case PIX_FMT_RGB565: YSCALEYUV2PACKEDX YSCALEYUV2RGBX \"pxor %%mm7, %%mm7 \\n\\t\" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP \"paddusb \"BLUE_DITHER\"(%0), %%mm2 \\n\\t\" \"paddusb \"GREEN_DITHER\"(%0), %%mm4 \\n\\t\" \"paddusb \"RED_DITHER\"(%0), %%mm5 \\n\\t\" #endif WRITERGB16(%4, %5, %%REGa) YSCALEYUV2PACKEDX_END return; case PIX_FMT_YUYV422: YSCALEYUV2PACKEDX /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ \"psraw $3, %%mm3 \\n\\t\" \"psraw $3, %%mm4 \\n\\t\" \"psraw $3, %%mm1 \\n\\t\" \"psraw $3, %%mm7 \\n\\t\" WRITEYUY2(%4, %5, %%REGa) YSCALEYUV2PACKEDX_END return; } } } #endif /* COMPILE_TEMPLATE_MMX */ #if COMPILE_TEMPLATE_ALTIVEC /* The following list of supported dstFormat values should match what's found in the body of ff_yuv2packedX_altivec() */ if (!(c->flags & SWS_BITEXACT) && !c->alpPixBuf && (c->dstFormat==PIX_FMT_ABGR || 
c->dstFormat==PIX_FMT_BGRA || c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 || c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)) ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize, chrFilter, chrSrc, chrFilterSize, dest, dstW, dstY); else #endif yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize, chrFilter, chrSrc, chrFilterSize, alpSrc, dest, dstW, dstY); }"} {"target": 0, "idx": 23286, "func": "static av_cold int frei0r_init(AVFilterContext *ctx, const char *dl_name, int type) { Frei0rContext *frei0r = ctx->priv; f0r_init_f f0r_init; f0r_get_plugin_info_f f0r_get_plugin_info; f0r_plugin_info_t *pi; char *path; int ret = 0; /* see: http://frei0r.dyne.org/codedoc/html/group__pluglocations.html */ if ((path = av_strdup(getenv(\"FREI0R_PATH\")))) { #ifdef _WIN32 const char *separator = \";\"; #else const char *separator = \":\"; #endif char *p, *ptr = NULL; for (p = path; p = av_strtok(p, separator, &ptr); p = NULL) { /* add additional trailing slash in case it is missing */ char *p1 = av_asprintf(\"%s/\", p); if (!p1) { av_free(path); return AVERROR(ENOMEM); } ret = load_path(ctx, &frei0r->dl_handle, p1, dl_name); av_free(p1); if (ret < 0) { av_free(path); return ret; } if (frei0r->dl_handle) break; } av_free(path); } if (!frei0r->dl_handle && (path = getenv(\"HOME\"))) { char *prefix = av_asprintf(\"%s/.frei0r-1/lib/\", path); if (!prefix) return AVERROR(ENOMEM); ret = load_path(ctx, &frei0r->dl_handle, prefix, dl_name); av_free(prefix); if (ret < 0) return ret; } if (!frei0r->dl_handle) { ret = load_path(ctx, &frei0r->dl_handle, \"/usr/local/lib/frei0r-1/\", dl_name); if (ret < 0) return ret; } if (!frei0r->dl_handle) { ret = load_path(ctx, &frei0r->dl_handle, \"/usr/lib/frei0r-1/\", dl_name); if (ret < 0) return ret; } if (!frei0r->dl_handle) { av_log(ctx, AV_LOG_ERROR, \"Could not find module '%s'\\n\", dl_name); return AVERROR(EINVAL); } if (!(f0r_init = load_sym(ctx, \"f0r_init\" )) || !(f0r_get_plugin_info = load_sym(ctx, \"f0r_get_plugin_info\")) || !(frei0r->get_param_info = load_sym(ctx, \"f0r_get_param_info\" )) || !(frei0r->get_param_value = load_sym(ctx, \"f0r_get_param_value\")) || !(frei0r->set_param_value = load_sym(ctx, \"f0r_set_param_value\")) || !(frei0r->update = load_sym(ctx, \"f0r_update\" )) || !(frei0r->construct = load_sym(ctx, \"f0r_construct\" )) || !(frei0r->destruct = load_sym(ctx, \"f0r_destruct\" )) || !(frei0r->deinit = load_sym(ctx, \"f0r_deinit\" ))) return AVERROR(EINVAL); if (f0r_init() < 0) { av_log(ctx, AV_LOG_ERROR, \"Could not init the frei0r module\\n\"); return AVERROR(EINVAL); } f0r_get_plugin_info(&frei0r->plugin_info); pi = &frei0r->plugin_info; if (pi->plugin_type != type) { av_log(ctx, AV_LOG_ERROR, \"Invalid type '%s' for the plugin\\n\", pi->plugin_type == F0R_PLUGIN_TYPE_FILTER ? \"filter\" : pi->plugin_type == F0R_PLUGIN_TYPE_SOURCE ? \"source\" : pi->plugin_type == F0R_PLUGIN_TYPE_MIXER2 ? \"mixer2\" : pi->plugin_type == F0R_PLUGIN_TYPE_MIXER3 ? \"mixer3\" : \"unknown\"); return AVERROR(EINVAL); } av_log(ctx, AV_LOG_VERBOSE, \"name:%s author:'%s' explanation:'%s' color_model:%s \" \"frei0r_version:%d version:%d.%d num_params:%d\\n\", pi->name, pi->author, pi->explanation, pi->color_model == F0R_COLOR_MODEL_BGRA8888 ? \"bgra8888\" : pi->color_model == F0R_COLOR_MODEL_RGBA8888 ? \"rgba8888\" : pi->color_model == F0R_COLOR_MODEL_PACKED32 ? 
\"packed32\" : \"unknown\", pi->frei0r_version, pi->major_version, pi->minor_version, pi->num_params); return 0; }"} {"target": 1, "idx": 23292, "func": "uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2) { S390CPU *cpu = s390_env_get_cpu(env); CPUState *cs = CPU(cpu); /* * TODO: we currently don't handle all access protection types * (including access-list and key-controlled) as well as AR mode. */ if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) { /* Fetching permitted; storing permitted */ return 0; } switch (env->int_pgm_code) { case PGM_PROTECTION: /* Fetching permitted; storing not permitted */ cs->exception_index = 0; return 1; case PGM_ADDRESSING: /* Fetching not permitted; storing not permitted */ cs->exception_index = 0; return 2; case PGM_ASCE_TYPE: case PGM_REG_FIRST_TRANS: case PGM_REG_SEC_TRANS: case PGM_REG_THIRD_TRANS: case PGM_SEGMENT_TRANS: case PGM_PAGE_TRANS: case PGM_ALET_SPEC: case PGM_ALEN_SPEC: case PGM_ALE_SEQ: case PGM_ASTE_VALID: case PGM_ASTE_SEQ: case PGM_EXT_AUTH: /* Translation not available */ cs->exception_index = 0; return 3; } /* any other exception is forwarded to the guest */ s390_cpu_virt_mem_handle_exc(cpu, GETPC()); return 0; }"} {"target": 1, "idx": 23298, "func": "static int adb_kbd_poll(ADBDevice *d, uint8_t *obuf) { KBDState *s = ADB_KEYBOARD(d); int keycode; int olen; olen = 0; if (s->count == 0) { return 0; } keycode = s->data[s->rptr]; s->rptr++; if (s->rptr == sizeof(s->data)) { s->rptr = 0; } s->count--; /* * The power key is the only two byte value key, so it is a special case. * Since 0x7f is not a used keycode for ADB we overload it to indicate the * power button when we're storing keycodes in our internal buffer, and * expand it out to two bytes when we send to the guest. */ if (keycode == 0x7f) { obuf[0] = 0x7f; obuf[1] = 0x7f; olen = 2; } else { obuf[0] = keycode; /* NOTE: the power key key-up is the two byte sequence 0xff 0xff; * otherwise we could in theory send a second keycode in the second * byte, but choose not to bother. 
*/ obuf[1] = 0xff; olen = 2; } return olen; }"} {"target": 1, "idx": 23308, "func": "static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, void *dst_l, void *dst_r, const int type) { int i, j, count = 0; int last, t; int A, B, L, L2, R, R2; int pos = s->pos; uint32_t crc = s->sc.crc; uint32_t crc_extra_bits = s->extra_sc.crc; int16_t *dst16_l = dst_l; int16_t *dst16_r = dst_r; int32_t *dst32_l = dst_l; int32_t *dst32_r = dst_r; float *dstfl_l = dst_l; float *dstfl_r = dst_r; s->one = s->zero = s->zeroes = 0; do { L = wv_get_value(s, gb, 0, &last); if (last) break; R = wv_get_value(s, gb, 1, &last); if (last) break; for (i = 0; i < s->terms; i++) { t = s->decorr[i].value; if (t > 0) { if (t > 8) { if (t & 1) { A = 2U * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]; B = 2U * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1]; } else { A = (int)(3U * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1; B = (int)(3U * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1]) >> 1; } s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0]; s->decorr[i].samplesB[1] = s->decorr[i].samplesB[0]; j = 0; } else { A = s->decorr[i].samplesA[pos]; B = s->decorr[i].samplesB[pos]; j = (pos + t) & 7; } if (type != AV_SAMPLE_FMT_S16P) { L2 = L + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10); R2 = R + ((s->decorr[i].weightB * (int64_t)B + 512) >> 10); } else { L2 = L + ((int)(s->decorr[i].weightA * (unsigned)A + 512) >> 10); R2 = R + ((int)(s->decorr[i].weightB * (unsigned)B + 512) >> 10); } if (A && L) s->decorr[i].weightA -= ((((L ^ A) >> 30) & 2) - 1) * s->decorr[i].delta; if (B && R) s->decorr[i].weightB -= ((((R ^ B) >> 30) & 2) - 1) * s->decorr[i].delta; s->decorr[i].samplesA[j] = L = L2; s->decorr[i].samplesB[j] = R = R2; } else if (t == -1) { if (type != AV_SAMPLE_FMT_S16P) L2 = L + ((s->decorr[i].weightA * (int64_t)s->decorr[i].samplesA[0] + 512) >> 10); else L2 = L + ((int)(s->decorr[i].weightA * (unsigned)s->decorr[i].samplesA[0] + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, s->decorr[i].samplesA[0], L); L = L2; if (type != AV_SAMPLE_FMT_S16P) R2 = R + ((s->decorr[i].weightB * (int64_t)L2 + 512) >> 10); else R2 = R + ((int)(s->decorr[i].weightB * (unsigned)L2 + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, L2, R); R = R2; s->decorr[i].samplesA[0] = R; } else { if (type != AV_SAMPLE_FMT_S16P) R2 = R + ((s->decorr[i].weightB * (int64_t)s->decorr[i].samplesB[0] + 512) >> 10); else R2 = R + ((int)(s->decorr[i].weightB * (unsigned)s->decorr[i].samplesB[0] + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, s->decorr[i].samplesB[0], R); R = R2; if (t == -3) { R2 = s->decorr[i].samplesA[0]; s->decorr[i].samplesA[0] = R; } if (type != AV_SAMPLE_FMT_S16P) L2 = L + ((s->decorr[i].weightA * (int64_t)R2 + 512) >> 10); else L2 = L + ((int)(s->decorr[i].weightA * (unsigned)R2 + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, R2, L); L = L2; s->decorr[i].samplesB[0] = L; } } if (type == AV_SAMPLE_FMT_S16P) { if (FFABS(L) + (unsigned)FFABS(R) > (1<<19)) { av_log(s->avctx, AV_LOG_ERROR, \"sample %d %d too large\\n\", L, R); return AVERROR_INVALIDDATA; } } pos = (pos + 1) & 7; if (s->joint) L += (unsigned)(R -= (unsigned)(L >> 1)); crc = (crc * 3 + L) * 3 + R; if (type == AV_SAMPLE_FMT_FLTP) { *dstfl_l++ = wv_get_value_float(s, &crc_extra_bits, L); *dstfl_r++ = wv_get_value_float(s, &crc_extra_bits, R); } else if (type == AV_SAMPLE_FMT_S32P) { *dst32_l++ = 
wv_get_value_integer(s, &crc_extra_bits, L); *dst32_r++ = wv_get_value_integer(s, &crc_extra_bits, R); } else { *dst16_l++ = wv_get_value_integer(s, &crc_extra_bits, L); *dst16_r++ = wv_get_value_integer(s, &crc_extra_bits, R); } count++; } while (!last && count < s->samples); wv_reset_saved_context(s); if (last && count < s->samples) { int size = av_get_bytes_per_sample(type); memset((uint8_t*)dst_l + count*size, 0, (s->samples-count)*size); memset((uint8_t*)dst_r + count*size, 0, (s->samples-count)*size); } if ((s->avctx->err_recognition & AV_EF_CRCCHECK) && wv_check_crc(s, crc, crc_extra_bits)) return AVERROR_INVALIDDATA; return 0; }"} {"target": 1, "idx": 23314, "func": "static void qxl_create_guest_primary(PCIQXLDevice *qxl, int loadvm, qxl_async_io async) { QXLDevSurfaceCreate surface; QXLSurfaceCreate *sc = &qxl->guest_primary.surface; int size; int requested_height = le32_to_cpu(sc->height); int requested_stride = le32_to_cpu(sc->stride); size = abs(requested_stride) * requested_height; if (size > qxl->vgamem_size) { qxl_set_guest_bug(qxl, \"%s: requested primary larger then framebuffer\" \" size\", __func__); return; } if (qxl->mode == QXL_MODE_NATIVE) { qxl_set_guest_bug(qxl, \"%s: nop since already in QXL_MODE_NATIVE\", __func__); } qxl_exit_vga_mode(qxl); surface.format = le32_to_cpu(sc->format); surface.height = le32_to_cpu(sc->height); surface.mem = le64_to_cpu(sc->mem); surface.position = le32_to_cpu(sc->position); surface.stride = le32_to_cpu(sc->stride); surface.width = le32_to_cpu(sc->width); surface.type = le32_to_cpu(sc->type); surface.flags = le32_to_cpu(sc->flags); trace_qxl_create_guest_primary(qxl->id, sc->width, sc->height, sc->mem, sc->format, sc->position); trace_qxl_create_guest_primary_rest(qxl->id, sc->stride, sc->type, sc->flags); if ((surface.stride & 0x3) != 0) { qxl_set_guest_bug(qxl, \"primary surface stride = %d %% 4 != 0\", surface.stride); return; } surface.mouse_mode = true; surface.group_id = MEMSLOT_GROUP_GUEST; if (loadvm) { surface.flags |= QXL_SURF_FLAG_KEEP_DATA; } qxl->mode = QXL_MODE_NATIVE; qxl->cmdflags = 0; qemu_spice_create_primary_surface(&qxl->ssd, 0, &surface, async); if (async == QXL_SYNC) { qxl_create_guest_primary_complete(qxl); } }"} {"target": 1, "idx": 23316, "func": "static inline int signed_shift(int i, int shift) { if (shift > 0) return i << shift; return i >> -shift; }"} {"target": 0, "idx": 23365, "func": "static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = arm_env_get_cpu(env); uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); if (!u32p) { return; } u32p += env->pmsav7.rnr; tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! 
*/ *u32p = value; }"} {"target": 0, "idx": 23366, "func": "void qemu_opts_del(QemuOpts *opts) { QemuOpt *opt; for (;;) { opt = TAILQ_FIRST(&opts->head); if (opt == NULL) break; qemu_opt_del(opt); } TAILQ_REMOVE(&opts->list->head, opts, next); qemu_free(opts); }"} {"target": 0, "idx": 23368, "func": "static int raw_pwrite(BlockDriverState *bs, int64_t offset, const uint8_t *buf, int count) { BDRVRawState *s = bs->opaque; int size, ret, shift, sum; sum = 0; if (s->aligned_buf != NULL) { if (offset & 0x1ff) { /* align offset on a 512 bytes boundary */ shift = offset & 0x1ff; ret = raw_pread_aligned(bs, offset - shift, s->aligned_buf, 512); if (ret < 0) return ret; size = 512 - shift; if (size > count) size = count; memcpy(s->aligned_buf + shift, buf, size); ret = raw_pwrite_aligned(bs, offset - shift, s->aligned_buf, 512); if (ret < 0) return ret; buf += size; offset += size; count -= size; sum += size; if (count == 0) return sum; } if (count & 0x1ff || (uintptr_t) buf & 0x1ff) { while ((size = (count & ~0x1ff)) != 0) { if (size > ALIGNED_BUFFER_SIZE) size = ALIGNED_BUFFER_SIZE; memcpy(s->aligned_buf, buf, size); ret = raw_pwrite_aligned(bs, offset, s->aligned_buf, size); if (ret < 0) return ret; buf += ret; offset += ret; count -= ret; sum += ret; } /* here, count < 512 because (count & ~0x1ff) == 0 */ if (count) { ret = raw_pread_aligned(bs, offset, s->aligned_buf, 512); if (ret < 0) return ret; memcpy(s->aligned_buf, buf, count); ret = raw_pwrite_aligned(bs, offset, s->aligned_buf, 512); if (ret < 0) return ret; if (count < ret) ret = count; sum += ret; } return sum; } } return raw_pwrite_aligned(bs, offset, buf, count) + sum; }"} {"target": 0, "idx": 23373, "func": "void acpi_pcihp_device_unplug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, DeviceState *dev, Error **errp) { PCIDevice *pdev = PCI_DEVICE(dev); int slot = PCI_SLOT(pdev->devfn); int bsel = acpi_pcihp_get_bsel(pdev->bus); if (bsel < 0) { error_setg(errp, \"Unsupported bus. 
Bus doesn't have property '\" ACPI_PCIHP_PROP_BSEL \"' set\"); return; } s->acpi_pcihp_pci_status[bsel].down |= (1U << slot); acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS); }"} {"target": 0, "idx": 23379, "func": "int bdrv_debug_resume(BlockDriverState *bs, const char *tag) { while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) { bs = bs->file; } if (bs && bs->drv && bs->drv->bdrv_debug_resume) { return bs->drv->bdrv_debug_resume(bs, tag); } return -ENOTSUP; }"} {"target": 1, "idx": 23384, "func": "static int r3d_read_redv(AVFormatContext *s, AVPacket *pkt, Atom *atom) { AVStream *st = s->streams[0]; int tmp, tmp2; uint64_t pos = url_ftell(s->pb); unsigned dts; dts = get_be32(s->pb); tmp = get_be32(s->pb); dprintf(s, \"frame num %d\\n\", tmp); tmp = get_byte(s->pb); // major version tmp2 = get_byte(s->pb); // minor version dprintf(s, \"version %d.%d\\n\", tmp, tmp2); tmp = get_be16(s->pb); // unknown dprintf(s, \"unknown %d\\n\", tmp); if (tmp > 4) { tmp = get_be16(s->pb); // unknown dprintf(s, \"unknown %d\\n\", tmp); tmp = get_be16(s->pb); // unknown dprintf(s, \"unknown %d\\n\", tmp); tmp = get_be32(s->pb); dprintf(s, \"width %d\\n\", tmp); tmp = get_be32(s->pb); dprintf(s, \"height %d\\n\", tmp); tmp = get_be32(s->pb); dprintf(s, \"metadata len %d\\n\", tmp); } tmp = atom->size - 8 - (url_ftell(s->pb) - pos); if (tmp < 0) return -1; if (av_get_packet(s->pb, pkt, tmp) != tmp) { av_log(s, AV_LOG_ERROR, \"error reading video packet\\n\"); return -1; } pkt->stream_index = 0; pkt->dts = dts; if (st->codec->time_base.den) pkt->duration = (uint64_t)st->time_base.den* st->codec->time_base.num/st->codec->time_base.den; dprintf(s, \"pkt dts %lld duration %d\\n\", pkt->dts, pkt->duration); return 0; }"} {"target": 0, "idx": 23394, "func": "static uint64_t mpc8544_guts_read(void *opaque, target_phys_addr_t addr, unsigned size) { uint32_t value = 0; CPUPPCState *env = cpu_single_env; addr &= MPC8544_GUTS_MMIO_SIZE - 1; switch (addr) { case MPC8544_GUTS_ADDR_PVR: value = env->spr[SPR_PVR]; break; case MPC8544_GUTS_ADDR_SVR: value = env->spr[SPR_E500_SVR]; break; default: fprintf(stderr, \"guts: Unknown register read: %x\\n\", (int)addr); break; } return value; }"} {"target": 0, "idx": 23396, "func": "int bdrv_set_read_only(BlockDriverState *bs, bool read_only, Error **errp) { /* Do not set read_only if copy_on_read is enabled */ if (bs->copy_on_read && read_only) { error_setg(errp, \"Can't set node '%s' to r/o with copy-on-read enabled\", bdrv_get_device_or_node_name(bs)); return -EINVAL; } /* Do not clear read_only if it is prohibited */ if (!read_only && !(bs->open_flags & BDRV_O_ALLOW_RDWR)) { error_setg(errp, \"Node '%s' is read only\", bdrv_get_device_or_node_name(bs)); return -EPERM; } bs->read_only = read_only; return 0; }"} {"target": 0, "idx": 23404, "func": "int qemu_savevm_state_begin(Monitor *mon, QEMUFile *f, int blk_enable, int shared) { SaveStateEntry *se; int ret; QTAILQ_FOREACH(se, &savevm_handlers, entry) { if(se->set_params == NULL) { continue; } se->set_params(blk_enable, shared, se->opaque); } qemu_put_be32(f, QEMU_VM_FILE_MAGIC); qemu_put_be32(f, QEMU_VM_FILE_VERSION); QTAILQ_FOREACH(se, &savevm_handlers, entry) { int len; if (se->save_live_state == NULL) continue; /* Section type */ qemu_put_byte(f, QEMU_VM_SECTION_START); qemu_put_be32(f, se->section_id); /* ID string */ len = strlen(se->idstr); qemu_put_byte(f, len); qemu_put_buffer(f, (uint8_t *)se->idstr, len); qemu_put_be32(f, se->instance_id); qemu_put_be32(f, se->version_id); ret = 
se->save_live_state(mon, f, QEMU_VM_SECTION_START, se->opaque); if (ret < 0) { qemu_savevm_state_cancel(mon, f); return ret; } } ret = qemu_file_get_error(f); if (ret != 0) { qemu_savevm_state_cancel(mon, f); } return ret; }"} {"target": 0, "idx": 23409, "func": "static void gen_conditional_jump(DisasContext * ctx, target_ulong ift, target_ulong ifnott) { int l1; TCGv sr; l1 = gen_new_label(); sr = tcg_temp_new(); tcg_gen_andi_i32(sr, cpu_sr, SR_T); tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1); gen_goto_tb(ctx, 0, ifnott); gen_set_label(l1); gen_goto_tb(ctx, 1, ift); }"} {"target": 0, "idx": 23417, "func": "static int wav_write_trailer(AVFormatContext *s) { AVIOContext *pb = s->pb; WAVMuxContext *wav = s->priv_data; int64_t file_size, data_size; int64_t number_of_samples = 0; int rf64 = 0; avio_flush(pb); if (s->pb->seekable) { if (wav->write_peak != 2) { ff_end_tag(pb, wav->data); avio_flush(pb); } if (wav->write_peak && wav->peak_output) { peak_write_chunk(s); avio_flush(pb); } /* update file size */ file_size = avio_tell(pb); data_size = file_size - wav->data; if (wav->rf64 == RF64_ALWAYS || (wav->rf64 == RF64_AUTO && file_size - 8 > UINT32_MAX)) { rf64 = 1; } else { avio_seek(pb, 4, SEEK_SET); avio_wl32(pb, (uint32_t)(file_size - 8)); avio_seek(pb, file_size, SEEK_SET); avio_flush(pb); } number_of_samples = av_rescale(wav->maxpts - wav->minpts + wav->last_duration, s->streams[0]->codec->sample_rate * (int64_t)s->streams[0]->time_base.num, s->streams[0]->time_base.den); if(s->streams[0]->codec->codec_tag != 0x01) { /* Update num_samps in fact chunk */ avio_seek(pb, wav->fact_pos, SEEK_SET); if (rf64 || (wav->rf64 == RF64_AUTO && number_of_samples > UINT32_MAX)) { rf64 = 1; avio_wl32(pb, -1); } else { avio_wl32(pb, number_of_samples); avio_seek(pb, file_size, SEEK_SET); avio_flush(pb); } } if (rf64) { /* overwrite RIFF with RF64 */ avio_seek(pb, 0, SEEK_SET); ffio_wfourcc(pb, \"RF64\"); avio_wl32(pb, -1); /* write ds64 chunk (overwrite JUNK if rf64 == RF64_AUTO) */ avio_seek(pb, wav->ds64 - 8, SEEK_SET); ffio_wfourcc(pb, \"ds64\"); avio_wl32(pb, 28); /* ds64 chunk size */ avio_wl64(pb, file_size - 8); /* RF64 chunk size */ avio_wl64(pb, data_size); /* data chunk size */ avio_wl64(pb, number_of_samples); /* fact chunk number of samples */ avio_wl32(pb, 0); /* number of table entries for non-'data' chunks */ /* write -1 in data chunk size */ avio_seek(pb, wav->data - 4, SEEK_SET); avio_wl32(pb, -1); avio_seek(pb, file_size, SEEK_SET); avio_flush(pb); } } if (wav->write_peak) peak_free_buffers(s); return 0; }"} {"target": 0, "idx": 23426, "func": "av_cold int swr_init(struct SwrContext *s){ int ret; clear_context(s); if(s-> in_sample_fmt >= AV_SAMPLE_FMT_NB){ av_log(s, AV_LOG_ERROR, \"Requested input sample format %d is invalid\\n\", s->in_sample_fmt); return AVERROR(EINVAL); } if(s->out_sample_fmt >= AV_SAMPLE_FMT_NB){ av_log(s, AV_LOG_ERROR, \"Requested output sample format %d is invalid\\n\", s->out_sample_fmt); return AVERROR(EINVAL); } s->out.ch_count = s-> user_out_ch_count; s-> in.ch_count = s-> user_in_ch_count; s->used_ch_count = s->user_used_ch_count; s-> in_ch_layout = s-> user_in_ch_layout; s->out_ch_layout = s->user_out_ch_layout; if(av_get_channel_layout_nb_channels(s-> in_ch_layout) > SWR_CH_MAX) { av_log(s, AV_LOG_WARNING, \"Input channel layout 0x%\"PRIx64\" is invalid or unsupported.\\n\", s-> in_ch_layout); s->in_ch_layout = 0; } if(av_get_channel_layout_nb_channels(s->out_ch_layout) > SWR_CH_MAX) { av_log(s, AV_LOG_WARNING, \"Output channel layout 0x%\"PRIx64\" is invalid 
or unsupported.\\n\", s->out_ch_layout); s->out_ch_layout = 0; } switch(s->engine){ #if CONFIG_LIBSOXR case SWR_ENGINE_SOXR: s->resampler = &swri_soxr_resampler; break; #endif case SWR_ENGINE_SWR : s->resampler = &swri_resampler; break; default: av_log(s, AV_LOG_ERROR, \"Requested resampling engine is unavailable\\n\"); return AVERROR(EINVAL); } if(!s->used_ch_count) s->used_ch_count= s->in.ch_count; if(s->used_ch_count && s-> in_ch_layout && s->used_ch_count != av_get_channel_layout_nb_channels(s-> in_ch_layout)){ av_log(s, AV_LOG_WARNING, \"Input channel layout has a different number of channels than the number of used channels, ignoring layout\\n\"); s-> in_ch_layout= 0; } if(!s-> in_ch_layout) s-> in_ch_layout= av_get_default_channel_layout(s->used_ch_count); if(!s->out_ch_layout) s->out_ch_layout= av_get_default_channel_layout(s->out.ch_count); s->rematrix= s->out_ch_layout !=s->in_ch_layout || s->rematrix_volume!=1.0 || s->rematrix_custom; if(s->int_sample_fmt == AV_SAMPLE_FMT_NONE){ if(av_get_planar_sample_fmt(s->in_sample_fmt) <= AV_SAMPLE_FMT_S16P){ s->int_sample_fmt= AV_SAMPLE_FMT_S16P; }else if( av_get_planar_sample_fmt(s-> in_sample_fmt) == AV_SAMPLE_FMT_S32P && av_get_planar_sample_fmt(s->out_sample_fmt) == AV_SAMPLE_FMT_S32P && !s->rematrix && s->engine != SWR_ENGINE_SOXR){ s->int_sample_fmt= AV_SAMPLE_FMT_S32P; }else if(av_get_planar_sample_fmt(s->in_sample_fmt) <= AV_SAMPLE_FMT_FLTP){ s->int_sample_fmt= AV_SAMPLE_FMT_FLTP; }else{ av_log(s, AV_LOG_DEBUG, \"Using double precision mode\\n\"); s->int_sample_fmt= AV_SAMPLE_FMT_DBLP; } } if( s->int_sample_fmt != AV_SAMPLE_FMT_S16P &&s->int_sample_fmt != AV_SAMPLE_FMT_S32P &&s->int_sample_fmt != AV_SAMPLE_FMT_FLTP &&s->int_sample_fmt != AV_SAMPLE_FMT_DBLP){ av_log(s, AV_LOG_ERROR, \"Requested sample format %s is not supported internally, S16/S32/FLT/DBL is supported\\n\", av_get_sample_fmt_name(s->int_sample_fmt)); return AVERROR(EINVAL); } set_audiodata_fmt(&s-> in, s-> in_sample_fmt); set_audiodata_fmt(&s->out, s->out_sample_fmt); if (s->firstpts_in_samples != AV_NOPTS_VALUE) { if (!s->async && s->min_compensation >= FLT_MAX/2) s->async = 1; s->firstpts = s->outpts = s->firstpts_in_samples * s->out_sample_rate; } else s->firstpts = AV_NOPTS_VALUE; if (s->async) { if (s->min_compensation >= FLT_MAX/2) s->min_compensation = 0.001; if (s->async > 1.0001) { s->max_soft_compensation = s->async / (double) s->in_sample_rate; } } if (s->out_sample_rate!=s->in_sample_rate || (s->flags & SWR_FLAG_RESAMPLE)){ s->resample = s->resampler->init(s->resample, s->out_sample_rate, s->in_sample_rate, s->filter_size, s->phase_shift, s->linear_interp, s->cutoff, s->int_sample_fmt, s->filter_type, s->kaiser_beta, s->precision, s->cheby); }else s->resampler->free(&s->resample); if( s->int_sample_fmt != AV_SAMPLE_FMT_S16P && s->int_sample_fmt != AV_SAMPLE_FMT_S32P && s->int_sample_fmt != AV_SAMPLE_FMT_FLTP && s->int_sample_fmt != AV_SAMPLE_FMT_DBLP && s->resample){ av_log(s, AV_LOG_ERROR, \"Resampling only supported with internal s16/s32/flt/dbl\\n\"); return -1; } #define RSC 1 //FIXME finetune if(!s-> in.ch_count) s-> in.ch_count= av_get_channel_layout_nb_channels(s-> in_ch_layout); if(!s->used_ch_count) s->used_ch_count= s->in.ch_count; if(!s->out.ch_count) s->out.ch_count= av_get_channel_layout_nb_channels(s->out_ch_layout); if(!s-> in.ch_count){ av_assert0(!s->in_ch_layout); av_log(s, AV_LOG_ERROR, \"Input channel count and layout are unset\\n\"); return -1; } if ((!s->out_ch_layout || !s->in_ch_layout) && s->used_ch_count != s->out.ch_count && 
!s->rematrix_custom) { char l1[1024], l2[1024]; av_get_channel_layout_string(l1, sizeof(l1), s-> in.ch_count, s-> in_ch_layout); av_get_channel_layout_string(l2, sizeof(l2), s->out.ch_count, s->out_ch_layout); av_log(s, AV_LOG_ERROR, \"Rematrix is needed between %s and %s \" \"but there is not enough information to do it\\n\", l1, l2); return -1; } av_assert0(s->used_ch_count); av_assert0(s->out.ch_count); s->resample_first= RSC*s->out.ch_count/s->in.ch_count - RSC < s->out_sample_rate/(float)s-> in_sample_rate - 1.0; s->in_buffer= s->in; s->silence = s->in; s->drop_temp= s->out; if(!s->resample && !s->rematrix && !s->channel_map && !s->dither.method){ s->full_convert = swri_audio_convert_alloc(s->out_sample_fmt, s-> in_sample_fmt, s-> in.ch_count, NULL, 0); return 0; } s->in_convert = swri_audio_convert_alloc(s->int_sample_fmt, s-> in_sample_fmt, s->used_ch_count, s->channel_map, 0); s->out_convert= swri_audio_convert_alloc(s->out_sample_fmt, s->int_sample_fmt, s->out.ch_count, NULL, 0); if (!s->in_convert || !s->out_convert) return AVERROR(ENOMEM); s->postin= s->in; s->preout= s->out; s->midbuf= s->in; if(s->channel_map){ s->postin.ch_count= s->midbuf.ch_count= s->used_ch_count; if(s->resample) s->in_buffer.ch_count= s->used_ch_count; } if(!s->resample_first){ s->midbuf.ch_count= s->out.ch_count; if(s->resample) s->in_buffer.ch_count = s->out.ch_count; } set_audiodata_fmt(&s->postin, s->int_sample_fmt); set_audiodata_fmt(&s->midbuf, s->int_sample_fmt); set_audiodata_fmt(&s->preout, s->int_sample_fmt); if(s->resample){ set_audiodata_fmt(&s->in_buffer, s->int_sample_fmt); } if ((ret = swri_dither_init(s, s->out_sample_fmt, s->int_sample_fmt)) < 0) return ret; if(s->rematrix || s->dither.method) return swri_rematrix_init(s); return 0; }"} {"target": 1, "idx": 23430, "func": "static void remove_port(VirtIOSerial *vser, uint32_t port_id) { VirtIOSerialPort *port; unsigned int i; i = port_id / 32; vser->ports_map[i] &= ~(1U << (port_id % 32)); port = find_port_by_id(vser, port_id); /* * This function is only called from qdev's unplug callback; if we * get a NULL port here, we're in trouble. */ assert(port); /* Flush out any unconsumed buffers first */ discard_vq_data(port->ovq, VIRTIO_DEVICE(port->vser)); send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_REMOVE, 1); }"} {"target": 1, "idx": 23431, "func": "void rgb16tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size) { const uint16_t *end; uint8_t *d = (uint8_t *)dst; const uint16_t *s = (uint16_t *)src; end = s + src_size/2; while(s < end) { register uint16_t bgr; bgr = *s++; *d++ = (bgr&0xF800)>>8; *d++ = (bgr&0x7E0)>>3; *d++ = (bgr&0x1F)<<3; *d++ = 0; } }"} {"target": 1, "idx": 23432, "func": "static int decode_audio_block(AC3DecodeContext *s, int blk) { int fbw_channels = s->fbw_channels; int channel_mode = s->channel_mode; int i, bnd, seg, ch, ret; int different_transforms; int downmix_output; int cpl_in_use; GetBitContext *gbc = &s->gbc; uint8_t bit_alloc_stages[AC3_MAX_CHANNELS] = { 0 }; /* block switch flags */ different_transforms = 0; if (s->block_switch_syntax) { for (ch = 1; ch <= fbw_channels; ch++) { s->block_switch[ch] = get_bits1(gbc); if (ch > 1 && s->block_switch[ch] != s->block_switch[1]) different_transforms = 1; } } /* dithering flags */ if (s->dither_flag_syntax) { for (ch = 1; ch <= fbw_channels; ch++) { s->dither_flag[ch] = get_bits1(gbc); } } /* dynamic range */ i = !s->channel_mode; do { if (get_bits1(gbc)) { /* Allow asymmetric application of DRC when drc_scale > 1. 
Amplification of quiet sounds is enhanced */ int range_bits = get_bits(gbc, 8); INTFLOAT range = AC3_RANGE(range_bits); if (range_bits <= 127 || s->drc_scale <= 1.0) s->dynamic_range[i] = AC3_DYNAMIC_RANGE(range); else s->dynamic_range[i] = range; } else if (blk == 0) { s->dynamic_range[i] = AC3_DYNAMIC_RANGE1; } } while (i--); /* spectral extension strategy */ if (s->eac3 && (!blk || get_bits1(gbc))) { s->spx_in_use = get_bits1(gbc); if (s->spx_in_use) { if ((ret = spx_strategy(s, blk)) < 0) return ret; } } if (!s->eac3 || !s->spx_in_use) { s->spx_in_use = 0; for (ch = 1; ch <= fbw_channels; ch++) { s->channel_uses_spx[ch] = 0; s->first_spx_coords[ch] = 1; } } /* spectral extension coordinates */ if (s->spx_in_use) spx_coordinates(s); /* coupling strategy */ if (s->eac3 ? s->cpl_strategy_exists[blk] : get_bits1(gbc)) { if ((ret = coupling_strategy(s, blk, bit_alloc_stages)) < 0) return ret; } else if (!s->eac3) { if (!blk) { av_log(s->avctx, AV_LOG_ERROR, \"new coupling strategy must \" \"be present in block 0\\n\"); return AVERROR_INVALIDDATA; } else { s->cpl_in_use[blk] = s->cpl_in_use[blk-1]; } } cpl_in_use = s->cpl_in_use[blk]; /* coupling coordinates */ if (cpl_in_use) { if ((ret = coupling_coordinates(s, blk)) < 0) return ret; } /* stereo rematrixing strategy and band structure */ if (channel_mode == AC3_CHMODE_STEREO) { if ((s->eac3 && !blk) || get_bits1(gbc)) { s->num_rematrixing_bands = 4; if (cpl_in_use && s->start_freq[CPL_CH] <= 61) { s->num_rematrixing_bands -= 1 + (s->start_freq[CPL_CH] == 37); } else if (s->spx_in_use && s->spx_src_start_freq <= 61) { s->num_rematrixing_bands--; } for (bnd = 0; bnd < s->num_rematrixing_bands; bnd++) s->rematrixing_flags[bnd] = get_bits1(gbc); } else if (!blk) { av_log(s->avctx, AV_LOG_WARNING, \"Warning: \" \"new rematrixing strategy not present in block 0\\n\"); s->num_rematrixing_bands = 0; } } /* exponent strategies for each channel */ for (ch = !cpl_in_use; ch <= s->channels; ch++) { if (!s->eac3) s->exp_strategy[blk][ch] = get_bits(gbc, 2 - (ch == s->lfe_ch)); if (s->exp_strategy[blk][ch] != EXP_REUSE) bit_alloc_stages[ch] = 3; } /* channel bandwidth */ for (ch = 1; ch <= fbw_channels; ch++) { s->start_freq[ch] = 0; if (s->exp_strategy[blk][ch] != EXP_REUSE) { int group_size; int prev = s->end_freq[ch]; if (s->channel_in_cpl[ch]) s->end_freq[ch] = s->start_freq[CPL_CH]; else if (s->channel_uses_spx[ch]) s->end_freq[ch] = s->spx_src_start_freq; else { int bandwidth_code = get_bits(gbc, 6); if (bandwidth_code > 60) { av_log(s->avctx, AV_LOG_ERROR, \"bandwidth code = %d > 60\\n\", bandwidth_code); return AVERROR_INVALIDDATA; } s->end_freq[ch] = bandwidth_code * 3 + 73; } group_size = 3 << (s->exp_strategy[blk][ch] - 1); s->num_exp_groups[ch] = (s->end_freq[ch] + group_size-4) / group_size; if (blk > 0 && s->end_freq[ch] != prev) memset(bit_alloc_stages, 3, AC3_MAX_CHANNELS); } } if (cpl_in_use && s->exp_strategy[blk][CPL_CH] != EXP_REUSE) { s->num_exp_groups[CPL_CH] = (s->end_freq[CPL_CH] - s->start_freq[CPL_CH]) / (3 << (s->exp_strategy[blk][CPL_CH] - 1)); } /* decode exponents for each channel */ for (ch = !cpl_in_use; ch <= s->channels; ch++) { if (s->exp_strategy[blk][ch] != EXP_REUSE) { s->dexps[ch][0] = get_bits(gbc, 4) << !ch; if (decode_exponents(s, gbc, s->exp_strategy[blk][ch], s->num_exp_groups[ch], s->dexps[ch][0], &s->dexps[ch][s->start_freq[ch]+!!ch])) { return AVERROR_INVALIDDATA; } if (ch != CPL_CH && ch != s->lfe_ch) skip_bits(gbc, 2); /* skip gainrng */ } } /* bit allocation information */ if (s->bit_allocation_syntax) { 
if (get_bits1(gbc)) { s->bit_alloc_params.slow_decay = ff_ac3_slow_decay_tab[get_bits(gbc, 2)] >> s->bit_alloc_params.sr_shift; s->bit_alloc_params.fast_decay = ff_ac3_fast_decay_tab[get_bits(gbc, 2)] >> s->bit_alloc_params.sr_shift; s->bit_alloc_params.slow_gain = ff_ac3_slow_gain_tab[get_bits(gbc, 2)]; s->bit_alloc_params.db_per_bit = ff_ac3_db_per_bit_tab[get_bits(gbc, 2)]; s->bit_alloc_params.floor = ff_ac3_floor_tab[get_bits(gbc, 3)]; for (ch = !cpl_in_use; ch <= s->channels; ch++) bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } else if (!blk) { av_log(s->avctx, AV_LOG_ERROR, \"new bit allocation info must \" \"be present in block 0\\n\"); return AVERROR_INVALIDDATA; } } /* signal-to-noise ratio offsets and fast gains (signal-to-mask ratios) */ if (!s->eac3 || !blk) { if (s->snr_offset_strategy && get_bits1(gbc)) { int snr = 0; int csnr; csnr = (get_bits(gbc, 6) - 15) << 4; for (i = ch = !cpl_in_use; ch <= s->channels; ch++) { /* snr offset */ if (ch == i || s->snr_offset_strategy == 2) snr = (csnr + get_bits(gbc, 4)) << 2; /* run at least last bit allocation stage if snr offset changes */ if (blk && s->snr_offset[ch] != snr) { bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 1); } s->snr_offset[ch] = snr; /* fast gain (normal AC-3 only) */ if (!s->eac3) { int prev = s->fast_gain[ch]; s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)]; /* run last 2 bit allocation stages if fast gain changes */ if (blk && prev != s->fast_gain[ch]) bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } } } else if (!s->eac3 && !blk) { av_log(s->avctx, AV_LOG_ERROR, \"new snr offsets must be present in block 0\\n\"); return AVERROR_INVALIDDATA; } } /* fast gain (E-AC-3 only) */ if (s->fast_gain_syntax && get_bits1(gbc)) { for (ch = !cpl_in_use; ch <= s->channels; ch++) { int prev = s->fast_gain[ch]; s->fast_gain[ch] = ff_ac3_fast_gain_tab[get_bits(gbc, 3)]; /* run last 2 bit allocation stages if fast gain changes */ if (blk && prev != s->fast_gain[ch]) bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } } else if (s->eac3 && !blk) { for (ch = !cpl_in_use; ch <= s->channels; ch++) s->fast_gain[ch] = ff_ac3_fast_gain_tab[4]; } /* E-AC-3 to AC-3 converter SNR offset */ if (s->frame_type == EAC3_FRAME_TYPE_INDEPENDENT && get_bits1(gbc)) { skip_bits(gbc, 10); // skip converter snr offset } /* coupling leak information */ if (cpl_in_use) { if (s->first_cpl_leak || get_bits1(gbc)) { int fl = get_bits(gbc, 3); int sl = get_bits(gbc, 3); /* run last 2 bit allocation stages for coupling channel if coupling leak changes */ if (blk && (fl != s->bit_alloc_params.cpl_fast_leak || sl != s->bit_alloc_params.cpl_slow_leak)) { bit_alloc_stages[CPL_CH] = FFMAX(bit_alloc_stages[CPL_CH], 2); } s->bit_alloc_params.cpl_fast_leak = fl; s->bit_alloc_params.cpl_slow_leak = sl; } else if (!s->eac3 && !blk) { av_log(s->avctx, AV_LOG_ERROR, \"new coupling leak info must \" \"be present in block 0\\n\"); return AVERROR_INVALIDDATA; } s->first_cpl_leak = 0; } /* delta bit allocation information */ if (s->dba_syntax && get_bits1(gbc)) { /* delta bit allocation exists (strategy) */ for (ch = !cpl_in_use; ch <= fbw_channels; ch++) { s->dba_mode[ch] = get_bits(gbc, 2); if (s->dba_mode[ch] == DBA_RESERVED) { av_log(s->avctx, AV_LOG_ERROR, \"delta bit allocation strategy reserved\\n\"); return AVERROR_INVALIDDATA; } bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } /* channel delta offset, len and bit allocation */ for (ch = !cpl_in_use; ch <= fbw_channels; ch++) { if (s->dba_mode[ch] == DBA_NEW) { 
s->dba_nsegs[ch] = get_bits(gbc, 3) + 1; for (seg = 0; seg < s->dba_nsegs[ch]; seg++) { s->dba_offsets[ch][seg] = get_bits(gbc, 5); s->dba_lengths[ch][seg] = get_bits(gbc, 4); s->dba_values[ch][seg] = get_bits(gbc, 3); } /* run last 2 bit allocation stages if new dba values */ bit_alloc_stages[ch] = FFMAX(bit_alloc_stages[ch], 2); } } } else if (blk == 0) { for (ch = 0; ch <= s->channels; ch++) { s->dba_mode[ch] = DBA_NONE; } } /* Bit allocation */ for (ch = !cpl_in_use; ch <= s->channels; ch++) { if (bit_alloc_stages[ch] > 2) { /* Exponent mapping into PSD and PSD integration */ ff_ac3_bit_alloc_calc_psd(s->dexps[ch], s->start_freq[ch], s->end_freq[ch], s->psd[ch], s->band_psd[ch]); } if (bit_alloc_stages[ch] > 1) { /* Compute excitation function, Compute masking curve, and Apply delta bit allocation */ if (ff_ac3_bit_alloc_calc_mask(&s->bit_alloc_params, s->band_psd[ch], s->start_freq[ch], s->end_freq[ch], s->fast_gain[ch], (ch == s->lfe_ch), s->dba_mode[ch], s->dba_nsegs[ch], s->dba_offsets[ch], s->dba_lengths[ch], s->dba_values[ch], s->mask[ch])) { av_log(s->avctx, AV_LOG_ERROR, \"error in bit allocation\\n\"); return AVERROR_INVALIDDATA; } } if (bit_alloc_stages[ch] > 0) { /* Compute bit allocation */ const uint8_t *bap_tab = s->channel_uses_aht[ch] ? ff_eac3_hebap_tab : ff_ac3_bap_tab; s->ac3dsp.bit_alloc_calc_bap(s->mask[ch], s->psd[ch], s->start_freq[ch], s->end_freq[ch], s->snr_offset[ch], s->bit_alloc_params.floor, bap_tab, s->bap[ch]); } } /* unused dummy data */ if (s->skip_syntax && get_bits1(gbc)) { int skipl = get_bits(gbc, 9); skip_bits_long(gbc, 8 * skipl); } /* unpack the transform coefficients this also uncouples channels if coupling is in use. */ decode_transform_coeffs(s, blk); /* TODO: generate enhanced coupling coordinates and uncouple */ /* recover coefficients if rematrixing is in use */ if (s->channel_mode == AC3_CHMODE_STEREO) do_rematrixing(s); /* apply scaling to coefficients (headroom, dynrng) */ for (ch = 1; ch <= s->channels; ch++) { int audio_channel = 0; INTFLOAT gain; if (s->channel_mode == AC3_CHMODE_DUALMONO) audio_channel = 2-ch; if (s->heavy_compression && s->compression_exists[audio_channel]) gain = s->heavy_dynamic_range[audio_channel]; else gain = s->dynamic_range[audio_channel]; #if USE_FIXED scale_coefs(s->transform_coeffs[ch], s->fixed_coeffs[ch], gain, 256); #else if (s->target_level != 0) gain = gain * s->level_gain[audio_channel]; gain *= 1.0 / 4194304.0f; s->fmt_conv.int32_to_float_fmul_scalar(s->transform_coeffs[ch], s->fixed_coeffs[ch], gain, 256); #endif } /* apply spectral extension to high frequency bins */ if (CONFIG_EAC3_DECODER && s->spx_in_use) { ff_eac3_apply_spectral_extension(s); } /* downmix and MDCT. order depends on whether block switching is used for any channel in this block. this is because coefficients for the long and short transforms cannot be mixed. */ downmix_output = s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) && s->fbw_channels == s->out_channels); if (different_transforms) { /* the delay samples have already been downmixed, so we upmix the delay samples in order to reconstruct all channels before downmixing. 
*/ if (s->downmixed) { s->downmixed = 0; ac3_upmix_delay(s); } do_imdct(s, s->channels); if (downmix_output) { #if USE_FIXED ac3_downmix_c_fixed16(s->outptr, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256); #else ff_ac3dsp_downmix(&s->ac3dsp, s->outptr, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256); #endif } } else { if (downmix_output) { AC3_RENAME(ff_ac3dsp_downmix)(&s->ac3dsp, s->xcfptr + 1, s->downmix_coeffs, s->out_channels, s->fbw_channels, 256); } if (downmix_output && !s->downmixed) { s->downmixed = 1; AC3_RENAME(ff_ac3dsp_downmix)(&s->ac3dsp, s->dlyptr, s->downmix_coeffs, s->out_channels, s->fbw_channels, 128); } do_imdct(s, s->out_channels); } return 0; }"} {"target": 1, "idx": 23434, "func": "static void write_header(AVFormatContext *s) { double min_buffer_time = 1.0; avio_printf(s->pb, \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n\"); avio_printf(s->pb, \"<MPD\\n\"); avio_printf(s->pb, \" xmlns:xsi=\\\"http://www.w3.org/2001/XMLSchema-instance\\\"\\n\"); avio_printf(s->pb, \" xmlns=\\\"urn:mpeg:DASH:schema:MPD:2011\\\"\\n\"); avio_printf(s->pb, \" xsi:schemaLocation=\\\"urn:mpeg:DASH:schema:MPD:2011\\\"\\n\"); avio_printf(s->pb, \" type=\\\"static\\\"\\n\"); avio_printf(s->pb, \" mediaPresentationDuration=\\\"PT%gS\\\"\\n\", get_duration(s)); avio_printf(s->pb, \" minBufferTime=\\\"PT%gS\\\"\\n\", min_buffer_time); avio_printf(s->pb, \" profiles=\\\"urn:webm:dash:profile:webm-on-demand:2012\\\"\"); avio_printf(s->pb, \">\\n\"); }"} {"target": 1, "idx": 23440, "func": "static void opt_input_file(void *optctx, const char *arg) { if (input_filename) { fprintf(stderr, \"Argument '%s' provided as input filename, but '%s' was already specified.\\n\", arg, input_filename); exit(1); } if (!strcmp(arg, \"-\")) arg = \"pipe:\"; input_filename = arg; }"} {"target": 0, "idx": 23444, "func": "static void gen_rlwinm(DisasContext *ctx) { uint32_t mb, me, sh; sh = SH(ctx->opcode); mb = MB(ctx->opcode); me = ME(ctx->opcode); if (likely(mb == 0 && me == (31 - sh))) { if (likely(sh == 0)) { tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); } else { TCGv t0 = tcg_temp_new(); tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]); tcg_gen_shli_tl(t0, t0, sh); tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free(t0); } } else if (likely(sh != 0 && me == 31 && sh == (32 - mb))) { TCGv t0 = tcg_temp_new(); tcg_gen_ext32u_tl(t0, cpu_gpr[rS(ctx->opcode)]); tcg_gen_shri_tl(t0, t0, mb); tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free(t0); } else if (likely(mb == 0 && me == 31)) { TCGv_i32 t0 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(t0, cpu_gpr[rS(ctx->opcode)]); tcg_gen_rotli_i32(t0, t0, sh); tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free_i32(t0); } else { TCGv t0 = tcg_temp_new(); #if defined(TARGET_PPC64) tcg_gen_deposit_i64(t0, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 32, 32); tcg_gen_rotli_i64(t0, t0, sh); #else tcg_gen_rotli_i32(t0, cpu_gpr[rS(ctx->opcode)], sh); #endif #if defined(TARGET_PPC64) mb += 32; me += 32; #endif tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], t0, MASK(mb, me)); tcg_temp_free(t0); } if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); }"} {"target": 0, "idx": 23471, "func": "static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output) { AVSubtitle subtitle; int i, ret = avcodec_decode_subtitle2(ist->dec_ctx, &subtitle, got_output, pkt); if (ret < 0) return ret; if (!*got_output) return ret; ist->frames_decoded++; for (i = 0; i < nb_output_streams; i++) { OutputStream *ost = output_streams[i]; if 
(!check_output_constraints(ist, ost) || !ost->encoding_needed) continue; do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts); } avsubtitle_free(&subtitle); return ret; }"} {"target": 0, "idx": 23483, "func": "void bdrv_setup_io_funcs(BlockDriver *bdrv) { /* Block drivers without coroutine functions need emulation */ if (!bdrv->bdrv_co_readv) { bdrv->bdrv_co_readv = bdrv_co_readv_em; bdrv->bdrv_co_writev = bdrv_co_writev_em; /* bdrv_co_readv_em()/brdv_co_writev_em() work in terms of aio, so if * the block driver lacks aio we need to emulate that too. */ if (!bdrv->bdrv_aio_readv) { /* add AIO emulation layer */ bdrv->bdrv_aio_readv = bdrv_aio_readv_em; bdrv->bdrv_aio_writev = bdrv_aio_writev_em; } } }"} {"target": 0, "idx": 23488, "func": "static void ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ int i; for(i=0; i<16; i+=4){ if(nnzc[ scan8[i] ]) ff_h264_idct8_add_mmx(dst + block_offset[i], block + i*16, stride); } }"} {"target": 0, "idx": 23509, "func": "static int mov_read_udta_string(MOVContext *c, AVIOContext *pb, MOVAtom atom) { char tmp_key[5]; char str[1024], key2[32], language[4] = {0}; const char *key = NULL; uint16_t langcode = 0; uint32_t data_type = 0, str_size; int (*parse)(MOVContext*, AVIOContext*, unsigned, const char*) = NULL; switch (atom.type) { case MKTAG(0xa9,'n','a','m'): key = \"title\"; break; case MKTAG(0xa9,'a','u','t'): case MKTAG(0xa9,'A','R','T'): key = \"artist\"; break; case MKTAG( 'a','A','R','T'): key = \"album_artist\"; break; case MKTAG(0xa9,'w','r','t'): key = \"composer\"; break; case MKTAG( 'c','p','r','t'): case MKTAG(0xa9,'c','p','y'): key = \"copyright\"; break; case MKTAG(0xa9,'c','m','t'): case MKTAG(0xa9,'i','n','f'): key = \"comment\"; break; case MKTAG(0xa9,'a','l','b'): key = \"album\"; break; case MKTAG(0xa9,'d','a','y'): key = \"date\"; break; case MKTAG(0xa9,'g','e','n'): key = \"genre\"; break; case MKTAG( 'g','n','r','e'): key = \"genre\"; parse = mov_metadata_gnre; break; case MKTAG(0xa9,'t','o','o'): case MKTAG(0xa9,'s','w','r'): key = \"encoder\"; break; case MKTAG(0xa9,'e','n','c'): key = \"encoder\"; break; case MKTAG(0xa9,'x','y','z'): key = \"location\"; break; case MKTAG( 'd','e','s','c'): key = \"description\";break; case MKTAG( 'l','d','e','s'): key = \"synopsis\"; break; case MKTAG( 't','v','s','h'): key = \"show\"; break; case MKTAG( 't','v','e','n'): key = \"episode_id\";break; case MKTAG( 't','v','n','n'): key = \"network\"; break; case MKTAG( 't','r','k','n'): key = \"track\"; parse = mov_metadata_track_or_disc_number; break; case MKTAG( 'd','i','s','k'): key = \"disc\"; parse = mov_metadata_track_or_disc_number; break; case MKTAG( 't','v','e','s'): key = \"episode_sort\"; parse = mov_metadata_int8_bypass_padding; break; case MKTAG( 't','v','s','n'): key = \"season_number\"; parse = mov_metadata_int8_bypass_padding; break; case MKTAG( 's','t','i','k'): key = \"media_type\"; parse = mov_metadata_int8_no_padding; break; case MKTAG( 'h','d','v','d'): key = \"hd_video\"; parse = mov_metadata_int8_no_padding; break; case MKTAG( 'p','g','a','p'): key = \"gapless_playback\"; parse = mov_metadata_int8_no_padding; break; case MKTAG( 'l','o','c','i'): return mov_metadata_loci(c, pb, atom.size); } if (c->itunes_metadata && atom.size > 8) { int data_size = avio_rb32(pb); int tag = avio_rl32(pb); if (tag == MKTAG('d','a','t','a')) { data_type = avio_rb32(pb); // type avio_rb32(pb); // unknown str_size = data_size - 16; atom.size -= 16; if 
(atom.type == MKTAG('c', 'o', 'v', 'r')) { int ret = mov_read_covr(c, pb, data_type, str_size); if (ret < 0) { av_log(c->fc, AV_LOG_ERROR, \"Error parsing cover art.\\n\"); return ret; } } } else return 0; } else if (atom.size > 4 && key && !c->itunes_metadata) { str_size = avio_rb16(pb); // string length langcode = avio_rb16(pb); ff_mov_lang_to_iso639(langcode, language); atom.size -= 4; } else str_size = atom.size; if (c->export_all && !key) { snprintf(tmp_key, 5, \"%.4s\", (char*)&atom.type); key = tmp_key; } if (!key) return 0; if (atom.size < 0) return AVERROR_INVALIDDATA; str_size = FFMIN3(sizeof(str)-1, str_size, atom.size); if (parse) parse(c, pb, str_size, key); else { if (data_type == 3 || (data_type == 0 && (langcode < 0x400 || langcode == 0x7fff))) { // MAC Encoded mov_read_mac_string(c, pb, str_size, str, sizeof(str)); } else { avio_read(pb, str, str_size); str[str_size] = 0; } c->fc->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED; av_dict_set(&c->fc->metadata, key, str, 0); if (*language && strcmp(language, \"und\")) { snprintf(key2, sizeof(key2), \"%s-%s\", key, language); av_dict_set(&c->fc->metadata, key2, str, 0); } } av_dlog(c->fc, \"lang \\\"%3s\\\" \", language); av_dlog(c->fc, \"tag \\\"%s\\\" value \\\"%s\\\" atom \\\"%.4s\\\" %d %\"PRId64\"\\n\", key, str, (char*)&atom.type, str_size, atom.size); return 0; }"} {"target": 0, "idx": 23511, "func": "size_t virtio_serial_guest_ready(VirtIOSerialPort *port) { VirtQueue *vq = port->ivq; unsigned int bytes; if (!virtio_queue_ready(vq) || !(port->vser->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK) || virtio_queue_empty(vq)) { return 0; } if (use_multiport(port->vser) && !port->guest_connected) { return 0; } virtqueue_get_avail_bytes(vq, &bytes, NULL); return bytes; }"} {"target": 0, "idx": 23520, "func": "DriveInfo *add_init_drive(const char *optstr) { DriveInfo *dinfo; QemuOpts *opts; opts = drive_def(optstr); if (!opts) return NULL; dinfo = drive_init(opts, current_machine->use_scsi); if (!dinfo) { qemu_opts_del(opts); return NULL; } return dinfo; }"} {"target": 0, "idx": 23523, "func": "static BusState *qbus_find_recursive(BusState *bus, const char *name, const char *bus_typename) { BusChild *kid; BusState *child, *ret; int match = 1; if (name && (strcmp(bus->name, name) != 0)) { match = 0; } if (bus_typename && (strcmp(object_get_typename(OBJECT(bus)), bus_typename) != 0)) { match = 0; } if (match) { return bus; } QTAILQ_FOREACH(kid, &bus->children, sibling) { DeviceState *dev = kid->child; QLIST_FOREACH(child, &dev->child_bus, sibling) { ret = qbus_find_recursive(child, name, bus_typename); if (ret) { return ret; } } } return NULL; }"} {"target": 0, "idx": 23545, "func": "void qemu_input_event_sync(void) { QemuInputHandlerState *s; if (!runstate_is_running() && !runstate_check(RUN_STATE_SUSPENDED)) { return; } trace_input_event_sync(); QTAILQ_FOREACH(s, &handlers, node) { if (!s->events) { continue; } if (s->handler->sync) { s->handler->sync(s->dev); } s->events = 0; } }"} {"target": 0, "idx": 23575, "func": "static int nvdec_h264_decode_init(AVCodecContext *avctx) { const H264Context *h = avctx->priv_data; const SPS *sps = h->ps.sps; return ff_nvdec_decode_init(avctx, sps->ref_frame_count + sps->num_reorder_frames); }"} {"target": 1, "idx": 23580, "func": "static void mxf_free_metadataset(MXFMetadataSet **ctx, int freectx) { MXFIndexTableSegment *seg; switch ((*ctx)->type) { case Descriptor: av_freep(&((MXFDescriptor *)*ctx)->extradata); break; case MultipleDescriptor: av_freep(&((MXFDescriptor 
*)*ctx)->sub_descriptors_refs); break; case Sequence: av_freep(&((MXFSequence *)*ctx)->structural_components_refs); break; case EssenceGroup: av_freep(&((MXFEssenceGroup *)*ctx)->structural_components_refs); break; case SourcePackage: case MaterialPackage: av_freep(&((MXFPackage *)*ctx)->tracks_refs); av_freep(&((MXFPackage *)*ctx)->name); break; case TaggedValue: av_freep(&((MXFTaggedValue *)*ctx)->name); av_freep(&((MXFTaggedValue *)*ctx)->value); break; case IndexTableSegment: seg = (MXFIndexTableSegment *)*ctx; av_freep(&seg->temporal_offset_entries); av_freep(&seg->flag_entries); av_freep(&seg->stream_offset_entries); default: break; } if (freectx) av_freep(ctx); }"} {"target": 0, "idx": 23589, "func": "static void test_visitor_in_native_list_int64(TestInputVisitorData *data, const void *unused) { test_native_list_integer_helper(data, unused, USER_DEF_NATIVE_LIST_UNION_KIND_S64); }"} {"target": 0, "idx": 23590, "func": "static void qint_destroy_obj(QObject *obj) { assert(obj != NULL); g_free(qobject_to_qint(obj)); }"} {"target": 0, "idx": 23594, "func": "int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb) { CPUS390XState *env = &cpu->env; int cssid, ssid, schid, m; SubchDev *sch; IRB irb; uint64_t addr; int cc, irb_len; uint8_t ar; if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { program_interrupt(env, PGM_OPERAND, 2); return -EIO; } trace_ioinst_sch_id(\"tsch\", cssid, ssid, schid); addr = decode_basedisp_s(env, ipb, &ar); if (addr & 3) { program_interrupt(env, PGM_SPECIFICATION, 2); return -EIO; } sch = css_find_subch(m, cssid, ssid, schid); if (sch && css_subch_visible(sch)) { cc = css_do_tsch_get_irb(sch, &irb, &irb_len); } else { cc = 3; } /* 0 - status pending, 1 - not status pending, 3 - not operational */ if (cc != 3) { if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) { return -EFAULT; } css_do_tsch_update_subch(sch); } else { irb_len = sizeof(irb) - sizeof(irb.emw); /* Access exceptions have a higher priority than cc3 */ if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) { return -EFAULT; } } setcc(cpu, cc); return 0; }"} {"target": 0, "idx": 23613, "func": "static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst, int width, int height, int stride, const uint8_t *src, int src_size) { int i = 0; int read = 0; uint32_t length; uint32_t offset = 1; int esc_count; GetBitContext gb; lag_rac rac; const uint8_t *src_end = src + src_size; rac.avctx = l->avctx; l->zeros = 0; if(src_size < 2) return AVERROR_INVALIDDATA; esc_count = src[0]; if (esc_count < 4) { length = width * height; if(src_size < 5) return AVERROR_INVALIDDATA; if (esc_count && AV_RL32(src + 1) < length) { length = AV_RL32(src + 1); offset += 4; } init_get_bits8(&gb, src + offset, src_size - offset); if (lag_read_prob_header(&rac, &gb) < 0) return -1; ff_lag_rac_init(&rac, &gb, length - stride); for (i = 0; i < height; i++) read += lag_decode_line(l, &rac, dst + (i * stride), width, stride, esc_count); if (read > length) av_log(l->avctx, AV_LOG_WARNING, \"Output more bytes than length (%d of %d)\\n\", read, length); } else if (esc_count < 8) { esc_count -= 4; src ++; src_size --; if (esc_count > 0) { /* Zero run coding only, no range coding. 
*/ for (i = 0; i < height; i++) { int res = lag_decode_zero_run_line(l, dst + (i * stride), src, src_end, width, esc_count); if (res < 0) return res; src += res; } } else { if (src_size < width * height) return AVERROR_INVALIDDATA; // buffer not big enough /* Plane is stored uncompressed */ for (i = 0; i < height; i++) { memcpy(dst + (i * stride), src, width); src += width; } } } else if (esc_count == 0xff) { /* Plane is a solid run of given value */ for (i = 0; i < height; i++) memset(dst + i * stride, src[1], width); /* Do not apply prediction. Note: memset to 0 above, setting first value to src[1] and applying prediction gives the same result. */ return 0; } else { av_log(l->avctx, AV_LOG_ERROR, \"Invalid zero run escape code! (%#x)\\n\", esc_count); return -1; } if (l->avctx->pix_fmt != AV_PIX_FMT_YUV422P) { for (i = 0; i < height; i++) { lag_pred_line(l, dst, width, stride, i); dst += stride; } } else { for (i = 0; i < height; i++) { lag_pred_line_yuy2(l, dst, width, stride, i, width == l->avctx->width); dst += stride; } } return 0; }"} {"target": 0, "idx": 23616, "func": "static ssize_t v9fs_synth_pwritev(FsContext *ctx, V9fsFidOpenState *fs, const struct iovec *iov, int iovcnt, off_t offset) { int i, count = 0, wcount; V9fsSynthOpenState *synth_open = fs->private; V9fsSynthNode *node = synth_open->node; if (!node->attr->write) { errno = EPERM; return -1; } for (i = 0; i < iovcnt; i++) { wcount = node->attr->write(iov[i].iov_base, iov[i].iov_len, offset, node->private); offset += wcount; count += wcount; /* If we wrote less than requested. we are done */ if (wcount < iov[i].iov_len) { break; } } return count; }"} {"target": 0, "idx": 23621, "func": "static void piix4_device_unplug_request_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { PIIX4PMState *s = PIIX4_PM(hotplug_dev); if (s->acpi_memory_hotplug.is_enabled && object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { acpi_memory_unplug_request_cb(hotplug_dev, &s->acpi_memory_hotplug, dev, errp); } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { acpi_pcihp_device_unplug_cb(hotplug_dev, &s->acpi_pci_hotplug, dev, errp); } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) && !s->cpu_hotplug_legacy) { acpi_cpu_unplug_request_cb(hotplug_dev, &s->cpuhp_state, dev, errp); } else { error_setg(errp, \"acpi: device unplug request for not supported device\" \" type: %s\", object_get_typename(OBJECT(dev))); } }"} {"target": 0, "idx": 23653, "func": "static av_cold int smc_decode_init(AVCodecContext *avctx) { SmcContext *s = avctx->priv_data; s->avctx = avctx; avctx->pix_fmt = AV_PIX_FMT_PAL8; s->frame.data[0] = NULL; return 0; }"} {"target": 1, "idx": 23663, "func": "static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len) { bool print16 = !!trace_event_get_state(TRACE_USB_OHCI_TD_PKT_SHORT); bool printall = !!trace_event_get_state(TRACE_USB_OHCI_TD_PKT_FULL); const int width = 16; int i; char tmp[3 * width + 1]; char *p = tmp; if (!printall && !print16) { return; } for (i = 0; ; i++) { if (i && (!(i % width) || (i == len))) { if (!printall) { trace_usb_ohci_td_pkt_short(msg, tmp); break; } trace_usb_ohci_td_pkt_full(msg, tmp); p = tmp; *p = 0; } if (i == len) { break; } p += sprintf(p, \" %.2x\", buf[i]); } }"} {"target": 1, "idx": 23665, "func": "void ff_set_fixed_vector(float *out, const AMRFixed *in, float scale, int size) { int i; for (i=0; i < in->n; i++) { int x = in->x[i], repeats = !((in->no_repeat_mask >> i) & 1); float y = in->y[i] * scale; do { out[x] += y; y *= in->pitch_fac; x += 
in->pitch_lag; } while (x < size && repeats); } }"} {"target": 0, "idx": 23682, "func": "int do_subchannel_work_virtual(SubchDev *sch) { SCSW *s = &sch->curr_status.scsw; if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) { sch_handle_clear_func(sch); } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) { sch_handle_halt_func(sch); } else if (s->ctrl & SCSW_FCTL_START_FUNC) { /* Triggered by both ssch and rsch. */ sch_handle_start_func_virtual(sch); } else { /* Cannot happen. */ return 0; } css_inject_io_interrupt(sch); return 0; }"} {"target": 0, "idx": 23686, "func": "static inline uint64_t ldq_phys_internal(target_phys_addr_t addr, enum device_endian endian) { uint8_t *ptr; uint64_t val; MemoryRegionSection *section; section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { /* I/O case */ addr = memory_region_section_addr(section, addr); /* XXX This is broken when device endian != cpu endian. Fix and add \"endian\" variable check */ #ifdef TARGET_WORDS_BIGENDIAN val = io_mem_read(section->mr, addr, 4) << 32; val |= io_mem_read(section->mr, addr + 4, 4); #else val = io_mem_read(section->mr, addr, 4); val |= io_mem_read(section->mr, addr + 4, 4) << 32; #endif } else { /* RAM case */ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) + memory_region_section_addr(section, addr)); switch (endian) { case DEVICE_LITTLE_ENDIAN: val = ldq_le_p(ptr); break; case DEVICE_BIG_ENDIAN: val = ldq_be_p(ptr); break; default: val = ldq_p(ptr); break; } } return val; }"} {"target": 0, "idx": 23689, "func": "static int raw_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, Error **errp) { assert(reopen_state != NULL); assert(reopen_state->bs != NULL); reopen_state->opaque = g_new0(BDRVRawState, 1); return raw_read_options( reopen_state->options, reopen_state->bs, reopen_state->opaque, errp); }"} {"target": 1, "idx": 23710, "func": "static void nfs_refresh_filename(BlockDriverState *bs, QDict *options) { NFSClient *client = bs->opaque; QDict *opts = qdict_new(); QObject *server_qdict; Visitor *ov; qdict_put(opts, \"driver\", qstring_from_str(\"nfs\")); if (client->uid && !client->gid) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), \"nfs://%s%s?uid=%\" PRId64, client->server->host, client->path, client->uid); } else if (!client->uid && client->gid) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), \"nfs://%s%s?gid=%\" PRId64, client->server->host, client->path, client->gid); } else if (client->uid && client->gid) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), \"nfs://%s%s?uid=%\" PRId64 \"&gid=%\" PRId64, client->server->host, client->path, client->uid, client->gid); } else { snprintf(bs->exact_filename, sizeof(bs->exact_filename), \"nfs://%s%s\", client->server->host, client->path); } ov = qobject_output_visitor_new(&server_qdict); visit_type_NFSServer(ov, NULL, &client->server, &error_abort); visit_complete(ov, &server_qdict); assert(qobject_type(server_qdict) == QTYPE_QDICT); qdict_put_obj(opts, \"server\", server_qdict); qdict_put(opts, \"path\", qstring_from_str(client->path)); if (client->uid) { qdict_put(opts, \"uid\", qint_from_int(client->uid)); } if (client->gid) { qdict_put(opts, \"gid\", qint_from_int(client->gid)); } if (client->tcp_syncnt) { qdict_put(opts, \"tcp-syncnt\", qint_from_int(client->tcp_syncnt)); } if (client->readahead) { qdict_put(opts, \"readahead\", qint_from_int(client->readahead)); } if (client->pagecache) { 
qdict_put(opts, \"pagecache\", qint_from_int(client->pagecache)); } if (client->debug) { qdict_put(opts, \"debug\", qint_from_int(client->debug)); } visit_free(ov); qdict_flatten(opts); bs->full_open_options = opts; }"} {"target": 1, "idx": 23727, "func": "iscsi_readcapacity16_cb(struct iscsi_context *iscsi, int status, void *command_data, void *opaque) { struct IscsiTask *itask = opaque; struct scsi_readcapacity16 *rc16; struct scsi_task *task = command_data; if (status != 0) { error_report(\"iSCSI: Failed to read capacity of iSCSI lun. %s\", iscsi_get_error(iscsi)); itask->status = 1; itask->complete = 1; scsi_free_scsi_task(task); return; } rc16 = scsi_datain_unmarshall(task); if (rc16 == NULL) { error_report(\"iSCSI: Failed to unmarshall readcapacity16 data.\"); itask->status = 1; itask->complete = 1; scsi_free_scsi_task(task); return; } itask->iscsilun->block_size = rc16->block_length; itask->iscsilun->num_blocks = rc16->returned_lba + 1; itask->bs->total_sectors = itask->iscsilun->num_blocks * itask->iscsilun->block_size / BDRV_SECTOR_SIZE ; itask->status = 0; itask->complete = 1; scsi_free_scsi_task(task); }"} {"target": 0, "idx": 23731, "func": "static void fill_scaling_lists(const AVCodecContext *avctx, AVDXVAContext *ctx, const H264Context *h, DXVA_Qmatrix_H264 *qm) { unsigned i, j; memset(qm, 0, sizeof(*qm)); if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG) { for (i = 0; i < 6; i++) for (j = 0; j < 16; j++) qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][j]; for (i = 0; i < 64; i++) { qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][i]; qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][i]; } } else { for (i = 0; i < 6; i++) for (j = 0; j < 16; j++) qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][ff_zigzag_scan[j]]; for (i = 0; i < 64; i++) { qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][ff_zigzag_direct[i]]; qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][ff_zigzag_direct[i]]; } } }"} {"target": 0, "idx": 23733, "func": "uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2, uint64_t arg3) { CPU_DoubleU farg1, farg2, farg3; farg1.ll = arg1; farg2.ll = arg2; farg3.ll = arg3; if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { /* Multiplication of zero by infinity */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d) || float64_is_signaling_nan(farg3.d))) { /* sNaN operation */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); } /* This is the way the PowerPC specification defines it */ float128 ft0_128, ft1_128; ft0_128 = float64_to_float128(farg1.d, &env->fp_status); ft1_128 = float64_to_float128(farg2.d, &env->fp_status); ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) && float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) { /* Magnitude subtraction of infinities */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); } else { ft1_128 = float64_to_float128(farg3.d, &env->fp_status); ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status); farg1.d = float128_to_float64(ft0_128, &env->fp_status); } if (likely(!float64_is_any_nan(farg1.d))) { farg1.d = float64_chs(farg1.d); } } return farg1.ll; }"} {"target": 0, "idx": 23756, "func": "static int decode_tag(AVCodecContext * avctx, void *data, int 
*data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; NellyMoserDecodeContext *s = avctx->priv_data; int blocks, i; int16_t* samples; *data_size = 0; samples = (int16_t*)data; if (buf_size < avctx->block_align) return buf_size; if (buf_size % 64) { av_log(avctx, AV_LOG_ERROR, \"Tag size %d.\\n\", buf_size); return buf_size; } blocks = buf_size / 64; /* Normal numbers of blocks for sample rates: * 8000 Hz - 1 * 11025 Hz - 2 * 16000 Hz - 3 * 22050 Hz - 4 * 44100 Hz - 8 */ for (i=0 ; i<blocks ; i++) { nelly_decode_block(s, &buf[i*NELLY_BLOCK_LEN], s->float_buf); s->fmt_conv.float_to_int16(&samples[i*NELLY_SAMPLES], s->float_buf, NELLY_SAMPLES); *data_size += NELLY_SAMPLES*sizeof(int16_t); } return buf_size; }"} {"target": 1, "idx": 23761, "func": "static int megasas_map_dcmd(MegasasState *s, MegasasCmd *cmd) { dma_addr_t iov_pa, iov_size; cmd->flags = le16_to_cpu(cmd->frame->header.flags); if (!cmd->frame->header.sge_count) { trace_megasas_dcmd_zero_sge(cmd->index); cmd->iov_size = 0; return 0; } else if (cmd->frame->header.sge_count > 1) { trace_megasas_dcmd_invalid_sge(cmd->index, cmd->frame->header.sge_count); cmd->iov_size = 0; return -1; } iov_pa = megasas_sgl_get_addr(cmd, &cmd->frame->dcmd.sgl); iov_size = megasas_sgl_get_len(cmd, &cmd->frame->dcmd.sgl); pci_dma_sglist_init(&cmd->qsg, PCI_DEVICE(s), 1); qemu_sglist_add(&cmd->qsg, iov_pa, iov_size); cmd->iov_size = iov_size; return cmd->iov_size; }"} {"target": 1, "idx": 23772, "func": "static void cdrom_change_cb(void *opaque, int reason) { IDEState *s = opaque; uint64_t nb_sectors; if (!(reason & CHANGE_MEDIA)) { return; } bdrv_get_geometry(s->bs, &nb_sectors); s->nb_sectors = nb_sectors; s->sense_key = SENSE_UNIT_ATTENTION; s->asc = ASC_MEDIUM_MAY_HAVE_CHANGED; s->cdrom_changed = 1; s->events.new_media = true; ide_set_irq(s->bus); }"} {"target": 1, "idx": 23774, "func": "static int gdb_handle_packet(GDBState *s, const char *line_buf) { CPUArchState *env; const char *p; uint32_t thread; int ch, reg_size, type, res; char buf[MAX_PACKET_LENGTH]; uint8_t mem_buf[MAX_PACKET_LENGTH]; uint8_t *registers; target_ulong addr, len; #ifdef DEBUG_GDB printf(\"command='%s'\\n\", line_buf); #endif p = line_buf; ch = *p++; switch(ch) { case '?': /* TODO: Make this return the correct value for user-mode. */ snprintf(buf, sizeof(buf), \"T%02xthread:%02x;\", GDB_SIGNAL_TRAP, cpu_index(ENV_GET_CPU(s->c_cpu))); put_packet(s, buf); /* Remove all the breakpoints when this query is issued, * because gdb is doing and initial connect and the state * should be cleaned up. 
*/ gdb_breakpoint_remove_all(); break; case 'c': if (*p != '\\0') { addr = strtoull(p, (char **)&p, 16); gdb_set_cpu_pc(s, addr); } s->signal = 0; gdb_continue(s); return RS_IDLE; case 'C': s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16)); if (s->signal == -1) s->signal = 0; gdb_continue(s); return RS_IDLE; case 'v': if (strncmp(p, \"Cont\", 4) == 0) { int res_signal, res_thread; p += 4; if (*p == '?') { put_packet(s, \"vCont;c;C;s;S\"); break; } res = 0; res_signal = 0; res_thread = 0; while (*p) { int action, signal; if (*p++ != ';') { res = 0; break; } action = *p++; signal = 0; if (action == 'C' || action == 'S') { signal = strtoul(p, (char **)&p, 16); } else if (action != 'c' && action != 's') { res = 0; break; } thread = 0; if (*p == ':') { thread = strtoull(p+1, (char **)&p, 16); } action = tolower(action); if (res == 0 || (res == 'c' && action == 's')) { res = action; res_signal = signal; res_thread = thread; } } if (res) { if (res_thread != -1 && res_thread != 0) { env = find_cpu(res_thread); if (env == NULL) { put_packet(s, \"E22\"); break; } s->c_cpu = env; } if (res == 's') { cpu_single_step(s->c_cpu, sstep_flags); } s->signal = res_signal; gdb_continue(s); return RS_IDLE; } break; } else { goto unknown_command; } case 'k': #ifdef CONFIG_USER_ONLY /* Kill the target */ fprintf(stderr, \"\\nQEMU: Terminated via GDBstub\\n\"); exit(0); #endif case 'D': /* Detach packet */ gdb_breakpoint_remove_all(); gdb_syscall_mode = GDB_SYS_DISABLED; gdb_continue(s); put_packet(s, \"OK\"); break; case 's': if (*p != '\\0') { addr = strtoull(p, (char **)&p, 16); gdb_set_cpu_pc(s, addr); } cpu_single_step(s->c_cpu, sstep_flags); gdb_continue(s); return RS_IDLE; case 'F': { target_ulong ret; target_ulong err; ret = strtoull(p, (char **)&p, 16); if (*p == ',') { p++; err = strtoull(p, (char **)&p, 16); } else { err = 0; } if (*p == ',') p++; type = *p; if (s->current_syscall_cb) { s->current_syscall_cb(s->c_cpu, ret, err); s->current_syscall_cb = NULL; } if (type == 'C') { put_packet(s, \"T02\"); } else { gdb_continue(s); } } break; case 'g': cpu_synchronize_state(ENV_GET_CPU(s->g_cpu)); env = s->g_cpu; len = 0; for (addr = 0; addr < num_g_regs; addr++) { reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr); len += reg_size; } memtohex(buf, mem_buf, len); put_packet(s, buf); break; case 'G': cpu_synchronize_state(ENV_GET_CPU(s->g_cpu)); env = s->g_cpu; registers = mem_buf; len = strlen(p) / 2; hextomem((uint8_t *)registers, p, len); for (addr = 0; addr < num_g_regs && len > 0; addr++) { reg_size = gdb_write_register(s->g_cpu, registers, addr); len -= reg_size; registers += reg_size; } put_packet(s, \"OK\"); break; case 'm': addr = strtoull(p, (char **)&p, 16); if (*p == ',') p++; len = strtoull(p, NULL, 16); if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) { put_packet (s, \"E14\"); } else { memtohex(buf, mem_buf, len); put_packet(s, buf); } break; case 'M': addr = strtoull(p, (char **)&p, 16); if (*p == ',') p++; len = strtoull(p, (char **)&p, 16); if (*p == ':') p++; hextomem(mem_buf, p, len); if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0) { put_packet(s, \"E14\"); } else { put_packet(s, \"OK\"); } break; case 'p': /* Older gdb are really dumb, and don't use 'g' if 'p' is avaialable. This works, but can be very slow. Anything new enough to understand XML also knows how to use this properly. 
*/ if (!gdb_has_xml) goto unknown_command; addr = strtoull(p, (char **)&p, 16); reg_size = gdb_read_register(s->g_cpu, mem_buf, addr); if (reg_size) { memtohex(buf, mem_buf, reg_size); put_packet(s, buf); } else { put_packet(s, \"E14\"); } break; case 'P': if (!gdb_has_xml) goto unknown_command; addr = strtoull(p, (char **)&p, 16); if (*p == '=') p++; reg_size = strlen(p) / 2; hextomem(mem_buf, p, reg_size); gdb_write_register(s->g_cpu, mem_buf, addr); put_packet(s, \"OK\"); break; case 'Z': case 'z': type = strtoul(p, (char **)&p, 16); if (*p == ',') p++; addr = strtoull(p, (char **)&p, 16); if (*p == ',') p++; len = strtoull(p, (char **)&p, 16); if (ch == 'Z') res = gdb_breakpoint_insert(addr, len, type); else res = gdb_breakpoint_remove(addr, len, type); if (res >= 0) put_packet(s, \"OK\"); else if (res == -ENOSYS) put_packet(s, \"\"); else put_packet(s, \"E22\"); break; case 'H': type = *p++; thread = strtoull(p, (char **)&p, 16); if (thread == -1 || thread == 0) { put_packet(s, \"OK\"); break; } env = find_cpu(thread); if (env == NULL) { put_packet(s, \"E22\"); break; } switch (type) { case 'c': s->c_cpu = env; put_packet(s, \"OK\"); break; case 'g': s->g_cpu = env; put_packet(s, \"OK\"); break; default: put_packet(s, \"E22\"); break; } break; case 'T': thread = strtoull(p, (char **)&p, 16); env = find_cpu(thread); if (env != NULL) { put_packet(s, \"OK\"); } else { put_packet(s, \"E22\"); } break; case 'q': case 'Q': /* parse any 'q' packets here */ if (!strcmp(p,\"qemu.sstepbits\")) { /* Query Breakpoint bit definitions */ snprintf(buf, sizeof(buf), \"ENABLE=%x,NOIRQ=%x,NOTIMER=%x\", SSTEP_ENABLE, SSTEP_NOIRQ, SSTEP_NOTIMER); put_packet(s, buf); break; } else if (strncmp(p,\"qemu.sstep\",10) == 0) { /* Display or change the sstep_flags */ p += 10; if (*p != '=') { /* Display current setting */ snprintf(buf, sizeof(buf), \"0x%x\", sstep_flags); put_packet(s, buf); break; } p++; type = strtoul(p, (char **)&p, 16); sstep_flags = type; put_packet(s, \"OK\"); break; } else if (strcmp(p,\"C\") == 0) { /* \"Current thread\" remains vague in the spec, so always return * the first CPU (gdb returns the first thread). */ put_packet(s, \"QC1\"); break; } else if (strcmp(p,\"fThreadInfo\") == 0) { s->query_cpu = first_cpu->env_ptr; goto report_cpuinfo; } else if (strcmp(p,\"sThreadInfo\") == 0) { report_cpuinfo: if (s->query_cpu) { snprintf(buf, sizeof(buf), \"m%x\", cpu_index(ENV_GET_CPU(s->query_cpu))); put_packet(s, buf); s->query_cpu = ENV_GET_CPU(s->query_cpu)->next_cpu->env_ptr; } else put_packet(s, \"l\"); break; } else if (strncmp(p,\"ThreadExtraInfo,\", 16) == 0) { thread = strtoull(p+16, (char **)&p, 16); env = find_cpu(thread); if (env != NULL) { CPUState *cpu = ENV_GET_CPU(env); cpu_synchronize_state(cpu); len = snprintf((char *)mem_buf, sizeof(mem_buf), \"CPU#%d [%s]\", cpu->cpu_index, cpu->halted ? 
\"halted \" : \"running\"); memtohex(buf, mem_buf, len); put_packet(s, buf); } break; } #ifdef CONFIG_USER_ONLY else if (strncmp(p, \"Offsets\", 7) == 0) { TaskState *ts = s->c_cpu->opaque; snprintf(buf, sizeof(buf), \"Text=\" TARGET_ABI_FMT_lx \";Data=\" TARGET_ABI_FMT_lx \";Bss=\" TARGET_ABI_FMT_lx, ts->info->code_offset, ts->info->data_offset, ts->info->data_offset); put_packet(s, buf); break; } #else /* !CONFIG_USER_ONLY */ else if (strncmp(p, \"Rcmd,\", 5) == 0) { int len = strlen(p + 5); if ((len % 2) != 0) { put_packet(s, \"E01\"); break; } hextomem(mem_buf, p + 5, len); len = len / 2; mem_buf[len++] = 0; qemu_chr_be_write(s->mon_chr, mem_buf, len); put_packet(s, \"OK\"); break; } #endif /* !CONFIG_USER_ONLY */ if (strncmp(p, \"Supported\", 9) == 0) { snprintf(buf, sizeof(buf), \"PacketSize=%x\", MAX_PACKET_LENGTH); #ifdef GDB_CORE_XML pstrcat(buf, sizeof(buf), \";qXfer:features:read+\"); #endif put_packet(s, buf); break; } #ifdef GDB_CORE_XML if (strncmp(p, \"Xfer:features:read:\", 19) == 0) { const char *xml; target_ulong total_len; gdb_has_xml = 1; p += 19; xml = get_feature_xml(p, &p); if (!xml) { snprintf(buf, sizeof(buf), \"E00\"); put_packet(s, buf); break; } if (*p == ':') p++; addr = strtoul(p, (char **)&p, 16); if (*p == ',') p++; len = strtoul(p, (char **)&p, 16); total_len = strlen(xml); if (addr > total_len) { snprintf(buf, sizeof(buf), \"E00\"); put_packet(s, buf); break; } if (len > (MAX_PACKET_LENGTH - 5) / 2) len = (MAX_PACKET_LENGTH - 5) / 2; if (len < total_len - addr) { buf[0] = 'm'; len = memtox(buf + 1, xml + addr, len); } else { buf[0] = 'l'; len = memtox(buf + 1, xml + addr, total_len - addr); } put_packet_binary(s, buf, len + 1); break; } #endif /* Unrecognised 'q' command. */ goto unknown_command; default: unknown_command: /* put empty packet */ buf[0] = '\\0'; put_packet(s, buf); break; } return RS_IDLE; }"} {"target": 0, "idx": 23797, "func": "static int http_connect(URLContext *h, const char *path, const char *local_path, const char *hoststr, const char *auth, const char *proxyauth, int *new_location) { HTTPContext *s = h->priv_data; int post, err; char headers[HTTP_HEADERS_SIZE] = \"\"; char *authstr = NULL, *proxyauthstr = NULL; uint64_t off = s->off; int len = 0; const char *method; int send_expect_100 = 0; /* send http header */ post = h->flags & AVIO_FLAG_WRITE; if (s->post_data) { /* force POST method and disable chunked encoding when * custom HTTP post data is set */ post = 1; s->chunked_post = 0; } if (s->method) method = s->method; else method = post ? \"POST\" : \"GET\"; authstr = ff_http_auth_create_response(&s->auth_state, auth, local_path, method); proxyauthstr = ff_http_auth_create_response(&s->proxy_auth_state, proxyauth, local_path, method); if (post && !s->post_data) { send_expect_100 = s->send_expect_100; /* The user has supplied authentication but we don't know the auth type, * send Expect: 100-continue to get the 401 response including the * WWW-Authenticate header, or an 100 continue if no auth actually * is needed. 
*/ if (auth && *auth && s->auth_state.auth_type == HTTP_AUTH_NONE && s->http_code != 401) send_expect_100 = 1; } #if FF_API_HTTP_USER_AGENT if (strcmp(s->user_agent_deprecated, DEFAULT_USER_AGENT)) { av_log(s, AV_LOG_WARNING, \"the user-agent option is deprecated, please use user_agent option\\n\"); s->user_agent = av_strdup(s->user_agent_deprecated); } #endif /* set default headers if needed */ if (!has_header(s->headers, \"\\r\\nUser-Agent: \")) len += av_strlcatf(headers + len, sizeof(headers) - len, \"User-Agent: %s\\r\\n\", s->user_agent); if (!has_header(s->headers, \"\\r\\nAccept: \")) len += av_strlcpy(headers + len, \"Accept: */*\\r\\n\", sizeof(headers) - len); // Note: we send this on purpose even when s->off is 0 when we're probing, // since it allows us to detect more reliably if a (non-conforming) // server supports seeking by analysing the reply headers. if (!has_header(s->headers, \"\\r\\nRange: \") && !post && (s->off > 0 || s->end_off || s->seekable == -1)) { len += av_strlcatf(headers + len, sizeof(headers) - len, \"Range: bytes=%\"PRIu64\"-\", s->off); if (s->end_off) len += av_strlcatf(headers + len, sizeof(headers) - len, \"%\"PRId64, s->end_off - 1); len += av_strlcpy(headers + len, \"\\r\\n\", sizeof(headers) - len); } if (send_expect_100 && !has_header(s->headers, \"\\r\\nExpect: \")) len += av_strlcatf(headers + len, sizeof(headers) - len, \"Expect: 100-continue\\r\\n\"); if (!has_header(s->headers, \"\\r\\nConnection: \")) { if (s->multiple_requests) len += av_strlcpy(headers + len, \"Connection: keep-alive\\r\\n\", sizeof(headers) - len); else len += av_strlcpy(headers + len, \"Connection: close\\r\\n\", sizeof(headers) - len); } if (!has_header(s->headers, \"\\r\\nHost: \")) len += av_strlcatf(headers + len, sizeof(headers) - len, \"Host: %s\\r\\n\", hoststr); if (!has_header(s->headers, \"\\r\\nContent-Length: \") && s->post_data) len += av_strlcatf(headers + len, sizeof(headers) - len, \"Content-Length: %d\\r\\n\", s->post_datalen); if (!has_header(s->headers, \"\\r\\nContent-Type: \") && s->content_type) len += av_strlcatf(headers + len, sizeof(headers) - len, \"Content-Type: %s\\r\\n\", s->content_type); if (!has_header(s->headers, \"\\r\\nCookie: \") && s->cookies) { char *cookies = NULL; if (!get_cookies(s, &cookies, path, hoststr) && cookies) { len += av_strlcatf(headers + len, sizeof(headers) - len, \"Cookie: %s\\r\\n\", cookies); av_free(cookies); } } if (!has_header(s->headers, \"\\r\\nIcy-MetaData: \") && s->icy) len += av_strlcatf(headers + len, sizeof(headers) - len, \"Icy-MetaData: %d\\r\\n\", 1); /* now add in custom headers */ if (s->headers) av_strlcpy(headers + len, s->headers, sizeof(headers) - len); snprintf(s->buffer, sizeof(s->buffer), \"%s %s HTTP/1.1\\r\\n\" \"%s\" \"%s\" \"%s\" \"%s%s\" \"\\r\\n\", method, path, post && s->chunked_post ? \"Transfer-Encoding: chunked\\r\\n\" : \"\", headers, authstr ? authstr : \"\", proxyauthstr ? \"Proxy-\" : \"\", proxyauthstr ? proxyauthstr : \"\"); av_log(h, AV_LOG_DEBUG, \"request: %s\\n\", s->buffer); if ((err = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0) goto done; if (s->post_data) if ((err = ffurl_write(s->hd, s->post_data, s->post_datalen)) < 0) goto done; /* init input buffer */ s->buf_ptr = s->buffer; s->buf_end = s->buffer; s->line_count = 0; s->off = 0; s->icy_data_read = 0; s->filesize = UINT64_MAX; s->willclose = 0; s->end_chunked_post = 0; s->end_header = 0; if (post && !s->post_data && !send_expect_100) { /* Pretend that it did work. 
We didn't read any header yet, since * we've still to send the POST data, but the code calling this * function will check http_code after we return. */ s->http_code = 200; err = 0; goto done; } /* wait for header */ err = http_read_header(h, new_location); if (err < 0) goto done; if (*new_location) s->off = off; err = (off == s->off) ? 0 : -1; done: av_freep(&authstr); av_freep(&proxyauthstr); return err; }"} {"target": 1, "idx": 23806, "func": "static int xen_host_pci_sysfs_path(const XenHostPCIDevice *d, const char *name, char *buf, ssize_t size) { int rc; rc = snprintf(buf, size, \"/sys/bus/pci/devices/%04x:%02x:%02x.%d/%s\", d->domain, d->bus, d->dev, d->func, name); if (rc >= size || rc < 0) { /* The output is truncated, or some other error was encountered */ return -ENODEV; } return 0; }"} {"target": 0, "idx": 23822, "func": "static void test_validate_fail_union_native_list(TestInputVisitorData *data, const void *unused) { UserDefNativeListUnion *tmp = NULL; Error *err = NULL; Visitor *v; v = validate_test_init(data, \"{ 'type': 'integer', 'data' : [ 'string' ] }\"); visit_type_UserDefNativeListUnion(v, NULL, &tmp, &err); error_free_or_abort(&err); g_assert(!tmp); }"} {"target": 1, "idx": 23840, "func": "static int colo_do_checkpoint_transaction(MigrationState *s, QIOChannelBuffer *bioc, QEMUFile *fb) { Error *local_err = NULL; int ret = -1; colo_send_message(s->to_dst_file, COLO_MESSAGE_CHECKPOINT_REQUEST, &local_err); if (local_err) { colo_receive_check_message(s->rp_state.from_dst_file, COLO_MESSAGE_CHECKPOINT_REPLY, &local_err); if (local_err) { /* Reset channel-buffer directly */ qio_channel_io_seek(QIO_CHANNEL(bioc), 0, 0, NULL); bioc->usage = 0; qemu_mutex_lock_iothread(); vm_stop_force_state(RUN_STATE_COLO); trace_colo_vm_state_change(\"run\", \"stop\"); /* Disable block migration */ s->params.blk = 0; s->params.shared = 0; qemu_savevm_state_header(fb); qemu_savevm_state_begin(fb, &s->params); qemu_mutex_lock_iothread(); qemu_savevm_state_complete_precopy(fb, false); qemu_fflush(fb); colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err); if (local_err) { * We need the size of the VMstate data in Secondary side, * With which we can decide how much data should be read. 
colo_send_message_value(s->to_dst_file, COLO_MESSAGE_VMSTATE_SIZE, bioc->usage, &local_err); if (local_err) { qemu_put_buffer(s->to_dst_file, bioc->data, bioc->usage); qemu_fflush(s->to_dst_file); ret = qemu_file_get_error(s->to_dst_file); if (ret < 0) { colo_receive_check_message(s->rp_state.from_dst_file, COLO_MESSAGE_VMSTATE_RECEIVED, &local_err); if (local_err) { colo_receive_check_message(s->rp_state.from_dst_file, COLO_MESSAGE_VMSTATE_LOADED, &local_err); if (local_err) { ret = 0; qemu_mutex_lock_iothread(); vm_start(); trace_colo_vm_state_change(\"stop\", \"run\"); out: if (local_err) { error_report_err(local_err); return ret;"} {"target": 1, "idx": 23841, "func": "void qpci_msix_disable(QPCIDevice *dev) { uint8_t addr; uint16_t val; g_assert(dev->msix_enabled); addr = qpci_find_capability(dev, PCI_CAP_ID_MSIX); g_assert_cmphex(addr, !=, 0); val = qpci_config_readw(dev, addr + PCI_MSIX_FLAGS); qpci_config_writew(dev, addr + PCI_MSIX_FLAGS, val & ~PCI_MSIX_FLAGS_ENABLE); qpci_iounmap(dev, dev->msix_table); qpci_iounmap(dev, dev->msix_pba); dev->msix_enabled = 0; dev->msix_table = NULL; dev->msix_pba = NULL; }"} {"target": 0, "idx": 23847, "func": "static struct omap_rtc_s *omap_rtc_init(MemoryRegion *system_memory, target_phys_addr_t base, qemu_irq timerirq, qemu_irq alarmirq, omap_clk clk) { struct omap_rtc_s *s = (struct omap_rtc_s *) g_malloc0(sizeof(struct omap_rtc_s)); s->irq = timerirq; s->alarm = alarmirq; s->clk = qemu_new_timer_ms(rtc_clock, omap_rtc_tick, s); omap_rtc_reset(s); memory_region_init_io(&s->iomem, &omap_rtc_ops, s, \"omap-rtc\", 0x800); memory_region_add_subregion(system_memory, base, &s->iomem); return s; }"} {"target": 0, "idx": 23849, "func": "static ssize_t qio_channel_socket_writev(QIOChannel *ioc, const struct iovec *iov, size_t niov, int *fds, size_t nfds, Error **errp) { QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); ssize_t ret; struct msghdr msg = { NULL, }; char control[CMSG_SPACE(sizeof(int) * SOCKET_MAX_FDS)] = { 0 }; size_t fdsize = sizeof(int) * nfds; struct cmsghdr *cmsg; msg.msg_iov = (struct iovec *)iov; msg.msg_iovlen = niov; if (nfds) { if (nfds > SOCKET_MAX_FDS) { error_setg_errno(errp, EINVAL, \"Only %d FDs can be sent, got %zu\", SOCKET_MAX_FDS, nfds); return -1; } msg.msg_control = control; msg.msg_controllen = CMSG_SPACE(sizeof(int) * nfds); cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_len = CMSG_LEN(fdsize); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; memcpy(CMSG_DATA(cmsg), fds, fdsize); } retry: ret = sendmsg(sioc->fd, &msg, 0); if (ret <= 0) { if (socket_error() == EAGAIN || socket_error() == EWOULDBLOCK) { return QIO_CHANNEL_ERR_BLOCK; } if (socket_error() == EINTR) { goto retry; } error_setg_errno(errp, socket_error(), \"Unable to write to socket\"); return -1; } return ret; }"} {"target": 0, "idx": 23861, "func": "void *lsi_scsi_init(PCIBus *bus, int devfn) { LSIState *s; s = (LSIState *)pci_register_device(bus, \"LSI53C895A SCSI HBA\", sizeof(*s), devfn, NULL, NULL); if (s == NULL) { fprintf(stderr, \"lsi-scsi: Failed to register PCI device\\n\"); return NULL; } s->pci_dev.config[0x00] = 0x00; s->pci_dev.config[0x01] = 0x10; s->pci_dev.config[0x02] = 0x12; s->pci_dev.config[0x03] = 0x00; s->pci_dev.config[0x0b] = 0x01; s->pci_dev.config[0x3d] = 0x01; /* interrupt pin 1 */ s->mmio_io_addr = cpu_register_io_memory(0, lsi_mmio_readfn, lsi_mmio_writefn, s); s->ram_io_addr = cpu_register_io_memory(0, lsi_ram_readfn, lsi_ram_writefn, s); pci_register_io_region((struct PCIDevice *)s, 0, 256, PCI_ADDRESS_SPACE_IO, 
lsi_io_mapfunc); pci_register_io_region((struct PCIDevice *)s, 1, 0x400, PCI_ADDRESS_SPACE_MEM, lsi_mmio_mapfunc); pci_register_io_region((struct PCIDevice *)s, 2, 0x2000, PCI_ADDRESS_SPACE_MEM, lsi_ram_mapfunc); s->queue = qemu_malloc(sizeof(lsi_queue)); s->queue_len = 1; s->active_commands = 0; lsi_soft_reset(s); return s; }"} {"target": 0, "idx": 23871, "func": "int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl) { int mb_xy; int mb_type, partition_count, cbp = 0; int dct8x8_allowed= h->pps.transform_8x8_mode; int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2; const int pixel_shift = h->pixel_shift; mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride; ff_tlog(h->avctx, \"pic:%d mb:%d/%d\\n\", h->frame_num, sl->mb_x, sl->mb_y); if (sl->slice_type_nos != AV_PICTURE_TYPE_I) { int skip; /* a skipped mb needs the aff flag from the following mb */ if (FRAME_MBAFF(h) && (sl->mb_y & 1) == 1 && sl->prev_mb_skipped) skip = sl->next_mb_skipped; else skip = decode_cabac_mb_skip(h, sl, sl->mb_x, sl->mb_y ); /* read skip flags */ if( skip ) { if (FRAME_MBAFF(h) && (sl->mb_y & 1) == 0) { h->cur_pic.mb_type[mb_xy] = MB_TYPE_SKIP; sl->next_mb_skipped = decode_cabac_mb_skip(h, sl, sl->mb_x, sl->mb_y+1 ); if(!sl->next_mb_skipped) sl->mb_mbaff = sl->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h, sl); } decode_mb_skip(h, sl); h->cbp_table[mb_xy] = 0; h->chroma_pred_mode_table[mb_xy] = 0; sl->last_qscale_diff = 0; return 0; } } if (FRAME_MBAFF(h)) { if ((sl->mb_y & 1) == 0) sl->mb_mbaff = sl->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h, sl); } sl->prev_mb_skipped = 0; fill_decode_neighbors(h, sl, -(MB_FIELD(sl))); if (sl->slice_type_nos == AV_PICTURE_TYPE_B) { int ctx = 0; assert(sl->slice_type_nos == AV_PICTURE_TYPE_B); if (!IS_DIRECT(sl->left_type[LTOP] - 1)) ctx++; if (!IS_DIRECT(sl->top_type - 1)) ctx++; if( !get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+ctx] ) ){ mb_type= 0; /* B_Direct_16x16 */ }else if( !get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+3] ) ) { mb_type= 1 + get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+5] ); /* B_L[01]_16x16 */ }else{ int bits; bits = get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+4] ) << 3; bits+= get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+5] ) << 2; bits+= get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+5] ) << 1; bits+= get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+5] ); if( bits < 8 ){ mb_type= bits + 3; /* B_Bi_16x16 through B_L1_L0_16x8 */ }else if( bits == 13 ){ mb_type = decode_cabac_intra_mb_type(sl, 32, 0); goto decode_intra_mb; }else if( bits == 14 ){ mb_type= 11; /* B_L1_L0_8x16 */ }else if( bits == 15 ){ mb_type= 22; /* B_8x8 */ }else{ bits= ( bits<<1 ) + get_cabac_noinline( &sl->cabac, &sl->cabac_state[27+5] ); mb_type= bits - 4; /* B_L0_Bi_* through B_Bi_Bi_* */ } } partition_count = ff_h264_b_mb_type_info[mb_type].partition_count; mb_type = ff_h264_b_mb_type_info[mb_type].type; } else if (sl->slice_type_nos == AV_PICTURE_TYPE_P) { if( get_cabac_noinline( &sl->cabac, &sl->cabac_state[14] ) == 0 ) { /* P-type */ if( get_cabac_noinline( &sl->cabac, &sl->cabac_state[15] ) == 0 ) { /* P_L0_D16x16, P_8x8 */ mb_type= 3 * get_cabac_noinline( &sl->cabac, &sl->cabac_state[16] ); } else { /* P_L0_D8x16, P_L0_D16x8 */ mb_type= 2 - get_cabac_noinline( &sl->cabac, &sl->cabac_state[17] ); } partition_count = ff_h264_p_mb_type_info[mb_type].partition_count; mb_type = ff_h264_p_mb_type_info[mb_type].type; } else { mb_type = 
decode_cabac_intra_mb_type(sl, 17, 0); goto decode_intra_mb; } } else { mb_type = decode_cabac_intra_mb_type(sl, 3, 1); if (sl->slice_type == AV_PICTURE_TYPE_SI && mb_type) mb_type--; assert(sl->slice_type_nos == AV_PICTURE_TYPE_I); decode_intra_mb: partition_count = 0; cbp = ff_h264_i_mb_type_info[mb_type].cbp; sl->intra16x16_pred_mode = ff_h264_i_mb_type_info[mb_type].pred_mode; mb_type = ff_h264_i_mb_type_info[mb_type].type; } if (MB_FIELD(sl)) mb_type |= MB_TYPE_INTERLACED; h->slice_table[mb_xy] = sl->slice_num; if(IS_INTRA_PCM(mb_type)) { const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] * h->sps.bit_depth_luma >> 3; const uint8_t *ptr; // We assume these blocks are very rare so we do not optimize it. // FIXME The two following lines get the bitstream position in the cabac // decode, I think it should be done by a function in cabac.h (or cabac.c). ptr= sl->cabac.bytestream; if(sl->cabac.low&0x1) ptr--; if(CABAC_BITS==16){ if(sl->cabac.low&0x1FF) ptr--; } // The pixels are stored in the same order as levels in h->mb array. if ((int) (sl->cabac.bytestream_end - ptr) < mb_size) return -1; sl->intra_pcm_ptr = ptr; ptr += mb_size; ff_init_cabac_decoder(&sl->cabac, ptr, sl->cabac.bytestream_end - ptr); // All blocks are present h->cbp_table[mb_xy] = 0xf7ef; h->chroma_pred_mode_table[mb_xy] = 0; // In deblocking, the quantizer is 0 h->cur_pic.qscale_table[mb_xy] = 0; // All coeffs are present memset(h->non_zero_count[mb_xy], 16, 48); h->cur_pic.mb_type[mb_xy] = mb_type; sl->last_qscale_diff = 0; return 0; } fill_decode_caches(h, sl, mb_type); if( IS_INTRA( mb_type ) ) { int i, pred_mode; if( IS_INTRA4x4( mb_type ) ) { if (dct8x8_allowed && get_cabac_noinline(&sl->cabac, &sl->cabac_state[399 + sl->neighbor_transform_size])) { mb_type |= MB_TYPE_8x8DCT; for( i = 0; i < 16; i+=4 ) { int pred = pred_intra_mode(h, sl, i); int mode = decode_cabac_mb_intra4x4_pred_mode(sl, pred); fill_rectangle(&sl->intra4x4_pred_mode_cache[scan8[i]], 2, 2, 8, mode, 1); } } else { for( i = 0; i < 16; i++ ) { int pred = pred_intra_mode(h, sl, i); sl->intra4x4_pred_mode_cache[scan8[i]] = decode_cabac_mb_intra4x4_pred_mode(sl, pred); ff_dlog(h->avctx, \"i4x4 pred=%d mode=%d\\n\", pred, sl->intra4x4_pred_mode_cache[scan8[i]]); } } write_back_intra_pred_mode(h, sl); if (ff_h264_check_intra4x4_pred_mode(sl->intra4x4_pred_mode_cache, h->avctx, sl->top_samples_available, sl->left_samples_available) < 0 ) return -1; } else { sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h->avctx, sl->top_samples_available, sl->left_samples_available, sl->intra16x16_pred_mode, 0); if (sl->intra16x16_pred_mode < 0) return -1; } if(decode_chroma){ h->chroma_pred_mode_table[mb_xy] = pred_mode = decode_cabac_mb_chroma_pre_mode(h, sl); pred_mode= ff_h264_check_intra_pred_mode(h->avctx, sl->top_samples_available, sl->left_samples_available, pred_mode, 1 ); if( pred_mode < 0 ) return -1; sl->chroma_pred_mode = pred_mode; } else { sl->chroma_pred_mode = DC_128_PRED8x8; } } else if( partition_count == 4 ) { int i, j, sub_partition_count[4], list, ref[2][4]; if (sl->slice_type_nos == AV_PICTURE_TYPE_B ) { for( i = 0; i < 4; i++ ) { sl->sub_mb_type[i] = decode_cabac_b_mb_sub_type(sl); sub_partition_count[i] = ff_h264_b_sub_mb_type_info[sl->sub_mb_type[i]].partition_count; sl->sub_mb_type[i] = ff_h264_b_sub_mb_type_info[sl->sub_mb_type[i]].type; } if (IS_DIRECT(sl->sub_mb_type[0] | sl->sub_mb_type[1] | sl->sub_mb_type[2] | sl->sub_mb_type[3])) { ff_h264_pred_direct_motion(h, sl, &mb_type); sl->ref_cache[0][scan8[4]] = 
sl->ref_cache[1][scan8[4]] = sl->ref_cache[0][scan8[12]] = sl->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE; for( i = 0; i < 4; i++ ) fill_rectangle(&sl->direct_cache[scan8[4*i]], 2, 2, 8, (sl->sub_mb_type[i] >> 1) & 0xFF, 1); } } else { for( i = 0; i < 4; i++ ) { sl->sub_mb_type[i] = decode_cabac_p_mb_sub_type(sl); sub_partition_count[i] = ff_h264_p_sub_mb_type_info[sl->sub_mb_type[i]].partition_count; sl->sub_mb_type[i] = ff_h264_p_sub_mb_type_info[sl->sub_mb_type[i]].type; } } for( list = 0; list < sl->list_count; list++ ) { for( i = 0; i < 4; i++ ) { if(IS_DIRECT(sl->sub_mb_type[i])) continue; if(IS_DIR(sl->sub_mb_type[i], 0, list)){ int rc = sl->ref_count[list] << MB_MBAFF(sl); if (rc > 1) { ref[list][i] = decode_cabac_mb_ref(sl, list, 4 * i); if (ref[list][i] >= (unsigned) rc) { av_log(h->avctx, AV_LOG_ERROR, \"Reference %d >= %d\\n\", ref[list][i], rc); return -1; } }else ref[list][i] = 0; } else { ref[list][i] = -1; } sl->ref_cache[list][scan8[4 * i] + 1] = sl->ref_cache[list][scan8[4 * i] + 8] = sl->ref_cache[list][scan8[4 * i] + 9] = ref[list][i]; } } if(dct8x8_allowed) dct8x8_allowed = get_dct8x8_allowed(h, sl); for (list = 0; list < sl->list_count; list++) { for(i=0; i<4; i++){ sl->ref_cache[list][scan8[4 * i]] = sl->ref_cache[list][scan8[4 * i] + 1]; if(IS_DIRECT(sl->sub_mb_type[i])){ fill_rectangle(sl->mvd_cache[list][scan8[4*i]], 2, 2, 8, 0, 2); continue; } if(IS_DIR(sl->sub_mb_type[i], 0, list) && !IS_DIRECT(sl->sub_mb_type[i])){ const int sub_mb_type= sl->sub_mb_type[i]; const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1; for(j=0; jmv_cache[list][ scan8[index] ]; uint8_t (* mvd_cache)[2]= &sl->mvd_cache[list][ scan8[index] ]; pred_motion(h, sl, index, block_width, list, sl->ref_cache[list][ scan8[index] ], &mx, &my); DECODE_CABAC_MB_MVD(sl, list, index) ff_tlog(h->avctx, \"final mv:%d %d\\n\", mx, my); if(IS_SUB_8X8(sub_mb_type)){ mv_cache[ 1 ][0]= mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx; mv_cache[ 1 ][1]= mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my; mvd_cache[ 1 ][0]= mvd_cache[ 8 ][0]= mvd_cache[ 9 ][0]= mpx; mvd_cache[ 1 ][1]= mvd_cache[ 8 ][1]= mvd_cache[ 9 ][1]= mpy; }else if(IS_SUB_8X4(sub_mb_type)){ mv_cache[ 1 ][0]= mx; mv_cache[ 1 ][1]= my; mvd_cache[ 1 ][0]= mpx; mvd_cache[ 1 ][1]= mpy; }else if(IS_SUB_4X8(sub_mb_type)){ mv_cache[ 8 ][0]= mx; mv_cache[ 8 ][1]= my; mvd_cache[ 8 ][0]= mpx; mvd_cache[ 8 ][1]= mpy; } mv_cache[ 0 ][0]= mx; mv_cache[ 0 ][1]= my; mvd_cache[ 0 ][0]= mpx; mvd_cache[ 0 ][1]= mpy; } }else{ fill_rectangle(sl->mv_cache [list][ scan8[4*i] ], 2, 2, 8, 0, 4); fill_rectangle(sl->mvd_cache[list][ scan8[4*i] ], 2, 2, 8, 0, 2); } } } } else if( IS_DIRECT(mb_type) ) { ff_h264_pred_direct_motion(h, sl, &mb_type); fill_rectangle(sl->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 2); fill_rectangle(sl->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 2); dct8x8_allowed &= h->sps.direct_8x8_inference_flag; } else { int list, i; if(IS_16X16(mb_type)){ for (list = 0; list < sl->list_count; list++) { if(IS_DIR(mb_type, 0, list)){ int ref, rc = sl->ref_count[list] << MB_MBAFF(sl); if (rc > 1) { ref= decode_cabac_mb_ref(sl, list, 0); if (ref >= (unsigned) rc) { av_log(h->avctx, AV_LOG_ERROR, \"Reference %d >= %d\\n\", ref, rc); return -1; } }else ref=0; fill_rectangle(&sl->ref_cache[list][ scan8[0] ], 4, 4, 8, ref, 1); } } for (list = 0; list < sl->list_count; list++) { if(IS_DIR(mb_type, 0, list)){ int mx,my,mpx,mpy; pred_motion(h, sl, 0, 4, list, sl->ref_cache[list][ scan8[0] ], &mx, &my); DECODE_CABAC_MB_MVD(sl, list, 0) ff_tlog(h->avctx, \"final mv:%d %d\\n\", 
mx, my); fill_rectangle(sl->mvd_cache[list][ scan8[0] ], 4, 4, 8, pack8to16(mpx,mpy), 2); fill_rectangle(sl->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4); } } } else if(IS_16X8(mb_type)){ for (list = 0; list < sl->list_count; list++) { for(i=0; i<2; i++){ if(IS_DIR(mb_type, i, list)){ int ref, rc = sl->ref_count[list] << MB_MBAFF(sl); if (rc > 1) { ref= decode_cabac_mb_ref(sl, list, 8 * i); if (ref >= (unsigned) rc) { av_log(h->avctx, AV_LOG_ERROR, \"Reference %d >= %d\\n\", ref, rc); return -1; } }else ref=0; fill_rectangle(&sl->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, ref, 1); }else fill_rectangle(&sl->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, (LIST_NOT_USED&0xFF), 1); } } for (list = 0; list < sl->list_count; list++) { for(i=0; i<2; i++){ if(IS_DIR(mb_type, i, list)){ int mx,my,mpx,mpy; pred_16x8_motion(h, sl, 8*i, list, sl->ref_cache[list][scan8[0] + 16*i], &mx, &my); DECODE_CABAC_MB_MVD(sl, list, 8*i) ff_tlog(h->avctx, \"final mv:%d %d\\n\", mx, my); fill_rectangle(sl->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack8to16(mpx,mpy), 2); fill_rectangle(sl->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, pack16to32(mx,my), 4); }else{ fill_rectangle(sl->mvd_cache[list][ scan8[0] + 16*i ], 4, 2, 8, 0, 2); fill_rectangle(sl->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, 0, 4); } } } }else{ assert(IS_8X16(mb_type)); for (list = 0; list < sl->list_count; list++) { for(i=0; i<2; i++){ if(IS_DIR(mb_type, i, list)){ //FIXME optimize int ref, rc = sl->ref_count[list] << MB_MBAFF(sl); if (rc > 1) { ref = decode_cabac_mb_ref(sl, list, 4 * i); if (ref >= (unsigned) rc) { av_log(h->avctx, AV_LOG_ERROR, \"Reference %d >= %d\\n\", ref, rc); return -1; } }else ref=0; fill_rectangle(&sl->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, ref, 1); }else fill_rectangle(&sl->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, (LIST_NOT_USED&0xFF), 1); } } for (list = 0; list < sl->list_count; list++) { for(i=0; i<2; i++){ if(IS_DIR(mb_type, i, list)){ int mx,my,mpx,mpy; pred_8x16_motion(h, sl, i*4, list, sl->ref_cache[list][ scan8[0] + 2*i ], &mx, &my); DECODE_CABAC_MB_MVD(sl, list, 4*i) ff_tlog(h->avctx, \"final mv:%d %d\\n\", mx, my); fill_rectangle(sl->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack8to16(mpx,mpy), 2); fill_rectangle(sl->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, pack16to32(mx,my), 4); }else{ fill_rectangle(sl->mvd_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 2); fill_rectangle(sl->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, 0, 4); } } } } } if( IS_INTER( mb_type ) ) { h->chroma_pred_mode_table[mb_xy] = 0; write_back_motion(h, sl, mb_type); } if( !IS_INTRA16x16( mb_type ) ) { cbp = decode_cabac_mb_cbp_luma(sl); if(decode_chroma) cbp |= decode_cabac_mb_cbp_chroma(sl) << 4; } h->cbp_table[mb_xy] = sl->cbp = cbp; if( dct8x8_allowed && (cbp&15) && !IS_INTRA( mb_type ) ) { mb_type |= MB_TYPE_8x8DCT * get_cabac_noinline(&sl->cabac, &sl->cabac_state[399 + sl->neighbor_transform_size]); } /* It would be better to do this in fill_decode_caches, but we don't know * the transform mode of the current macroblock there. */ if (CHROMA444(h) && IS_8x8DCT(mb_type)){ int i; uint8_t *nnz_cache = sl->non_zero_count_cache; for (i = 0; i < 2; i++){ if (sl->left_type[LEFT(i)] && !IS_8x8DCT(sl->left_type[LEFT(i)])) { nnz_cache[3+8* 1 + 2*8*i]= nnz_cache[3+8* 2 + 2*8*i]= nnz_cache[3+8* 6 + 2*8*i]= nnz_cache[3+8* 7 + 2*8*i]= nnz_cache[3+8*11 + 2*8*i]= nnz_cache[3+8*12 + 2*8*i]= IS_INTRA(mb_type) ? 64 : 0; } } if (sl->top_type && !IS_8x8DCT(sl->top_type)){ uint32_t top_empty = CABAC(h) && !IS_INTRA(mb_type) ? 
0 : 0x40404040; AV_WN32A(&nnz_cache[4+8* 0], top_empty); AV_WN32A(&nnz_cache[4+8* 5], top_empty); AV_WN32A(&nnz_cache[4+8*10], top_empty); } } h->cur_pic.mb_type[mb_xy] = mb_type; if( cbp || IS_INTRA16x16( mb_type ) ) { const uint8_t *scan, *scan8x8; const uint32_t *qmul; if(IS_INTERLACED(mb_type)){ scan8x8 = sl->qscale ? h->field_scan8x8 : h->field_scan8x8_q0; scan = sl->qscale ? h->field_scan : h->field_scan_q0; }else{ scan8x8 = sl->qscale ? h->zigzag_scan8x8 : h->zigzag_scan8x8_q0; scan = sl->qscale ? h->zigzag_scan : h->zigzag_scan_q0; } // decode_cabac_mb_dqp if(get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + (sl->last_qscale_diff != 0)])){ int val = 1; int ctx= 2; const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8); while( get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + ctx] ) ) { ctx= 3; val++; if(val > 2*max_qp){ //prevent infinite loop av_log(h->avctx, AV_LOG_ERROR, \"cabac decode of qscale diff failed at %d %d\\n\", sl->mb_x, sl->mb_y); return -1; } } if( val&0x01 ) val= (val + 1)>>1 ; else val= -((val + 1)>>1); sl->last_qscale_diff = val; sl->qscale += val; if (((unsigned)sl->qscale) > max_qp){ if (sl->qscale < 0) sl->qscale += max_qp + 1; else sl->qscale -= max_qp + 1; } sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale); sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale); }else sl->last_qscale_diff=0; decode_cabac_luma_residual(h, sl, scan, scan8x8, pixel_shift, mb_type, cbp, 0); if (CHROMA444(h)) { decode_cabac_luma_residual(h, sl, scan, scan8x8, pixel_shift, mb_type, cbp, 1); decode_cabac_luma_residual(h, sl, scan, scan8x8, pixel_shift, mb_type, cbp, 2); } else if (CHROMA422(h)) { if( cbp&0x30 ){ int c; for (c = 0; c < 2; c++) decode_cabac_residual_dc_422(h, sl, sl->mb + ((256 + 16*16*c) << pixel_shift), 3, CHROMA_DC_BLOCK_INDEX + c, ff_h264_chroma422_dc_scan, 8); } if( cbp&0x20 ) { int c, i, i8x8; for( c = 0; c < 2; c++ ) { int16_t *mb = sl->mb + (16*(16 + 16*c) << pixel_shift); qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]]; for (i8x8 = 0; i8x8 < 2; i8x8++) { for (i = 0; i < 4; i++) { const int index = 16 + 16 * c + 8*i8x8 + i; decode_cabac_residual_nondc(h, sl, mb, 4, index, scan + 1, qmul, 15); mb += 16<non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1); fill_rectangle(&sl->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1); } } else /* yuv420 */ { if( cbp&0x30 ){ int c; for (c = 0; c < 2; c++) decode_cabac_residual_dc(h, sl, sl->mb + ((256 + 16 * 16 * c) << pixel_shift), 3, CHROMA_DC_BLOCK_INDEX + c, ff_h264_chroma_dc_scan, 4); } if( cbp&0x20 ) { int c, i; for( c = 0; c < 2; c++ ) { qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 
0:3)][sl->chroma_qp[c]]; for( i = 0; i < 4; i++ ) { const int index = 16 + 16 * c + i; decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), 4, index, scan + 1, qmul, 15); } } } else { fill_rectangle(&sl->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1); fill_rectangle(&sl->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1); } } } else { fill_rectangle(&sl->non_zero_count_cache[scan8[ 0]], 4, 4, 8, 0, 1); fill_rectangle(&sl->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1); fill_rectangle(&sl->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1); sl->last_qscale_diff = 0; } h->cur_pic.qscale_table[mb_xy] = sl->qscale; write_back_non_zero_count(h, sl); return 0; }"} {"target": 1, "idx": 23894, "func": "static void vnc_refresh(DisplayChangeListener *dcl) { VncDisplay *vd = container_of(dcl, VncDisplay, dcl); VncState *vs, *vn; int has_dirty, rects = 0; graphic_hw_update(NULL); if (vnc_trylock_display(vd)) { update_displaychangelistener(&vd->dcl, VNC_REFRESH_INTERVAL_BASE); return; } has_dirty = vnc_refresh_server_surface(vd); vnc_unlock_display(vd); QTAILQ_FOREACH_SAFE(vs, &vd->clients, next, vn) { rects += vnc_update_client(vs, has_dirty); /* vs might be free()ed here */ } if (QTAILQ_EMPTY(&vd->clients)) { update_displaychangelistener(&vd->dcl, VNC_REFRESH_INTERVAL_MAX); return; } if (has_dirty && rects) { vd->dcl.update_interval /= 2; if (vd->dcl.update_interval < VNC_REFRESH_INTERVAL_BASE) { vd->dcl.update_interval = VNC_REFRESH_INTERVAL_BASE; } } else { vd->dcl.update_interval += VNC_REFRESH_INTERVAL_INC; if (vd->dcl.update_interval > VNC_REFRESH_INTERVAL_MAX) { vd->dcl.update_interval = VNC_REFRESH_INTERVAL_MAX; } } }"} {"target": 1, "idx": 23903, "func": "static int int_pow(int i, int *exp_ptr) { int e, er, eq, j; int a, a1; /* renormalize */ a = i; e = POW_FRAC_BITS; while (a < (1 << (POW_FRAC_BITS - 1))) { a = a << 1; e--; } a -= (1 << POW_FRAC_BITS); a1 = 0; for(j = DEV_ORDER - 1; j >= 0; j--) a1 = POW_MULL(a, dev_4_3_coefs[j] + a1); a = (1 << POW_FRAC_BITS) + a1; /* exponent compute (exact) */ e = e * 4; er = e % 3; eq = e / 3; a = POW_MULL(a, pow_mult3[er]); while (a >= 2 * POW_FRAC_ONE) { a = a >> 1; eq++; } /* convert to float */ while (a < POW_FRAC_ONE) { a = a << 1; eq--; } /* now POW_FRAC_ONE <= a < 2 * POW_FRAC_ONE */ #if (POW_FRAC_BITS - 1) > FRAC_BITS a = (a + (1 << (POW_FRAC_BITS - FRAC_BITS - 1))) >> (POW_FRAC_BITS - FRAC_BITS); /* correct overflow */ if (a >= 2 * (1 << FRAC_BITS)) { a = a >> 1; eq++; } #endif *exp_ptr = eq; return a; }"} {"target": 1, "idx": 23911, "func": "int monitor_set_cpu(int cpu_index) { CPUState *cpu; cpu = qemu_get_cpu(cpu_index); if (cpu == NULL) { return -1; } cur_mon->mon_cpu = cpu; return 0; }"} {"target": 0, "idx": 23929, "func": "unsigned ff_dxva2_get_surface_index(const AVCodecContext *avctx, const AVDXVAContext *ctx, const AVFrame *frame) { void *surface = get_surface(frame); unsigned i; for (i = 0; i < DXVA_CONTEXT_COUNT(avctx, ctx); i++) { #if CONFIG_D3D11VA if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD && ctx->d3d11va.surface[i] == surface) { D3D11_VIDEO_DECODER_OUTPUT_VIEW_DESC viewDesc; ID3D11VideoDecoderOutputView_GetDesc(ctx->d3d11va.surface[i], &viewDesc); return viewDesc.Texture2D.ArraySlice; } #endif #if CONFIG_DXVA2 if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD && ctx->dxva2.surface[i] == surface) return i; #endif } assert(0); return 0; }"} {"target": 1, "idx": 23949, "func": "uint32_t HELPER(mul32)(CPUOpenRISCState *env, uint32_t ra, uint32_t rb) { uint64_t result; uint32_t high, cy; OpenRISCCPU *cpu = 
openrisc_env_get_cpu(env); result = (uint64_t)ra * rb; /* regisiers in or32 is 32bit, so 32 is NOT a magic number. or64 is not handled in this function, and not implement yet, TARGET_LONG_BITS for or64 is 64, it will break this function, so, we didn't use TARGET_LONG_BITS here. */ high = result >> 32; cy = result >> (32 - 1); if ((cy & 0x1) == 0x0) { if (high == 0x0) { return result; } } if ((cy & 0x1) == 0x1) { if (high == 0xffffffff) { return result; } } cpu->env.sr |= (SR_OV | SR_CY); if (cpu->env.sr & SR_OVE) { raise_exception(cpu, EXCP_RANGE); } return result; }"} {"target": 1, "idx": 24013, "func": "static int roq_dpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { int i, stereo, data_size, ret; const int16_t *in = frame ? (const int16_t *)frame->data[0] : NULL; uint8_t *out; ROQDPCMContext *context = avctx->priv_data; stereo = (avctx->channels == 2); if (!in && context->input_frames >= 8) return 0; if (in && context->input_frames < 8) { memcpy(&context->frame_buffer[context->buffered_samples * avctx->channels], in, avctx->frame_size * avctx->channels * sizeof(*in)); context->buffered_samples += avctx->frame_size; if (context->input_frames == 0) context->first_pts = frame->pts; if (context->input_frames < 7) { context->input_frames++; return 0; in = context->frame_buffer; if (stereo) { context->lastSample[0] &= 0xFF00; context->lastSample[1] &= 0xFF00; if (context->input_frames == 7 || !in) data_size = avctx->channels * context->buffered_samples; else data_size = avctx->channels * avctx->frame_size; if ((ret = ff_alloc_packet2(avctx, avpkt, ROQ_HEADER_SIZE + data_size))) return ret; out = avpkt->data; bytestream_put_byte(&out, stereo ? 0x21 : 0x20); bytestream_put_byte(&out, 0x10); bytestream_put_le32(&out, data_size); if (stereo) { bytestream_put_byte(&out, (context->lastSample[1])>>8); bytestream_put_byte(&out, (context->lastSample[0])>>8); } else bytestream_put_le16(&out, context->lastSample[0]); /* Write the actual samples */ for (i = 0; i < data_size; i++) *out++ = dpcm_predict(&context->lastSample[i & 1], *in++); avpkt->pts = context->input_frames <= 7 ? context->first_pts : frame->pts; avpkt->duration = data_size / avctx->channels; context->input_frames++; if (!in) context->input_frames = FFMAX(context->input_frames, 8); *got_packet_ptr = 1; return 0;"} {"target": 0, "idx": 24037, "func": "static int find_optimal_param(uint32_t sum, int n) { int k, k_opt; uint32_t nbits[MAX_RICE_PARAM+1]; k_opt = 0; nbits[0] = UINT32_MAX; for(k=0; k<=MAX_RICE_PARAM; k++) { nbits[k] = rice_encode_count(sum, n, k); if(nbits[k] < nbits[k_opt]) { k_opt = k; } } return k_opt; }"} {"target": 0, "idx": 24042, "func": "float64 HELPER(ucf64_sf2df)(float32 x, CPUUniCore32State *env) { return float32_to_float64(x, &env->ucf64.fp_status); }"} {"target": 0, "idx": 24046, "func": "static ssize_t colo_rewriter_receive_iov(NetFilterState *nf, NetClientState *sender, unsigned flags, const struct iovec *iov, int iovcnt, NetPacketSent *sent_cb) { RewriterState *s = FILTER_COLO_REWRITER(nf); Connection *conn; ConnectionKey key; Packet *pkt; ssize_t size = iov_size(iov, iovcnt); char *buf = g_malloc0(size); iov_to_buf(iov, iovcnt, 0, buf, size); pkt = packet_new(buf, size); /* * if we get tcp packet * we will rewrite it to make secondary guest's * connection established successfully */ if (pkt && is_tcp_packet(pkt)) { fill_connection_key(pkt, &key); if (sender == nf->netdev) { /* * We need make tcp TX and RX packet * into one connection. 
*/ reverse_connection_key(&key); } conn = connection_get(s->connection_track_table, &key, NULL); if (sender == nf->netdev) { /* NET_FILTER_DIRECTION_TX */ /* handle_primary_tcp_pkt */ } else { /* NET_FILTER_DIRECTION_RX */ /* handle_secondary_tcp_pkt */ } } packet_destroy(pkt, NULL); pkt = NULL; return 0; }"} {"target": 0, "idx": 24048, "func": "static void ts_str(char buffer[60], int64_t ts, AVRational base) { if (ts == AV_NOPTS_VALUE) { strcpy(buffer, \" NOPTS \"); return; } ts= av_rescale_q(ts, base, (AVRational){1, 1000000}); snprintf(buffer, 60, \"%c%Ld.%06Ld\", ts<0 ? '-' : ' ', FFABS(ts)/1000000, FFABS(ts)%1000000); }"} {"target": 0, "idx": 24053, "func": "static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) { int inv, jcc_op, size, cond; CCPrepare cc; TCGv t0; inv = b & 1; jcc_op = (b >> 1) & 7; switch (s->cc_op) { case CC_OP_SUBB ... CC_OP_SUBQ: /* We optimize relational operators for the cmp/jcc case. */ size = s->cc_op - CC_OP_SUBB; switch (jcc_op) { case JCC_BE: tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src); gen_extu(size, cpu_tmp4); t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4, .reg2 = t0, .mask = -1, .use_reg2 = true }; break; case JCC_L: cond = TCG_COND_LT; goto fast_jcc_l; case JCC_LE: cond = TCG_COND_LE; fast_jcc_l: tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src); gen_exts(size, cpu_tmp4); t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true); cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4, .reg2 = t0, .mask = -1, .use_reg2 = true }; break; default: goto slow_jcc; } break; default: slow_jcc: /* This actually generates good code for JC, JZ and JS. */ switch (jcc_op) { case JCC_O: cc = gen_prepare_eflags_o(s, reg); break; case JCC_B: cc = gen_prepare_eflags_c(s, reg); break; case JCC_Z: cc = gen_prepare_eflags_z(s, reg); break; case JCC_BE: gen_compute_eflags(s); cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, .mask = CC_Z | CC_C }; break; case JCC_S: cc = gen_prepare_eflags_s(s, reg); break; case JCC_P: cc = gen_prepare_eflags_p(s, reg); break; case JCC_L: gen_compute_eflags(s); if (TCGV_EQUAL(reg, cpu_cc_src)) { reg = cpu_tmp0; } tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ tcg_gen_xor_tl(reg, reg, cpu_cc_src); cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, .mask = CC_S }; break; default: case JCC_LE: gen_compute_eflags(s); if (TCGV_EQUAL(reg, cpu_cc_src)) { reg = cpu_tmp0; } tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ tcg_gen_xor_tl(reg, reg, cpu_cc_src); cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, .mask = CC_S | CC_Z }; break; } break; } if (inv) { cc.cond = tcg_invert_cond(cc.cond); } return cc; }"} {"target": 1, "idx": 24063, "func": "static void lz_unpack(const unsigned char *src, int src_len, unsigned char *dest, int dest_len) { const unsigned char *s; const unsigned char *s_end; unsigned char *d; unsigned char *d_end; unsigned char queue[QUEUE_SIZE]; unsigned int qpos; unsigned int dataleft; unsigned int chainofs; unsigned int chainlen; unsigned int speclen; unsigned char tag; unsigned int i, j; s = src; s_end = src + src_len; d = dest; d_end = d + dest_len; if (s_end - s < 8) return; dataleft = AV_RL32(s); s += 4; memset(queue, 0x20, QUEUE_SIZE); if (AV_RL32(s) == 0x56781234) { s += 4; qpos = 0x111; speclen = 0xF + 3; } else { qpos = 0xFEE; speclen = 100; /* no speclen */ } while (s_end - s > 0 && dataleft > 0) { tag = *s++; if ((tag == 0xFF) && (dataleft > 8)) { if (d + 8 > d_end || s_end - s < 8) return; for (i = 0; i < 8; i++) { 
queue[qpos++] = *d++ = *s++; qpos &= QUEUE_MASK; } dataleft -= 8; } else { for (i = 0; i < 8; i++) { if (dataleft == 0) break; if (tag & 0x01) { if (d + 1 > d_end || s_end - s < 1) return; queue[qpos++] = *d++ = *s++; qpos &= QUEUE_MASK; dataleft--; } else { if (s_end - s < 2) return; chainofs = *s++; chainofs |= ((*s & 0xF0) << 4); chainlen = (*s++ & 0x0F) + 3; if (chainlen == speclen) { if (s_end - s < 1) return; chainlen = *s++ + 0xF + 3; } if (d + chainlen > d_end) return; for (j = 0; j < chainlen; j++) { *d = queue[chainofs++ & QUEUE_MASK]; queue[qpos++] = *d++; qpos &= QUEUE_MASK; } dataleft -= chainlen; } tag >>= 1; } } } }"} {"target": 1, "idx": 24066, "func": "static int alsa_init_out (HWVoiceOut *hw, struct audsettings *as) { ALSAVoiceOut *alsa = (ALSAVoiceOut *) hw; struct alsa_params_req req; struct alsa_params_obt obt; snd_pcm_t *handle; struct audsettings obt_as; req.fmt = aud_to_alsafmt (as->fmt, as->endianness); req.freq = as->freq; req.nchannels = as->nchannels; req.period_size = conf.period_size_out; req.buffer_size = conf.buffer_size_out; req.size_in_usec = conf.size_in_usec_out; req.override_mask = (conf.period_size_out_overridden ? 1 : 0) | (conf.buffer_size_out_overridden ? 2 : 0); if (alsa_open (0, &req, &obt, &handle)) { return -1; } obt_as.freq = obt.freq; obt_as.nchannels = obt.nchannels; obt_as.fmt = obt.fmt; obt_as.endianness = obt.endianness; audio_pcm_init_info (&hw->info, &obt_as); hw->samples = obt.samples; alsa->pcm_buf = audio_calloc (AUDIO_FUNC, obt.samples, 1 << hw->info.shift); if (!alsa->pcm_buf) { dolog (\"Could not allocate DAC buffer (%d samples, each %d bytes)\\n\", hw->samples, 1 << hw->info.shift); alsa_anal_close1 (&handle); return -1; } alsa->handle = handle; return 0; }"} {"target": 0, "idx": 24069, "func": "static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s) { int x, y; unsigned char P[4]; /* 4-color encoding */ CHECK_STREAM_PTR(4); memcpy(P, s->stream_ptr, 4); s->stream_ptr += 4; if (P[0] <= P[1]) { if (P[2] <= P[3]) { /* 1 of 4 colors for each pixel, need 16 more bytes */ CHECK_STREAM_PTR(16); for (y = 0; y < 8; y++) { /* get the next set of 8 2-bit flags */ int flags = bytestream_get_le16(&s->stream_ptr); for (x = 0; x < 8; x++, flags >>= 2) *s->pixel_ptr++ = P[flags & 0x03]; s->pixel_ptr += s->line_inc; } } else { uint32_t flags; /* 1 of 4 colors for each 2x2 block, need 4 more bytes */ CHECK_STREAM_PTR(4); flags = bytestream_get_le32(&s->stream_ptr); for (y = 0; y < 8; y += 2) { for (x = 0; x < 8; x += 2, flags >>= 2) { s->pixel_ptr[x ] = s->pixel_ptr[x + 1 ] = s->pixel_ptr[x + s->stride] = s->pixel_ptr[x + 1 + s->stride] = P[flags & 0x03]; } s->pixel_ptr += s->stride * 2; } } } else { uint64_t flags; /* 1 of 4 colors for each 2x1 or 1x2 block, need 8 more bytes */ CHECK_STREAM_PTR(8); flags = bytestream_get_le64(&s->stream_ptr); if (P[2] <= P[3]) { for (y = 0; y < 8; y++) { for (x = 0; x < 8; x += 2, flags >>= 2) { s->pixel_ptr[x ] = s->pixel_ptr[x + 1] = P[flags & 0x03]; } s->pixel_ptr += s->stride; } } else { for (y = 0; y < 8; y += 2) { for (x = 0; x < 8; x++, flags >>= 2) { s->pixel_ptr[x ] = s->pixel_ptr[x + s->stride] = P[flags & 0x03]; } s->pixel_ptr += s->stride * 2; } } } /* report success */ return 0; }"} {"target": 0, "idx": 24070, "func": "int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MJpegDecodeContext *s = avctx->priv_data; const uint8_t *buf_end, *buf_ptr; const uint8_t *unescaped_buf_ptr; int 
unescaped_buf_size; int start_code; AVFrame *picture = data; s->got_picture = 0; // picture from previous image can not be reused buf_ptr = buf; buf_end = buf + buf_size; while (buf_ptr < buf_end) { /* find start next marker */ start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end, &unescaped_buf_ptr, &unescaped_buf_size); { /* EOF */ if (start_code < 0) { goto the_end; } else { av_log(avctx, AV_LOG_DEBUG, \"marker=%x avail_size_in_buf=%td\\n\", start_code, buf_end - buf_ptr); init_get_bits(&s->gb, unescaped_buf_ptr, unescaped_buf_size*8); s->start_code = start_code; if(s->avctx->debug & FF_DEBUG_STARTCODE){ av_log(avctx, AV_LOG_DEBUG, \"startcode: %X\\n\", start_code); } /* process markers */ if (start_code >= 0xd0 && start_code <= 0xd7) { av_log(avctx, AV_LOG_DEBUG, \"restart marker: %d\\n\", start_code&0x0f); /* APP fields */ } else if (start_code >= APP0 && start_code <= APP15) { mjpeg_decode_app(s); /* Comment */ } else if (start_code == COM){ mjpeg_decode_com(s); } switch(start_code) { case SOI: s->restart_interval = 0; s->restart_count = 0; /* nothing to do on SOI */ break; case DQT: ff_mjpeg_decode_dqt(s); break; case DHT: if(ff_mjpeg_decode_dht(s) < 0){ av_log(avctx, AV_LOG_ERROR, \"huffman table decode error\\n\"); return -1; } break; case SOF0: case SOF1: s->lossless=0; s->ls=0; s->progressive=0; if (ff_mjpeg_decode_sof(s) < 0) return -1; break; case SOF2: s->lossless=0; s->ls=0; s->progressive=1; if (ff_mjpeg_decode_sof(s) < 0) return -1; break; case SOF3: s->lossless=1; s->ls=0; s->progressive=0; if (ff_mjpeg_decode_sof(s) < 0) return -1; break; case SOF48: s->lossless=1; s->ls=1; s->progressive=0; if (ff_mjpeg_decode_sof(s) < 0) return -1; break; case LSE: if (!CONFIG_JPEGLS_DECODER || ff_jpegls_decode_lse(s) < 0) return -1; break; case EOI: if ((s->buggy_avid && !s->interlaced) || s->restart_interval) break; eoi_parser: s->cur_scan = 0; if (!s->got_picture) { av_log(avctx, AV_LOG_WARNING, \"Found EOI before any SOF, ignoring\\n\"); break; } if (s->interlaced) { s->bottom_field ^= 1; /* if not bottom field, do not output image yet */ if (s->bottom_field == !s->interlace_polarity) break; } *picture = *s->picture_ptr; *data_size = sizeof(AVFrame); if(!s->lossless){ picture->quality= FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]); picture->qstride= 0; picture->qscale_table= s->qscale_table; memset(picture->qscale_table, picture->quality, (s->width+15)/16); if(avctx->debug & FF_DEBUG_QP) av_log(avctx, AV_LOG_DEBUG, \"QP: %d\\n\", picture->quality); picture->quality*= FF_QP2LAMBDA; } goto the_end; case SOS: if (!s->got_picture) { av_log(avctx, AV_LOG_WARNING, \"Can not process SOS before SOF, skipping\\n\"); break; } if (ff_mjpeg_decode_sos(s, NULL, NULL) < 0 && avctx->error_recognition >= FF_ER_EXPLODE) return AVERROR_INVALIDDATA; /* buggy avid puts EOI every 10-20th frame */ /* if restart period is over process EOI */ if ((s->buggy_avid && !s->interlaced) || s->restart_interval) goto eoi_parser; break; case DRI: mjpeg_decode_dri(s); break; case SOF5: case SOF6: case SOF7: case SOF9: case SOF10: case SOF11: case SOF13: case SOF14: case SOF15: case JPG: av_log(avctx, AV_LOG_ERROR, \"mjpeg: unsupported coding type (%x)\\n\", start_code); break; // default: // printf(\"mjpeg: unsupported marker (%x)\\n\", start_code); // break; } /* eof process start code */ buf_ptr += (get_bits_count(&s->gb)+7)/8; av_log(avctx, AV_LOG_DEBUG, \"marker parser used %d bytes (%d bits)\\n\", (get_bits_count(&s->gb)+7)/8, get_bits_count(&s->gb)); } } } if (s->got_picture) { av_log(avctx, 
AV_LOG_WARNING, \"EOI missing, emulating\\n\"); goto eoi_parser; } av_log(avctx, AV_LOG_FATAL, \"No JPEG data found in image\\n\"); return -1; the_end: av_log(avctx, AV_LOG_DEBUG, \"mjpeg decode frame unused %td bytes\\n\", buf_end - buf_ptr); // return buf_end - buf_ptr; return buf_ptr - buf; }"} {"target": 0, "idx": 24089, "func": "static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm, struct image_info *info) { abi_ulong stack_base, size, error; int i; /* Create enough stack to hold everything. If we don't use * it for args, we'll use it for something else... */ size = guest_stack_size; if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) size = MAX_ARG_PAGES*TARGET_PAGE_SIZE; error = target_mmap(0, size + qemu_host_page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (error == -1) { perror(\"stk mmap\"); exit(-1); } /* we reserve one extra page at the top of the stack as guard */ target_mprotect(error + size, qemu_host_page_size, PROT_NONE); info->stack_limit = error; stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE; p += stack_base; for (i = 0 ; i < MAX_ARG_PAGES ; i++) { if (bprm->page[i]) { info->rss++; /* FIXME - check return value of memcpy_to_target() for failure */ memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE); free(bprm->page[i]); } stack_base += TARGET_PAGE_SIZE; } return p; }"} {"target": 0, "idx": 24095, "func": "static size_t handle_aiocb_rw_vector(struct qemu_paiocb *aiocb) { size_t offset = 0; ssize_t len; do { if (aiocb->aio_type == QEMU_PAIO_WRITE) len = qemu_pwritev(aiocb->aio_fildes, aiocb->aio_iov, aiocb->aio_niov, aiocb->aio_offset + offset); else len = qemu_preadv(aiocb->aio_fildes, aiocb->aio_iov, aiocb->aio_niov, aiocb->aio_offset + offset); } while (len == -1 && errno == EINTR); if (len == -1) return -errno; return len; }"} {"target": 1, "idx": 24114, "func": "static VncClientInfo *qmp_query_vnc_client(const VncState *client) { struct sockaddr_storage sa; socklen_t salen = sizeof(sa); char host[NI_MAXHOST]; char serv[NI_MAXSERV]; VncClientInfo *info; if (getpeername(client->csock, (struct sockaddr *)&sa, &salen) < 0) { return NULL; } if (getnameinfo((struct sockaddr *)&sa, salen, host, sizeof(host), serv, sizeof(serv), NI_NUMERICHOST | NI_NUMERICSERV) < 0) { return NULL; } info = g_malloc0(sizeof(*info)); info->base = g_malloc0(sizeof(*info->base)); info->base->host = g_strdup(host); info->base->service = g_strdup(serv); info->base->family = inet_netfamily(sa.ss_family); info->base->websocket = client->websocket; #ifdef CONFIG_VNC_TLS if (client->tls.session && client->tls.dname) { info->has_x509_dname = true; info->x509_dname = g_strdup(client->tls.dname); } #endif #ifdef CONFIG_VNC_SASL if (client->sasl.conn && client->sasl.username) { info->has_sasl_username = true; info->sasl_username = g_strdup(client->sasl.username); } #endif return info; }"} {"target": 1, "idx": 24117, "func": "int kvm_arch_put_registers(CPUState *env) { struct kvm_regs regs; int ret; int i; ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, ®s); if (ret < 0) return ret; regs.ctr = env->ctr; regs.lr = env->lr; regs.xer = env->xer; regs.msr = env->msr; regs.pc = env->nip; regs.srr0 = env->spr[SPR_SRR0]; regs.srr1 = env->spr[SPR_SRR1]; regs.sprg0 = env->spr[SPR_SPRG0]; regs.sprg1 = env->spr[SPR_SPRG1]; regs.sprg2 = env->spr[SPR_SPRG2]; regs.sprg3 = env->spr[SPR_SPRG3]; regs.sprg4 = env->spr[SPR_SPRG4]; regs.sprg5 = env->spr[SPR_SPRG5]; regs.sprg6 = env->spr[SPR_SPRG6]; regs.sprg7 = env->spr[SPR_SPRG7]; for (i = 0;i < 32; i++) regs.gpr[i] = 
env->gpr[i]; ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, ®s); if (ret < 0) return ret; return ret; }"} {"target": 0, "idx": 24155, "func": "static int mp_decode_layer3(MPADecodeContext *s) { int nb_granules, main_data_begin; int gr, ch, blocksplit_flag, i, j, k, n, bits_pos; GranuleDef *g; int16_t exponents[576]; //FIXME try INTFLOAT /* read side info */ if (s->lsf) { main_data_begin = get_bits(&s->gb, 8); skip_bits(&s->gb, s->nb_channels); nb_granules = 1; } else { main_data_begin = get_bits(&s->gb, 9); if (s->nb_channels == 2) skip_bits(&s->gb, 3); else skip_bits(&s->gb, 5); nb_granules = 2; for (ch = 0; ch < s->nb_channels; ch++) { s->granules[ch][0].scfsi = 0;/* all scale factors are transmitted */ s->granules[ch][1].scfsi = get_bits(&s->gb, 4); } } for (gr = 0; gr < nb_granules; gr++) { for (ch = 0; ch < s->nb_channels; ch++) { av_dlog(s->avctx, \"gr=%d ch=%d: side_info\\n\", gr, ch); g = &s->granules[ch][gr]; g->part2_3_length = get_bits(&s->gb, 12); g->big_values = get_bits(&s->gb, 9); if (g->big_values > 288) { av_log(s->avctx, AV_LOG_ERROR, \"big_values too big\\n\"); return AVERROR_INVALIDDATA; } g->global_gain = get_bits(&s->gb, 8); /* if MS stereo only is selected, we precompute the 1/sqrt(2) renormalization factor */ if ((s->mode_ext & (MODE_EXT_MS_STEREO | MODE_EXT_I_STEREO)) == MODE_EXT_MS_STEREO) g->global_gain -= 2; if (s->lsf) g->scalefac_compress = get_bits(&s->gb, 9); else g->scalefac_compress = get_bits(&s->gb, 4); blocksplit_flag = get_bits1(&s->gb); if (blocksplit_flag) { g->block_type = get_bits(&s->gb, 2); if (g->block_type == 0) { av_log(s->avctx, AV_LOG_ERROR, \"invalid block type\\n\"); return AVERROR_INVALIDDATA; } g->switch_point = get_bits1(&s->gb); for (i = 0; i < 2; i++) g->table_select[i] = get_bits(&s->gb, 5); for (i = 0; i < 3; i++) g->subblock_gain[i] = get_bits(&s->gb, 3); ff_init_short_region(s, g); } else { int region_address1, region_address2; g->block_type = 0; g->switch_point = 0; for (i = 0; i < 3; i++) g->table_select[i] = get_bits(&s->gb, 5); /* compute huffman coded region sizes */ region_address1 = get_bits(&s->gb, 4); region_address2 = get_bits(&s->gb, 3); av_dlog(s->avctx, \"region1=%d region2=%d\\n\", region_address1, region_address2); ff_init_long_region(s, g, region_address1, region_address2); } ff_region_offset2size(g); ff_compute_band_indexes(s, g); g->preflag = 0; if (!s->lsf) g->preflag = get_bits1(&s->gb); g->scalefac_scale = get_bits1(&s->gb); g->count1table_select = get_bits1(&s->gb); av_dlog(s->avctx, \"block_type=%d switch_point=%d\\n\", g->block_type, g->switch_point); } } if (!s->adu_mode) { const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3); int extrasize = av_clip(get_bits_left(&s->gb) >> 3, 0, EXTRABYTES); assert((get_bits_count(&s->gb) & 7) == 0); /* now we get bits from the main_data_begin offset */ av_dlog(s->avctx, \"seekback: %d\\n\", main_data_begin); //av_log(NULL, AV_LOG_ERROR, \"backstep:%d, lastbuf:%d\\n\", main_data_begin, s->last_buf_size); memcpy(s->last_buf + s->last_buf_size, ptr, extrasize); s->in_gb = s->gb; init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8); #if !UNCHECKED_BITSTREAM_READER s->gb.size_in_bits_plus8 += FFMAX(extrasize, LAST_BUF_SIZE - s->last_buf_size) * 8; #endif skip_bits_long(&s->gb, 8*(s->last_buf_size - main_data_begin)); } for (gr = 0; gr < nb_granules; gr++) { for (ch = 0; ch < s->nb_channels; ch++) { g = &s->granules[ch][gr]; if (get_bits_count(&s->gb) < 0) { av_log(s->avctx, AV_LOG_DEBUG, \"mdb:%d, lastbuf:%d skipping granule %d\\n\", main_data_begin, s->last_buf_size, 
gr); skip_bits_long(&s->gb, g->part2_3_length); memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid)); if (get_bits_count(&s->gb) >= s->gb.size_in_bits && s->in_gb.buffer) { skip_bits_long(&s->in_gb, get_bits_count(&s->gb) - s->gb.size_in_bits); s->gb = s->in_gb; s->in_gb.buffer = NULL; } continue; } bits_pos = get_bits_count(&s->gb); if (!s->lsf) { uint8_t *sc; int slen, slen1, slen2; /* MPEG1 scale factors */ slen1 = slen_table[0][g->scalefac_compress]; slen2 = slen_table[1][g->scalefac_compress]; av_dlog(s->avctx, \"slen1=%d slen2=%d\\n\", slen1, slen2); if (g->block_type == 2) { n = g->switch_point ? 17 : 18; j = 0; if (slen1) { for (i = 0; i < n; i++) g->scale_factors[j++] = get_bits(&s->gb, slen1); } else { for (i = 0; i < n; i++) g->scale_factors[j++] = 0; } if (slen2) { for (i = 0; i < 18; i++) g->scale_factors[j++] = get_bits(&s->gb, slen2); for (i = 0; i < 3; i++) g->scale_factors[j++] = 0; } else { for (i = 0; i < 21; i++) g->scale_factors[j++] = 0; } } else { sc = s->granules[ch][0].scale_factors; j = 0; for (k = 0; k < 4; k++) { n = k == 0 ? 6 : 5; if ((g->scfsi & (0x8 >> k)) == 0) { slen = (k < 2) ? slen1 : slen2; if (slen) { for (i = 0; i < n; i++) g->scale_factors[j++] = get_bits(&s->gb, slen); } else { for (i = 0; i < n; i++) g->scale_factors[j++] = 0; } } else { /* simply copy from last granule */ for (i = 0; i < n; i++) { g->scale_factors[j] = sc[j]; j++; } } } g->scale_factors[j++] = 0; } } else { int tindex, tindex2, slen[4], sl, sf; /* LSF scale factors */ if (g->block_type == 2) tindex = g->switch_point ? 2 : 1; else tindex = 0; sf = g->scalefac_compress; if ((s->mode_ext & MODE_EXT_I_STEREO) && ch == 1) { /* intensity stereo case */ sf >>= 1; if (sf < 180) { lsf_sf_expand(slen, sf, 6, 6, 0); tindex2 = 3; } else if (sf < 244) { lsf_sf_expand(slen, sf - 180, 4, 4, 0); tindex2 = 4; } else { lsf_sf_expand(slen, sf - 244, 3, 0, 0); tindex2 = 5; } } else { /* normal case */ if (sf < 400) { lsf_sf_expand(slen, sf, 5, 4, 4); tindex2 = 0; } else if (sf < 500) { lsf_sf_expand(slen, sf - 400, 5, 4, 0); tindex2 = 1; } else { lsf_sf_expand(slen, sf - 500, 3, 0, 0); tindex2 = 2; g->preflag = 1; } } j = 0; for (k = 0; k < 4; k++) { n = lsf_nsf_table[tindex2][tindex][k]; sl = slen[k]; if (sl) { for (i = 0; i < n; i++) g->scale_factors[j++] = get_bits(&s->gb, sl); } else { for (i = 0; i < n; i++) g->scale_factors[j++] = 0; } } /* XXX: should compute exact size */ for (; j < 40; j++) g->scale_factors[j] = 0; } exponents_from_scale_factors(s, g, exponents); /* read Huffman coded residue */ huffman_decode(s, g, exponents, bits_pos + g->part2_3_length); } /* ch */ if (s->nb_channels == 2) compute_stereo(s, &s->granules[0][gr], &s->granules[1][gr]); for (ch = 0; ch < s->nb_channels; ch++) { g = &s->granules[ch][gr]; reorder_block(s, g); compute_antialias(s, g); compute_imdct(s, g, &s->sb_samples[ch][18 * gr][0], s->mdct_buf[ch]); } } /* gr */ if (get_bits_count(&s->gb) < 0) skip_bits_long(&s->gb, -get_bits_count(&s->gb)); return nb_granules * 18; }"} {"target": 0, "idx": 24156, "func": "static void msix_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { PCIDevice *dev = opaque; unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3; int vector = offset / MSIX_ENTRY_SIZE; pci_set_long(dev->msix_table_page + offset, val); if (!msix_is_masked(dev, vector) && msix_is_pending(dev, vector)) { msix_clr_pending(dev, vector); msix_notify(dev, vector); } }"} {"target": 0, "idx": 24160, "func": "static void vhost_iommu_region_del(MemoryListener *listener, MemoryRegionSection *section) { 
struct vhost_dev *dev = container_of(listener, struct vhost_dev, iommu_listener); struct vhost_iommu *iommu; if (!memory_region_is_iommu(section->mr)) { return; } QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) { if (iommu->mr == section->mr) { memory_region_unregister_iommu_notifier(iommu->mr, &iommu->n); QLIST_REMOVE(iommu, iommu_next); g_free(iommu); break; } } }"} {"target": 0, "idx": 24161, "func": "static void vfio_vga_quirk_teardown(VFIOPCIDevice *vdev) { int i; for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) { while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) { VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks); memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem); object_unparent(OBJECT(&quirk->mem)); QLIST_REMOVE(quirk, next); g_free(quirk); } } }"} {"target": 0, "idx": 24168, "func": "int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len) { assert(len >= 14); if (!req->sense_len) { return 0; } return scsi_build_sense(req->sense, req->sense_len, buf, len, true); }"} {"target": 0, "idx": 24199, "func": "uint64_t HELPER(popcnt)(uint64_t r2) { uint64_t ret = 0; int i; for (i = 0; i < 64; i += 8) { uint64_t t = ctpop32((r2 >> i) & 0xff); ret |= t << i; } return ret; }"} {"target": 0, "idx": 24218, "func": "static void pxa2xx_lcdc_dma0_redraw_vert(struct pxa2xx_lcdc_s *s, uint8_t *fb, int *miny, int *maxy) { int y, src_width, dest_width, dirty[2]; uint8_t *src, *dest; ram_addr_t x, addr, new_addr, start, end; drawfn fn = 0; if (s->dest_width) fn = s->line_fn[s->transp][s->bpp]; if (!fn) return; src = fb; src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */ if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) src_width *= 3; else if (s->bpp > pxa_lcdc_16bpp) src_width *= 4; else if (s->bpp > pxa_lcdc_8bpp) src_width *= 2; dest_width = s->yres * s->dest_width; dest = ds_get_data(s->ds) + dest_width * (s->xres - 1); addr = (ram_addr_t) (fb - phys_ram_base); start = addr + s->yres * src_width; end = addr; dirty[0] = dirty[1] = cpu_physical_memory_get_dirty(start, VGA_DIRTY_FLAG); for (y = 0; y < s->yres; y ++) { new_addr = addr + src_width; for (x = addr + TARGET_PAGE_SIZE; x < new_addr; x += TARGET_PAGE_SIZE) { dirty[1] = cpu_physical_memory_get_dirty(x, VGA_DIRTY_FLAG); dirty[0] |= dirty[1]; } if (dirty[0] || s->invalidated) { fn((uint32_t *) s->dma_ch[0].palette, dest, src, s->xres, -dest_width); if (addr < start) start = addr; end = new_addr; if (y < *miny) *miny = y; if (y >= *maxy) *maxy = y + 1; } addr = new_addr; dirty[0] = dirty[1]; src += src_width; dest += s->dest_width; } if (end > start) cpu_physical_memory_reset_dirty(start, end, VGA_DIRTY_FLAG); }"} {"target": 0, "idx": 24219, "func": "static void term_backspace(void) { if (term_cmd_buf_index > 0) { term_backward_char(); term_delete_char(); } }"} {"target": 1, "idx": 24221, "func": "static inline void libopenjpeg_copy_to_packed16(AVFrame *picture, opj_image_t *image) { uint16_t *img_ptr; int index, x, y, c; int adjust[4]; for (x = 0; x < image->numcomps; x++) adjust[x] = FFMAX(FFMIN(av_pix_fmt_desc_get(picture->format)->comp[x].depth_minus1 + 1 - image->comps[x].prec, 8), 0); for (y = 0; y < picture->height; y++) { index = y*picture->width; img_ptr = (uint16_t*) (picture->data[0] + y*picture->linesize[0]); for (x = 0; x < picture->width; x++, index++) { for (c = 0; c < image->numcomps; c++) { *img_ptr++ = 0x8000 * image->comps[c].sgnd + (image->comps[c].data[index] << adjust[c]); } } } }"} {"target": 1, "idx": 24224, "func": "int MPV_encode_picture(AVCodecContext 
*avctx, unsigned char *buf, int buf_size, void *data) { MpegEncContext *s = avctx->priv_data; AVFrame *pic_arg = data; int i, stuffing_count; for(i=0; i<avctx->thread_count; i++){ int start_y= s->thread_context[i]->start_mb_y; int end_y= s->thread_context[i]-> end_mb_y; int h= s->mb_height; uint8_t *start= buf + (size_t)(((int64_t) buf_size)*start_y/h); uint8_t *end = buf + (size_t)(((int64_t) buf_size)* end_y/h); init_put_bits(&s->thread_context[i]->pb, start, end - start); } s->picture_in_gop_number++; if(load_input_picture(s, pic_arg) < 0) return -1; select_input_picture(s); /* output? */ if(s->new_picture.data[0]){ s->pict_type= s->new_picture.pict_type; //emms_c(); //printf(\"qs:%f %f %d\\n\", s->new_picture.quality, s->current_picture.quality, s->qscale); MPV_frame_start(s, avctx); if (encode_picture(s, s->picture_number) < 0) return -1; avctx->real_pict_num = s->picture_number; avctx->header_bits = s->header_bits; avctx->mv_bits = s->mv_bits; avctx->misc_bits = s->misc_bits; avctx->i_tex_bits = s->i_tex_bits; avctx->p_tex_bits = s->p_tex_bits; avctx->i_count = s->i_count; avctx->p_count = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx avctx->skip_count = s->skip_count; MPV_frame_end(s); if (s->out_format == FMT_MJPEG) mjpeg_picture_trailer(s); if(s->flags&CODEC_FLAG_PASS1) ff_write_pass1_stats(s); for(i=0; i<4; i++){ s->current_picture_ptr->error[i]= s->current_picture.error[i]; avctx->error[i] += s->current_picture_ptr->error[i]; } if(s->flags&CODEC_FLAG_PASS1) assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits + avctx->i_tex_bits + avctx->p_tex_bits == put_bits_count(&s->pb)); flush_put_bits(&s->pb); s->frame_bits = put_bits_count(&s->pb); stuffing_count= ff_vbv_update(s, s->frame_bits); if(stuffing_count){ if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < stuffing_count + 50){ av_log(s->avctx, AV_LOG_ERROR, \"stuffing too large\\n\"); return -1; } switch(s->codec_id){ case CODEC_ID_MPEG1VIDEO: case CODEC_ID_MPEG2VIDEO: while(stuffing_count--){ put_bits(&s->pb, 8, 0); } break; case CODEC_ID_MPEG4: put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, 0x1C3); stuffing_count -= 4; while(stuffing_count--){ put_bits(&s->pb, 8, 0xFF); } break; default: av_log(s->avctx, AV_LOG_ERROR, \"vbv buffer overflow\\n\"); } flush_put_bits(&s->pb); s->frame_bits = put_bits_count(&s->pb); } /* update mpeg1/2 vbv_delay for CBR */ if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1 && 90000LL * (avctx->rc_buffer_size-1) <= s->avctx->rc_max_rate*0xFFFFLL){ int vbv_delay; assert(s->repeat_first_field==0); vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate); assert(vbv_delay < 0xFFFF); s->vbv_delay_ptr[0] &= 0xF8; s->vbv_delay_ptr[0] |= vbv_delay>>13; s->vbv_delay_ptr[1] = vbv_delay>>5; s->vbv_delay_ptr[2] &= 0x07; s->vbv_delay_ptr[2] |= vbv_delay<<3; } s->total_bits += s->frame_bits; avctx->frame_bits = s->frame_bits; }else{ assert((pbBufPtr(&s->pb) == s->pb.buf)); s->frame_bits=0; } assert((s->frame_bits&7)==0); return s->frame_bits/8; }"} {"target": 0, "idx": 24226, "func": "static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y) { int h, cy, i; int offset[AV_NUM_DATA_POINTERS]; if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) { int y_flipped = s->flipped_image ? s->avctx->height - y : y; /* At the end of the frame, report INT_MAX instead of the height of * the frame. 
This makes the other threads' ff_thread_await_progress() * calls cheaper, because they don't have to clip their values. */ ff_thread_report_progress(&s->current_frame, y_flipped == s->avctx->height ? INT_MAX : y_flipped - 1, 0); } if (s->avctx->draw_horiz_band == NULL) return; h = y - s->last_slice_end; s->last_slice_end = y; y -= h; if (!s->flipped_image) y = s->avctx->height - y - h; cy = y >> s->chroma_y_shift; offset[0] = s->current_frame.f->linesize[0] * y; offset[1] = s->current_frame.f->linesize[1] * cy; offset[2] = s->current_frame.f->linesize[2] * cy; for (i = 3; i < AV_NUM_DATA_POINTERS; i++) offset[i] = 0; emms_c(); s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h); }"} {"target": 0, "idx": 24227, "func": "static int gif_write_header(AVFormatContext *s) { GIFContext *gif = s->priv_data; AVIOContext *pb = s->pb; AVCodecContext *enc, *video_enc; int i, width, height, loop_count /*, rate*/; /* XXX: do we reject audio streams or just ignore them ? if(s->nb_streams > 1) return -1; */ gif->time = 0; gif->file_time = 0; video_enc = NULL; for(i=0;i<s->nb_streams;i++) { enc = s->streams[i]->codec; if (enc->codec_type != AVMEDIA_TYPE_AUDIO) video_enc = enc; } if (!video_enc) { av_free(gif); return -1; } else { width = video_enc->width; height = video_enc->height; loop_count = s->loop_output; // rate = video_enc->time_base.den; } if (video_enc->pix_fmt != PIX_FMT_RGB24) { av_log(s, AV_LOG_ERROR, \"ERROR: gif only handles the rgb24 pixel format. Use -pix_fmt rgb24.\\n\"); return AVERROR(EIO); } gif_image_write_header(pb, width, height, loop_count, NULL); avio_flush(s->pb); return 0; }"} {"target": 1, "idx": 24235, "func": "fixup_vorbis_headers(AVFormatContext * as, struct oggvorbis_private *priv, uint8_t **buf) { int i,offset, len, buf_len; unsigned char *ptr; len = priv->len[0] + priv->len[1] + priv->len[2]; buf_len = len + len/255 + 64; ptr = *buf = av_realloc(NULL, buf_len); memset(*buf, '\\0', buf_len); ptr[0] = 2; offset = 1; offset += av_xiphlacing(&ptr[offset], priv->len[0]); offset += av_xiphlacing(&ptr[offset], priv->len[1]); for (i = 0; i < 3; i++) { memcpy(&ptr[offset], priv->packet[i], priv->len[i]); offset += priv->len[i]; av_freep(&priv->packet[i]); } *buf = av_realloc(*buf, offset + FF_INPUT_BUFFER_PADDING_SIZE); return offset; }"} {"target": 0, "idx": 24251, "func": "static inline int small_diamond_search4MV(MpegEncContext * s, int *best, int dmin, UINT8 *new_pic, UINT8 *old_pic, int pic_stride, int pred_x, int pred_y, UINT16 *mv_penalty, int quant, int xmin, int ymin, int xmax, int ymax, int shift) { int next_dir=-1; for(;;){ int d; const int dir= next_dir; const int x= best[0]; const int y= best[1]; next_dir=-1; //printf(\"%d\", dir); if(dir!=2 && x>xmin) CHECK_MV4_DIR(x-1, y , 0) if(dir!=3 && y>ymin) CHECK_MV4_DIR(x , y-1, 1) if(dir!=0 && xlong_ref_count == 0); ff_h264_unref_picture(h, &h->last_pic_for_ec); if (h->short_ref_count) ff_h264_ref_picture(h, &h->last_pic_for_ec, h->short_ref[0]); for (i = 0; i < h->short_ref_count; i++) { unreference_pic(h, h->short_ref[i], 0); h->short_ref[i] = NULL; } h->short_ref_count = 0; memset(h->default_ref_list, 0, sizeof(h->default_ref_list)); memset(h->ref_list, 0, sizeof(h->ref_list)); }"} {"target": 0, "idx": 24290, "func": "static int get_max_p_order(int max_porder, int n, int order) { int porder, max_parts; porder = max_porder; while(porder > 0) { max_parts = (1 << porder); if(!(n % max_parts) && (n > max_parts*order)) { break; } porder--; } return porder; }"} {"target": 0, "idx": 24308, "func": "TCGv_i64 
tcg_global_reg_new_i64(int reg, const char *name) { int idx; idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name); return MAKE_TCGV_I64(idx); }"} {"target": 0, "idx": 24309, "func": "static void gen_window_check1(DisasContext *dc, unsigned r1) { if (dc->tb->flags & XTENSA_TBFLAG_EXCM) { return; } if (option_enabled(dc, XTENSA_OPTION_WINDOWED_REGISTER) && r1 / 4 > dc->used_window) { TCGv_i32 pc = tcg_const_i32(dc->pc); TCGv_i32 w = tcg_const_i32(r1 / 4); dc->used_window = r1 / 4; gen_advance_ccount(dc); gen_helper_window_check(cpu_env, pc, w); tcg_temp_free(w); tcg_temp_free(pc); } }"} {"target": 0, "idx": 24318, "func": "void aio_notify(AioContext *ctx) { /* Write e.g. bh->scheduled before reading ctx->notify_me. Pairs * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll. */ smp_mb(); if (ctx->notify_me) { event_notifier_set(&ctx->notifier); atomic_mb_set(&ctx->notified, true); } }"} {"target": 0, "idx": 24320, "func": "static void xlnx_zynqmp_qspips_reset(DeviceState *d) { XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(d); int i; xilinx_spips_reset(d); for (i = 0; i < XLNX_ZYNQMP_SPIPS_R_MAX; i++) { s->regs[i] = 0; } fifo8_reset(&s->rx_fifo_g); fifo8_reset(&s->rx_fifo_g); fifo32_reset(&s->fifo_g); s->regs[R_INTR_STATUS] = R_INTR_STATUS_RESET; s->regs[R_GPIO] = 1; s->regs[R_LPBK_DLY_ADJ] = R_LPBK_DLY_ADJ_RESET; s->regs[R_GQSPI_GFIFO_THRESH] = 0x10; s->regs[R_MOD_ID] = 0x01090101; s->regs[R_GQSPI_IMR] = R_GQSPI_IMR_RESET; s->regs[R_GQSPI_TX_THRESH] = 1; s->regs[R_GQSPI_RX_THRESH] = 1; s->regs[R_GQSPI_GPIO] = 1; s->regs[R_GQSPI_LPBK_DLY_ADJ] = R_GQSPI_LPBK_DLY_ADJ_RESET; s->regs[R_GQSPI_MOD_ID] = R_GQSPI_MOD_ID_RESET; s->regs[R_QSPIDMA_DST_CTRL] = R_QSPIDMA_DST_CTRL_RESET; s->regs[R_QSPIDMA_DST_I_MASK] = R_QSPIDMA_DST_I_MASK_RESET; s->regs[R_QSPIDMA_DST_CTRL2] = R_QSPIDMA_DST_CTRL2_RESET; s->man_start_com_g = false; s->gqspi_irqline = 0; xlnx_zynqmp_qspips_update_ixr(s); }"} {"target": 0, "idx": 24326, "func": "static void spapr_reset_htab(sPAPRMachineState *spapr) { long shift; int index; shift = kvmppc_reset_htab(spapr->htab_shift); if (shift < 0) { error_setg(&error_abort, \"Failed to reset HTAB\"); } else if (shift > 0) { if (shift != spapr->htab_shift) { error_setg(&error_abort, \"Requested HTAB allocation failed during reset\"); } /* Tell readers to update their file descriptor */ if (spapr->htab_fd >= 0) { spapr->htab_fd_stale = true; } } else { memset(spapr->htab, 0, HTAB_SIZE(spapr)); for (index = 0; index < HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; index++) { DIRTY_HPTE(HPTE(spapr->htab, index)); } } /* Update the RMA size if necessary */ if (spapr->vrma_adjust) { spapr->rma_size = kvmppc_rma_size(spapr_node0_size(), spapr->htab_shift); } }"} {"target": 1, "idx": 24334, "func": "static void load_tco(const TestData *d) { qpci_io_writew(d->dev, d->tco_io_base + TCO_RLD, 4); }"} {"target": 1, "idx": 24336, "func": "target_ulong helper_srad(CPUPPCState *env, target_ulong value, target_ulong shift) { int64_t ret; if (likely(!(shift & 0x40))) { if (likely((uint64_t)shift != 0)) { shift &= 0x3f; ret = (int64_t)value >> shift; if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) { env->ca = 0; } else { env->ca = 1; } } else { ret = (int64_t)value; env->ca = 0; } } else { ret = (int64_t)value >> 63; env->ca = (ret != 0); } return ret; }"} {"target": 1, "idx": 24339, "func": "static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr, target_ulong opcode, target_ulong *args) { CPUPPCState *env = &cpu->env; target_ulong flags = args[0]; target_ulong pte_index = args[1]; 
target_ulong avpn = args[2]; RemoveResult ret; ret = remove_hpte(cpu, pte_index, avpn, flags, &args[0], &args[1]); switch (ret) { case REMOVE_SUCCESS: check_tlb_flush(env); return H_SUCCESS; case REMOVE_NOT_FOUND: return H_NOT_FOUND; case REMOVE_PARM: return H_PARAMETER; case REMOVE_HW: return H_HARDWARE; } g_assert_not_reached(); }"} {"target": 1, "idx": 24340, "func": "static void qtrle_decode_16bpp(QtrleContext *s) { int stream_ptr; int header; int start_line; int lines_to_change; signed char rle_code; int row_ptr, pixel_ptr; int row_inc = s->frame.linesize[0]; unsigned short rgb16; unsigned char *rgb = s->frame.data[0]; int pixel_limit = s->frame.linesize[0] * s->avctx->height; /* check if this frame is even supposed to change */ if (s->size < 8) return; /* start after the chunk size */ stream_ptr = 4; /* fetch the header */ CHECK_STREAM_PTR(2); header = BE_16(&s->buf[stream_ptr]); stream_ptr += 2; /* if a header is present, fetch additional decoding parameters */ if (header & 0x0008) { CHECK_STREAM_PTR(8); start_line = BE_16(&s->buf[stream_ptr]); stream_ptr += 4; lines_to_change = BE_16(&s->buf[stream_ptr]); stream_ptr += 4; } else { start_line = 0; lines_to_change = s->avctx->height; } row_ptr = row_inc * start_line; while (lines_to_change--) { CHECK_STREAM_PTR(2); pixel_ptr = row_ptr + (s->buf[stream_ptr++] - 1) * 2; while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) { if (rle_code == 0) { /* there's another skip code in the stream */ CHECK_STREAM_PTR(1); pixel_ptr += (s->buf[stream_ptr++] - 1) * 2; CHECK_PIXEL_PTR(0); /* make sure pixel_ptr is positive */ } else if (rle_code < 0) { /* decode the run length code */ rle_code = -rle_code; CHECK_STREAM_PTR(2); rgb16 = BE_16(&s->buf[stream_ptr]); stream_ptr += 2; CHECK_PIXEL_PTR(rle_code * 2); while (rle_code--) { *(unsigned short *)(&rgb[pixel_ptr]) = rgb16; pixel_ptr += 2; } } else { CHECK_STREAM_PTR(rle_code * 2); CHECK_PIXEL_PTR(rle_code * 2); /* copy pixels directly to output */ while (rle_code--) { rgb16 = BE_16(&s->buf[stream_ptr]); stream_ptr += 2; *(unsigned short *)(&rgb[pixel_ptr]) = rgb16; pixel_ptr += 2; } } } row_ptr += row_inc; } }"} {"target": 1, "idx": 24379, "func": "static inline void RENAME(palToY)(uint8_t *dst, uint8_t *src, int width, uint32_t *pal) { int i; for(i=0; imac[r->dh] == core->mac[r->dt]; }"} {"target": 1, "idx": 24392, "func": "void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory) { int i, rc; xen_pfn_t ioreq_pfn; xen_pfn_t bufioreq_pfn; evtchn_port_t bufioreq_evtchn; XenIOState *state; state = g_malloc0(sizeof (XenIOState)); state->xce_handle = xenevtchn_open(NULL, 0); if (state->xce_handle == NULL) { perror(\"xen: event channel open\"); goto err; } state->xenstore = xs_daemon_open(); if (state->xenstore == NULL) { perror(\"xen: xenstore open\"); goto err; } rc = xen_create_ioreq_server(xen_xc, xen_domid, &state->ioservid); if (rc < 0) { perror(\"xen: ioreq server create\"); goto err; } state->exit.notify = xen_exit_notifier; qemu_add_exit_notifier(&state->exit); state->suspend.notify = xen_suspend_notifier; qemu_register_suspend_notifier(&state->suspend); state->wakeup.notify = xen_wakeup_notifier; qemu_register_wakeup_notifier(&state->wakeup); rc = xen_get_ioreq_server_info(xen_xc, xen_domid, state->ioservid, &ioreq_pfn, &bufioreq_pfn, &bufioreq_evtchn); if (rc < 0) { error_report(\"failed to get ioreq server info: error %d handle=\" XC_INTERFACE_FMT, errno, xen_xc); goto err; } DPRINTF(\"shared page at pfn %lx\\n\", ioreq_pfn); DPRINTF(\"buffered io page at pfn %lx\\n\", 
bufioreq_pfn); DPRINTF(\"buffered io evtchn is %x\\n\", bufioreq_evtchn); state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE, PROT_READ|PROT_WRITE, ioreq_pfn); if (state->shared_page == NULL) { error_report(\"map shared IO page returned error %d handle=\" XC_INTERFACE_FMT, errno, xen_xc); goto err; } rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn); if (!rc) { DPRINTF(\"shared vmport page at pfn %lx\\n\", ioreq_pfn); state->shared_vmport_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE, PROT_READ|PROT_WRITE, ioreq_pfn); if (state->shared_vmport_page == NULL) { error_report(\"map shared vmport IO page returned error %d handle=\" XC_INTERFACE_FMT, errno, xen_xc); goto err; } } else if (rc != -ENOSYS) { error_report(\"get vmport regs pfn returned error %d, rc=%d\", errno, rc); goto err; } state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE, PROT_READ|PROT_WRITE, bufioreq_pfn); if (state->buffered_io_page == NULL) { error_report(\"map buffered IO page returned error %d\", errno); goto err; } /* Note: cpus is empty at this point in init */ state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *)); rc = xen_set_ioreq_server_state(xen_xc, xen_domid, state->ioservid, true); if (rc < 0) { error_report(\"failed to enable ioreq server info: error %d handle=\" XC_INTERFACE_FMT, errno, xen_xc); goto err; } state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t)); /* FIXME: how about if we overflow the page here? */ for (i = 0; i < max_cpus; i++) { rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, xen_vcpu_eport(state->shared_page, i)); if (rc == -1) { error_report(\"shared evtchn %d bind error %d\", i, errno); goto err; } state->ioreq_local_port[i] = rc; } rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, bufioreq_evtchn); if (rc == -1) { error_report(\"buffered evtchn bind error %d\", errno); goto err; } state->bufioreq_local_port = rc; /* Init RAM management */ xen_map_cache_init(xen_phys_offset_to_gaddr, state); xen_ram_init(pcms, ram_size, ram_memory); qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); state->memory_listener = xen_memory_listener; QLIST_INIT(&state->physmap); memory_listener_register(&state->memory_listener, &address_space_memory); state->log_for_dirtybit = NULL; state->io_listener = xen_io_listener; memory_listener_register(&state->io_listener, &address_space_io); state->device_listener = xen_device_listener; device_listener_register(&state->device_listener); /* Initialize backend core & drivers */ if (xen_be_init() != 0) { error_report(\"xen backend core setup failed\"); goto err; } xen_be_register(\"console\", &xen_console_ops); xen_be_register(\"vkbd\", &xen_kbdmouse_ops); xen_be_register(\"qdisk\", &xen_blkdev_ops); xen_read_physmap(state); return; err: error_report(\"xen hardware virtual machine initialisation failed\"); exit(1); }"} {"target": 0, "idx": 24398, "func": "void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx) { c->put_pixels_tab[0][0] = ff_put_pixels16_neon; c->put_pixels_tab[0][1] = ff_put_pixels16_x2_neon; c->put_pixels_tab[0][2] = ff_put_pixels16_y2_neon; c->put_pixels_tab[0][3] = ff_put_pixels16_xy2_neon; c->put_pixels_tab[1][0] = ff_put_pixels8_neon; c->put_pixels_tab[1][1] = ff_put_pixels8_x2_neon; c->put_pixels_tab[1][2] = ff_put_pixels8_y2_neon; c->put_pixels_tab[1][3] = ff_put_pixels8_xy2_neon; c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_neon; c->put_no_rnd_pixels_tab[0][1] = 
ff_put_pixels16_x2_no_rnd_neon; c->put_no_rnd_pixels_tab[0][2] = ff_put_pixels16_y2_no_rnd_neon; c->put_no_rnd_pixels_tab[0][3] = ff_put_pixels16_xy2_no_rnd_neon; c->put_no_rnd_pixels_tab[1][0] = ff_put_pixels8_neon; c->put_no_rnd_pixels_tab[1][1] = ff_put_pixels8_x2_no_rnd_neon; c->put_no_rnd_pixels_tab[1][2] = ff_put_pixels8_y2_no_rnd_neon; c->put_no_rnd_pixels_tab[1][3] = ff_put_pixels8_xy2_no_rnd_neon; c->avg_pixels_tab[0][0] = ff_avg_pixels16_neon; c->add_pixels_clamped = ff_add_pixels_clamped_neon; c->put_pixels_clamped = ff_put_pixels_clamped_neon; c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_neon; c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_neon; c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_neon; c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_neon; c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_neon; c->put_h264_qpel_pixels_tab[0][ 0] = ff_put_h264_qpel16_mc00_neon; c->put_h264_qpel_pixels_tab[0][ 1] = ff_put_h264_qpel16_mc10_neon; c->put_h264_qpel_pixels_tab[0][ 2] = ff_put_h264_qpel16_mc20_neon; c->put_h264_qpel_pixels_tab[0][ 3] = ff_put_h264_qpel16_mc30_neon; c->put_h264_qpel_pixels_tab[0][ 4] = ff_put_h264_qpel16_mc01_neon; c->put_h264_qpel_pixels_tab[0][ 5] = ff_put_h264_qpel16_mc11_neon; c->put_h264_qpel_pixels_tab[0][ 6] = ff_put_h264_qpel16_mc21_neon; c->put_h264_qpel_pixels_tab[0][ 7] = ff_put_h264_qpel16_mc31_neon; c->put_h264_qpel_pixels_tab[0][ 8] = ff_put_h264_qpel16_mc02_neon; c->put_h264_qpel_pixels_tab[0][ 9] = ff_put_h264_qpel16_mc12_neon; c->put_h264_qpel_pixels_tab[0][10] = ff_put_h264_qpel16_mc22_neon; c->put_h264_qpel_pixels_tab[0][11] = ff_put_h264_qpel16_mc32_neon; c->put_h264_qpel_pixels_tab[0][12] = ff_put_h264_qpel16_mc03_neon; c->put_h264_qpel_pixels_tab[0][13] = ff_put_h264_qpel16_mc13_neon; c->put_h264_qpel_pixels_tab[0][14] = ff_put_h264_qpel16_mc23_neon; c->put_h264_qpel_pixels_tab[0][15] = ff_put_h264_qpel16_mc33_neon; c->put_h264_qpel_pixels_tab[1][ 0] = ff_put_h264_qpel8_mc00_neon; c->put_h264_qpel_pixels_tab[1][ 1] = ff_put_h264_qpel8_mc10_neon; c->put_h264_qpel_pixels_tab[1][ 2] = ff_put_h264_qpel8_mc20_neon; c->put_h264_qpel_pixels_tab[1][ 3] = ff_put_h264_qpel8_mc30_neon; c->put_h264_qpel_pixels_tab[1][ 4] = ff_put_h264_qpel8_mc01_neon; c->put_h264_qpel_pixels_tab[1][ 5] = ff_put_h264_qpel8_mc11_neon; c->put_h264_qpel_pixels_tab[1][ 6] = ff_put_h264_qpel8_mc21_neon; c->put_h264_qpel_pixels_tab[1][ 7] = ff_put_h264_qpel8_mc31_neon; c->put_h264_qpel_pixels_tab[1][ 8] = ff_put_h264_qpel8_mc02_neon; c->put_h264_qpel_pixels_tab[1][ 9] = ff_put_h264_qpel8_mc12_neon; c->put_h264_qpel_pixels_tab[1][10] = ff_put_h264_qpel8_mc22_neon; c->put_h264_qpel_pixels_tab[1][11] = ff_put_h264_qpel8_mc32_neon; c->put_h264_qpel_pixels_tab[1][12] = ff_put_h264_qpel8_mc03_neon; c->put_h264_qpel_pixels_tab[1][13] = ff_put_h264_qpel8_mc13_neon; c->put_h264_qpel_pixels_tab[1][14] = ff_put_h264_qpel8_mc23_neon; c->put_h264_qpel_pixels_tab[1][15] = ff_put_h264_qpel8_mc33_neon; c->avg_h264_qpel_pixels_tab[0][ 0] = ff_avg_h264_qpel16_mc00_neon; c->h264_v_loop_filter_luma = ff_h264_v_loop_filter_luma_neon; c->h264_h_loop_filter_luma = ff_h264_h_loop_filter_luma_neon; c->h264_v_loop_filter_chroma = ff_h264_v_loop_filter_chroma_neon; c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon; c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels_16x16_neon; c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels_16x8_neon; c->weight_h264_pixels_tab[2] = ff_weight_h264_pixels_8x16_neon; 
c->weight_h264_pixels_tab[3] = ff_weight_h264_pixels_8x8_neon; c->weight_h264_pixels_tab[4] = ff_weight_h264_pixels_8x4_neon; c->weight_h264_pixels_tab[5] = ff_weight_h264_pixels_4x8_neon; c->weight_h264_pixels_tab[6] = ff_weight_h264_pixels_4x4_neon; c->weight_h264_pixels_tab[7] = ff_weight_h264_pixels_4x2_neon; c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels_16x16_neon; c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels_16x8_neon; c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels_8x16_neon; c->biweight_h264_pixels_tab[3] = ff_biweight_h264_pixels_8x8_neon; c->biweight_h264_pixels_tab[4] = ff_biweight_h264_pixels_8x4_neon; c->biweight_h264_pixels_tab[5] = ff_biweight_h264_pixels_4x8_neon; c->biweight_h264_pixels_tab[6] = ff_biweight_h264_pixels_4x4_neon; c->biweight_h264_pixels_tab[7] = ff_biweight_h264_pixels_4x2_neon; c->h264_idct_add = ff_h264_idct_add_neon; c->h264_idct_dc_add = ff_h264_idct_dc_add_neon; c->h264_idct_add16 = ff_h264_idct_add16_neon; c->h264_idct_add16intra = ff_h264_idct_add16intra_neon; c->h264_idct_add8 = ff_h264_idct_add8_neon; if (CONFIG_VP3_DECODER || CONFIG_THEORA_DECODER) { c->vp3_v_loop_filter = ff_vp3_v_loop_filter_neon; c->vp3_h_loop_filter = ff_vp3_h_loop_filter_neon; } c->vector_fmul = ff_vector_fmul_neon; c->vector_fmul_window = ff_vector_fmul_window_neon; if (!(avctx->flags & CODEC_FLAG_BITEXACT)) { c->float_to_int16 = ff_float_to_int16_neon; c->float_to_int16_interleave = ff_float_to_int16_interleave_neon; } }"} {"target": 1, "idx": 24410, "func": "static void test_qemu_strtol_full_empty(void) { const char *str = \"\"; long res = 999L; int err; err = qemu_strtol(str, NULL, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 0); }"} {"target": 1, "idx": 24415, "func": "static void vga_draw_graphic(VGACommonState *s, int full_update) { DisplaySurface *surface = qemu_console_surface(s->con); int y1, y, update, linesize, y_start, double_scan, mask, depth; int width, height, shift_control, line_offset, bwidth, bits; ram_addr_t page0, page1; DirtyBitmapSnapshot *snap = NULL; int disp_width, multi_scan, multi_run; uint8_t *d; uint32_t v, addr1, addr; vga_draw_line_func *vga_draw_line = NULL; bool share_surface; pixman_format_code_t format; #ifdef HOST_WORDS_BIGENDIAN bool byteswap = !s->big_endian_fb; #else bool byteswap = s->big_endian_fb; #endif full_update |= update_basic_params(s); s->get_resolution(s, &width, &height); disp_width = width; shift_control = (s->gr[VGA_GFX_MODE] >> 5) & 3; double_scan = (s->cr[VGA_CRTC_MAX_SCAN] >> 7); if (shift_control != 1) { multi_scan = (((s->cr[VGA_CRTC_MAX_SCAN] & 0x1f) + 1) << double_scan) - 1; } else { /* in CGA modes, multi_scan is ignored */ /* XXX: is it correct ? */ multi_scan = double_scan; } multi_run = multi_scan; if (shift_control != s->shift_control || double_scan != s->double_scan) { full_update = 1; s->shift_control = shift_control; s->double_scan = double_scan; } if (shift_control == 0) { if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { disp_width <<= 1; } } else if (shift_control == 1) { if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { disp_width <<= 1; } } depth = s->get_bpp(s); /* * Check whether we can share the surface with the backend * or whether we need a shadow surface. We share native * endian surfaces for 15bpp and above and byteswapped * surfaces for 24bpp and above. 
*/ format = qemu_default_pixman_format(depth, !byteswap); if (format) { share_surface = dpy_gfx_check_format(s->con, format) && !s->force_shadow; } else { share_surface = false; } if (s->line_offset != s->last_line_offset || disp_width != s->last_width || height != s->last_height || s->last_depth != depth || s->last_byteswap != byteswap || share_surface != is_buffer_shared(surface)) { if (share_surface) { surface = qemu_create_displaysurface_from(disp_width, height, format, s->line_offset, s->vram_ptr + (s->start_addr * 4)); dpy_gfx_replace_surface(s->con, surface); } else { qemu_console_resize(s->con, disp_width, height); surface = qemu_console_surface(s->con); } s->last_scr_width = disp_width; s->last_scr_height = height; s->last_width = disp_width; s->last_height = height; s->last_line_offset = s->line_offset; s->last_depth = depth; s->last_byteswap = byteswap; full_update = 1; } else if (is_buffer_shared(surface) && (full_update || surface_data(surface) != s->vram_ptr + (s->start_addr * 4))) { pixman_format_code_t format = qemu_default_pixman_format(depth, !byteswap); surface = qemu_create_displaysurface_from(disp_width, height, format, s->line_offset, s->vram_ptr + (s->start_addr * 4)); dpy_gfx_replace_surface(s->con, surface); } if (shift_control == 0) { full_update |= update_palette16(s); if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { v = VGA_DRAW_LINE4D2; } else { v = VGA_DRAW_LINE4; } bits = 4; } else if (shift_control == 1) { full_update |= update_palette16(s); if (sr(s, VGA_SEQ_CLOCK_MODE) & 8) { v = VGA_DRAW_LINE2D2; } else { v = VGA_DRAW_LINE2; } bits = 4; } else { switch(s->get_bpp(s)) { default: case 0: full_update |= update_palette256(s); v = VGA_DRAW_LINE8D2; bits = 4; break; case 8: full_update |= update_palette256(s); v = VGA_DRAW_LINE8; bits = 8; break; case 15: v = s->big_endian_fb ? VGA_DRAW_LINE15_BE : VGA_DRAW_LINE15_LE; bits = 16; break; case 16: v = s->big_endian_fb ? VGA_DRAW_LINE16_BE : VGA_DRAW_LINE16_LE; bits = 16; break; case 24: v = s->big_endian_fb ? VGA_DRAW_LINE24_BE : VGA_DRAW_LINE24_LE; bits = 24; break; case 32: v = s->big_endian_fb ? 
VGA_DRAW_LINE32_BE : VGA_DRAW_LINE32_LE; bits = 32; break; } } vga_draw_line = vga_draw_line_table[v]; if (!is_buffer_shared(surface) && s->cursor_invalidate) { s->cursor_invalidate(s); } line_offset = s->line_offset; #if 0 printf(\"w=%d h=%d v=%d line_offset=%d cr[0x09]=0x%02x cr[0x17]=0x%02x linecmp=%d sr[0x01]=0x%02x\\n\", width, height, v, line_offset, s->cr[9], s->cr[VGA_CRTC_MODE], s->line_compare, sr(s, VGA_SEQ_CLOCK_MODE)); #endif addr1 = (s->start_addr * 4); bwidth = DIV_ROUND_UP(width * bits, 8); y_start = -1; d = surface_data(surface); linesize = surface_stride(surface); y1 = 0; if (!full_update) { ram_addr_t region_start = addr1; ram_addr_t region_end = addr1 + line_offset * height; vga_sync_dirty_bitmap(s); if (s->line_compare < height) { /* split screen mode */ region_start = 0; } snap = memory_region_snapshot_and_clear_dirty(&s->vram, region_start, region_end - region_start, DIRTY_MEMORY_VGA); } for(y = 0; y < height; y++) { addr = addr1; if (!(s->cr[VGA_CRTC_MODE] & 1)) { int shift; /* CGA compatibility handling */ shift = 14 + ((s->cr[VGA_CRTC_MODE] >> 6) & 1); addr = (addr & ~(1 << shift)) | ((y1 & 1) << shift); } if (!(s->cr[VGA_CRTC_MODE] & 2)) { addr = (addr & ~0x8000) | ((y1 & 2) << 14); } update = full_update; page0 = addr; page1 = addr + bwidth - 1; if (full_update) { update = 1; } else { update = memory_region_snapshot_get_dirty(&s->vram, snap, page0, page1 - page0); } /* explicit invalidation for the hardware cursor (cirrus only) */ update |= vga_scanline_invalidated(s, y); if (update) { if (y_start < 0) y_start = y; if (!(is_buffer_shared(surface))) { vga_draw_line(s, d, s->vram_ptr + addr, width); if (s->cursor_draw_line) s->cursor_draw_line(s, d, y); } } else { if (y_start >= 0) { /* flush to display */ dpy_gfx_update(s->con, 0, y_start, disp_width, y - y_start); y_start = -1; } } if (!multi_run) { mask = (s->cr[VGA_CRTC_MODE] & 3) ^ 3; if ((y1 & mask) == mask) addr1 += line_offset; y1++; multi_run = multi_scan; } else { multi_run--; } /* line compare acts on the displayed lines */ if (y == s->line_compare) addr1 = 0; d += linesize; } if (y_start >= 0) { /* flush to display */ dpy_gfx_update(s->con, 0, y_start, disp_width, y - y_start); } g_free(snap); memset(s->invalidated_y_table, 0, sizeof(s->invalidated_y_table)); }"} {"target": 0, "idx": 24430, "func": "static int filter_frame(AVFilterLink *link, AVFrame *frame) { AVFilterContext *ctx = link->dst; AudioFIRContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; int ret = 0; av_audio_fifo_write(s->fifo[0], (void **)frame->extended_data, frame->nb_samples); if (s->pts == AV_NOPTS_VALUE) s->pts = frame->pts; av_frame_free(&frame); if (!s->have_coeffs && s->eof_coeffs) { ret = convert_coeffs(ctx); if (ret < 0) return ret; } if (s->have_coeffs) { while (av_audio_fifo_size(s->fifo[0]) >= s->part_size) { ret = fir_frame(s, outlink); if (ret < 0) break; } } return ret; }"} {"target": 0, "idx": 24442, "func": "static int dnxhd_encode_init(AVCodecContext *avctx) { DNXHDEncContext *ctx = avctx->priv_data; int i, index, bit_depth; switch (avctx->pix_fmt) { case AV_PIX_FMT_YUV422P: bit_depth = 8; break; case AV_PIX_FMT_YUV422P10: bit_depth = 10; break; default: av_log(avctx, AV_LOG_ERROR, \"pixel format is incompatible with DNxHD\\n\"); return -1; } ctx->cid = ff_dnxhd_find_cid(avctx, bit_depth); if (!ctx->cid) { av_log(avctx, AV_LOG_ERROR, \"video parameters incompatible with DNxHD\\n\"); return -1; } av_log(avctx, AV_LOG_DEBUG, \"cid %d\\n\", ctx->cid); index = ff_dnxhd_get_cid_table(ctx->cid); 
av_assert0(index >= 0); ctx->cid_table = &ff_dnxhd_cid_table[index]; ctx->m.avctx = avctx; ctx->m.mb_intra = 1; ctx->m.h263_aic = 1; avctx->bits_per_raw_sample = ctx->cid_table->bit_depth; ff_dct_common_init(&ctx->m); ff_dct_encode_init(&ctx->m); if (!ctx->m.dct_quantize) ctx->m.dct_quantize = ff_dct_quantize_c; if (ctx->cid_table->bit_depth == 10) { ctx->m.dct_quantize = dnxhd_10bit_dct_quantize; ctx->get_pixels_8x4_sym = dnxhd_10bit_get_pixels_8x4_sym; ctx->block_width_l2 = 4; } else { ctx->get_pixels_8x4_sym = dnxhd_8bit_get_pixels_8x4_sym; ctx->block_width_l2 = 3; } if (ARCH_X86) ff_dnxhdenc_init_x86(ctx); ctx->m.mb_height = (avctx->height + 15) / 16; ctx->m.mb_width = (avctx->width + 15) / 16; if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) { ctx->interlaced = 1; ctx->m.mb_height /= 2; } ctx->m.mb_num = ctx->m.mb_height * ctx->m.mb_width; if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS) ctx->m.intra_quant_bias = avctx->intra_quant_bias; if (dnxhd_init_qmat(ctx, ctx->m.intra_quant_bias, 0) < 0) // XXX tune lbias/cbias return -1; // Avid Nitris hardware decoder requires a minimum amount of padding in the coding unit payload if (ctx->nitris_compat) ctx->min_padding = 1600; if (dnxhd_init_vlc(ctx) < 0) return -1; if (dnxhd_init_rc(ctx) < 0) return -1; FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_size, ctx->m.mb_height*sizeof(uint32_t), fail); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_offs, ctx->m.mb_height*sizeof(uint32_t), fail); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_bits, ctx->m.mb_num *sizeof(uint16_t), fail); FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale, ctx->m.mb_num *sizeof(uint8_t), fail); ctx->frame.key_frame = 1; ctx->frame.pict_type = AV_PICTURE_TYPE_I; ctx->m.avctx->coded_frame = &ctx->frame; if (avctx->thread_count > MAX_THREADS) { av_log(avctx, AV_LOG_ERROR, \"too many threads\\n\"); return -1; } ctx->thread[0] = ctx; for (i = 1; i < avctx->thread_count; i++) { ctx->thread[i] = av_malloc(sizeof(DNXHDEncContext)); memcpy(ctx->thread[i], ctx, sizeof(DNXHDEncContext)); } return 0; fail: //for FF_ALLOCZ_OR_GOTO return -1; }"} {"target": 0, "idx": 24448, "func": "static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { BDRVNBDState *s = bs->opaque; return nbd_client_session_co_readv(&s->client, sector_num, nb_sectors, qiov); }"} {"target": 0, "idx": 24469, "func": "static inline int msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block, int n, int coded) { int level, i, last, run, run_diff; int dc_pred_dir; RLTable *rl; RL_VLC_ELEM *rl_vlc; const UINT8 *scan_table; int qmul, qadd; if (s->mb_intra) { qmul=1; qadd=0; /* DC coef */ set_stat(ST_DC); level = msmpeg4_decode_dc(s, n, &dc_pred_dir); #ifdef PRINT_MB { static int c; if(n==0) c=0; if(n==4) printf(\"%X\", c); c+= c +dc_pred_dir; } #endif if (level < 0){ fprintf(stderr, \"dc overflow- block: %d qscale: %d//\\n\", n, s->qscale); if(s->inter_intra_pred) level=0; else return -1; } if (n < 4) { rl = &rl_table[s->rl_table_index]; if(level > 256*s->y_dc_scale){ fprintf(stderr, \"dc overflow+ L qscale: %d//\\n\", s->qscale); if(!s->inter_intra_pred) return -1; } } else { rl = &rl_table[3 + s->rl_chroma_table_index]; if(level > 256*s->c_dc_scale){ fprintf(stderr, \"dc overflow+ C qscale: %d//\\n\", s->qscale); if(!s->inter_intra_pred) return -1; } } block[0] = level; run_diff = 0; i = 0; if (!coded) { goto not_coded; } if (s->ac_pred) { if (dc_pred_dir == 0) scan_table = s->intra_v_scantable; /* left */ else scan_table = s->intra_h_scantable; /* top */ } else { scan_table = 
s->intra_scantable; } set_stat(ST_INTRA_AC); rl_vlc= rl->rl_vlc[0]; } else { qmul = s->qscale << 1; qadd = (s->qscale - 1) | 1; i = -1; rl = &rl_table[3 + s->rl_table_index]; if(s->msmpeg4_version==2) run_diff = 0; else run_diff = 1; if (!coded) { s->block_last_index[n] = i; return 0; } scan_table = s->inter_scantable; set_stat(ST_INTER_AC); rl_vlc= rl->rl_vlc[s->qscale]; } { OPEN_READER(re, &s->gb); for(;;) { UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2); if (level==0) { int cache; cache= GET_CACHE(re, &s->gb); /* escape */ if (s->msmpeg4_version==1 || (cache&0x80000000)==0) { if (s->msmpeg4_version==1 || (cache&0x40000000)==0) { /* third escape */ if(s->msmpeg4_version!=1) LAST_SKIP_BITS(re, &s->gb, 2); UPDATE_CACHE(re, &s->gb); if(s->msmpeg4_version<=3){ last= SHOW_UBITS(re, &s->gb, 1); SKIP_CACHE(re, &s->gb, 1); run= SHOW_UBITS(re, &s->gb, 6); SKIP_CACHE(re, &s->gb, 6); level= SHOW_SBITS(re, &s->gb, 8); LAST_SKIP_CACHE(re, &s->gb, 8); SKIP_COUNTER(re, &s->gb, 1+6+8); }else{ int sign; last= SHOW_UBITS(re, &s->gb, 1); SKIP_BITS(re, &s->gb, 1); if(!s->esc3_level_length){ int ll; //printf(\"ESC-3 %X at %d %d\\n\", show_bits(&s->gb, 24), s->mb_x, s->mb_y); if(s->qscale<8){ ll= SHOW_UBITS(re, &s->gb, 3); SKIP_BITS(re, &s->gb, 3); if(ll==0){ if(SHOW_UBITS(re, &s->gb, 1)) printf(\"cool a new vlc code ,contact the ffmpeg developers and upload the file\\n\"); SKIP_BITS(re, &s->gb, 1); ll=8; } }else{ ll=2; while(ll<8 && SHOW_UBITS(re, &s->gb, 1)==0){ ll++; SKIP_BITS(re, &s->gb, 1); } if(ll<8) SKIP_BITS(re, &s->gb, 1); } s->esc3_level_length= ll; s->esc3_run_length= SHOW_UBITS(re, &s->gb, 2) + 3; SKIP_BITS(re, &s->gb, 2); //printf(\"level length:%d, run length: %d\\n\", ll, s->esc3_run_length); UPDATE_CACHE(re, &s->gb); } run= SHOW_UBITS(re, &s->gb, s->esc3_run_length); SKIP_BITS(re, &s->gb, s->esc3_run_length); sign= SHOW_UBITS(re, &s->gb, 1); SKIP_BITS(re, &s->gb, 1); level= SHOW_UBITS(re, &s->gb, s->esc3_level_length); SKIP_BITS(re, &s->gb, s->esc3_level_length); if(sign) level= -level; } //printf(\"level: %d, run: %d at %d %d\\n\", level, run, s->mb_x, s->mb_y); #if 0 // waste of time / this will detect very few errors { const int abs_level= ABS(level); const int run1= run - rl->max_run[last][abs_level] - run_diff; if(abs_level<=MAX_LEVEL && run<=MAX_RUN){ if(abs_level <= rl->max_level[last][run]){ fprintf(stderr, \"illegal 3. esc, vlc encoding possible\\n\"); return DECODING_AC_LOST; } if(abs_level <= rl->max_level[last][run]*2){ fprintf(stderr, \"illegal 3. esc, esc 1 encoding possible\\n\"); return DECODING_AC_LOST; } if(run1>=0 && abs_level <= rl->max_level[last][run1]){ fprintf(stderr, \"illegal 3. esc, esc 2 encoding possible\\n\"); return DECODING_AC_LOST; } } } #endif //level = level * qmul + (level>0) * qadd - (level<=0) * qadd ; if (level>0) level= level * qmul + qadd; else level= level * qmul - qadd; #if 0 // waste of time too :( if(level>2048 || level<-2048){ fprintf(stderr, \"|level| overflow in 3. 
esc\\n\"); return DECODING_AC_LOST; } #endif i+= run + 1; if(last) i+=192; #ifdef ERROR_DETAILS if(run==66) fprintf(stderr, \"illegal vlc code in ESC3 level=%d\\n\", level); else if((i>62 && i<192) || i>192+63) fprintf(stderr, \"run overflow in ESC3 i=%d run=%d level=%d\\n\", i, run, level); #endif } else { /* second escape */ #if MIN_CACHE_BITS < 23 LAST_SKIP_BITS(re, &s->gb, 2); UPDATE_CACHE(re, &s->gb); #else SKIP_BITS(re, &s->gb, 2); #endif GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2); i+= run + rl->max_run[run>>7][level/qmul] + run_diff; //FIXME opt indexing level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); #ifdef ERROR_DETAILS if(run==66) fprintf(stderr, \"illegal vlc code in ESC2 level=%d\\n\", level); else if((i>62 && i<192) || i>192+63) fprintf(stderr, \"run overflow in ESC2 i=%d run=%d level=%d\\n\", i, run, level); #endif } } else { /* first escape */ #if MIN_CACHE_BITS < 22 LAST_SKIP_BITS(re, &s->gb, 1); UPDATE_CACHE(re, &s->gb); #else SKIP_BITS(re, &s->gb, 1); #endif GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2); i+= run; level = level + rl->max_level[run>>7][(run-1)&63] * qmul;//FIXME opt indexing level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); #ifdef ERROR_DETAILS if(run==66) fprintf(stderr, \"illegal vlc code in ESC1 level=%d\\n\", level); else if((i>62 && i<192) || i>192+63) fprintf(stderr, \"run overflow in ESC1 i=%d run=%d level=%d\\n\", i, run, level); #endif } } else { i+= run; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); #ifdef ERROR_DETAILS if(run==66) fprintf(stderr, \"illegal vlc code level=%d\\n\", level); else if((i>62 && i<192) || i>192+63) fprintf(stderr, \"run overflow i=%d run=%d level=%d\\n\", i, run, level); #endif } if (i > 62){ i-= 192; if(i&(~63)){ if(i+192 == 64 && level/qmul==-1){ fprintf(stderr, \"ignoring overflow at %d %d\\n\", s->mb_x, s->mb_y); break; }else{ fprintf(stderr, \"ac-tex damaged at %d %d\\n\", s->mb_x, s->mb_y); return -1; } } block[scan_table[i]] = level; break; } block[scan_table[i]] = level; } CLOSE_READER(re, &s->gb); } not_coded: if (s->mb_intra) { mpeg4_pred_ac(s, block, n, dc_pred_dir); if (s->ac_pred) { i = 63; /* XXX: not optimal */ } } if(s->msmpeg4_version==4 && i>0) i=63; //FIXME/XXX optimize s->block_last_index[n] = i; return 0; }"} {"target": 1, "idx": 24471, "func": "static void revert_acfilter(WmallDecodeCtx *s, int tile_size) { int ich, pred, i, j; int16_t *filter_coeffs = s->acfilter_coeffs; int scaling = s->acfilter_scaling; int order = s->acfilter_order; for (ich = 0; ich < s->num_channels; ich++) { int *prevvalues = s->acfilter_prevvalues[ich]; for (i = 0; i < order; i++) { pred = 0; for (j = 0; j < order; j++) { if (i <= j) pred += filter_coeffs[j] * prevvalues[j - i]; else pred += s->channel_residues[ich][i - j - 1] * filter_coeffs[j]; } pred >>= scaling; s->channel_residues[ich][i] += pred; } for (i = order; i < tile_size; i++) { pred = 0; for (j = 0; j < order; j++) pred += s->channel_residues[ich][i - j - 1] * filter_coeffs[j]; pred >>= scaling; s->channel_residues[ich][i] += pred; } for (j = 0; j < order; j++) prevvalues[j] = s->channel_residues[ich][tile_size - j - 1]; } }"} {"target": 0, "idx": 24502, "func": "int bdrv_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_info) { BlockDriver *drv = bs->drv; if (!drv) return -ENOMEDIUM; if (drv->bdrv_snapshot_list) return drv->bdrv_snapshot_list(bs, 
psn_info); if (bs->file) return bdrv_snapshot_list(bs->file, psn_info); return -ENOTSUP; }"} {"target": 0, "idx": 24506, "func": "static int mp3_write_xing(AVFormatContext *s) { MP3Context *mp3 = s->priv_data; AVCodecContext *codec = s->streams[mp3->audio_stream_idx]->codec; AVDictionaryEntry *enc = av_dict_get(s->streams[mp3->audio_stream_idx]->metadata, \"encoder\", NULL, 0); AVIOContext *dyn_ctx; int32_t header; MPADecodeHeader mpah; int srate_idx, i, channels; int bitrate_idx; int best_bitrate_idx = -1; int best_bitrate_error = INT_MAX; int ret; int ver = 0; int bytes_needed; if (!s->pb->seekable || !mp3->write_xing) return 0; for (i = 0; i < FF_ARRAY_ELEMS(avpriv_mpa_freq_tab); i++) { const uint16_t base_freq = avpriv_mpa_freq_tab[i]; if (codec->sample_rate == base_freq) ver = 0x3; // MPEG 1 else if (codec->sample_rate == base_freq / 2) ver = 0x2; // MPEG 2 else if (codec->sample_rate == base_freq / 4) ver = 0x0; // MPEG 2.5 else continue; srate_idx = i; break; } if (i == FF_ARRAY_ELEMS(avpriv_mpa_freq_tab)) { av_log(s, AV_LOG_WARNING, \"Unsupported sample rate, not writing Xing header.\\n\"); return -1; } switch (codec->channels) { case 1: channels = MPA_MONO; break; case 2: channels = MPA_STEREO; break; default: av_log(s, AV_LOG_WARNING, \"Unsupported number of channels, \" \"not writing Xing header.\\n\"); return -1; } /* dummy MPEG audio header */ header = 0xffU << 24; // sync header |= (0x7 << 5 | ver << 3 | 0x1 << 1 | 0x1) << 16; // sync/audio-version/layer 3/no crc*/ header |= (srate_idx << 2) << 8; header |= channels << 6; for (bitrate_idx = 1; bitrate_idx < 15; bitrate_idx++) { int bit_rate = 1000 * avpriv_mpa_bitrate_tab[ver != 3][3 - 1][bitrate_idx]; int error = FFABS(bit_rate - codec->bit_rate); if (error < best_bitrate_error) { best_bitrate_error = error; best_bitrate_idx = bitrate_idx; } } av_assert0(best_bitrate_idx >= 0); for (bitrate_idx = best_bitrate_idx; ; bitrate_idx++) { int32_t mask = bitrate_idx << (4 + 8); if (15 == bitrate_idx) return -1; header |= mask; avpriv_mpegaudio_decode_header(&mpah, header); mp3->xing_offset = xing_offtbl[mpah.lsf == 1][mpah.nb_channels == 1] + 4; bytes_needed = mp3->xing_offset + XING_SIZE; if (bytes_needed <= mpah.frame_size) break; header &= ~mask; } ret = avio_open_dyn_buf(&dyn_ctx); if (ret < 0) return ret; avio_wb32(dyn_ctx, header); ffio_fill(dyn_ctx, 0, mp3->xing_offset - 4); ffio_wfourcc(dyn_ctx, \"Xing\"); avio_wb32(dyn_ctx, 0x01 | 0x02 | 0x04 | 0x08); // frames / size / TOC / vbr scale mp3->size = mpah.frame_size; mp3->want=1; mp3->seen=0; mp3->pos=0; avio_wb32(dyn_ctx, 0); // frames avio_wb32(dyn_ctx, 0); // size // TOC for (i = 0; i < XING_TOC_SIZE; i++) avio_w8(dyn_ctx, (uint8_t)(255 * i / XING_TOC_SIZE)); // vbr quality // we write it, because some (broken) tools always expect it to be present avio_wb32(dyn_ctx, 0); // encoder short version string if (enc) { uint8_t encoder_str[9] = { 0 }; if ( strlen(enc->value) > sizeof(encoder_str) && !strcmp(\"Lavc libmp3lame\", enc->value)) { memcpy(encoder_str, \"Lavf lame\", 9); } else memcpy(encoder_str, enc->value, FFMIN(strlen(enc->value), sizeof(encoder_str))); avio_write(dyn_ctx, encoder_str, sizeof(encoder_str)); } else avio_write(dyn_ctx, \"Lavf\\0\\0\\0\\0\\0\", 9); avio_w8(dyn_ctx, 0); // tag revision 0 / unknown vbr method avio_w8(dyn_ctx, 0); // unknown lowpass filter value ffio_fill(dyn_ctx, 0, 8); // empty replaygain fields avio_w8(dyn_ctx, 0); // unknown encoding flags avio_w8(dyn_ctx, 0); // unknown abr/minimal bitrate // encoder delay if (codec->initial_padding - 
528 - 1 >= 1 << 12) { av_log(s, AV_LOG_WARNING, \"Too many samples of initial padding.\\n\"); } avio_wb24(dyn_ctx, FFMAX(codec->initial_padding - 528 - 1, 0)<<12); avio_w8(dyn_ctx, 0); // misc avio_w8(dyn_ctx, 0); // mp3gain avio_wb16(dyn_ctx, 0); // preset // audio length and CRCs (will be updated later) avio_wb32(dyn_ctx, 0); // music length avio_wb16(dyn_ctx, 0); // music crc avio_wb16(dyn_ctx, 0); // tag crc ffio_fill(dyn_ctx, 0, mpah.frame_size - bytes_needed); mp3->xing_frame_size = avio_close_dyn_buf(dyn_ctx, &mp3->xing_frame); mp3->xing_frame_offset = avio_tell(s->pb); avio_write(s->pb, mp3->xing_frame, mp3->xing_frame_size); mp3->audio_size = mp3->xing_frame_size; return 0; }"} {"target": 0, "idx": 24511, "func": "static void dec_store(DisasContext *dc) { TCGv t, *addr; unsigned int size; size = 1 << (dc->opcode & 3); if (size > 4 && (dc->tb_flags & MSR_EE_FLAG) && !(dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) { tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); t_gen_raise_exception(dc, EXCP_HW_EXCP); return; } LOG_DIS(\"s%d%s\\n\", size, dc->type_b ? \"i\" : \"\"); t_sync_flags(dc); /* If we get a fault on a dslot, the jmpstate better be in sync. */ sync_jmpstate(dc); addr = compute_ldst_addr(dc, &t); /* Verify alignment if needed. */ if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) { gen_helper_memalign(*addr, tcg_const_tl(dc->rd), tcg_const_tl(1), tcg_const_tl(size - 1)); } gen_store(dc, *addr, cpu_R[dc->rd], size); if (addr == &t) tcg_temp_free(t); }"} {"target": 0, "idx": 24514, "func": "static void qpci_spapr_io_writew(QPCIBus *bus, void *addr, uint16_t value) { QPCIBusSPAPR *s = container_of(bus, QPCIBusSPAPR, bus); uint64_t port = (uintptr_t)addr; value = bswap16(value); if (port < s->pio.size) { writew(s->pio_cpu_base + port, value); } else { writew(s->mmio_cpu_base + port, value); } }"} {"target": 1, "idx": 24524, "func": "static void dwt_decode97_int(DWTContext *s, int32_t *t) { int lev; int w = s->linelen[s->ndeclevels - 1][0]; int h = s->linelen[s->ndeclevels - 1][1]; int i; int32_t *line = s->i_linebuf; int32_t *data = t; /* position at index O of line range [0-5,w+5] cf. 
extend function */ line += 5; for (i = 0; i < w * h; i++) data[i] *= 1 << I_PRESHIFT; for (lev = 0; lev < s->ndeclevels; lev++) { int lh = s->linelen[lev][0], lv = s->linelen[lev][1], mh = s->mod[lev][0], mv = s->mod[lev][1], lp; int32_t *l; // HOR_SD l = line + mh; for (lp = 0; lp < lv; lp++) { int i, j = 0; // rescale with interleaving for (i = mh; i < lh; i += 2, j++) l[i] = ((data[w * lp + j] * I_LFTG_K) + (1 << 15)) >> 16; for (i = 1 - mh; i < lh; i += 2, j++) l[i] = data[w * lp + j]; sr_1d97_int(line, mh, mh + lh); for (i = 0; i < lh; i++) data[w * lp + i] = l[i]; } // VER_SD l = line + mv; for (lp = 0; lp < lh; lp++) { int i, j = 0; // rescale with interleaving for (i = mv; i < lv; i += 2, j++) l[i] = ((data[w * j + lp] * I_LFTG_K) + (1 << 15)) >> 16; for (i = 1 - mv; i < lv; i += 2, j++) l[i] = data[w * j + lp]; sr_1d97_int(line, mv, mv + lv); for (i = 0; i < lv; i++) data[w * i + lp] = l[i]; } } for (i = 0; i < w * h; i++) data[i] = (data[i] + ((1<<I_PRESHIFT)>>1)) >> I_PRESHIFT; }"} {"target": 1, "idx": 24533, "func": "int kvm_arch_get_registers(CPUState *env) { struct kvm_regs regs; struct kvm_sregs sregs; uint32_t cr; int i, ret; ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs); if (ret < 0) return ret; cr = regs.cr; for (i = 7; i >= 0; i--) { env->crf[i] = cr & 15; cr >>= 4; } env->ctr = regs.ctr; env->lr = regs.lr; env->xer = regs.xer; env->msr = regs.msr; env->nip = regs.pc; env->spr[SPR_SRR0] = regs.srr0; env->spr[SPR_SRR1] = regs.srr1; env->spr[SPR_SPRG0] = regs.sprg0; env->spr[SPR_SPRG1] = regs.sprg1; env->spr[SPR_SPRG2] = regs.sprg2; env->spr[SPR_SPRG3] = regs.sprg3; env->spr[SPR_SPRG4] = regs.sprg4; env->spr[SPR_SPRG5] = regs.sprg5; env->spr[SPR_SPRG6] = regs.sprg6; env->spr[SPR_SPRG7] = regs.sprg7; env->spr[SPR_BOOKE_PID] = regs.pid; for (i = 0;i < 32; i++) env->gpr[i] = regs.gpr[i]; #ifdef KVM_CAP_PPC_BOOKE_SREGS if (cap_booke_sregs) { ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs); if (ret < 0) { return ret; } if (sregs.u.e.features & KVM_SREGS_E_BASE) { env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0; env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1; env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr; env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear; env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr; env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr; env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr; env->spr[SPR_DECR] = sregs.u.e.dec; env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff; env->spr[SPR_TBU] = sregs.u.e.tb >> 32; env->spr[SPR_VRSAVE] = sregs.u.e.vrsave; } if (sregs.u.e.features & KVM_SREGS_E_ARCH206) { env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir; env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0; env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1; env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar; env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr; } if (sregs.u.e.features & KVM_SREGS_E_64) { env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr; } if (sregs.u.e.features & KVM_SREGS_E_SPRG8) { env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8; } if (sregs.u.e.features & KVM_SREGS_E_IVOR) { env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0]; env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1]; env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2]; env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3]; env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4]; env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5]; env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6]; env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7]; env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8]; env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9]; env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10]; env->spr[SPR_BOOKE_IVOR11] = 
sregs.u.e.ivor_low[11]; env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12]; env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13]; env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14]; env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15]; if (sregs.u.e.features & KVM_SREGS_E_SPE) { env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0]; env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1]; env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2]; } if (sregs.u.e.features & KVM_SREGS_E_PM) { env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3]; } if (sregs.u.e.features & KVM_SREGS_E_PC) { env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4]; env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5]; } } if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) { env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0; env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1; env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2; env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff; env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4; env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6; env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32; env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg; env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0]; env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1]; } if (sregs.u.e.features & KVM_SREGS_EXP) { env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr; } if (sregs.u.e.features & KVM_SREGS_E_PD) { env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc; env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc; } if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr; env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar; env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0; if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) { env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1; env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2; } } } #endif #ifdef KVM_CAP_PPC_SEGSTATE if (cap_segstate) { ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs); if (ret < 0) { return ret; } ppc_store_sdr1(env, sregs.u.s.sdr1); /* Sync SLB */ #ifdef TARGET_PPC64 for (i = 0; i < 64; i++) { ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe, sregs.u.s.ppc64.slb[i].slbv); } #endif /* Sync SRs */ for (i = 0; i < 16; i++) { env->sr[i] = sregs.u.s.ppc32.sr[i]; } /* Sync BATs */ for (i = 0; i < 8; i++) { env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff; env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32; env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff; env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32; } } #endif return 0; }"} {"target": 1, "idx": 24543, "func": "static int cdrom_probe_device(const char *filename) { if (strstart(filename, \"/dev/cd\", NULL)) return 100; return 0; }"} {"target": 1, "idx": 24544, "func": "static int qio_channel_socket_listen_worker(QIOTask *task, Error **errp, gpointer opaque) { QIOChannelSocket *ioc = QIO_CHANNEL_SOCKET(qio_task_get_source(task)); SocketAddress *addr = opaque; int ret; ret = qio_channel_socket_listen_sync(ioc, addr, errp); object_unref(OBJECT(ioc)); return ret; }"} {"target": 0, "idx": 24545, "func": "static int get_http_header_data(MMSHContext *mmsh) { MMSContext *mms = &mmsh->mms; int res, len; ChunkType chunk_type; for (;;) { len = 0; res = chunk_type = get_chunk_header(mmsh, &len); if (res < 0) { return res; } else if (chunk_type == CHUNK_TYPE_ASF_HEADER){ // get asf header and stored it if (!mms->header_parsed) { if (mms->asf_header) { if (len != mms->asf_header_size) { mms->asf_header_size = len; av_dlog(NULL, \"Header len changed from %d to %d\\n\", mms->asf_header_size, len); av_freep(&mms->asf_header); } } mms->asf_header = 
av_mallocz(len); if (!mms->asf_header) { return AVERROR(ENOMEM); } mms->asf_header_size = len; } if (len > mms->asf_header_size) { av_log(NULL, AV_LOG_ERROR, \"Asf header packet len = %d exceed the asf header buf size %d\\n\", len, mms->asf_header_size); return AVERROR(EIO); } res = ffurl_read_complete(mms->mms_hd, mms->asf_header, len); if (res != len) { av_log(NULL, AV_LOG_ERROR, \"Recv asf header data len %d != expected len %d\\n\", res, len); return AVERROR(EIO); } mms->asf_header_size = len; if (!mms->header_parsed) { res = ff_mms_asf_header_parser(mms); mms->header_parsed = 1; return res; } } else if (chunk_type == CHUNK_TYPE_DATA) { // read data packet and do padding return read_data_packet(mmsh, len); } else { if (len) { if (len > sizeof(mms->in_buffer)) { av_log(NULL, AV_LOG_ERROR, \"Other packet len = %d exceed the in_buffer size %zu\\n\", len, sizeof(mms->in_buffer)); return AVERROR(EIO); } res = ffurl_read_complete(mms->mms_hd, mms->in_buffer, len); if (res != len) { av_log(NULL, AV_LOG_ERROR, \"Read other chunk type data failed!\\n\"); return AVERROR(EIO); } else { av_dlog(NULL, \"Skip chunk type %d \\n\", chunk_type); continue; } } } } return 0; }"} {"target": 1, "idx": 24547, "func": "static USBPort *xhci_lookup_uport(XHCIState *xhci, uint32_t *slot_ctx) { USBPort *uport; char path[32]; int i, pos, port; port = (slot_ctx[1]>>16) & 0xFF; port = xhci->ports[port-1].uport->index+1; pos = snprintf(path, sizeof(path), \"%d\", port); for (i = 0; i < 5; i++) { port = (slot_ctx[0] >> 4*i) & 0x0f; if (!port) { break; } pos += snprintf(path + pos, sizeof(path) - pos, \".%d\", port); } QTAILQ_FOREACH(uport, &xhci->bus.used, next) { if (strcmp(uport->path, path) == 0) { return uport; } } return NULL; }"} {"target": 1, "idx": 24548, "func": "static void io_watch_poll_finalize(GSource *source) { IOWatchPoll *iwp = io_watch_poll_from_source(source); if (iwp->src) { g_source_destroy(iwp->src); g_source_unref(iwp->src); iwp->src = NULL; } }"} {"target": 1, "idx": 24558, "func": "static int pnm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; PNMContext * const s = avctx->priv_data; AVFrame *picture = data; AVFrame * const p = (AVFrame*)&s->picture; int i, j, n, linesize, h, upgrade = 0; unsigned char *ptr; int components, sample_len; s->bytestream_start = s->bytestream = buf; s->bytestream_end = buf + buf_size; if (ff_pnm_decode_header(avctx, s) < 0) return -1; if (p->data[0]) avctx->release_buffer(avctx, p); p->reference = 0; if (avctx->get_buffer(avctx, p) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return -1; } p->pict_type = AV_PICTURE_TYPE_I; p->key_frame = 1; switch (avctx->pix_fmt) { default: return -1; case PIX_FMT_RGB48BE: n = avctx->width * 6; components=3; sample_len=16; goto do_read; case PIX_FMT_RGB24: n = avctx->width * 3; components=3; sample_len=8; goto do_read; case PIX_FMT_GRAY8: n = avctx->width; components=1; sample_len=8; if (s->maxval < 255) upgrade = 1; goto do_read; case PIX_FMT_GRAY16BE: case PIX_FMT_GRAY16LE: n = avctx->width * 2; components=1; sample_len=16; if (s->maxval < 65535) upgrade = 2; goto do_read; case PIX_FMT_MONOWHITE: case PIX_FMT_MONOBLACK: n = (avctx->width + 7) >> 3; components=1; sample_len=1; do_read: ptr = p->data[0]; linesize = p->linesize[0]; if (s->bytestream + n * avctx->height > s->bytestream_end) return -1; if(s->type < 4){ for (i=0; i<avctx->height; i++) { PutBitContext pb; init_put_bits(&pb, ptr, linesize); for(j=0; j<avctx->width * components; j++){ unsigned int 
c=0; int v=0; while(s->bytestream < s->bytestream_end && (*s->bytestream < '0' || *s->bytestream > '9' )) s->bytestream++; if(s->bytestream >= s->bytestream_end) return -1; do{ v= 10*v + c; c= (*s->bytestream++) - '0'; }while(c <= 9); put_bits(&pb, sample_len, (((1<<sample_len)-1)*v + (s->maxval>>1))/s->maxval); } flush_put_bits(&pb); ptr+= linesize; } }else{ for (i = 0; i < avctx->height; i++) { if (!upgrade) memcpy(ptr, s->bytestream, n); else if (upgrade == 1) { unsigned int j, f = (255 * 128 + s->maxval / 2) / s->maxval; for (j = 0; j < n; j++) ptr[j] = (s->bytestream[j] * f + 64) >> 7; } else if (upgrade == 2) { unsigned int j, v, f = (65535 * 32768 + s->maxval / 2) / s->maxval; for (j = 0; j < n / 2; j++) { v = av_be2ne16(((uint16_t *)s->bytestream)[j]); ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15; } } s->bytestream += n; ptr += linesize; } } break; case PIX_FMT_YUV420P: { unsigned char *ptr1, *ptr2; n = avctx->width; ptr = p->data[0]; linesize = p->linesize[0]; if (s->bytestream + n * avctx->height * 3 / 2 > s->bytestream_end) return -1; for (i = 0; i < avctx->height; i++) { memcpy(ptr, s->bytestream, n); s->bytestream += n; ptr += linesize; } ptr1 = p->data[1]; ptr2 = p->data[2]; n >>= 1; h = avctx->height >> 1; for (i = 0; i < h; i++) { memcpy(ptr1, s->bytestream, n); s->bytestream += n; memcpy(ptr2, s->bytestream, n); s->bytestream += n; ptr1 += p->linesize[1]; ptr2 += p->linesize[2]; } } break; case PIX_FMT_RGB32: ptr = p->data[0]; linesize = p->linesize[0]; if (s->bytestream + avctx->width * avctx->height * 4 > s->bytestream_end) return -1; for (i = 0; i < avctx->height; i++) { int j, r, g, b, a; for (j = 0; j < avctx->width; j++) { r = *s->bytestream++; g = *s->bytestream++; b = *s->bytestream++; a = *s->bytestream++; ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b; } ptr += linesize; } break; } *picture = *(AVFrame*)&s->picture; *data_size = sizeof(AVPicture); return s->bytestream - s->bytestream_start; }"} {"target": 1, "idx": 24570, "func": "static int estimate_best_b_count(MpegEncContext *s) { AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id); AVCodecContext *c = avcodec_alloc_context3(NULL); const int scale = s->avctx->brd_scale; int i, j, out_size, p_lambda, b_lambda, lambda2; int64_t best_rd = INT64_MAX; int best_b_count = -1; assert(scale >= 0 && scale <= 3); //emms_c(); //s->next_picture_ptr->quality; p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P]; //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset; b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B]; if (!b_lambda) // FIXME we should do this somewhere else b_lambda = p_lambda; lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >> FF_LAMBDA_SHIFT; c->width = s->width >> scale; c->height = s->height >> scale; c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR | CODEC_FLAG_INPUT_PRESERVED; c->flags |= s->avctx->flags & CODEC_FLAG_QPEL; c->mb_decision = s->avctx->mb_decision; c->me_cmp = s->avctx->me_cmp; c->mb_cmp = s->avctx->mb_cmp; c->me_sub_cmp = s->avctx->me_sub_cmp; c->pix_fmt = AV_PIX_FMT_YUV420P; c->time_base = s->avctx->time_base; c->max_b_frames = s->max_b_frames; if (avcodec_open2(c, codec, NULL) < 0) return -1; for (i = 0; i < s->max_b_frames + 2; i++) { Picture pre_input, *pre_input_ptr = i ? 
s->input_picture[i - 1] : s->next_picture_ptr; if (pre_input_ptr && (!i || s->input_picture[i - 1])) { pre_input = *pre_input_ptr; if (!pre_input.shared && i) { pre_input.f.data[0] += INPLACE_OFFSET; pre_input.f.data[1] += INPLACE_OFFSET; pre_input.f.data[2] += INPLACE_OFFSET; } s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0], pre_input.f.data[0], pre_input.f.linesize[0], c->width, c->height); s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1], pre_input.f.data[1], pre_input.f.linesize[1], c->width >> 1, c->height >> 1); s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2], pre_input.f.data[2], pre_input.f.linesize[2], c->width >> 1, c->height >> 1); } } for (j = 0; j < s->max_b_frames + 1; j++) { int64_t rd = 0; if (!s->input_picture[j]) break; c->error[0] = c->error[1] = c->error[2] = 0; s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I; s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA; out_size = encode_frame(c, s->tmp_frames[0]); //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT; for (i = 0; i < s->max_b_frames + 1; i++) { int is_p = i % (j + 1) == j || i == s->max_b_frames; s->tmp_frames[i + 1]->pict_type = is_p ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B; s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda; out_size = encode_frame(c, s->tmp_frames[i + 1]); rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3); } /* get the delayed frames */ while (out_size) { out_size = encode_frame(c, NULL); rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3); } rd += c->error[0] + c->error[1] + c->error[2]; if (rd < best_rd) { best_rd = rd; best_b_count = j; } } avcodec_close(c); av_freep(&c); return best_b_count; }"} {"target": 0, "idx": 24575, "func": "static int str_read_packet(AVFormatContext *s, AVPacket *ret_pkt) { ByteIOContext *pb = s->pb; StrDemuxContext *str = s->priv_data; unsigned char sector[RAW_CD_SECTOR_SIZE]; int channel; AVPacket *pkt; AVStream *st; while (1) { if (get_buffer(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE) return AVERROR(EIO); channel = sector[0x11]; if (channel >= 32) return AVERROR_INVALIDDATA; switch (sector[0x12] & CDXA_TYPE_MASK) { case CDXA_TYPE_DATA: case CDXA_TYPE_VIDEO: { int current_sector = AV_RL16(&sector[0x1C]); int sector_count = AV_RL16(&sector[0x1E]); int frame_size = AV_RL32(&sector[0x24]); if(!( frame_size>=0 && current_sector < sector_count && sector_count*VIDEO_DATA_CHUNK_SIZE >=frame_size)){ av_log(s, AV_LOG_ERROR, \"Invalid parameters %d %d %d\\n\", current_sector, sector_count, frame_size); return AVERROR_INVALIDDATA; } if(str->channels[channel].video_stream_index < 0){ /* allocate a new AVStream */ st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); av_set_pts_info(st, 64, 1, 15); str->channels[channel].video_stream_index = st->index; st->codec->codec_type = CODEC_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_MDEC; st->codec->codec_tag = 0; /* no fourcc */ st->codec->width = AV_RL16(&sector[0x28]); st->codec->height = AV_RL16(&sector[0x2A]); } /* if this is the first sector of the frame, allocate a pkt */ pkt = &str->channels[channel].tmp_pkt; if(pkt->size != sector_count*VIDEO_DATA_CHUNK_SIZE){ if(pkt->data) av_log(s, AV_LOG_ERROR, \"missmatching sector_count\\n\"); av_free_packet(pkt); if (av_new_packet(pkt, sector_count*VIDEO_DATA_CHUNK_SIZE)) return AVERROR(EIO); pkt->pos= url_ftell(pb) - RAW_CD_SECTOR_SIZE; pkt->stream_index = str->channels[channel].video_stream_index; } memcpy(pkt->data + current_sector*VIDEO_DATA_CHUNK_SIZE, sector + VIDEO_DATA_HEADER_SIZE,
VIDEO_DATA_CHUNK_SIZE); if (current_sector == sector_count-1) { pkt->size= frame_size; *ret_pkt = *pkt; pkt->data= NULL; pkt->size= -1; return 0; } } break; case CDXA_TYPE_AUDIO: if(str->channels[channel].audio_stream_index < 0){ int fmt = sector[0x13]; /* allocate a new AVStream */ st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); str->channels[channel].audio_stream_index = st->index; st->codec->codec_type = CODEC_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_ADPCM_XA; st->codec->codec_tag = 0; /* no fourcc */ st->codec->channels = (fmt&1)?2:1; st->codec->sample_rate = (fmt&4)?18900:37800; // st->codec->bit_rate = 0; //FIXME; st->codec->block_align = 128; av_set_pts_info(st, 64, 128, st->codec->sample_rate); } pkt = ret_pkt; if (av_new_packet(pkt, 2304)) return AVERROR(EIO); memcpy(pkt->data,sector+24,2304); pkt->stream_index = str->channels[channel].audio_stream_index; return 0; break; default: /* drop the sector and move on */ break; } if (url_feof(pb)) return AVERROR(EIO); } }"} {"target": 0, "idx": 24587, "func": "static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long src_size) { uint8_t *dest = dst; const uint8_t *s = src; const uint8_t *end; #if COMPILE_TEMPLATE_MMX const uint8_t *mm_end; #endif end = s + src_size; #if COMPILE_TEMPLATE_MMX __asm__ volatile(PREFETCH\" %0\"::\"m\"(*s):\"memory\"); mm_end = end - 31; while (s < mm_end) { __asm__ volatile( PREFETCH\" 32%1 \\n\\t\" \"movq %1, %%mm0 \\n\\t\" \"movq 8%1, %%mm1 \\n\\t\" \"movq 16%1, %%mm4 \\n\\t\" \"movq 24%1, %%mm5 \\n\\t\" \"movq %%mm0, %%mm2 \\n\\t\" \"movq %%mm1, %%mm3 \\n\\t\" \"movq %%mm4, %%mm6 \\n\\t\" \"movq %%mm5, %%mm7 \\n\\t\" STORE_BGR24_MMX :\"=m\"(*dest) :\"m\"(*s) :\"memory\"); dest += 24; s += 32; } __asm__ volatile(SFENCE:::\"memory\"); __asm__ volatile(EMMS:::\"memory\"); #endif while (s < end) { #if HAVE_BIGENDIAN /* RGB32 (= A,B,G,R) -> RGB24 (= R,G,B) */ s++; dest[2] = *s++; dest[1] = *s++; dest[0] = *s++; dest += 3; #else *dest++ = *s++; *dest++ = *s++; *dest++ = *s++; s++; #endif } }"} {"target": 0, "idx": 24597, "func": "static void bmdma_addr_write(void *opaque, target_phys_addr_t addr, uint64_t data, unsigned width) { BMDMAState *bm = opaque; int shift = addr * 8; uint32_t mask = (1ULL << (width * 8)) - 1; #ifdef DEBUG_IDE printf(\"%s: 0x%08x\\n\", __func__, (unsigned)data); #endif bm->addr &= ~(mask << shift); bm->addr |= ((data & mask) << shift) & ~3; }"} {"target": 0, "idx": 24600, "func": "int qemu_chr_fe_add_watch(CharDriverState *s, GIOCondition cond, GIOFunc func, void *user_data) { GSource *src; guint tag; if (s->chr_add_watch == NULL) { return -ENOSYS; } src = s->chr_add_watch(s, cond); if (!src) { return -EINVAL; } g_source_set_callback(src, (GSourceFunc)func, user_data, NULL); tag = g_source_attach(src, NULL); g_source_unref(src); return tag; }"} {"target": 0, "idx": 24602, "func": "uint64_t ldq_phys(target_phys_addr_t addr) { return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN); }"} {"target": 1, "idx": 24611, "func": "static ssize_t mp_dacl_getxattr(FsContext *ctx, const char *path, const char *name, void *value, size_t size) { char *buffer; ssize_t ret; buffer = rpath(ctx, path); ret = lgetxattr(buffer, MAP_ACL_DEFAULT, value, size); g_free(buffer); return ret; }"} {"target": 1, "idx": 24612, "func": "static void search_for_ms_mips(AACEncContext *s, ChannelElement *cpe) { int start = 0, i, w, w2, g; float M[128], S[128]; float *L34 = s->scoefs, *R34 = s->scoefs + 128, *M34 = s->scoefs + 128*2, *S34 = s->scoefs + 128*3; const float lambda = s->lambda; 
SingleChannelElement *sce0 = &cpe->ch[0]; SingleChannelElement *sce1 = &cpe->ch[1]; if (!cpe->common_window) return; for (w = 0; w < sce0->ics.num_windows; w += sce0->ics.group_len[w]) { start = 0; for (g = 0; g < sce0->ics.num_swb; g++) { if (!cpe->ch[0].zeroes[w*16+g] && !cpe->ch[1].zeroes[w*16+g]) { float dist1 = 0.0f, dist2 = 0.0f; for (w2 = 0; w2 < sce0->ics.group_len[w]; w2++) { FFPsyBand *band0 = &s->psy.ch[s->cur_channel+0].psy_bands[(w+w2)*16+g]; FFPsyBand *band1 = &s->psy.ch[s->cur_channel+1].psy_bands[(w+w2)*16+g]; float minthr = FFMIN(band0->threshold, band1->threshold); float maxthr = FFMAX(band0->threshold, band1->threshold); for (i = 0; i < sce0->ics.swb_sizes[g]; i+=4) { M[i ] = (sce0->coeffs[start+w2*128+i ] + sce1->coeffs[start+w2*128+i ]) * 0.5; M[i+1] = (sce0->coeffs[start+w2*128+i+1] + sce1->coeffs[start+w2*128+i+1]) * 0.5; M[i+2] = (sce0->coeffs[start+w2*128+i+2] + sce1->coeffs[start+w2*128+i+2]) * 0.5; M[i+3] = (sce0->coeffs[start+w2*128+i+3] + sce1->coeffs[start+w2*128+i+3]) * 0.5; S[i ] = M[i ] - sce1->coeffs[start+w2*128+i ]; S[i+1] = M[i+1] - sce1->coeffs[start+w2*128+i+1]; S[i+2] = M[i+2] - sce1->coeffs[start+w2*128+i+2]; S[i+3] = M[i+3] - sce1->coeffs[start+w2*128+i+3]; } abs_pow34_v(L34, sce0->coeffs+start+(w+w2)*128, sce0->ics.swb_sizes[g]); abs_pow34_v(R34, sce1->coeffs+start+(w+w2)*128, sce0->ics.swb_sizes[g]); abs_pow34_v(M34, M, sce0->ics.swb_sizes[g]); abs_pow34_v(S34, S, sce0->ics.swb_sizes[g]); dist1 += quantize_band_cost(s, &sce0->coeffs[start + (w+w2)*128], L34, sce0->ics.swb_sizes[g], sce0->sf_idx[(w+w2)*16+g], sce0->band_type[(w+w2)*16+g], lambda / band0->threshold, INFINITY, NULL, NULL, 0); dist1 += quantize_band_cost(s, &sce1->coeffs[start + (w+w2)*128], R34, sce1->ics.swb_sizes[g], sce1->sf_idx[(w+w2)*16+g], sce1->band_type[(w+w2)*16+g], lambda / band1->threshold, INFINITY, NULL, NULL, 0); dist2 += quantize_band_cost(s, M, M34, sce0->ics.swb_sizes[g], sce0->sf_idx[(w+w2)*16+g], sce0->band_type[(w+w2)*16+g], lambda / maxthr, INFINITY, NULL, NULL, 0); dist2 += quantize_band_cost(s, S, S34, sce1->ics.swb_sizes[g], sce1->sf_idx[(w+w2)*16+g], sce1->band_type[(w+w2)*16+g], lambda / minthr, INFINITY, NULL, NULL, 0); } cpe->ms_mask[w*16+g] = dist2 < dist1; } start += sce0->ics.swb_sizes[g]; } } }"} {"target": 1, "idx": 24615, "func": "static void fw_cfg_realize(DeviceState *dev, Error **errp) { FWCfgState *s = FW_CFG(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); if (s->ctl_iobase + 1 == s->data_iobase) { sysbus_add_io(sbd, s->ctl_iobase, &s->comb_iomem); } else { if (s->ctl_iobase) { sysbus_add_io(sbd, s->ctl_iobase, &s->ctl_iomem); } if (s->data_iobase) { sysbus_add_io(sbd, s->data_iobase, &s->data_iomem); } } }"} {"target": 1, "idx": 24620, "func": "static void vscsi_got_payload(VSCSIState *s, vscsi_crq *crq) { vscsi_req *req; int done; req = vscsi_get_req(s); if (req == NULL) { fprintf(stderr, \"VSCSI: Failed to get a request !\\n\"); return; } /* We only support a limited number of descriptors, we know * the ibmvscsi driver uses up to 10 max, so it should fit * in our 256 bytes IUs. If not we'll have to increase the size * of the structure. */ if (crq->s.IU_length > sizeof(union viosrp_iu)) { fprintf(stderr, \"VSCSI: SRP IU too long (%d bytes) !\\n\", crq->s.IU_length); return; } /* XXX Handle failure differently ? 
*/ if (spapr_tce_dma_read(&s->vdev, crq->s.IU_data_ptr, &req->iu, crq->s.IU_length)) { fprintf(stderr, \"vscsi_got_payload: DMA read failure !\\n\"); g_free(req); } memcpy(&req->crq, crq, sizeof(vscsi_crq)); if (crq->s.format == VIOSRP_MAD_FORMAT) { done = vscsi_handle_mad_req(s, req); } else { done = vscsi_handle_srp_req(s, req); } if (done) { vscsi_put_req(req); } }"} {"target": 1, "idx": 24632, "func": "static MTPData *usb_mtp_get_object_info(MTPState *s, MTPControl *c, MTPObject *o) { MTPData *d = usb_mtp_data_alloc(c); trace_usb_mtp_op_get_object_info(s->dev.addr, o->handle, o->path); usb_mtp_add_u32(d, QEMU_STORAGE_ID); usb_mtp_add_u16(d, o->format); usb_mtp_add_u16(d, 0); usb_mtp_add_u32(d, o->stat.st_size); usb_mtp_add_u16(d, 0); usb_mtp_add_u32(d, 0); usb_mtp_add_u32(d, 0); usb_mtp_add_u32(d, 0); usb_mtp_add_u32(d, 0); usb_mtp_add_u32(d, 0); usb_mtp_add_u32(d, 0); if (o->parent) { usb_mtp_add_u32(d, o->parent->handle); } else { usb_mtp_add_u32(d, 0); } if (o->format == FMT_ASSOCIATION) { usb_mtp_add_u16(d, 0x0001); usb_mtp_add_u32(d, 0x00000001); usb_mtp_add_u32(d, 0); } else { usb_mtp_add_u16(d, 0); usb_mtp_add_u32(d, 0); usb_mtp_add_u32(d, 0); } usb_mtp_add_str(d, o->name); usb_mtp_add_time(d, o->stat.st_ctime); usb_mtp_add_time(d, o->stat.st_mtime); usb_mtp_add_wstr(d, L\"\"); return d; }"} {"target": 1, "idx": 24635, "func": "static inline bool rom_order_compare(Rom *rom, Rom *item) { return (rom->as > item->as) || (rom->as == item->as && rom->addr >= item->addr); }"} {"target": 1, "idx": 24644, "func": "static int slb_lookup (CPUState *env, target_ulong eaddr, target_ulong *vsid, target_ulong *page_mask, int *attr) { target_phys_addr_t sr_base; target_ulong mask; uint64_t tmp64; uint32_t tmp; int n, ret; int slb_nr; ret = -5; sr_base = env->spr[SPR_ASR]; mask = 0x0000000000000000ULL; /* Avoid gcc warning */ #if 0 /* XXX: Fix this */ slb_nr = env->slb_nr; #else slb_nr = 32; #endif for (n = 0; n < slb_nr; n++) { tmp64 = ldq_phys(sr_base); if (tmp64 & 0x0000000008000000ULL) { /* SLB entry is valid */ switch (tmp64 & 0x0000000006000000ULL) { case 0x0000000000000000ULL: /* 256 MB segment */ mask = 0xFFFFFFFFF0000000ULL; break; case 0x0000000002000000ULL: /* 1 TB segment */ mask = 0xFFFF000000000000ULL; break; case 0x0000000004000000ULL: case 0x0000000006000000ULL: /* Reserved => segment is invalid */ continue; } if ((eaddr & mask) == (tmp64 & mask)) { /* SLB match */ tmp = ldl_phys(sr_base + 8); *vsid = ((tmp64 << 24) | (tmp >> 8)) & 0x0003FFFFFFFFFFFFULL; *page_mask = ~mask; *attr = tmp & 0xFF; ret = 0; break; } } sr_base += 12; } return ret; }"} {"target": 1, "idx": 24649, "func": "static uint8_t fw_cfg_read(FWCfgState *s) { int arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL); FWCfgEntry *e = (s->cur_entry == FW_CFG_INVALID) ? NULL : &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK]; uint8_t ret; if (s->cur_entry == FW_CFG_INVALID || !e->data || s->cur_offset >= e->len) ret = 0; else { ret = e->data[s->cur_offset++]; } trace_fw_cfg_read(s, ret); return ret; }"} {"target": 1, "idx": 24650, "func": "void qmp_block_resize(bool has_device, const char *device, bool has_node_name, const char *node_name, int64_t size, Error **errp) { Error *local_err = NULL; BlockBackend *blk = NULL; BlockDriverState *bs; AioContext *aio_context; int ret; bs = bdrv_lookup_bs(has_device ? device : NULL, has_node_name ? 
node_name : NULL, &local_err); if (local_err) { error_propagate(errp, local_err); return; } aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); if (!bdrv_is_first_non_filter(bs)) { error_setg(errp, QERR_FEATURE_DISABLED, \"resize\"); goto out; } if (size < 0) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, \"size\", \"a >0 size\"); goto out; } if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) { error_setg(errp, QERR_DEVICE_IN_USE, device); goto out; } blk = blk_new(BLK_PERM_RESIZE, BLK_PERM_ALL); ret = blk_insert_bs(blk, bs, errp); if (ret < 0) { goto out; } /* complete all in-flight operations before resizing the device */ bdrv_drain_all(); ret = blk_truncate(blk, size, errp); out: blk_unref(blk); aio_context_release(aio_context); }"} {"target": 1, "idx": 24651, "func": "static void clear_context(MpegEncContext *s) { int i, j, k; memset(&s->next_picture, 0, sizeof(s->next_picture)); memset(&s->last_picture, 0, sizeof(s->last_picture)); memset(&s->current_picture, 0, sizeof(s->current_picture)); memset(&s->new_picture, 0, sizeof(s->new_picture)); memset(s->thread_context, 0, sizeof(s->thread_context)); s->me.map = NULL; s->me.score_map = NULL; s->dct_error_sum = NULL; s->block = NULL; s->blocks = NULL; memset(s->pblocks, 0, sizeof(s->pblocks)); s->ac_val_base = NULL; s->ac_val[0] = s->ac_val[1] = s->ac_val[2] =NULL; s->sc.edge_emu_buffer = NULL; s->me.scratchpad = NULL; s->me.temp = s->sc.rd_scratchpad = s->sc.b_scratchpad = s->sc.obmc_scratchpad = NULL; s->parse_context.buffer = NULL; s->parse_context.buffer_size = 0; s->bitstream_buffer = NULL; s->allocated_bitstream_buffer_size = 0; s->picture = NULL; s->mb_type = NULL; s->p_mv_table_base = NULL; s->b_forw_mv_table_base = NULL; s->b_back_mv_table_base = NULL; s->b_bidir_forw_mv_table_base = NULL; s->b_bidir_back_mv_table_base = NULL; s->b_direct_mv_table_base = NULL; s->p_mv_table = NULL; s->b_forw_mv_table = NULL; s->b_back_mv_table = NULL; s->b_bidir_forw_mv_table = NULL; s->b_bidir_back_mv_table = NULL; s->b_direct_mv_table = NULL; for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { for (k = 0; k < 2; k++) { s->b_field_mv_table_base[i][j][k] = NULL; s->b_field_mv_table[i][j][k] = NULL; } s->b_field_select_table[i][j] = NULL; s->p_field_mv_table_base[i][j] = NULL; s->p_field_mv_table[i][j] = NULL; } s->p_field_select_table[i] = NULL; } s->dc_val_base = NULL; s->coded_block_base = NULL; s->mbintra_table = NULL; s->cbp_table = NULL; s->pred_dir_table = NULL; s->mbskip_table = NULL; s->er.error_status_table = NULL; s->er.er_temp_buffer = NULL; s->mb_index2xy = NULL; s->lambda_table = NULL; s->cplx_tab = NULL; s->bits_tab = NULL; }"} {"target": 1, "idx": 24654, "func": "static int rac_get_model256_sym(RangeCoder *c, Model256 *m) { int prob, prob2, helper, val; int start, end; int ssym; prob2 = c->range; c->range >>= MODEL_SCALE; helper = c->low / c->range; ssym = helper >> MODEL256_SEC_SCALE; val = m->secondary[ssym]; end = start = m->secondary[ssym + 1] + 1; while (end > val + 1) { ssym = (end + val) >> 1; if (m->freqs[ssym] <= helper) { end = start; val = ssym; } else { end = (end + val) >> 1; start = ssym; } } prob = m->freqs[val] * c->range; if (val != 255) prob2 = m->freqs[val + 1] * c->range; c->low -= prob; c->range = prob2 - prob; if (c->range < RAC_BOTTOM) rac_normalise(c); model256_update(m, val); return val; }"} {"target": 0, "idx": 24659, "func": "int ff_replaygain_export(AVStream *st, AVDictionary *metadata) { const AVDictionaryEntry *tg, *tp, *ag, *ap; tg = av_dict_get(metadata, 
\"REPLAYGAIN_TRACK_GAIN\", NULL, 0); tp = av_dict_get(metadata, \"REPLAYGAIN_TRACK_PEAK\", NULL, 0); ag = av_dict_get(metadata, \"REPLAYGAIN_ALBUM_GAIN\", NULL, 0); ap = av_dict_get(metadata, \"REPLAYGAIN_ALBUM_PEAK\", NULL, 0); return replaygain_export(st, tg ? tg->value : NULL, tp ? tp->value : NULL, ag ? ag->value : NULL, ap ? ap->value : NULL); }"} {"target": 0, "idx": 24673, "func": "static int decode_end(AVCodecContext * avctx) { KmvcContext *const c = (KmvcContext *) avctx->priv_data; if (c->frm0) av_free(c->frm0); if (c->frm1) av_free(c->frm1); if (c->pic.data[0]) avctx->release_buffer(avctx, &c->pic); return 0; }"} {"target": 0, "idx": 24682, "func": "static int cow_update_bitmap(BlockDriverState *bs, int64_t sector_num, int nb_sectors) { int64_t bitnum = sector_num + sizeof(struct cow_header_v2) * 8; uint64_t offset = (bitnum / 8) & -BDRV_SECTOR_SIZE; bool first = true; int sector_bits; for ( ; nb_sectors; bitnum += sector_bits, nb_sectors -= sector_bits, offset += BDRV_SECTOR_SIZE) { int ret, set; uint8_t bitmap[BDRV_SECTOR_SIZE]; bitnum &= BITS_PER_BITMAP_SECTOR - 1; sector_bits = MIN(nb_sectors, BITS_PER_BITMAP_SECTOR - bitnum); ret = bdrv_pread(bs->file, offset, &bitmap, sizeof(bitmap)); if (ret < 0) { return ret; } /* Skip over any already set bits */ set = cow_find_streak(bitmap, 1, bitnum, sector_bits); bitnum += set; sector_bits -= set; nb_sectors -= set; if (!sector_bits) { continue; } if (first) { ret = bdrv_flush(bs->file); if (ret < 0) { return ret; } first = false; } cow_set_bits(bitmap, bitnum, sector_bits); ret = bdrv_pwrite(bs->file, offset, &bitmap, sizeof(bitmap)); if (ret < 0) { return ret; } } return 0; }"} {"target": 0, "idx": 24688, "func": "static void ff_h264_idct_add16_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){ int i; for(i=0; i<16; i++){ int nnz = nnzc[ scan8[i] ]; if(nnz){ if(nnz==1 && block[i*16]) ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride); else ff_h264_idct_add_mmx (dst + block_offset[i], block + i*16, stride); } } }"} {"target": 0, "idx": 24695, "func": "void op_mtc0_ebase (void) { /* vectored interrupts not implemented */ /* Multi-CPU not implemented */ env->CP0_EBase = (int32_t)0x80000000 | (T0 & 0x3FFFF000); RETURN(); }"} {"target": 0, "idx": 24699, "func": "static void check_cpu_flag(const char *name, int flag) { int old_cpu_flag = state.cpu_flag; flag |= old_cpu_flag; av_set_cpu_flags_mask(flag); state.cpu_flag = av_get_cpu_flags(); if (!flag || state.cpu_flag != old_cpu_flag) { int i; state.cpu_flag_name = name; for (i = 0; tests[i].func; i++) { state.current_test_name = tests[i].name; tests[i].func(); } } }"} {"target": 0, "idx": 24711, "func": "bool replay_checkpoint(ReplayCheckpoint checkpoint) { bool res = false; assert(EVENT_CHECKPOINT + checkpoint <= EVENT_CHECKPOINT_LAST); replay_save_instructions(); if (!replay_file) { return true; } replay_mutex_lock(); if (replay_mode == REPLAY_MODE_PLAY) { if (replay_next_event_is(EVENT_CHECKPOINT + checkpoint)) { replay_finish_event(); } else if (replay_data_kind != EVENT_ASYNC) { res = false; goto out; } replay_read_events(checkpoint); /* replay_read_events may leave some unread events. 
Return false if not all of the events associated with checkpoint were processed */ res = replay_data_kind != EVENT_ASYNC; } else if (replay_mode == REPLAY_MODE_RECORD) { replay_put_event(EVENT_CHECKPOINT + checkpoint); replay_save_events(checkpoint); res = true; } out: replay_mutex_unlock(); return res; }"} {"target": 0, "idx": 24716, "func": "static void openrisc_sim_machine_init(MachineClass *mc) { mc->desc = \"or1k simulation\"; mc->init = openrisc_sim_init; mc->max_cpus = 1; mc->is_default = 1; }"} {"target": 0, "idx": 24726, "func": "static int coroutine_fn qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len) { /* Allocate buffer for zero writes */ if (acb->flags & QED_AIOCB_ZERO) { struct iovec *iov = acb->qiov->iov; if (!iov->iov_base) { iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len); if (iov->iov_base == NULL) { return -ENOMEM; } memset(iov->iov_base, 0, iov->iov_len); } } /* Calculate the I/O vector */ acb->cur_cluster = offset; qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); /* Do the actual write */ return qed_aio_write_main(acb); }"} {"target": 0, "idx": 24727, "func": "Aml *aml_add(Aml *arg1, Aml *arg2) { Aml *var = aml_opcode(0x72 /* AddOp */); aml_append(var, arg1); aml_append(var, arg2); build_append_byte(var->buf, 0x00 /* NullNameOp */); return var; }"} {"target": 0, "idx": 24728, "func": "static int menelaus_load(QEMUFile *f, void *opaque, int version_id) { MenelausState *s = (MenelausState *) opaque; s->firstbyte = qemu_get_be32(f); qemu_get_8s(f, &s->reg); if (s->rtc.ctrl & 1) /* RTC_EN */ menelaus_rtc_stop(s); qemu_get_8s(f, &s->vcore[0]); qemu_get_8s(f, &s->vcore[1]); qemu_get_8s(f, &s->vcore[2]); qemu_get_8s(f, &s->vcore[3]); qemu_get_8s(f, &s->vcore[4]); qemu_get_8s(f, &s->dcdc[0]); qemu_get_8s(f, &s->dcdc[1]); qemu_get_8s(f, &s->dcdc[2]); qemu_get_8s(f, &s->ldo[0]); qemu_get_8s(f, &s->ldo[1]); qemu_get_8s(f, &s->ldo[2]); qemu_get_8s(f, &s->ldo[3]); qemu_get_8s(f, &s->ldo[4]); qemu_get_8s(f, &s->ldo[5]); qemu_get_8s(f, &s->ldo[6]); qemu_get_8s(f, &s->ldo[7]); qemu_get_8s(f, &s->sleep[0]); qemu_get_8s(f, &s->sleep[1]); qemu_get_8s(f, &s->osc); qemu_get_8s(f, &s->detect); qemu_get_be16s(f, &s->mask); qemu_get_be16s(f, &s->status); qemu_get_8s(f, &s->dir); qemu_get_8s(f, &s->inputs); qemu_get_8s(f, &s->outputs); qemu_get_8s(f, &s->bbsms); qemu_get_8s(f, &s->pull[0]); qemu_get_8s(f, &s->pull[1]); qemu_get_8s(f, &s->pull[2]); qemu_get_8s(f, &s->pull[3]); qemu_get_8s(f, &s->mmc_ctrl[0]); qemu_get_8s(f, &s->mmc_ctrl[1]); qemu_get_8s(f, &s->mmc_ctrl[2]); qemu_get_8s(f, &s->mmc_debounce); qemu_get_8s(f, &s->rtc.ctrl); qemu_get_be16s(f, &s->rtc.comp); s->rtc.next = qemu_get_be16(f); tm_get(f, &s->rtc.new); tm_get(f, &s->rtc.alm); s->pwrbtn_state = qemu_get_byte(f); menelaus_alm_update(s); menelaus_update(s); if (s->rtc.ctrl & 1) /* RTC_EN */ menelaus_rtc_start(s); i2c_slave_load(f, &s->i2c); return 0; }"} {"target": 0, "idx": 24743, "func": "static void find_completion(const char *cmdline) { const char *cmdname; char *args[MAX_ARGS]; int nb_args, i, len; const char *ptype, *str; term_cmd_t *cmd; parse_cmdline(cmdline, &nb_args, args); #ifdef DEBUG_COMPLETION for(i = 0; i < nb_args; i++) { term_printf(\"arg%d = '%s'\\n\", i, (char *)args[i]); } #endif /* if the line ends with a space, it means we want to complete the next arg */ len = strlen(cmdline); if (len > 0 && isspace(cmdline[len - 1])) { if (nb_args >= MAX_ARGS) return; args[nb_args++] = qemu_strdup(\"\"); } if (nb_args <= 1) { /* command completion */ if (nb_args == 0) 
cmdname = \"\"; else cmdname = args[0]; completion_index = strlen(cmdname); for(cmd = term_cmds; cmd->name != NULL; cmd++) { cmd_completion(cmdname, cmd->name); } } else { /* find the command */ for(cmd = term_cmds; cmd->name != NULL; cmd++) { if (compare_cmd(args[0], cmd->name)) goto found; } return; found: ptype = cmd->args_type; for(i = 0; i < nb_args - 2; i++) { if (*ptype != '\\0') { ptype++; while (*ptype == '?') ptype++; } } str = args[nb_args - 1]; switch(*ptype) { case 'F': /* file completion */ completion_index = strlen(str); file_completion(str); break; case 'B': /* block device name completion */ completion_index = strlen(str); bdrv_iterate(block_completion_it, (void *)str); break; default: break; } } for(i = 0; i < nb_args; i++) qemu_free(args[i]); }"} {"target": 0, "idx": 24757, "func": "static int decode_packet(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket* avpkt) { WMAProDecodeCtx *s = avctx->priv_data; GetBitContext* gb = &s->pgb; const uint8_t* buf = avpkt->data; int buf_size = avpkt->size; int num_bits_prev_frame; int packet_sequence_number; *got_frame_ptr = 0; if (s->packet_done || s->packet_loss) { s->packet_done = 0; /** sanity check for the buffer length */ if (buf_size < avctx->block_align) return 0; s->next_packet_start = buf_size - avctx->block_align; buf_size = avctx->block_align; s->buf_bit_size = buf_size << 3; /** parse packet header */ init_get_bits(gb, buf, s->buf_bit_size); packet_sequence_number = get_bits(gb, 4); skip_bits(gb, 2); /** get number of bits that need to be added to the previous frame */ num_bits_prev_frame = get_bits(gb, s->log2_frame_size); av_dlog(avctx, \"packet[%d]: nbpf %x\\n\", avctx->frame_number, num_bits_prev_frame); /** check for packet loss */ if (!s->packet_loss && ((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) { s->packet_loss = 1; av_log(avctx, AV_LOG_ERROR, \"Packet loss detected! 
seq %x vs %x\\n\", s->packet_sequence_number, packet_sequence_number); } s->packet_sequence_number = packet_sequence_number; if (num_bits_prev_frame > 0) { int remaining_packet_bits = s->buf_bit_size - get_bits_count(gb); if (num_bits_prev_frame >= remaining_packet_bits) { num_bits_prev_frame = remaining_packet_bits; s->packet_done = 1; } /** append the previous frame data to the remaining data from the previous packet to create a full frame */ save_bits(s, gb, num_bits_prev_frame, 1); av_dlog(avctx, \"accumulated %x bits of frame data\\n\", s->num_saved_bits - s->frame_offset); /** decode the cross packet frame if it is valid */ if (!s->packet_loss) decode_frame(s, data, got_frame_ptr); } else if (s->num_saved_bits - s->frame_offset) { av_dlog(avctx, \"ignoring %x previously saved bits\\n\", s->num_saved_bits - s->frame_offset); } if (s->packet_loss) { /** reset number of saved bits so that the decoder does not start to decode incomplete frames in the s->len_prefix == 0 case */ s->num_saved_bits = 0; s->packet_loss = 0; } } else { int frame_size; s->buf_bit_size = (avpkt->size - s->next_packet_start) << 3; init_get_bits(gb, avpkt->data, s->buf_bit_size); skip_bits(gb, s->packet_offset); if (s->len_prefix && remaining_bits(s, gb) > s->log2_frame_size && (frame_size = show_bits(gb, s->log2_frame_size)) && frame_size <= remaining_bits(s, gb)) { save_bits(s, gb, frame_size, 0); s->packet_done = !decode_frame(s, data, got_frame_ptr); } else if (!s->len_prefix && s->num_saved_bits > get_bits_count(&s->gb)) { /** when the frames do not have a length prefix, we don't know the compressed length of the individual frames however, we know what part of a new packet belongs to the previous frame therefore we save the incoming packet first, then we append the \"previous frame\" data from the next packet so that we get a buffer that only contains full frames */ s->packet_done = !decode_frame(s, data, got_frame_ptr); } else s->packet_done = 1; } if (s->packet_done && !s->packet_loss && remaining_bits(s, gb) > 0) { /** save the rest of the data so that it can be decoded with the next packet */ save_bits(s, gb, remaining_bits(s, gb), 0); } s->packet_offset = get_bits_count(gb) & 7; if (s->packet_loss) return AVERROR_INVALIDDATA; return get_bits_count(gb) >> 3; }"} {"target": 0, "idx": 24758, "func": "static int pci_cirrus_vga_initfn(PCIDevice *dev) { PCICirrusVGAState *d = DO_UPCAST(PCICirrusVGAState, dev, dev); CirrusVGAState *s = &d->cirrus_vga; PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); int16_t device_id = pc->device_id; /* setup VGA */ vga_common_init(&s->vga); cirrus_init_common(s, device_id, 1, pci_address_space(dev), pci_address_space_io(dev)); s->vga.con = graphic_console_init(s->vga.update, s->vga.invalidate, s->vga.screen_dump, s->vga.text_update, &s->vga); /* setup PCI */ memory_region_init(&s->pci_bar, \"cirrus-pci-bar0\", 0x2000000); /* XXX: add byte swapping apertures */ memory_region_add_subregion(&s->pci_bar, 0, &s->cirrus_linear_io); memory_region_add_subregion(&s->pci_bar, 0x1000000, &s->cirrus_linear_bitblt_io); /* setup memory space */ /* memory #0 LFB */ /* memory #1 memory-mapped I/O */ /* XXX: s->vga.vram_size must be a power of two */ pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->pci_bar); if (device_id == CIRRUS_ID_CLGD5446) { pci_register_bar(&d->dev, 1, 0, &s->cirrus_mmio_io); } return 0; }"} {"target": 1, "idx": 24763, "func": "static int acpi_pcihp_get_bsel(PCIBus *bus) { QObject *o = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL); 
int64_t bsel = -1; if (o) { bsel = qint_get_int(qobject_to_qint(o)); } if (bsel < 0) { return -1; } return bsel; }"} {"target": 1, "idx": 24764, "func": "void vnc_client_write(void *opaque) { long ret; VncState *vs = opaque; #ifdef CONFIG_VNC_TLS if (vs->tls.session) { ret = gnutls_write(vs->tls.session, vs->output.buffer, vs->output.offset); if (ret < 0) { if (ret == GNUTLS_E_AGAIN) errno = EAGAIN; else errno = EIO; ret = -1; } } else #endif /* CONFIG_VNC_TLS */ ret = send(vs->csock, vs->output.buffer, vs->output.offset, 0); ret = vnc_client_io_error(vs, ret, socket_error()); if (!ret) return; memmove(vs->output.buffer, vs->output.buffer + ret, (vs->output.offset - ret)); vs->output.offset -= ret; if (vs->output.offset == 0) { qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, NULL, vs); } }"} {"target": 0, "idx": 24790, "func": "static int avi_read_packet(AVFormatContext *s, AVPacket *pkt) { AVIContext *avi = s->priv_data; ByteIOContext *pb = &s->pb; int n, d[8], size; offset_t i; void* dstr; memset(d, -1, sizeof(int)*8); if (avi->dv_demux) { size = dv_get_packet(avi->dv_demux, pkt); if (size >= 0) return size; } for(i=url_ftell(pb); !url_feof(pb); i++) { int j; if (i >= avi->movi_end) { if (avi->is_odml) { url_fskip(pb, avi->riff_end - i); avi->riff_end = avi->movi_end = url_filesize(url_fileno(pb)); } else break; } for(j=0; j<7; j++) d[j]= d[j+1]; d[7]= get_byte(pb); size= d[4] + (d[5]<<8) + (d[6]<<16) + (d[7]<<24); //parse ix## n= (d[2] - '0') * 10 + (d[3] - '0'); if( d[2] >= '0' && d[2] <= '9' && d[3] >= '0' && d[3] <= '9' && d[0] == 'i' && d[1] == 'x' && n < s->nb_streams && i + size <= avi->movi_end){ url_fskip(pb, size); } //parse JUNK if(d[0] == 'J' && d[1] == 'U' && d[2] == 'N' && d[3] == 'K' && i + size <= avi->movi_end) { url_fskip(pb, size); } //parse ##dc/##wb n= (d[0] - '0') * 10 + (d[1] - '0'); if( d[0] >= '0' && d[0] <= '9' && d[1] >= '0' && d[1] <= '9' && ((d[2] == 'd' && d[3] == 'c') || (d[2] == 'w' && d[3] == 'b') || (d[2] == 'd' && d[3] == 'b') || (d[2] == '_' && d[3] == '_')) && n < s->nb_streams && i + size <= avi->movi_end) { av_new_packet(pkt, size); get_buffer(pb, pkt->data, size); if (size & 1) { get_byte(pb); size++; } if (avi->dv_demux) { dstr = pkt->destruct; size = dv_produce_packet(avi->dv_demux, pkt, pkt->data, pkt->size); pkt->destruct = dstr; pkt->flags |= PKT_FLAG_KEY; } else { AVStream *st; AVIStream *ast; st = s->streams[n]; ast = st->priv_data; /* XXX: how to handle B frames in avi ? */ pkt->dts = ast->frame_offset; // pkt->dts += ast->start; if(ast->sample_size) pkt->dts /= ast->sample_size; //av_log(NULL, AV_LOG_DEBUG, \"dts:%Ld offset:%d %d/%d smpl_siz:%d base:%d st:%d size:%d\\n\", pkt->dts, ast->frame_offset, ast->scale, ast->rate, ast->sample_size, AV_TIME_BASE, n, size); pkt->stream_index = n; /* FIXME: We really should read index for that */ if (st->codec.codec_type == CODEC_TYPE_VIDEO) { if (ast->frame_offset < ast->nb_index_entries) { if (ast->index_entries[ast->frame_offset].flags & AVIIF_INDEX) pkt->flags |= PKT_FLAG_KEY; } else { /* if no index, better to say that all frames are key frames */ pkt->flags |= PKT_FLAG_KEY; } } else { pkt->flags |= PKT_FLAG_KEY; } if(ast->sample_size) ast->frame_offset += pkt->size; else ast->frame_offset++; } return size; } } return -1; }"} {"target": 0, "idx": 24806, "func": "static int decode_slice(struct AVCodecContext *avctx, void *arg){ H264Context *h = *(void**)arg; MpegEncContext * const s = &h->s; const int part_mask= s->partitioned_frame ? 
(AC_END|AC_ERROR) : 0x7F; int lf_x_start = s->mb_x; s->mb_skip_run= -1; h->is_complex = FRAME_MBAFF || s->picture_structure != PICT_FRAME || s->codec_id != CODEC_ID_H264 || (CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY)); if( h->pps.cabac ) { /* realign */ align_get_bits( &s->gb ); /* init cabac */ ff_init_cabac_states( &h->cabac); ff_init_cabac_decoder( &h->cabac, s->gb.buffer + get_bits_count(&s->gb)/8, (get_bits_left(&s->gb) + 7)/8); ff_h264_init_cabac_states(h); for(;;){ //START_TIMER int ret = ff_h264_decode_mb_cabac(h); int eos; //STOP_TIMER(\"decode_mb_cabac\") if(ret>=0) ff_h264_hl_decode_mb(h); if( ret >= 0 && FRAME_MBAFF ) { //FIXME optimal? or let mb_decode decode 16x32 ? s->mb_y++; ret = ff_h264_decode_mb_cabac(h); if(ret>=0) ff_h264_hl_decode_mb(h); s->mb_y--; } eos = get_cabac_terminate( &h->cabac ); if((s->workaround_bugs & FF_BUG_TRUNCATED) && h->cabac.bytestream > h->cabac.bytestream_end + 2){ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); if (s->mb_x >= lf_x_start) loop_filter(h, lf_x_start, s->mb_x + 1); return 0; } if( ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 2) { av_log(h->s.avctx, AV_LOG_ERROR, \"error while decoding MB %d %d, bytestream (%td)\\n\", s->mb_x, s->mb_y, h->cabac.bytestream_end - h->cabac.bytestream); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask); return -1; } if( ++s->mb_x >= s->mb_width ) { loop_filter(h, lf_x_start, s->mb_x); s->mb_x = lf_x_start = 0; decode_finish_row(h); ++s->mb_y; if(FIELD_OR_MBAFF_PICTURE) { ++s->mb_y; if(FRAME_MBAFF && s->mb_y < s->mb_height) predict_field_decoding_flag(h); } } if( eos || s->mb_y >= s->mb_height ) { tprintf(s->avctx, \"slice end %d %d\\n\", get_bits_count(&s->gb), s->gb.size_in_bits); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); if (s->mb_x > lf_x_start) loop_filter(h, lf_x_start, s->mb_x); return 0; } } } else { for(;;){ int ret = ff_h264_decode_mb_cavlc(h); if(ret>=0) ff_h264_hl_decode_mb(h); if(ret>=0 && FRAME_MBAFF){ //FIXME optimal? or let mb_decode decode 16x32 ? 
s->mb_y++; ret = ff_h264_decode_mb_cavlc(h); if(ret>=0) ff_h264_hl_decode_mb(h); s->mb_y--; } if(ret<0){ av_log(h->s.avctx, AV_LOG_ERROR, \"error while decoding MB %d %d\\n\", s->mb_x, s->mb_y); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask); return -1; } if(++s->mb_x >= s->mb_width){ loop_filter(h, lf_x_start, s->mb_x); s->mb_x = lf_x_start = 0; decode_finish_row(h); ++s->mb_y; if(FIELD_OR_MBAFF_PICTURE) { ++s->mb_y; if(FRAME_MBAFF && s->mb_y < s->mb_height) predict_field_decoding_flag(h); } if(s->mb_y >= s->mb_height){ tprintf(s->avctx, \"slice end %d %d\\n\", get_bits_count(&s->gb), s->gb.size_in_bits); if(get_bits_count(&s->gb) == s->gb.size_in_bits ) { ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); return 0; }else{ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); return -1; } } } if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->mb_skip_run<=0){ tprintf(s->avctx, \"slice end %d %d\\n\", get_bits_count(&s->gb), s->gb.size_in_bits); if(get_bits_count(&s->gb) == s->gb.size_in_bits ){ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); if (s->mb_x > lf_x_start) loop_filter(h, lf_x_start, s->mb_x); return 0; }else{ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask); return -1; } } } } #if 0 for(;s->mb_y < s->mb_height; s->mb_y++){ for(;s->mb_x < s->mb_width; s->mb_x++){ int ret= decode_mb(h); ff_h264_hl_decode_mb(h); if(ret<0){ av_log(s->avctx, AV_LOG_ERROR, \"error while decoding MB %d %d\\n\", s->mb_x, s->mb_y); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask); return -1; } if(++s->mb_x >= s->mb_width){ s->mb_x=0; if(++s->mb_y >= s->mb_height){ if(get_bits_count(s->gb) == s->gb.size_in_bits){ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); return 0; }else{ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); return -1; } } } if(get_bits_count(s->?gb) >= s->gb?.size_in_bits){ if(get_bits_count(s->gb) == s->gb.size_in_bits){ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); return 0; }else{ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask); return -1; } } } s->mb_x=0; ff_draw_horiz_band(s, 16*s->mb_y, 16); } #endif return -1; //not reached }"} {"target": 1, "idx": 24813, "func": "int spapr_tce_dma_write(VIOsPAPRDevice *dev, uint64_t taddr, const void *buf, uint32_t size) { #ifdef DEBUG_TCE fprintf(stderr, \"spapr_tce_dma_write taddr=0x%llx size=0x%x\\n\", (unsigned long long)taddr, size); #endif while (size) { uint64_t tce; uint32_t lsize; uint64_t txaddr; /* Check if we are in bound */ if (taddr >= dev->rtce_window_size) { #ifdef DEBUG_TCE fprintf(stderr, \"spapr_tce_dma_write out of bounds\\n\"); #endif return H_DEST_PARM; tce = dev->rtce_table[taddr >> SPAPR_VIO_TCE_PAGE_SHIFT].tce; /* How much til end of page ? 
*/ lsize = MIN(size, ((~taddr) & SPAPR_VIO_TCE_PAGE_MASK) + 1); /* Check TCE */ if (!(tce & 2)) { return H_DEST_PARM; /* Translate */ txaddr = (tce & ~SPAPR_VIO_TCE_PAGE_MASK) | (taddr & SPAPR_VIO_TCE_PAGE_MASK); #ifdef DEBUG_TCE fprintf(stderr, \" -> write to txaddr=0x%llx, size=0x%x\\n\", (unsigned long long)txaddr, lsize); #endif /* Do it */ cpu_physical_memory_write(txaddr, buf, lsize); buf += lsize; taddr += lsize; size -= lsize;"} {"target": 1, "idx": 24815, "func": "void updateMMXDitherTables(SwsContext *c, int dstY, int lumBufIndex, int chrBufIndex, int lastInLumBuf, int lastInChrBuf) { const int dstH= c->dstH; const int flags= c->flags; int16_t **lumPixBuf= c->lumPixBuf; int16_t **chrUPixBuf= c->chrUPixBuf; int16_t **alpPixBuf= c->alpPixBuf; const int vLumBufSize= c->vLumBufSize; const int vChrBufSize= c->vChrBufSize; int16_t *vLumFilterPos= c->vLumFilterPos; int16_t *vChrFilterPos= c->vChrFilterPos; int16_t *vLumFilter= c->vLumFilter; int16_t *vChrFilter= c->vChrFilter; int32_t *lumMmxFilter= c->lumMmxFilter; int32_t *chrMmxFilter= c->chrMmxFilter; int32_t av_unused *alpMmxFilter= c->alpMmxFilter; const int vLumFilterSize= c->vLumFilterSize; const int vChrFilterSize= c->vChrFilterSize; const int chrDstY= dstY>>c->chrDstVSubSample; const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input c->blueDither= ff_dither8[dstY&1]; if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555) c->greenDither= ff_dither8[dstY&1]; else c->greenDither= ff_dither4[dstY&1]; c->redDither= ff_dither8[(dstY+1)&1]; if (dstY < dstH - 2) { const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; const int16_t **chrUSrcPtr= (const int16_t **) chrUPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL; int i; if (flags & SWS_ACCURATE_RND) { int s= APCK_SIZE / 8; for (i=0; i1)]; lumMmxFilter[s*i+APCK_COEF/4 ]= lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ] + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0); if (CONFIG_SWSCALE_ALPHA && alpPixBuf) { *(const void**)&alpMmxFilter[s*i ]= alpSrcPtr[i ]; *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4 ]= alpSrcPtr[i+(vLumFilterSize>1)]; alpMmxFilter[s*i+APCK_COEF/4 ]= alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4 ]; for (i=0; i1)]; chrMmxFilter[s*i+APCK_COEF/4 ]= chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ] + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0); } else { for (i=0; ipb); if(ff_mpa_check_header(v) < 0) return; ff_mpegaudio_decode_header(&c, v); if(c.layer != 3) return; /* Check for Xing / Info tag */ url_fseek(s->pb, xing_offtbl[c.lsf == 1][c.nb_channels == 1], SEEK_CUR); v = get_be32(s->pb); if(v == MKBETAG('X', 'i', 'n', 'g') || v == MKBETAG('I', 'n', 'f', 'o')) { v = get_be32(s->pb); if(v & 0x1) frames = get_be32(s->pb); } /* Check for VBRI tag (always 32 bytes after end of mpegaudio header) */ url_fseek(s->pb, base + 4 + 32, SEEK_SET); v = get_be32(s->pb); if(v == MKBETAG('V', 'B', 'R', 'I')) { /* Check tag version */ if(get_be16(s->pb) == 1) { /* skip delay, quality and total bytes */ url_fseek(s->pb, 8, SEEK_CUR); frames = get_be32(s->pb); } } if(frames < 0) return; spf = c.lsf ? 
576 : 1152; /* Samples per frame, layer 3 */ st->duration = av_rescale_q(frames, (AVRational){spf, c.sample_rate}, st->time_base); }"} {"target": 1, "idx": 24830, "func": "void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd, void *opaque) { VMStateField *field = vmsd->fields; if (vmsd->pre_save) { vmsd->pre_save(opaque); while (field->name) { if (!field->field_exists || field->field_exists(opaque, vmsd->version_id)) { void *base_addr = vmstate_base_addr(opaque, field); int i, n_elems = vmstate_n_elems(opaque, field); int size = vmstate_size(opaque, field); for (i = 0; i < n_elems; i++) { void *addr = base_addr + size * i; if (field->flags & VMS_ARRAY_OF_POINTER) { addr = *(void **)addr; if (field->flags & VMS_STRUCT) { vmstate_save_state(f, field->vmsd, addr); field->info->put(f, addr, size); field++; vmstate_subsection_save(f, vmsd, opaque);"} {"target": 0, "idx": 24842, "func": "static int v9fs_synth_closedir(FsContext *ctx, V9fsFidOpenState *fs) { V9fsSynthOpenState *synth_open = fs->private; V9fsSynthNode *node = synth_open->node; node->open_count--; g_free(synth_open); fs->private = NULL; return 0; }"} {"target": 0, "idx": 24859, "func": "static void mdct512(int32_t *out, int16_t *in) { int i, re, im, re1, im1; int16_t rot[MDCT_SAMPLES]; IComplex x[MDCT_SAMPLES/4]; /* shift to simplify computations */ for (i = 0; i < MDCT_SAMPLES/4; i++) rot[i] = -in[i + 3*MDCT_SAMPLES/4]; for (;i < MDCT_SAMPLES; i++) rot[i] = in[i - MDCT_SAMPLES/4]; /* pre rotation */ for (i = 0; i < MDCT_SAMPLES/4; i++) { re = ((int)rot[ 2*i] - (int)rot[MDCT_SAMPLES -1-2*i]) >> 1; im = -((int)rot[MDCT_SAMPLES/2+2*i] - (int)rot[MDCT_SAMPLES/2-1-2*i]) >> 1; CMUL(x[i].re, x[i].im, re, im, -xcos1[i], xsin1[i]); } fft(x, MDCT_NBITS - 2); /* post rotation */ for (i = 0; i < MDCT_SAMPLES/4; i++) { re = x[i].re; im = x[i].im; CMUL(re1, im1, re, im, xsin1[i], xcos1[i]); out[ 2*i] = im1; out[MDCT_SAMPLES/2-1-2*i] = re1; } }"} {"target": 0, "idx": 24863, "func": "static int blk_root_inactivate(BdrvChild *child) { BlockBackend *blk = child->opaque; if (blk->disable_perm) { return 0; } /* Only inactivate BlockBackends for guest devices (which are inactive at * this point because the VM is stopped) and unattached monitor-owned * BlockBackends. If there is still any other user like a block job, then * we simply can't inactivate the image. */ if (!blk->dev && !blk_name(blk)[0]) { return -EPERM; } blk->disable_perm = true; if (blk->root) { bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort); } return 0; }"} {"target": 0, "idx": 24865, "func": "static inline void vmsvga_update_rect_flush(struct vmsvga_state_s *s) { struct vmsvga_rect_s *rect; if (s->invalidated) { s->redraw_fifo_first = s->redraw_fifo_last; return; } /* Overlapping region updates can be optimised out here - if someone * knows a smart algorithm to do that, please share. 
*/ while (s->redraw_fifo_first != s->redraw_fifo_last) { rect = &s->redraw_fifo[s->redraw_fifo_first ++]; s->redraw_fifo_first &= REDRAW_FIFO_LEN - 1; vmsvga_update_rect(s, rect->x, rect->y, rect->w, rect->h); } }"} {"target": 0, "idx": 24872, "func": "length_f(int argc, char **argv) { int64_t size; char s1[64]; size = bdrv_getlength(bs); if (size < 0) { printf(\"getlength: %s\", strerror(size)); return 0; } cvtstr(size, s1, sizeof(s1)); printf(\"%s\\n\", s1); return 0; }"} {"target": 0, "idx": 24885, "func": "static void kvm_arm_gic_get(GICState *s) { /* TODO: there isn't currently a kernel interface to get the GIC state */ }"} {"target": 1, "idx": 24903, "func": "static int flic_decode_frame_15_16BPP(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { /* Note, the only difference between the 15Bpp and 16Bpp */ /* Format is the pixel format, the packets are processed the same. */ FlicDecodeContext *s = avctx->priv_data; int stream_ptr = 0; int pixel_ptr; unsigned char palette_idx1; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j; int lines; int compressed_lines; signed short line_packets; int y_ptr; int byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; int pixel; unsigned int pixel_limit; s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame) < 0) { av_log(avctx, AV_LOG_ERROR, \"reget_buffer() failed\\n\"); return -1; } pixels = s->frame.data[0]; pixel_limit = s->avctx->height * s->frame.linesize[0]; frame_size = AV_RL32(&buf[stream_ptr]); stream_ptr += 6; /* skip the magic number */ num_chunks = AV_RL16(&buf[stream_ptr]); stream_ptr += 10; /* skip padding */ frame_size -= 16; /* iterate through the chunks */ while ((frame_size > 0) && (num_chunks > 0)) { chunk_size = AV_RL32(&buf[stream_ptr]); stream_ptr += 4; chunk_type = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: /* For some reason, it seems that non-palettized flics do * include one of these chunks in their first frame. * Why I do not know, it seems rather extraneous. 
*/ /* av_log(avctx, AV_LOG_ERROR, \"Unexpected Palette chunk %d in non-paletised FLC\\n\",chunk_type);*/ stream_ptr = stream_ptr + chunk_size - 6; break; case FLI_DELTA: case FLI_DTA_LC: y_ptr = 0; compressed_lines = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; while (compressed_lines > 0) { line_packets = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; if (line_packets < 0) { line_packets = -line_packets; y_ptr += line_packets * s->frame.linesize[0]; } else { compressed_lines--; pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = buf[stream_ptr++]; pixel_ptr += (pixel_skip*2); /* Pixel is 2 bytes wide */ pixel_countdown -= pixel_skip; byte_run = (signed char)(buf[stream_ptr++]); if (byte_run < 0) { byte_run = -byte_run; pixel = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown -= 2) { *((signed short*)(&pixels[pixel_ptr])) = pixel; pixel_ptr += 2; } } else { CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; pixel_ptr += 2; } } } y_ptr += s->frame.linesize[0]; } } break; case FLI_LC: av_log(avctx, AV_LOG_ERROR, \"Unexpected FLI_LC chunk in non-paletised FLC\\n\"); stream_ptr = stream_ptr + chunk_size - 6; break; case FLI_BLACK: /* set the whole frame to 0x0000 which is black in both 15Bpp and 16Bpp modes. */ memset(pixels, 0x0000, s->frame.linesize[0] * s->avctx->height); break; case FLI_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ stream_ptr++; pixel_countdown = (s->avctx->width * 2); while (pixel_countdown > 0) { byte_run = (signed char)(buf[stream_ptr++]); if (byte_run > 0) { palette_idx1 = buf[stream_ptr++]; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d) (linea%d)\\n\", pixel_countdown, lines); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { palette_idx1 = buf[stream_ptr++]; pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d) at line %d\\n\", pixel_countdown, lines); } } } /* Now FLX is strange, in that it is \"byte\" as opposed to \"pixel\" run length compressed. * This does not give us any good oportunity to perform word endian conversion * during decompression. So if it is required (i.e., this is not a LE target, we do * a second pass over the line here, swapping the bytes. 
*/ #if HAVE_BIGENDIAN pixel_ptr = y_ptr; pixel_countdown = s->avctx->width; while (pixel_countdown > 0) { *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&buf[pixel_ptr]); pixel_ptr += 2; } #endif y_ptr += s->frame.linesize[0]; } break; case FLI_DTA_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ stream_ptr++; pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */ while (pixel_countdown > 0) { byte_run = (signed char)(buf[stream_ptr++]); if (byte_run > 0) { pixel = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++) { *((signed short*)(&pixels[pixel_ptr])) = pixel; pixel_ptr += 2; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d)\\n\", pixel_countdown); } } else { /* copy pixels if byte_run < 0 */ byte_run = -byte_run; CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++) { *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&buf[stream_ptr]); stream_ptr += 2; pixel_ptr += 2; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, \"pixel_countdown < 0 (%d)\\n\", pixel_countdown); } } } y_ptr += s->frame.linesize[0]; } break; case FLI_COPY: case FLI_DTA_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 > (unsigned int)(s->avctx->width * s->avctx->height)*2) { av_log(avctx, AV_LOG_ERROR, \"In chunk FLI_COPY : source data (%d bytes) \" \\ \"bigger than image, skipping chunk\\n\", chunk_size - 6); stream_ptr += chunk_size - 6; } else { for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height; y_ptr += s->frame.linesize[0]) { pixel_countdown = s->avctx->width; pixel_ptr = 0; while (pixel_countdown > 0) { *((signed short*)(&pixels[y_ptr + pixel_ptr])) = AV_RL16(&buf[stream_ptr+pixel_ptr]); pixel_ptr += 2; pixel_countdown--; } stream_ptr += s->avctx->width*2; } } break; case FLI_MINI: /* some sort of a thumbnail? disregard this chunk... 
*/ stream_ptr += chunk_size - 6; break; default: av_log(avctx, AV_LOG_ERROR, \"Unrecognized chunk type: %d\\n\", chunk_type); break; } frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1, possibly); if it doesn't, issue a warning */ if ((stream_ptr != buf_size) && (stream_ptr != buf_size - 1)) av_log(avctx, AV_LOG_ERROR, \"Processed FLI chunk where chunk size = %d \" \\ \"and final chunk ptr = %d\\n\", buf_size, stream_ptr); *data_size=sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; }"} {"target": 1, "idx": 24918, "func": "static void init_proc_601 (CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_601(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, \"HID0\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, \"HID1\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_601_HID2, \"HID2\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_601_HID5, \"HID5\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_601_HID15, \"HID15\", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ #if !defined(CONFIG_USER_ONLY) env->nb_tlb = 64; env->nb_ways = 2; env->id_tlbs = 0; #endif init_excp_601(env); env->dcache_line_size = 64; env->icache_line_size = 64; /* XXX: TODO: allocate internal IRQ controller */ }"} {"target": 0, "idx": 24935, "func": "static int i6300esb_init(PCIDevice *dev) { I6300State *d = DO_UPCAST(I6300State, dev, dev); uint8_t *pci_conf; int io_mem; static CPUReadMemoryFunc * const mem_read[3] = { i6300esb_mem_readb, i6300esb_mem_readw, i6300esb_mem_readl, }; static CPUWriteMemoryFunc * const mem_write[3] = { i6300esb_mem_writeb, i6300esb_mem_writew, i6300esb_mem_writel, }; i6300esb_debug(\"I6300State = %p\\n\", d); d->timer = qemu_new_timer_ns(vm_clock, i6300esb_timer_expired, d); d->previous_reboot_flag = 0; pci_conf = d->dev.config; pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL); pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_INTEL_ESB_9); pci_config_set_class(pci_conf, PCI_CLASS_SYSTEM_OTHER); io_mem = cpu_register_io_memory(mem_read, mem_write, d, DEVICE_NATIVE_ENDIAN); pci_register_bar_simple(&d->dev, 0, 0x10, 0, io_mem); /* qemu_register_coalesced_mmio (addr, 0x10); ? */ return 0; }"} {"target": 0, "idx": 24937, "func": "NBDExport *nbd_export_new(BlockDriverState *bs, off_t dev_offset, off_t size, uint32_t nbdflags) { NBDExport *exp = g_malloc0(sizeof(NBDExport)); QSIMPLEQ_INIT(&exp->requests); exp->refcount = 1; QTAILQ_INIT(&exp->clients); exp->bs = bs; exp->dev_offset = dev_offset; exp->nbdflags = nbdflags; exp->size = size == -1 ? 
bdrv_getlength(bs) : size; return exp; }"} {"target": 0, "idx": 24938, "func": "static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque, int is_write) { BlockDriverAIOCBSync *acb; acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque); acb->is_write = is_write; acb->qiov = qiov; acb->bounce = qemu_blockalign(bs, qiov->size); acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb); if (is_write) { qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors); } else { acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors); } qemu_bh_schedule(acb->bh); return &acb->common; }"} {"target": 0, "idx": 24939, "func": "static void write_audio_frame(AVFormatContext *oc, AVStream *st) { AVCodecContext *c; AVPacket pkt = { 0 }; // data and size must be 0; int got_packet, ret, dst_nb_samples; av_init_packet(&pkt); c = st->codec; get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels); /* convert samples from native format to destination codec format, using the resampler */ if (swr_ctx) { /* compute destination number of samples */ dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples, c->sample_rate, c->sample_rate, AV_ROUND_UP); if (dst_nb_samples > max_dst_nb_samples) { av_free(dst_samples_data[0]); ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels, dst_nb_samples, c->sample_fmt, 0); if (ret < 0) exit(1); max_dst_nb_samples = dst_nb_samples; dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples, c->sample_fmt, 0); } /* convert to destination format */ ret = swr_convert(swr_ctx, dst_samples_data, dst_nb_samples, (const uint8_t **)src_samples_data, src_nb_samples); if (ret < 0) { fprintf(stderr, \"Error while converting\\n\"); exit(1); } } else { dst_nb_samples = src_nb_samples; } audio_frame->nb_samples = dst_nb_samples; audio_frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base); avcodec_fill_audio_frame(audio_frame, c->channels, c->sample_fmt, dst_samples_data[0], dst_samples_size, 0); samples_count += dst_nb_samples; ret = avcodec_encode_audio2(c, &pkt, audio_frame, &got_packet); if (ret < 0) { fprintf(stderr, \"Error encoding audio frame: %s\\n\", av_err2str(ret)); exit(1); } if (!got_packet) return; ret = write_frame(oc, &c->time_base, st, &pkt); if (ret != 0) { fprintf(stderr, \"Error while writing audio frame: %s\\n\", av_err2str(ret)); exit(1); } }"} {"target": 1, "idx": 24950, "func": "static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev, uint32_t event, uint32_t reason) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); VirtIOSCSIReq *req = virtio_scsi_pop_req(s, vs->event_vq); VirtIOSCSIEvent *evt; VirtIODevice *vdev = VIRTIO_DEVICE(s); int in_size; if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { return; } if (!req) { s->events_dropped = true; return; } if (req->elem.out_num || req->elem.in_num != 1) { virtio_scsi_bad_req(); } if (s->events_dropped) { event |= VIRTIO_SCSI_T_EVENTS_MISSED; s->events_dropped = false; } in_size = req->elem.in_sg[0].iov_len; if (in_size < sizeof(VirtIOSCSIEvent)) { virtio_scsi_bad_req(); } evt = req->resp.event; memset(evt, 0, sizeof(VirtIOSCSIEvent)); evt->event = event; evt->reason = reason; if (!dev) { assert(event == VIRTIO_SCSI_T_NO_EVENT); } else { evt->lun[0] = 1; evt->lun[1] = dev->id; /* Linux wants us to keep the same 
encoding we use for REPORT LUNS. */ if (dev->lun >= 256) { evt->lun[2] = (dev->lun >> 8) | 0x40; } evt->lun[3] = dev->lun & 0xFF; } virtio_scsi_complete_req(req); }"} {"target": 1, "idx": 24953, "func": "AVFrame *avcodec_alloc_frame(void) { AVFrame *frame = av_malloc(sizeof(AVFrame)); if (frame == NULL) return NULL; avcodec_get_frame_defaults(frame); return frame; }"} {"target": 1, "idx": 24965, "func": "static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev) { AHCIPCIState *d = container_of(s, AHCIPCIState, ahci); PCIDevice *pci_dev = (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE); DPRINTF(0, \"raise irq\\n\"); if (pci_dev && msi_enabled(pci_dev)) { msi_notify(pci_dev, 0); } else { qemu_irq_raise(s->irq); } }"} {"target": 1, "idx": 24966, "func": "static int mov_write_hdlr_tag(ByteIOContext *pb, MOVTrack *track) { const char *descr, *hdlr, *hdlr_type; int64_t pos = url_ftell(pb); if (!track) { /* no media --> data handler */ hdlr = \"dhlr\"; hdlr_type = \"url \"; descr = \"DataHandler\"; } else { hdlr = (track->mode == MODE_MOV) ? \"mhlr\" : \"\\0\\0\\0\\0\"; if (track->enc->codec_type == CODEC_TYPE_VIDEO) { hdlr_type = \"vide\"; descr = \"VideoHandler\"; } else if (track->enc->codec_type == CODEC_TYPE_AUDIO){ hdlr_type = \"soun\"; descr = \"SoundHandler\"; } else if (track->enc->codec_type == CODEC_TYPE_SUBTITLE){ if (track->mode == MODE_IPOD) hdlr_type = \"sbtl\"; else hdlr_type = \"text\"; descr = \"SubtitleHandler\"; } } put_be32(pb, 0); /* size */ put_tag(pb, \"hdlr\"); put_be32(pb, 0); /* Version & flags */ put_buffer(pb, hdlr, 4); /* handler */ put_tag(pb, hdlr_type); /* handler type */ put_be32(pb ,0); /* reserved */ put_be32(pb ,0); /* reserved */ put_be32(pb ,0); /* reserved */ put_byte(pb, strlen(descr)); /* string counter */ put_buffer(pb, descr, strlen(descr)); /* handler description */ return updateSize(pb, pos); }"} {"target": 0, "idx": 24976, "func": "static av_always_inline void decode_cabac_residual_internal( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff, int is_dc ) { static const int significant_coeff_flag_offset[2][6] = { { 105+0, 105+15, 105+29, 105+44, 105+47, 402 }, { 277+0, 277+15, 277+29, 277+44, 277+47, 436 } }; static const int last_coeff_flag_offset[2][6] = { { 166+0, 166+15, 166+29, 166+44, 166+47, 417 }, { 338+0, 338+15, 338+29, 338+44, 338+47, 451 } }; static const int coeff_abs_level_m1_offset[6] = { 227+0, 227+10, 227+20, 227+30, 227+39, 426 }; static const uint8_t significant_coeff_flag_offset_8x8[2][63] = { { 0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5, 4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7, 7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11, 12,13,11, 6, 9,14,10, 9,11,12,13,11,14,10,12 }, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 7, 7, 8, 4, 5, 6, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,13,13, 9, 9,10,10, 8,13,13, 9, 9,10,10,14,14,14,14,14 } }; /* node ctx: 0..3: abslevel1 (with abslevelgt1 == 0). * 4..7: abslevelgt1 + 3 (and abslevel1 doesn't matter). 
* map node ctx => cabac ctx for level=1 */ static const uint8_t coeff_abs_level1_ctx[8] = { 1, 2, 3, 4, 0, 0, 0, 0 }; /* map node ctx => cabac ctx for level>1 */ static const uint8_t coeff_abs_levelgt1_ctx[8] = { 5, 5, 5, 5, 6, 7, 8, 9 }; static const uint8_t coeff_abs_level_transition[2][8] = { /* update node ctx after decoding a level=1 */ { 1, 2, 3, 3, 4, 5, 6, 7 }, /* update node ctx after decoding a level>1 */ { 4, 4, 4, 4, 5, 6, 7, 7 } }; int index[64]; int av_unused last; int coeff_count = 0; int node_ctx = 0; uint8_t *significant_coeff_ctx_base; uint8_t *last_coeff_ctx_base; uint8_t *abs_level_m1_ctx_base; #ifndef ARCH_X86 #define CABAC_ON_STACK #endif #ifdef CABAC_ON_STACK #define CC &cc CABACContext cc; cc.range = h->cabac.range; cc.low = h->cabac.low; cc.bytestream= h->cabac.bytestream; #else #define CC &h->cabac #endif /* cat: 0-> DC 16x16 n = 0 * 1-> AC 16x16 n = luma4x4idx * 2-> Luma4x4 n = luma4x4idx * 3-> DC Chroma n = iCbCr * 4-> AC Chroma n = 4 * iCbCr + chroma4x4idx * 5-> Luma8x8 n = 4 * luma8x8idx */ /* read coded block flag */ if( is_dc || cat != 5 ) { if( get_cabac( CC, &h->cabac_state[85 + get_cabac_cbf_ctx( h, cat, n, is_dc ) ] ) == 0 ) { if( !is_dc ) { if( cat == 1 || cat == 2 ) h->non_zero_count_cache[scan8[n]] = 0; else h->non_zero_count_cache[scan8[16+n]] = 0; } #ifdef CABAC_ON_STACK h->cabac.range = cc.range ; h->cabac.low = cc.low ; h->cabac.bytestream= cc.bytestream; #endif return; } } significant_coeff_ctx_base = h->cabac_state + significant_coeff_flag_offset[MB_FIELD][cat]; last_coeff_ctx_base = h->cabac_state + last_coeff_flag_offset[MB_FIELD][cat]; abs_level_m1_ctx_base = h->cabac_state + coeff_abs_level_m1_offset[cat]; if( !is_dc && cat == 5 ) { #define DECODE_SIGNIFICANCE( coefs, sig_off, last_off ) \\ for(last= 0; last < coefs; last++) { \\ uint8_t *sig_ctx = significant_coeff_ctx_base + sig_off; \\ if( get_cabac( CC, sig_ctx )) { \\ uint8_t *last_ctx = last_coeff_ctx_base + last_off; \\ index[coeff_count++] = last; \\ if( get_cabac( CC, last_ctx ) ) { \\ last= max_coeff; \\ break; \\ } \\ } \\ }\\ if( last == max_coeff -1 ) {\\ index[coeff_count++] = last;\\ } const uint8_t *sig_off = significant_coeff_flag_offset_8x8[MB_FIELD]; #if defined(ARCH_X86) && defined(HAVE_7REGS) && defined(HAVE_EBX_AVAILABLE) && !defined(BROKEN_RELOCATIONS) coeff_count= decode_significance_8x8_x86(CC, significant_coeff_ctx_base, index, sig_off); } else { coeff_count= decode_significance_x86(CC, max_coeff, significant_coeff_ctx_base, index); #else DECODE_SIGNIFICANCE( 63, sig_off[last], last_coeff_flag_offset_8x8[last] ); } else { DECODE_SIGNIFICANCE( max_coeff - 1, last, last ); #endif } assert(coeff_count > 0); if( is_dc ) { if( cat == 0 ) h->cbp_table[h->mb_xy] |= 0x100; else h->cbp_table[h->mb_xy] |= 0x40 << n; } else { if( cat == 1 || cat == 2 ) h->non_zero_count_cache[scan8[n]] = coeff_count; else if( cat == 4 ) h->non_zero_count_cache[scan8[16+n]] = coeff_count; else { assert( cat == 5 ); fill_rectangle(&h->non_zero_count_cache[scan8[n]], 2, 2, 8, coeff_count, 1); } } for( coeff_count--; coeff_count >= 0; coeff_count-- ) { uint8_t *ctx = coeff_abs_level1_ctx[node_ctx] + abs_level_m1_ctx_base; int j= scantable[index[coeff_count]]; if( get_cabac( CC, ctx ) == 0 ) { node_ctx = coeff_abs_level_transition[0][node_ctx]; if( is_dc ) { block[j] = get_cabac_bypass_sign( CC, -1); }else{ block[j] = (get_cabac_bypass_sign( CC, -qmul[j]) + 32) >> 6; } } else { int coeff_abs = 2; ctx = coeff_abs_levelgt1_ctx[node_ctx] + abs_level_m1_ctx_base; node_ctx = 
coeff_abs_level_transition[1][node_ctx]; while( coeff_abs < 15 && get_cabac( CC, ctx ) ) { coeff_abs++; } if( coeff_abs >= 15 ) { int j = 0; while( get_cabac_bypass( CC ) ) { j++; } coeff_abs=1; while( j-- ) { coeff_abs += coeff_abs + get_cabac_bypass( CC ); } coeff_abs+= 14; } if( is_dc ) { if( get_cabac_bypass( CC ) ) block[j] = -coeff_abs; else block[j] = coeff_abs; }else{ if( get_cabac_bypass( CC ) ) block[j] = (-coeff_abs * qmul[j] + 32) >> 6; else block[j] = ( coeff_abs * qmul[j] + 32) >> 6; } } } #ifdef CABAC_ON_STACK h->cabac.range = cc.range ; h->cabac.low = cc.low ; h->cabac.bytestream= cc.bytestream; #endif }"} {"target": 0, "idx": 24978, "func": "static void mpc8_parse_seektable(AVFormatContext *s, int64_t off) { MPCContext *c = s->priv_data; int tag; int64_t size, pos, ppos[2]; uint8_t *buf; int i, t, seekd; GetBitContext gb; if (s->nb_streams == 0) { av_log(s, AV_LOG_ERROR, \"No stream added before parsing seek table\\n\"); return; } avio_seek(s->pb, off, SEEK_SET); mpc8_get_chunk_header(s->pb, &tag, &size); if(tag != TAG_SEEKTABLE){ av_log(s, AV_LOG_ERROR, \"No seek table at given position\\n\"); return; } if (size > INT_MAX/10 || size<=0) { av_log(s, AV_LOG_ERROR, \"Bad seek table size\\n\"); return; } if(!(buf = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE))) return; avio_read(s->pb, buf, size); memset(buf+size, 0, FF_INPUT_BUFFER_PADDING_SIZE); init_get_bits(&gb, buf, size * 8); size = gb_get_v(&gb); if(size > UINT_MAX/4 || size > c->samples/1152){ av_log(s, AV_LOG_ERROR, \"Seek table is too big\\n\"); return; } seekd = get_bits(&gb, 4); for(i = 0; i < 2; i++){ pos = gb_get_v(&gb) + c->header_pos; ppos[1 - i] = pos; av_add_index_entry(s->streams[0], pos, i, 0, 0, AVINDEX_KEYFRAME); } for(; i < size; i++){ t = get_unary(&gb, 1, 33) << 12; t += get_bits(&gb, 12); if(t & 1) t = -(t & ~1); pos = (t >> 1) + ppos[0]*2 - ppos[1]; av_add_index_entry(s->streams[0], pos, i << seekd, 0, 0, AVINDEX_KEYFRAME); ppos[1] = ppos[0]; ppos[0] = pos; } av_free(buf); }"} {"target": 1, "idx": 24985, "func": "static int ftp_features(FTPContext *s) { static const char *feat_command = \"FEAT\\r\\n\"; static const char *enable_utf8_command = \"OPTS UTF8 ON\\r\\n\"; static const int feat_codes[] = {211, 0}; static const int opts_codes[] = {200, 451}; char *feat; if (ftp_send_command(s, feat_command, feat_codes, &feat) == 211) { if (av_stristr(feat, \"UTF8\")) ftp_send_command(s, enable_utf8_command, opts_codes, NULL); } return 0; }"} {"target": 0, "idx": 25007, "func": "void run_on_cpu(CPUState *env, void (*func)(void *data), void *data) { struct qemu_work_item wi; if (qemu_cpu_self(env)) { func(data); return; } wi.func = func; wi.data = data; if (!env->queued_work_first) env->queued_work_first = &wi; else env->queued_work_last->next = &wi; env->queued_work_last = &wi; wi.next = NULL; wi.done = false; qemu_cpu_kick(env); while (!wi.done) { CPUState *self_env = cpu_single_env; qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex); cpu_single_env = self_env; } }"} {"target": 0, "idx": 25011, "func": "static int ffserver_apply_stream_config(AVCodecContext *enc, const AVDictionary *conf, AVDictionary **opts) { AVDictionaryEntry *e; int ret = 0; /* Return values from ffserver_set_*_param are ignored. Values are initially parsed and checked before inserting to AVDictionary. 
*/ //video params if ((e = av_dict_get(conf, \"VideoBitRateRangeMin\", NULL, 0))) ffserver_set_int_param(&enc->rc_min_rate, e->value, 1000, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"VideoBitRateRangeMax\", NULL, 0))) ffserver_set_int_param(&enc->rc_max_rate, e->value, 1000, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"Debug\", NULL, 0))) ffserver_set_int_param(&enc->debug, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"Strict\", NULL, 0))) ffserver_set_int_param(&enc->strict_std_compliance, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"VideoBufferSize\", NULL, 0))) ffserver_set_int_param(&enc->rc_buffer_size, e->value, 8*1024, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"VideoBitRateTolerance\", NULL, 0))) ffserver_set_int_param(&enc->bit_rate_tolerance, e->value, 1000, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"VideoBitRate\", NULL, 0))) ffserver_set_int_param(&enc->bit_rate, e->value, 1000, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"VideoSizeWidth\", NULL, 0))) ffserver_set_int_param(&enc->width, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"VideoSizeHeight\", NULL, 0))) ffserver_set_int_param(&enc->height, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"PixelFormat\", NULL, 0))) { int val; ffserver_set_int_param(&val, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); enc->pix_fmt = val; } if ((e = av_dict_get(conf, \"VideoGopSize\", NULL, 0))) ffserver_set_int_param(&enc->gop_size, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"VideoFrameRateNum\", NULL, 0))) ffserver_set_int_param(&enc->time_base.num, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"VideoFrameRateDen\", NULL, 0))) ffserver_set_int_param(&enc->time_base.den, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"VideoQDiff\", NULL, 0))) ffserver_set_int_param(&enc->max_qdiff, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"VideoQMax\", NULL, 0))) ffserver_set_int_param(&enc->qmax, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"VideoQMin\", NULL, 0))) ffserver_set_int_param(&enc->qmin, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"LumiMask\", NULL, 0))) ffserver_set_float_param(&enc->lumi_masking, e->value, 0, -FLT_MAX, FLT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"DarkMask\", NULL, 0))) ffserver_set_float_param(&enc->dark_masking, e->value, 0, -FLT_MAX, FLT_MAX, NULL, 0, NULL); if (av_dict_get(conf, \"BitExact\", NULL, 0)) enc->flags |= CODEC_FLAG_BITEXACT; if (av_dict_get(conf, \"DctFastint\", NULL, 0)) enc->dct_algo = FF_DCT_FASTINT; if (av_dict_get(conf, \"IdctSimple\", NULL, 0)) enc->idct_algo = FF_IDCT_SIMPLE; if (av_dict_get(conf, \"VideoHighQuality\", NULL, 0)) enc->mb_decision = FF_MB_DECISION_BITS; if ((e = av_dict_get(conf, \"VideoTag\", NULL, 0))) enc->codec_tag = MKTAG(e->value[0], e->value[1], e->value[2], e->value[3]); if (av_dict_get(conf, \"Qscale\", NULL, 0)) { enc->flags |= CODEC_FLAG_QSCALE; ffserver_set_int_param(&enc->global_quality, e->value, FF_QP2LAMBDA, INT_MIN, INT_MAX, NULL, 0, NULL); } if (av_dict_get(conf, \"Video4MotionVector\", NULL, 0)) { enc->mb_decision = FF_MB_DECISION_BITS; //FIXME remove enc->flags |= CODEC_FLAG_4MV; } //audio params if ((e = av_dict_get(conf, \"AudioChannels\", NULL, 0))) 
ffserver_set_int_param(&enc->channels, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"AudioSampleRate\", NULL, 0))) ffserver_set_int_param(&enc->sample_rate, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); if ((e = av_dict_get(conf, \"AudioBitRate\", NULL, 0))) ffserver_set_int_param(&enc->bit_rate, e->value, 0, INT_MIN, INT_MAX, NULL, 0, NULL); av_opt_set_dict2(enc, opts, AV_OPT_SEARCH_CHILDREN); e = NULL; while (e = av_dict_get(*opts, \"\", e, AV_DICT_IGNORE_SUFFIX)) { av_log(NULL, AV_LOG_ERROR, \"Provided AVOption '%s' doesn't match any existing option.\\n\", e->key); ret = AVERROR(EINVAL); } return ret; }"} {"target": 1, "idx": 25049, "func": "static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) { uint32_t rca = 0x0000; uint64_t addr = (sd->ocr & (1 << 30)) ? (uint64_t) req.arg << 9 : req.arg; if (sd_cmd_type[req.cmd] == sd_ac || sd_cmd_type[req.cmd] == sd_adtc) rca = req.arg >> 16; DPRINTF(\"CMD%d 0x%08x state %d\\n\", req.cmd, req.arg, sd->state); switch (req.cmd) { /* Basic commands (Class 0 and Class 1) */ case 0: /* CMD0: GO_IDLE_STATE */ switch (sd->state) { case sd_inactive_state: return sd->spi ? sd_r1 : sd_r0; default: sd->state = sd_idle_state; sd_reset(sd, sd->bdrv); return sd->spi ? sd_r1 : sd_r0; } break; case 1: /* CMD1: SEND_OP_CMD */ if (!sd->spi) goto bad_cmd; sd->state = sd_transfer_state; return sd_r1; case 2: /* CMD2: ALL_SEND_CID */ if (sd->spi) goto bad_cmd; switch (sd->state) { case sd_ready_state: sd->state = sd_identification_state; return sd_r2_i; default: break; } break; case 3: /* CMD3: SEND_RELATIVE_ADDR */ if (sd->spi) goto bad_cmd; switch (sd->state) { case sd_identification_state: case sd_standby_state: sd->state = sd_standby_state; sd_set_rca(sd); return sd_r6; default: break; } break; case 4: /* CMD4: SEND_DSR */ if (sd->spi) goto bad_cmd; switch (sd->state) { case sd_standby_state: break; default: break; } break; case 5: /* CMD5: reserved for SDIO cards */ sd->card_status |= ILLEGAL_COMMAND; return sd_r0; case 6: /* CMD6: SWITCH_FUNCTION */ if (sd->spi) goto bad_cmd; switch (sd->mode) { case sd_data_transfer_mode: sd_function_switch(sd, req.arg); sd->state = sd_sendingdata_state; sd->data_start = 0; sd->data_offset = 0; return sd_r1; default: break; } break; case 7: /* CMD7: SELECT/DESELECT_CARD */ if (sd->spi) goto bad_cmd; switch (sd->state) { case sd_standby_state: if (sd->rca != rca) return sd_r0; sd->state = sd_transfer_state; return sd_r1b; case sd_transfer_state: case sd_sendingdata_state: if (sd->rca == rca) break; sd->state = sd_standby_state; return sd_r1b; case sd_disconnect_state: if (sd->rca != rca) return sd_r0; sd->state = sd_programming_state; return sd_r1b; case sd_programming_state: if (sd->rca == rca) break; sd->state = sd_disconnect_state; return sd_r1b; default: break; } break; case 8: /* CMD8: SEND_IF_COND */ /* Physical Layer Specification Version 2.00 command */ switch (sd->state) { case sd_idle_state: sd->vhs = 0; /* No response if not exactly one VHS bit is set. */ if (!(req.arg >> 8) || (req.arg >> ffs(req.arg & ~0xff))) return sd->spi ? sd_r7 : sd_r0; /* Accept. 
*/ sd->vhs = req.arg; return sd_r7; default: break; } break; case 9: /* CMD9: SEND_CSD */ switch (sd->state) { case sd_standby_state: if (sd->rca != rca) return sd_r0; return sd_r2_s; case sd_transfer_state: if (!sd->spi) break; sd->state = sd_sendingdata_state; memcpy(sd->data, sd->csd, 16); sd->data_start = addr; sd->data_offset = 0; return sd_r1; default: break; } break; case 10: /* CMD10: SEND_CID */ switch (sd->state) { case sd_standby_state: if (sd->rca != rca) return sd_r0; return sd_r2_i; case sd_transfer_state: if (!sd->spi) break; sd->state = sd_sendingdata_state; memcpy(sd->data, sd->cid, 16); sd->data_start = addr; sd->data_offset = 0; return sd_r1; default: break; } break; case 11: /* CMD11: READ_DAT_UNTIL_STOP */ if (sd->spi) goto bad_cmd; switch (sd->state) { case sd_transfer_state: sd->state = sd_sendingdata_state; sd->data_start = req.arg; sd->data_offset = 0; if (sd->data_start + sd->blk_len > sd->size) sd->card_status |= ADDRESS_ERROR; return sd_r0; default: break; } break; case 12: /* CMD12: STOP_TRANSMISSION */ switch (sd->state) { case sd_sendingdata_state: sd->state = sd_transfer_state; return sd_r1b; case sd_receivingdata_state: sd->state = sd_programming_state; /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; return sd_r1b; default: break; } break; case 13: /* CMD13: SEND_STATUS */ switch (sd->mode) { case sd_data_transfer_mode: if (sd->rca != rca) return sd_r0; return sd_r1; default: break; } break; case 15: /* CMD15: GO_INACTIVE_STATE */ if (sd->spi) goto bad_cmd; switch (sd->mode) { case sd_data_transfer_mode: if (sd->rca != rca) return sd_r0; sd->state = sd_inactive_state; return sd_r0; default: break; } break; /* Block read commands (Classs 2) */ case 16: /* CMD16: SET_BLOCKLEN */ switch (sd->state) { case sd_transfer_state: if (req.arg > (1 << HWBLOCK_SHIFT)) sd->card_status |= BLOCK_LEN_ERROR; else sd->blk_len = req.arg; return sd_r1; default: break; } break; case 17: /* CMD17: READ_SINGLE_BLOCK */ switch (sd->state) { case sd_transfer_state: sd->state = sd_sendingdata_state; sd->data_start = addr; sd->data_offset = 0; if (sd->data_start + sd->blk_len > sd->size) sd->card_status |= ADDRESS_ERROR; return sd_r1; default: break; } break; case 18: /* CMD18: READ_MULTIPLE_BLOCK */ switch (sd->state) { case sd_transfer_state: sd->state = sd_sendingdata_state; sd->data_start = addr; sd->data_offset = 0; if (sd->data_start + sd->blk_len > sd->size) sd->card_status |= ADDRESS_ERROR; return sd_r1; default: break; } break; /* Block write commands (Class 4) */ case 24: /* CMD24: WRITE_SINGLE_BLOCK */ if (sd->spi) goto unimplemented_cmd; switch (sd->state) { case sd_transfer_state: /* Writing in SPI mode not implemented. */ if (sd->spi) break; sd->state = sd_receivingdata_state; sd->data_start = addr; sd->data_offset = 0; sd->blk_written = 0; if (sd->data_start + sd->blk_len > sd->size) sd->card_status |= ADDRESS_ERROR; if (sd_wp_addr(sd, sd->data_start)) sd->card_status |= WP_VIOLATION; if (sd->csd[14] & 0x30) sd->card_status |= WP_VIOLATION; return sd_r1; default: break; } break; case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */ if (sd->spi) goto unimplemented_cmd; switch (sd->state) { case sd_transfer_state: /* Writing in SPI mode not implemented. 
*/ if (sd->spi) break; sd->state = sd_receivingdata_state; sd->data_start = addr; sd->data_offset = 0; sd->blk_written = 0; if (sd->data_start + sd->blk_len > sd->size) sd->card_status |= ADDRESS_ERROR; if (sd_wp_addr(sd, sd->data_start)) sd->card_status |= WP_VIOLATION; if (sd->csd[14] & 0x30) sd->card_status |= WP_VIOLATION; return sd_r1; default: break; } break; case 26: /* CMD26: PROGRAM_CID */ if (sd->spi) goto bad_cmd; switch (sd->state) { case sd_transfer_state: sd->state = sd_receivingdata_state; sd->data_start = 0; sd->data_offset = 0; return sd_r1; default: break; } break; case 27: /* CMD27: PROGRAM_CSD */ if (sd->spi) goto unimplemented_cmd; switch (sd->state) { case sd_transfer_state: sd->state = sd_receivingdata_state; sd->data_start = 0; sd->data_offset = 0; return sd_r1; default: break; } break; /* Write protection (Class 6) */ case 28: /* CMD28: SET_WRITE_PROT */ switch (sd->state) { case sd_transfer_state: if (addr >= sd->size) { sd->card_status |= ADDRESS_ERROR; return sd_r1b; } sd->state = sd_programming_state; sd->wp_groups[addr >> (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT)] = 1; /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; return sd_r1b; default: break; } break; case 29: /* CMD29: CLR_WRITE_PROT */ switch (sd->state) { case sd_transfer_state: if (addr >= sd->size) { sd->card_status |= ADDRESS_ERROR; return sd_r1b; } sd->state = sd_programming_state; sd->wp_groups[addr >> (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT)] = 0; /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; return sd_r1b; default: break; } break; case 30: /* CMD30: SEND_WRITE_PROT */ switch (sd->state) { case sd_transfer_state: sd->state = sd_sendingdata_state; *(uint32_t *) sd->data = sd_wpbits(sd, req.arg); sd->data_start = addr; sd->data_offset = 0; return sd_r1b; default: break; } break; /* Erase commands (Class 5) */ case 32: /* CMD32: ERASE_WR_BLK_START */ switch (sd->state) { case sd_transfer_state: sd->erase_start = req.arg; return sd_r1; default: break; } break; case 33: /* CMD33: ERASE_WR_BLK_END */ switch (sd->state) { case sd_transfer_state: sd->erase_end = req.arg; return sd_r1; default: break; } break; case 38: /* CMD38: ERASE */ switch (sd->state) { case sd_transfer_state: if (sd->csd[14] & 0x30) { sd->card_status |= WP_VIOLATION; return sd_r1b; } sd->state = sd_programming_state; sd_erase(sd); /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; return sd_r1b; default: break; } break; /* Lock card commands (Class 7) */ case 42: /* CMD42: LOCK_UNLOCK */ if (sd->spi) goto unimplemented_cmd; switch (sd->state) { case sd_transfer_state: sd->state = sd_receivingdata_state; sd->data_start = 0; sd->data_offset = 0; return sd_r1; default: break; } break; case 52: case 53: /* CMD52, CMD53: reserved for SDIO cards * (see the SDIO Simplified Specification V2.0) * Handle as illegal command but do not complain * on stderr, as some OSes may use these in their * probing for presence of an SDIO card. 
*/ sd->card_status |= ILLEGAL_COMMAND; return sd_r0; /* Application specific commands (Class 8) */ case 55: /* CMD55: APP_CMD */ if (sd->rca != rca) return sd_r0; sd->card_status |= APP_CMD; return sd_r1; case 56: /* CMD56: GEN_CMD */ fprintf(stderr, \"SD: GEN_CMD 0x%08x\\n\", req.arg); switch (sd->state) { case sd_transfer_state: sd->data_offset = 0; if (req.arg & 1) sd->state = sd_sendingdata_state; else sd->state = sd_receivingdata_state; return sd_r1; default: break; } break; default: bad_cmd: sd->card_status |= ILLEGAL_COMMAND; fprintf(stderr, \"SD: Unknown CMD%i\\n\", req.cmd); return sd_r0; unimplemented_cmd: /* Commands that are recognised but not yet implemented in SPI mode. */ sd->card_status |= ILLEGAL_COMMAND; fprintf(stderr, \"SD: CMD%i not implemented in SPI mode\\n\", req.cmd); return sd_r0; } sd->card_status |= ILLEGAL_COMMAND; fprintf(stderr, \"SD: CMD%i in a wrong state\\n\", req.cmd); return sd_r0; }"} {"target": 1, "idx": 25051, "func": "int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, AVFilterInOut **open_inputs, AVFilterInOut **open_outputs, void *log_ctx) { int index = 0, ret; char chr = 0; AVFilterInOut *curr_inputs = NULL; do { AVFilterContext *filter; const char *filterchain = filters; filters += strspn(filters, WHITESPACES); if ((ret = parse_inputs(&filters, &curr_inputs, open_outputs, log_ctx)) < 0) goto fail; if ((ret = parse_filter(&filter, &filters, graph, index, log_ctx)) < 0) goto fail; if (filter->input_count == 1 && !curr_inputs && !index) { /* First input can be omitted if it is \"[in]\" */ const char *tmp = \"[in]\"; if ((ret = parse_inputs(&tmp, &curr_inputs, open_outputs, log_ctx)) < 0) goto fail; } if ((ret = link_filter_inouts(filter, &curr_inputs, open_inputs, log_ctx)) < 0) goto fail; if ((ret = parse_outputs(&filters, &curr_inputs, open_inputs, open_outputs, log_ctx)) < 0) goto fail; filters += strspn(filters, WHITESPACES); chr = *filters++; if (chr == ';' && curr_inputs) { av_log(log_ctx, AV_LOG_ERROR, \"Invalid filterchain containing an unlabelled output pad: \\\"%s\\\"\\n\", filterchain); ret = AVERROR(EINVAL); goto fail; } index++; } while (chr == ',' || chr == ';'); if (chr) { av_log(log_ctx, AV_LOG_ERROR, \"Unable to parse graph description substring: \\\"%s\\\"\\n\", filters - 1); ret = AVERROR(EINVAL); goto fail; } if (*open_inputs && !strcmp((*open_inputs)->name, \"out\") && curr_inputs) { /* Last output can be omitted if it is \"[out]\" */ const char *tmp = \"[out]\"; if ((ret = parse_outputs(&tmp, &curr_inputs, open_inputs, open_outputs, log_ctx)) < 0) goto fail; } return 0; fail: for (; graph->filter_count > 0; graph->filter_count--) avfilter_free(graph->filters[graph->filter_count - 1]); av_freep(&graph->filters); avfilter_inout_free(open_inputs); avfilter_inout_free(open_outputs); avfilter_inout_free(&curr_inputs); return ret; }"} {"target": 1, "idx": 25058, "func": "static int libopenjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { uint8_t *buf = avpkt->data; int buf_size = avpkt->size; LibOpenJPEGContext *ctx = avctx->priv_data; AVFrame *picture = &ctx->image, *output = data; opj_dinfo_t *dec; opj_cio_t *stream; opj_image_t *image; int width, height, ret = -1; int pixel_size = 0; int ispacked = 0; *data_size = 0; // Check if input is a raw jpeg2k codestream or in jp2 wrapping if((AV_RB32(buf) == 12) && (AV_RB32(buf + 4) == JP2_SIG_TYPE) && (AV_RB32(buf + 8) == JP2_SIG_VALUE)) { dec = opj_create_decompress(CODEC_JP2); } else { // If the AVPacket contains a jp2c box, then 
skip to // the starting byte of the codestream. if (AV_RB32(buf + 4) == AV_RB32(\"jp2c\")) buf += 8; dec = opj_create_decompress(CODEC_J2K); if(!dec) { av_log(avctx, AV_LOG_ERROR, \"Error initializing decoder.\\n\"); opj_set_event_mgr((opj_common_ptr)dec, NULL, NULL); ctx->dec_params.cp_limit_decoding = LIMIT_TO_MAIN_HEADER; // Tie decoder with decoding parameters opj_setup_decoder(dec, &ctx->dec_params); stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size); if(!stream) { av_log(avctx, AV_LOG_ERROR, \"Codestream could not be opened for reading.\\n\"); // Decode the header only image = opj_decode_with_info(dec, stream, NULL); opj_cio_close(stream); width = image->x1 - image->x0; height = image->y1 - image->y0; if(av_image_check_size(width, height, 0, avctx) < 0) { av_log(avctx, AV_LOG_ERROR, \"%dx%d dimension invalid.\\n\", width, height); goto done; avcodec_set_dimensions(avctx, width, height); switch (image->numcomps) { case 1: avctx->pix_fmt = (image->comps[0].bpp == 8) ? PIX_FMT_GRAY8 : PIX_FMT_GRAY16; break; case 2: avctx->pix_fmt = PIX_FMT_GRAY8A; break; case 3: case 4: avctx->pix_fmt = check_image_attributes(avctx, image); break; default: av_log(avctx, AV_LOG_ERROR, \"%d components unsupported.\\n\", image->numcomps); goto done; if(picture->data[0]) ff_thread_release_buffer(avctx, picture); if(ff_thread_get_buffer(avctx, picture) < 0){ av_log(avctx, AV_LOG_ERROR, \"ff_thread_get_buffer() failed\\n\"); ctx->dec_params.cp_limit_decoding = NO_LIMITATION; ctx->dec_params.cp_reduce = avctx->lowres; // Tie decoder with decoding parameters opj_setup_decoder(dec, &ctx->dec_params); stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size); if(!stream) { av_log(avctx, AV_LOG_ERROR, \"Codestream could not be opened for reading.\\n\"); // Decode the codestream image = opj_decode_with_info(dec, stream, NULL); opj_cio_close(stream); pixel_size = av_pix_fmt_descriptors[avctx->pix_fmt].comp[0].step_minus1 + 1; ispacked = libopenjpeg_ispacked(avctx->pix_fmt); switch (pixel_size) { case 1: if (ispacked) { libopenjpeg_copy_to_packed8(picture, image); } else { libopenjpeg_copyto8(picture, image); break; case 2: if (ispacked) { libopenjpeg_copy_to_packed8(picture, image); } else { libopenjpeg_copyto16(picture, image); break; case 3: case 4: if (ispacked) { libopenjpeg_copy_to_packed8(picture, image); break; case 6: case 8: if (ispacked) { libopenjpeg_copy_to_packed16(picture, image); break; default: av_log(avctx, AV_LOG_ERROR, \"unsupported pixel size %d\\n\", pixel_size); goto done; *output = ctx->image; *data_size = sizeof(AVPicture); ret = buf_size; done: opj_image_destroy(image); return ret;"} {"target": 0, "idx": 25071, "func": "void ffv1_clear_slice_state(FFV1Context *f, FFV1Context *fs) { int i, j; for (i = 0; i < f->plane_count; i++) { PlaneContext *p = &fs->plane[i]; p->interlace_bit_state[0] = 128; p->interlace_bit_state[1] = 128; if (fs->ac) { if (f->initial_states[p->quant_table_index]) { memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE * p->context_count); } else memset(p->state, 128, CONTEXT_SIZE * p->context_count); } else { for (j = 0; j < p->context_count; j++) { p->vlc_state[j].drift = 0; p->vlc_state[j].error_sum = 4; //FFMAX((RANGE + 32)/64, 2); p->vlc_state[j].bias = 0; p->vlc_state[j].count = 1; } } } }"} {"target": 1, "idx": 25077, "func": "static int av_buffersrc_add_frame_internal(AVFilterContext *ctx, AVFrame *frame, int flags) { BufferSourceContext *s = ctx->priv; AVFrame *copy; int ret; if (!frame) { s->eof = 1; return 0; } else if (s->eof) return 
AVERROR(EINVAL); if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) { switch (ctx->outputs[0]->type) { case AVMEDIA_TYPE_VIDEO: CHECK_VIDEO_PARAM_CHANGE(ctx, s, frame->width, frame->height, frame->format); break; case AVMEDIA_TYPE_AUDIO: CHECK_AUDIO_PARAM_CHANGE(ctx, s, frame->sample_rate, frame->channel_layout, frame->format); break; default: return AVERROR(EINVAL); } } if (!av_fifo_space(s->fifo) && (ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) + sizeof(copy))) < 0) return ret; if (!(copy = av_frame_alloc())) return AVERROR(ENOMEM); av_frame_move_ref(copy, frame); if ((ret = av_fifo_generic_write(s->fifo, &copy, sizeof(copy), NULL)) < 0) { av_frame_move_ref(frame, copy); av_frame_free(&copy); return ret; } if ((flags & AV_BUFFERSRC_FLAG_PUSH)) if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0) return ret; return 0; }"} {"target": 0, "idx": 25089, "func": "static int asf_read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags) { ASFContext *asf = s->priv_data; AVStream *st = s->streams[stream_index]; int64_t pos; int index; if (s->packet_size <= 0) return -1; /* Try using the protocol's read_seek if available */ if(s->pb) { int ret = avio_seek_time(s->pb, stream_index, pts, flags); if(ret >= 0) asf_reset_header(s); if (ret != AVERROR(ENOSYS)) return ret; } if (!asf->index_read) asf_build_simple_index(s, stream_index); if((asf->index_read && st->index_entries)){ index= av_index_search_timestamp(st, pts, flags); if(index >= 0) { /* find the position */ pos = st->index_entries[index].pos; /* do the seek */ av_log(s, AV_LOG_DEBUG, \"SEEKTO: %\"PRId64\"\\n\", pos); avio_seek(s->pb, pos, SEEK_SET); asf_reset_header(s); return 0; } } /* no index or seeking by index failed */ if(av_seek_frame_binary(s, stream_index, pts, flags)<0) return -1; asf_reset_header(s); return 0; }"} {"target": 0, "idx": 25090, "func": "static void decode(RA288Context *ractx, float gain, int cb_coef) { int i, j; double sumsum; float sum, buffer[5]; memmove(ractx->sp_block + 5, ractx->sp_block, 36*sizeof(*ractx->sp_block)); for (i=4; i >= 0; i--) ractx->sp_block[i] = -scalar_product_float(ractx->sp_block + i + 1, ractx->sp_lpc, 36); /* block 46 of G.728 spec */ sum = 32. 
- scalar_product_float(ractx->gain_lpc, ractx->gain_block, 10); /* block 47 of G.728 spec */ sum = av_clipf(sum, 0, 60); /* block 48 of G.728 spec */ sumsum = exp(sum * 0.1151292546497) * gain; /* pow(10.0,sum/20)*gain */ for (i=0; i < 5; i++) buffer[i] = codetable[cb_coef][i] * sumsum; sum = scalar_product_float(buffer, buffer, 5) / 5; sum = FFMAX(sum, 1); /* shift and store */ memmove(ractx->gain_block, ractx->gain_block - 1, 10 * sizeof(*ractx->gain_block)); *ractx->gain_block = 10 * log10(sum) - 32; for (i=1; i < 5; i++) for (j=i-1; j >= 0; j--) buffer[i] -= ractx->sp_lpc[i-j-1] * buffer[j]; /* output */ for (i=0; i < 5; i++) ractx->sp_block[4-i] = av_clipf(ractx->sp_block[4-i] + buffer[i], -4095, 4095); }"} {"target": 0, "idx": 25098, "func": "static void color16(WaveformContext *s, AVFrame *in, AVFrame *out, int component, int intensity, int offset, int column) { const int plane = s->desc->comp[component].plane; const int mirror = s->mirror; const int limit = s->size - 1; const uint16_t *c0_data = (const uint16_t *)in->data[plane + 0]; const uint16_t *c1_data = (const uint16_t *)in->data[(plane + 1) % s->ncomp]; const uint16_t *c2_data = (const uint16_t *)in->data[(plane + 2) % s->ncomp]; const int c0_linesize = in->linesize[ plane + 0 ] / 2; const int c1_linesize = in->linesize[(plane + 1) % s->ncomp] / 2; const int c2_linesize = in->linesize[(plane + 2) % s->ncomp] / 2; const int d0_linesize = out->linesize[ plane + 0 ] / 2; const int d1_linesize = out->linesize[(plane + 1) % s->ncomp] / 2; const int d2_linesize = out->linesize[(plane + 2) % s->ncomp] / 2; const int src_h = in->height; const int src_w = in->width; int x, y; if (s->mode) { const int d0_signed_linesize = d0_linesize * (mirror == 1 ? -1 : 1); const int d1_signed_linesize = d1_linesize * (mirror == 1 ? -1 : 1); const int d2_signed_linesize = d2_linesize * (mirror == 1 ? -1 : 1); uint16_t *d0_data = (uint16_t *)out->data[plane] + offset * d0_linesize; uint16_t *d1_data = (uint16_t *)out->data[(plane + 1) % s->ncomp] + offset * d1_linesize; uint16_t *d2_data = (uint16_t *)out->data[(plane + 2) % s->ncomp] + offset * d2_linesize; uint16_t * const d0_bottom_line = d0_data + d0_linesize * (s->size - 1); uint16_t * const d0 = (mirror ? d0_bottom_line : d0_data); uint16_t * const d1_bottom_line = d1_data + d1_linesize * (s->size - 1); uint16_t * const d1 = (mirror ? d1_bottom_line : d1_data); uint16_t * const d2_bottom_line = d2_data + d2_linesize * (s->size - 1); uint16_t * const d2 = (mirror ? 
d2_bottom_line : d2_data); for (y = 0; y < src_h; y++) { for (x = 0; x < src_w; x++) { const int c0 = FFMIN(c0_data[x], limit); const int c1 = c1_data[x]; const int c2 = c2_data[x]; *(d0 + d0_signed_linesize * c0 + x) = c0; *(d1 + d1_signed_linesize * c0 + x) = c1; *(d2 + d2_signed_linesize * c0 + x) = c2; } c0_data += c0_linesize; c1_data += c1_linesize; c2_data += c2_linesize; d0_data += d0_linesize; d1_data += d1_linesize; d2_data += d2_linesize; } } else { uint16_t *d0_data = (uint16_t *)out->data[plane] + offset; uint16_t *d1_data = (uint16_t *)out->data[(plane + 1) % s->ncomp] + offset; uint16_t *d2_data = (uint16_t *)out->data[(plane + 2) % s->ncomp] + offset; if (mirror) { d0_data += s->size - 1; d1_data += s->size - 1; d2_data += s->size - 1; } for (y = 0; y < src_h; y++) { for (x = 0; x < src_w; x++) { const int c0 = FFMIN(c0_data[x], limit); const int c1 = c1_data[x]; const int c2 = c2_data[x]; if (mirror) { *(d0_data - c0) = c0; *(d1_data - c0) = c1; *(d2_data - c0) = c2; } else { *(d0_data + c0) = c0; *(d1_data + c0) = c1; *(d2_data + c0) = c2; } } c0_data += c0_linesize; c1_data += c1_linesize; c2_data += c2_linesize; d0_data += d0_linesize; d1_data += d1_linesize; d2_data += d2_linesize; } } envelope16(s, out, plane, plane); }"} {"target": 1, "idx": 25123, "func": "static int get_uint8_equal(QEMUFile *f, void *pv, size_t size) { uint8_t *v = pv; uint8_t v2; qemu_get_8s(f, &v2); if (*v == v2) { return 0; } return -EINVAL; }"} {"target": 1, "idx": 25146, "func": "static void test_visitor_in_intList(TestInputVisitorData *data, const void *unused) { int64_t value[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20}; int16List *res = NULL, *tmp; Error *err = NULL; Visitor *v; int i = 0; v = visitor_input_test_init(data, \"1,2,0,2-4,20,5-9,1-8\"); visit_type_int16List(v, NULL, &res, &error_abort); tmp = res; while (i < sizeof(value) / sizeof(value[0])) { g_assert(tmp); g_assert_cmpint(tmp->value, ==, value[i++]); tmp = tmp->next; } g_assert(!tmp); tmp = res; while (tmp) { res = res->next; g_free(tmp); tmp = res; } visitor_input_teardown(data, unused); v = visitor_input_test_init(data, \"not an int list\"); visit_type_int16List(v, NULL, &res, &err); /* FIXME fix the visitor, then error_free_or_abort(&err) here */ }"} {"target": 1, "idx": 25157, "func": "gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb, int search_pc) { uint16_t *gen_opc_end; uint32_t pc_start; unsigned int insn_len; int j, lj; struct DisasContext *dc = &ctx; uint32_t next_page_start; pc_start = tb->pc; dc->env = env; dc->tb = tb; gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; dc->is_jmp = DISAS_NEXT; dc->pc = pc_start; dc->singlestep_enabled = env->singlestep_enabled; dc->flagx_live = 0; dc->flags_x = 0; next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; lj = -1; do { check_breakpoint(env, dc); if (dc->is_jmp == DISAS_JUMP) goto done; if (search_pc) { j = gen_opc_ptr - gen_opc_buf; if (lj < j) { lj++; while (lj < j) gen_opc_instr_start[lj++] = 0; } gen_opc_pc[lj] = dc->pc; gen_opc_instr_start[lj] = 1; } insn_len = cris_decoder(dc); STATS(gen_op_exec_insn()); dc->pc += insn_len; if (!dc->flagx_live || (dc->flagx_live && !(dc->cc_op == CC_OP_FLAGS && dc->flags_x))) { gen_movl_T0_preg[PR_CCS](); gen_op_andl_T0_im(~X_FLAG); gen_movl_preg_T0[PR_CCS](); dc->flagx_live = 1; dc->flags_x = 0; } /* Check for delayed branches here. If we do it before actually genereating any host code, the simulator will just loop doing nothing for on this program location. 
*/ if (dc->delayed_branch) { dc->delayed_branch--; if (dc->delayed_branch == 0) { if (dc->bcc == CC_A) { gen_op_jmp1 (); dc->is_jmp = DISAS_UPDATE; } else { /* Conditional jmp. */ gen_op_cc_jmp (dc->delayed_pc, dc->pc); dc->is_jmp = DISAS_UPDATE; } } } if (env->singlestep_enabled) break; } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && dc->pc < next_page_start); if (!dc->is_jmp) { gen_op_movl_T0_im((long)dc->pc); gen_op_movl_pc_T0(); } cris_evaluate_flags (dc); done: if (__builtin_expect(env->singlestep_enabled, 0)) { gen_op_debug(); } else { switch(dc->is_jmp) { case DISAS_NEXT: gen_goto_tb(dc, 1, dc->pc); break; default: case DISAS_JUMP: case DISAS_UPDATE: /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; case DISAS_TB_JUMP: /* nothing more to generate */ break; } } *gen_opc_ptr = INDEX_op_end; if (search_pc) { j = gen_opc_ptr - gen_opc_buf; lj++; while (lj <= j) gen_opc_instr_start[lj++] = 0; } else { tb->size = dc->pc - pc_start; } #ifdef DEBUG_DISAS if (loglevel & CPU_LOG_TB_IN_ASM) { fprintf(logfile, \"--------------\\n\"); fprintf(logfile, \"IN: %s\\n\", lookup_symbol(pc_start)); target_disas(logfile, pc_start, dc->pc + 4 - pc_start, 0); fprintf(logfile, \"\\n\"); } #endif return 0; }"} {"target": 1, "idx": 25184, "func": "static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, unsigned int width, unsigned int height, int lumStride, int chromStride, int dstStride, int vertLumPerChroma) { unsigned y; const unsigned chromWidth= width>>1; for(y=0; y= 64 int i; uint64_t *ldst = (uint64_t *) dst; const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; for(i = 0; i < chromWidth; i += 2){ uint64_t k, l; k = yc[0] + (uc[0] << 8) + (yc[1] << 16) + (vc[0] << 24); l = yc[2] + (uc[1] << 8) + (yc[3] << 16) + (vc[1] << 24); *ldst++ = k + (l << 32); yc += 4; uc += 2; vc += 2; } #else int i, *idst = (int32_t *) dst; const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; for(i = 0; i < chromWidth; i++){ #ifdef WORDS_BIGENDIAN *idst++ = (yc[0] << 24)+ (uc[0] << 16) + (yc[1] << 8) + (vc[0] << 0); #else *idst++ = yc[0] + (uc[0] << 8) + (yc[1] << 16) + (vc[0] << 24); #endif yc += 2; uc++; vc++; } #endif #endif if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) { usrc += chromStride; vsrc += chromStride; } ysrc += lumStride; dst += dstStride; } #ifdef HAVE_MMX asm( EMMS\" \\n\\t\" SFENCE\" \\n\\t\" :::\"memory\"); #endif }"} {"target": 1, "idx": 25194, "func": "static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, DCTELEM *block) { int i; int dc = block[0]; const uint8_t *cm; dc = (17 * dc + 4) >> 3; dc = (17 * dc + 64) >> 7; cm = ff_cropTbl + MAX_NEG_CROP + dc; for(i = 0; i < 4; i++){ dest[0] = cm[dest[0]]; dest[1] = cm[dest[1]]; dest[2] = cm[dest[2]]; dest[3] = cm[dest[3]]; dest += linesize; } }"} {"target": 0, "idx": 25197, "func": "static void read_sbr_noise(SpectralBandReplication *sbr, GetBitContext *gb, SBRData *ch_data, int ch) { int i, j; VLC_TYPE (*t_huff)[2], (*f_huff)[2]; int t_lav, f_lav; int delta = (ch == 1 && sbr->bs_coupling == 1) + 1; if (sbr->bs_coupling && ch) { t_huff = vlc_sbr[T_HUFFMAN_NOISE_BAL_3_0DB].table; t_lav = vlc_sbr_lav[T_HUFFMAN_NOISE_BAL_3_0DB]; f_huff = vlc_sbr[F_HUFFMAN_ENV_BAL_3_0DB].table; f_lav = vlc_sbr_lav[F_HUFFMAN_ENV_BAL_3_0DB]; } else { t_huff = vlc_sbr[T_HUFFMAN_NOISE_3_0DB].table; t_lav = vlc_sbr_lav[T_HUFFMAN_NOISE_3_0DB]; f_huff = vlc_sbr[F_HUFFMAN_ENV_3_0DB].table; f_lav = vlc_sbr_lav[F_HUFFMAN_ENV_3_0DB]; } #if USE_FIXED for (i = 0; i < 
ch_data->bs_num_noise; i++) { if (ch_data->bs_df_noise[i]) { for (j = 0; j < sbr->n_q; j++) ch_data->noise_facs[i + 1][j].mant = ch_data->noise_facs[i][j].mant + delta * (get_vlc2(gb, t_huff, 9, 2) - t_lav); } else { ch_data->noise_facs[i + 1][0].mant = delta * get_bits(gb, 5); // bs_noise_start_value_balance or bs_noise_start_value_level for (j = 1; j < sbr->n_q; j++) ch_data->noise_facs[i + 1][j].mant = ch_data->noise_facs[i + 1][j - 1].mant + delta * (get_vlc2(gb, f_huff, 9, 3) - f_lav); } } #else for (i = 0; i < ch_data->bs_num_noise; i++) { if (ch_data->bs_df_noise[i]) { for (j = 0; j < sbr->n_q; j++) ch_data->noise_facs[i + 1][j] = ch_data->noise_facs[i][j] + delta * (get_vlc2(gb, t_huff, 9, 2) - t_lav); } else { ch_data->noise_facs[i + 1][0] = delta * get_bits(gb, 5); // bs_noise_start_value_balance or bs_noise_start_value_level for (j = 1; j < sbr->n_q; j++) ch_data->noise_facs[i + 1][j] = ch_data->noise_facs[i + 1][j - 1] + delta * (get_vlc2(gb, f_huff, 9, 3) - f_lav); } } #endif /* USE_FIXED */ //assign 0th elements of noise_facs from last elements memcpy(ch_data->noise_facs[0], ch_data->noise_facs[ch_data->bs_num_noise], sizeof(ch_data->noise_facs[0])); }"} {"target": 0, "idx": 25206, "func": "void bdrv_io_limits_disable(BlockDriverState *bs) { bs->io_limits_enabled = false; bdrv_start_throttled_reqs(bs); throttle_destroy(&bs->throttle_state); }"} {"target": 0, "idx": 25219, "func": "static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { BlockDriver *drv = bs->drv; BdrvTrackedRequest req; uint64_t align = bdrv_get_align(bs); uint8_t *head_buf = NULL; uint8_t *tail_buf = NULL; QEMUIOVector local_qiov; bool use_local_qiov = false; int ret; if (!drv) { return -ENOMEDIUM; } ret = bdrv_check_byte_request(bs, offset, bytes); if (ret < 0) { return ret; } if (bs->copy_on_read) { flags |= BDRV_REQ_COPY_ON_READ; } /* throttling disk I/O */ if (bs->io_limits_enabled) { bdrv_io_limits_intercept(bs, bytes, false); } /* Align read if necessary by padding qiov */ if (offset & (align - 1)) { head_buf = qemu_blockalign(bs, align); qemu_iovec_init(&local_qiov, qiov->niov + 2); qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1)); qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); use_local_qiov = true; bytes += offset & (align - 1); offset = offset & ~(align - 1); } if ((offset + bytes) & (align - 1)) { if (!use_local_qiov) { qemu_iovec_init(&local_qiov, qiov->niov + 1); qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size); use_local_qiov = true; } tail_buf = qemu_blockalign(bs, align); qemu_iovec_add(&local_qiov, tail_buf, align - ((offset + bytes) & (align - 1))); bytes = ROUND_UP(bytes, align); } tracked_request_begin(&req, bs, offset, bytes, false); ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align, use_local_qiov ? 
&local_qiov : qiov, flags); tracked_request_end(&req); if (use_local_qiov) { qemu_iovec_destroy(&local_qiov); qemu_vfree(head_buf); qemu_vfree(tail_buf); } return ret; }"} {"target": 0, "idx": 25232, "func": "void kvmppc_update_sdr1(target_ulong sdr1) { CPUState *cs; CPU_FOREACH(cs) { run_on_cpu(cs, kvmppc_pivot_hpt_cpu, RUN_ON_CPU_TARGET_PTR(sdr1)); } }"} {"target": 0, "idx": 25234, "func": "static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq) { VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); VFIOINTp *intp; if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() || !vdev->irqfd_allowed) { goto fail_irqfd; } QLIST_FOREACH(intp, &vdev->intp_list, next) { if (intp->qemuirq == irq) { break; } } assert(intp); if (kvm_irqchip_add_irqfd_notifier(kvm_state, &intp->interrupt, &intp->unmask, irq) < 0) { goto fail_irqfd; } if (vfio_set_trigger_eventfd(intp, NULL) < 0) { goto fail_vfio; } if (vfio_set_resample_eventfd(intp) < 0) { goto fail_vfio; } intp->kvm_accel = true; trace_vfio_platform_start_irqfd_injection(intp->pin, event_notifier_get_fd(&intp->interrupt), event_notifier_get_fd(&intp->unmask)); return; fail_vfio: kvm_irqchip_remove_irqfd_notifier(kvm_state, &intp->interrupt, irq); error_report(\"vfio: failed to start eventfd signaling for IRQ %d: %m\", intp->pin); abort(); fail_irqfd: vfio_start_eventfd_injection(sbdev, irq); return; }"} {"target": 0, "idx": 25238, "func": "static void omap_mpuio_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct omap_mpuio_s *s = (struct omap_mpuio_s *) opaque; int offset = addr & OMAP_MPUI_REG_MASK; uint16_t diff; int ln; if (size != 2) { return omap_badwidth_write16(opaque, addr, value); } switch (offset) { case 0x04: /* OUTPUT_REG */ diff = (s->outputs ^ value) & ~s->dir; s->outputs = value; while ((ln = ffs(diff))) { ln --; if (s->handler[ln]) qemu_set_irq(s->handler[ln], (value >> ln) & 1); diff &= ~(1 << ln); } break; case 0x08: /* IO_CNTL */ diff = s->outputs & (s->dir ^ value); s->dir = value; value = s->outputs & ~s->dir; while ((ln = ffs(diff))) { ln --; if (s->handler[ln]) qemu_set_irq(s->handler[ln], (value >> ln) & 1); diff &= ~(1 << ln); } break; case 0x14: /* KBC_REG */ s->cols = value; omap_mpuio_kbd_update(s); break; case 0x18: /* GPIO_EVENT_MODE_REG */ s->event = value & 0x1f; break; case 0x1c: /* GPIO_INT_EDGE_REG */ s->edge = value; break; case 0x28: /* KBD_MASKIT */ s->kbd_mask = value & 1; omap_mpuio_kbd_update(s); break; case 0x2c: /* GPIO_MASKIT */ s->mask = value; break; case 0x30: /* GPIO_DEBOUNCING_REG */ s->debounce = value & 0x1ff; break; case 0x00: /* INPUT_LATCH */ case 0x10: /* KBR_LATCH */ case 0x20: /* KBD_INT */ case 0x24: /* GPIO_INT */ case 0x34: /* GPIO_LATCH_REG */ OMAP_RO_REG(addr); return; default: OMAP_BAD_REG(addr); return; } }"} {"target": 1, "idx": 25256, "func": "int net_init_tap(const NetClientOptions *opts, const char *name, NetClientState *peer, Error **errp) { /* FIXME error_setg(errp, ...) on failure */ const NetdevTapOptions *tap; int fd, vnet_hdr = 0, i = 0, queues; /* for the no-fd, no-helper case */ const char *script = NULL; /* suppress wrong \"uninit'd use\" gcc warning */ const char *downscript = NULL; Error *err = NULL; const char *vhostfdname; char ifname[128]; assert(opts->kind == NET_CLIENT_OPTIONS_KIND_TAP); tap = opts->tap; queues = tap->has_queues ? tap->queues : 1; vhostfdname = tap->has_vhostfd ? tap->vhostfd : NULL; /* QEMU vlans does not support multiqueue tap, in this case peer is set. * For -netdev, peer is always NULL. 
*/ if (peer && (tap->has_queues || tap->has_fds || tap->has_vhostfds)) { error_report(\"Multiqueue tap cannot be used with QEMU vlans\"); return -1; } if (tap->has_fd) { if (tap->has_ifname || tap->has_script || tap->has_downscript || tap->has_vnet_hdr || tap->has_helper || tap->has_queues || tap->has_fds || tap->has_vhostfds) { error_report(\"ifname=, script=, downscript=, vnet_hdr=, \" \"helper=, queues=, fds=, and vhostfds= \" \"are invalid with fd=\"); return -1; } fd = monitor_fd_param(cur_mon, tap->fd, &err); if (fd == -1) { error_report_err(err); return -1; } fcntl(fd, F_SETFL, O_NONBLOCK); vnet_hdr = tap_probe_vnet_hdr(fd); net_init_tap_one(tap, peer, \"tap\", name, NULL, script, downscript, vhostfdname, vnet_hdr, fd, &err); if (err) { error_report_err(err); return -1; } } else if (tap->has_fds) { char *fds[MAX_TAP_QUEUES]; char *vhost_fds[MAX_TAP_QUEUES]; int nfds, nvhosts; if (tap->has_ifname || tap->has_script || tap->has_downscript || tap->has_vnet_hdr || tap->has_helper || tap->has_queues || tap->has_vhostfd) { error_report(\"ifname=, script=, downscript=, vnet_hdr=, \" \"helper=, queues=, and vhostfd= \" \"are invalid with fds=\"); return -1; } nfds = get_fds(tap->fds, fds, MAX_TAP_QUEUES); if (tap->has_vhostfds) { nvhosts = get_fds(tap->vhostfds, vhost_fds, MAX_TAP_QUEUES); if (nfds != nvhosts) { error_report(\"The number of fds passed does not match the \" \"number of vhostfds passed\"); return -1; } } for (i = 0; i < nfds; i++) { fd = monitor_fd_param(cur_mon, fds[i], &err); if (fd == -1) { error_report_err(err); return -1; } fcntl(fd, F_SETFL, O_NONBLOCK); if (i == 0) { vnet_hdr = tap_probe_vnet_hdr(fd); } else if (vnet_hdr != tap_probe_vnet_hdr(fd)) { error_report(\"vnet_hdr not consistent across given tap fds\"); return -1; } net_init_tap_one(tap, peer, \"tap\", name, ifname, script, downscript, tap->has_vhostfds ? vhost_fds[i] : NULL, vnet_hdr, fd, &err); if (err) { error_report_err(err); return -1; } } } else if (tap->has_helper) { if (tap->has_ifname || tap->has_script || tap->has_downscript || tap->has_vnet_hdr || tap->has_queues || tap->has_vhostfds) { error_report(\"ifname=, script=, downscript=, and vnet_hdr= \" \"queues=, and vhostfds= are invalid with helper=\"); return -1; } fd = net_bridge_run_helper(tap->helper, DEFAULT_BRIDGE_INTERFACE, errp); if (fd == -1) { return -1; } fcntl(fd, F_SETFL, O_NONBLOCK); vnet_hdr = tap_probe_vnet_hdr(fd); net_init_tap_one(tap, peer, \"bridge\", name, ifname, script, downscript, vhostfdname, vnet_hdr, fd, &err); if (err) { error_report_err(err); close(fd); return -1; } } else { if (tap->has_vhostfds) { error_report(\"vhostfds= is invalid if fds= wasn't specified\"); return -1; } script = tap->has_script ? tap->script : DEFAULT_NETWORK_SCRIPT; downscript = tap->has_downscript ? tap->downscript : DEFAULT_NETWORK_DOWN_SCRIPT; if (tap->has_ifname) { pstrcpy(ifname, sizeof ifname, tap->ifname); } else { ifname[0] = '\\0'; } for (i = 0; i < queues; i++) { fd = net_tap_init(tap, &vnet_hdr, i >= 1 ? \"no\" : script, ifname, sizeof ifname, queues > 1, &err); if (fd == -1) { error_report_err(err); return -1; } if (queues > 1 && i == 0 && !tap->has_ifname) { if (tap_fd_get_ifname(fd, ifname)) { error_report(\"Fail to get ifname\"); close(fd); return -1; } } net_init_tap_one(tap, peer, \"tap\", name, ifname, i >= 1 ? \"no\" : script, i >= 1 ? 
\"no\" : downscript, vhostfdname, vnet_hdr, fd, &err); if (err) { error_report_err(err); close(fd); return -1; } } } return 0; }"} {"target": 0, "idx": 25266, "func": "int av_image_fill_pointers(uint8_t *data[4], enum PixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4]) { int i, total_size, size[4], has_plane[4]; const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; memset(data , 0, sizeof(data[0])*4); memset(size , 0, sizeof(size)); memset(has_plane, 0, sizeof(has_plane)); if (desc->flags & PIX_FMT_HWACCEL) return AVERROR(EINVAL); data[0] = ptr; size[0] = linesizes[0] * height; if (desc->flags & PIX_FMT_PAL) { size[0] = (size[0] + 3) & ~3; data[1] = ptr + size[0]; /* palette is stored here as 256 32 bits words */ return size[0] + 256 * 4; } for (i = 0; i < 4; i++) has_plane[desc->comp[i].plane] = 1; total_size = size[0]; for (i = 1; has_plane[i] && i < 4; i++) { int h, s = (i == 1 || i == 2) ? desc->log2_chroma_h : 0; data[i] = data[i-1] + size[i-1]; h = (height + (1 << s) - 1) >> s; size[i] = h * linesizes[i]; total_size += size[i]; } return total_size; }"} {"target": 1, "idx": 25271, "func": "static int pcm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *src = avpkt->data; int buf_size = avpkt->size; PCMDecode *s = avctx->priv_data; AVFrame *frame = data; int sample_size, c, n, ret, samples_per_block; uint8_t *samples; int32_t *dst_int32_t; sample_size = av_get_bits_per_sample(avctx->codec_id) / 8; /* av_get_bits_per_sample returns 0 for AV_CODEC_ID_PCM_DVD */ samples_per_block = 1; if (avctx->codec->id == AV_CODEC_ID_PCM_DVD) { if (avctx->bits_per_coded_sample != 20 && avctx->bits_per_coded_sample != 24) { av_log(avctx, AV_LOG_ERROR, \"PCM DVD unsupported sample depth\\n\"); return AVERROR(EINVAL); } /* 2 samples are interleaved per block in PCM_DVD */ samples_per_block = 2; sample_size = avctx->bits_per_coded_sample * 2 / 8; } else if (avctx->codec_id == AV_CODEC_ID_PCM_LXF) { /* we process 40-bit blocks per channel for LXF */ samples_per_block = 2; sample_size = 5; } if (sample_size == 0) { av_log(avctx, AV_LOG_ERROR, \"Invalid sample_size\\n\"); return AVERROR(EINVAL); } n = avctx->channels * sample_size; if (n && buf_size % n) { if (buf_size < n) { av_log(avctx, AV_LOG_ERROR, \"invalid PCM packet\\n\"); return -1; } else buf_size -= buf_size % n; } n = buf_size / sample_size; /* get output buffer */ frame->nb_samples = n * samples_per_block / avctx->channels; if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, \"get_buffer() failed\\n\"); return ret; } samples = frame->data[0]; switch (avctx->codec->id) { case AV_CODEC_ID_PCM_U32LE: DECODE(32, le32, src, samples, n, 0, 0x80000000) break; case AV_CODEC_ID_PCM_U32BE: DECODE(32, be32, src, samples, n, 0, 0x80000000) break; case AV_CODEC_ID_PCM_S24LE: DECODE(32, le24, src, samples, n, 8, 0) break; case AV_CODEC_ID_PCM_S24BE: DECODE(32, be24, src, samples, n, 8, 0) break; case AV_CODEC_ID_PCM_U24LE: DECODE(32, le24, src, samples, n, 8, 0x800000) break; case AV_CODEC_ID_PCM_U24BE: DECODE(32, be24, src, samples, n, 8, 0x800000) break; case AV_CODEC_ID_PCM_S24DAUD: for (; n > 0; n--) { uint32_t v = bytestream_get_be24(&src); v >>= 4; // sync flags are here AV_WN16A(samples, ff_reverse[(v >> 8) & 0xff] + (ff_reverse[v & 0xff] << 8)); samples += 2; } break; case AV_CODEC_ID_PCM_S16LE_PLANAR: { int av_unused n2; n /= avctx->channels; for (c = 0; c < avctx->channels; c++) { samples = frame->extended_data[c]; #if HAVE_BIGENDIAN n2 = n; 
DECODE(16, le16, src, samples, n2, 0, 0) #else memcpy(samples, src, n * 2); src += n * 2; #endif } break; } case AV_CODEC_ID_PCM_U16LE: DECODE(16, le16, src, samples, n, 0, 0x8000) break; case AV_CODEC_ID_PCM_U16BE: DECODE(16, be16, src, samples, n, 0, 0x8000) break; case AV_CODEC_ID_PCM_S8: for (; n > 0; n--) *samples++ = *src++ + 128; break; #if HAVE_BIGENDIAN case AV_CODEC_ID_PCM_F64LE: DECODE(64, le64, src, samples, n, 0, 0) break; case AV_CODEC_ID_PCM_S32LE: case AV_CODEC_ID_PCM_F32LE: DECODE(32, le32, src, samples, n, 0, 0) break; case AV_CODEC_ID_PCM_S16LE: DECODE(16, le16, src, samples, n, 0, 0) break; case AV_CODEC_ID_PCM_F64BE: case AV_CODEC_ID_PCM_F32BE: case AV_CODEC_ID_PCM_S32BE: case AV_CODEC_ID_PCM_S16BE: #else case AV_CODEC_ID_PCM_F64BE: DECODE(64, be64, src, samples, n, 0, 0) break; case AV_CODEC_ID_PCM_F32BE: case AV_CODEC_ID_PCM_S32BE: DECODE(32, be32, src, samples, n, 0, 0) break; case AV_CODEC_ID_PCM_S16BE: DECODE(16, be16, src, samples, n, 0, 0) break; case AV_CODEC_ID_PCM_F64LE: case AV_CODEC_ID_PCM_F32LE: case AV_CODEC_ID_PCM_S32LE: case AV_CODEC_ID_PCM_S16LE: #endif /* HAVE_BIGENDIAN */ case AV_CODEC_ID_PCM_U8: memcpy(samples, src, n * sample_size); break; case AV_CODEC_ID_PCM_ZORK: for (; n > 0; n--) { int v = *src++; if (v < 128) v = 128 - v; *samples++ = v; } break; case AV_CODEC_ID_PCM_ALAW: case AV_CODEC_ID_PCM_MULAW: for (; n > 0; n--) { AV_WN16A(samples, s->table[*src++]); samples += 2; } break; case AV_CODEC_ID_PCM_DVD: { const uint8_t *src8; dst_int32_t = (int32_t *)frame->data[0]; n /= avctx->channels; switch (avctx->bits_per_coded_sample) { case 20: while (n--) { c = avctx->channels; src8 = src + 4 * c; while (c--) { *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8 & 0xf0) << 8); *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++ & 0x0f) << 12); } src = src8; } break; case 24: while (n--) { c = avctx->channels; src8 = src + 4 * c; while (c--) { *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++) << 8); *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++) << 8); } src = src8; } break; } break; } case AV_CODEC_ID_PCM_LXF: { int i; n /= avctx->channels; for (c = 0; c < avctx->channels; c++) { dst_int32_t = (int32_t *)frame->extended_data[c]; for (i = 0; i < n; i++) { // extract low 20 bits and expand to 32 bits *dst_int32_t++ = (src[2] << 28) | (src[1] << 20) | (src[0] << 12) | ((src[2] & 0x0F) << 8) | src[1]; // extract high 20 bits and expand to 32 bits *dst_int32_t++ = (src[4] << 24) | (src[3] << 16) | ((src[2] & 0xF0) << 8) | (src[4] << 4) | (src[3] >> 4); src += 5; } } break; } default: return -1; } *got_frame_ptr = 1; return buf_size; }"} {"target": 0, "idx": 25278, "func": "static void proxy_rewinddir(FsContext *ctx, V9fsFidOpenState *fs) { rewinddir(fs->dir); }"} {"target": 0, "idx": 25280, "func": "static int pci_ivshmem_init(PCIDevice *dev) { IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev); uint8_t *pci_conf; if (s->sizearg == NULL) s->ivshmem_size = 4 << 20; /* 4 MB default */ else { s->ivshmem_size = ivshmem_get_size(s); } register_savevm(&s->dev.qdev, \"ivshmem\", 0, 0, ivshmem_save, ivshmem_load, dev); /* IRQFD requires MSI */ if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) && !ivshmem_has_feature(s, IVSHMEM_MSI)) { fprintf(stderr, \"ivshmem: ioeventfd/irqfd requires MSI\\n\"); exit(1); } /* check that role is reasonable */ if (s->role) { if (strncmp(s->role, \"peer\", 5) == 0) { s->role_val = IVSHMEM_PEER; } else if (strncmp(s->role, \"master\", 7) == 0) { s->role_val = IVSHMEM_MASTER; } else { 
fprintf(stderr, \"ivshmem: 'role' must be 'peer' or 'master'\\n\"); exit(1); } } else { s->role_val = IVSHMEM_MASTER; /* default */ } if (s->role_val == IVSHMEM_PEER) { register_device_unmigratable(&s->dev.qdev, \"ivshmem\", s); } pci_conf = s->dev.config; pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT_QUMRANET); pci_conf[0x02] = 0x10; pci_conf[0x03] = 0x11; pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY; pci_config_set_class(pci_conf, PCI_CLASS_MEMORY_RAM); pci_conf[PCI_HEADER_TYPE] = PCI_HEADER_TYPE_NORMAL; pci_config_set_interrupt_pin(pci_conf, 1); s->shm_pci_addr = 0; s->ivshmem_offset = 0; s->shm_fd = 0; s->ivshmem_mmio_io_addr = cpu_register_io_memory(ivshmem_mmio_read, ivshmem_mmio_write, s, DEVICE_NATIVE_ENDIAN); /* region for registers*/ pci_register_bar(&s->dev, 0, IVSHMEM_REG_BAR_SIZE, PCI_BASE_ADDRESS_SPACE_MEMORY, ivshmem_mmio_map); if ((s->server_chr != NULL) && (strncmp(s->server_chr->filename, \"unix:\", 5) == 0)) { /* if we get a UNIX socket as the parameter we will talk * to the ivshmem server to receive the memory region */ if (s->shmobj != NULL) { fprintf(stderr, \"WARNING: do not specify both 'chardev' \" \"and 'shm' with ivshmem\\n\"); } IVSHMEM_DPRINTF(\"using shared memory server (socket = %s)\\n\", s->server_chr->filename); if (ivshmem_has_feature(s, IVSHMEM_MSI)) { ivshmem_setup_msi(s); } /* we allocate enough space for 16 guests and grow as needed */ s->nb_peers = 16; s->vm_id = -1; /* allocate/initialize space for interrupt handling */ s->peers = qemu_mallocz(s->nb_peers * sizeof(Peer)); pci_register_bar(&s->dev, 2, s->ivshmem_size, PCI_BASE_ADDRESS_SPACE_MEMORY, ivshmem_map); s->eventfd_chr = qemu_mallocz(s->vectors * sizeof(CharDriverState *)); qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive, ivshmem_read, ivshmem_event, s); } else { /* just map the file immediately, we're not using a server */ int fd; if (s->shmobj == NULL) { fprintf(stderr, \"Must specify 'chardev' or 'shm' to ivshmem\\n\"); } IVSHMEM_DPRINTF(\"using shm_open (shm object = %s)\\n\", s->shmobj); /* try opening with O_EXCL and if it succeeds zero the memory * by truncating to 0 */ if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR|O_EXCL, S_IRWXU|S_IRWXG|S_IRWXO)) > 0) { /* truncate file to length PCI device's memory */ if (ftruncate(fd, s->ivshmem_size) != 0) { fprintf(stderr, \"ivshmem: could not truncate shared file\\n\"); } } else if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO)) < 0) { fprintf(stderr, \"ivshmem: could not open shared file\\n\"); exit(-1); } if (check_shm_size(s, fd) == -1) { exit(-1); } create_shared_memory_BAR(s, fd); } return 0; }"} {"target": 1, "idx": 25306, "func": "void ff_thread_await_progress(ThreadFrame *f, int n, int field) { PerThreadContext *p; atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL; if (!progress || atomic_load_explicit(&progress[field], memory_order_acquire) >= n) return; p = f->owner[field]->internal->thread_ctx; pthread_mutex_lock(&p->progress_mutex); if (f->owner[field]->debug&FF_DEBUG_THREADS) av_log(f->owner[field], AV_LOG_DEBUG, \"thread awaiting %d field %d from %p\\n\", n, field, progress); while (atomic_load_explicit(&progress[field], memory_order_relaxed) < n) pthread_cond_wait(&p->progress_cond, &p->progress_mutex); pthread_mutex_unlock(&p->progress_mutex); }"} {"target": 0, "idx": 25324, "func": "static void gen_compute_eflags_c(DisasContext *s, TCGv reg, bool inv) { TCGv t0, t1; int size; switch (s->cc_op) { case CC_OP_SUBB ... 
CC_OP_SUBQ: /* (DATA_TYPE)(CC_DST + CC_SRC) < (DATA_TYPE)CC_SRC */ size = s->cc_op - CC_OP_SUBB; t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); /* If no temporary was used, be careful not to alias t1 and t0. */ t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg; tcg_gen_add_tl(t0, cpu_cc_dst, cpu_cc_src); gen_extu(size, t0); goto add_sub; case CC_OP_ADDB ... CC_OP_ADDQ: /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */ size = s->cc_op - CC_OP_ADDB; t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); t0 = gen_ext_tl(reg, cpu_cc_dst, size, false); add_sub: tcg_gen_setcond_tl(inv ? TCG_COND_GEU : TCG_COND_LTU, reg, t0, t1); inv = false; break; case CC_OP_SBBB ... CC_OP_SBBQ: /* (DATA_TYPE)(CC_DST + CC_SRC + 1) <= (DATA_TYPE)CC_SRC */ size = s->cc_op - CC_OP_SBBB; t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); if (TCGV_EQUAL(t1, reg) && TCGV_EQUAL(reg, cpu_cc_src)) { tcg_gen_mov_tl(cpu_tmp0, cpu_cc_src); t1 = cpu_tmp0; } tcg_gen_add_tl(reg, cpu_cc_dst, cpu_cc_src); tcg_gen_addi_tl(reg, reg, 1); gen_extu(size, reg); t0 = reg; goto adc_sbb; case CC_OP_ADCB ... CC_OP_ADCQ: /* (DATA_TYPE)CC_DST <= (DATA_TYPE)CC_SRC */ size = s->cc_op - CC_OP_ADCB; t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false); t0 = gen_ext_tl(reg, cpu_cc_dst, size, false); adc_sbb: tcg_gen_setcond_tl(inv ? TCG_COND_GTU : TCG_COND_LEU, reg, t0, t1); inv = false; break; case CC_OP_LOGICB ... CC_OP_LOGICQ: tcg_gen_movi_tl(reg, 0); break; case CC_OP_INCB ... CC_OP_INCQ: case CC_OP_DECB ... CC_OP_DECQ: if (inv) { tcg_gen_xori_tl(reg, cpu_cc_src, 1); } else { tcg_gen_mov_tl(reg, cpu_cc_src); } inv = false; break; case CC_OP_SHLB ... CC_OP_SHLQ: /* (CC_SRC >> (DATA_BITS - 1)) & 1 */ size = s->cc_op - CC_OP_SHLB; tcg_gen_shri_tl(reg, cpu_cc_src, (8 << size) - 1); tcg_gen_andi_tl(reg, reg, 1); break; case CC_OP_MULB ... CC_OP_MULQ: tcg_gen_setcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, reg, cpu_cc_src, 0); inv = false; break; case CC_OP_EFLAGS: case CC_OP_SARB ... CC_OP_SARQ: /* CC_SRC & 1 */ tcg_gen_andi_tl(reg, cpu_cc_src, 1); break; default: /* The need to compute only C from CC_OP_DYNAMIC is important in efficiently implementing e.g. INC at the start of a TB. 
*/ gen_update_cc_op(s); gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_env, cpu_cc_op); tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32); break; } if (inv) { tcg_gen_xori_tl(reg, reg, 1); } }"} {"target": 0, "idx": 25325, "func": "static void test_struct(gconstpointer opaque) { TestArgs *args = (TestArgs *) opaque; const SerializeOps *ops = args->ops; TestStruct *ts = struct_create(); TestStruct *ts_copy = NULL; Error *err = NULL; void *serialize_data; ops->serialize(ts, &serialize_data, visit_struct, &err); ops->deserialize((void **)&ts_copy, serialize_data, visit_struct, &err); g_assert(err == NULL); struct_compare(ts, ts_copy); struct_cleanup(ts); struct_cleanup(ts_copy); ops->cleanup(serialize_data); g_free(args); }"} {"target": 0, "idx": 25347, "func": "AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_size, int phase_shift, int linear, double cutoff){ AVResampleContext *c= av_mallocz(sizeof(AVResampleContext)); double factor= FFMIN(out_rate * cutoff / in_rate, 1.0); int phase_count= 1<phase_shift= phase_shift; c->phase_mask= phase_count-1; c->linear= linear; c->filter_length= FFMAX(ceil(filter_size/factor), 1); c->filter_bank= av_mallocz(c->filter_length*(phase_count+1)*sizeof(FELEM)); av_build_filter(c->filter_bank, factor, c->filter_length, phase_count, 1<filter_bank[c->filter_length*phase_count+1], c->filter_bank, (c->filter_length-1)*sizeof(FELEM)); c->filter_bank[c->filter_length*phase_count]= c->filter_bank[c->filter_length - 1]; c->src_incr= out_rate; c->ideal_dst_incr= c->dst_incr= in_rate * phase_count; c->index= -phase_count*((c->filter_length-1)/2); return c; }"} {"target": 0, "idx": 25348, "func": "static int mov_read_elst(MOVContext *c, ByteIOContext *pb, MOVAtom atom) { MOVStreamContext *sc = c->fc->streams[c->fc->nb_streams-1]->priv_data; int i, edit_count; get_byte(pb); /* version */ get_be24(pb); /* flags */ edit_count= sc->edit_count = get_be32(pb); /* entries */ for(i=0; ifc, AV_LOG_WARNING, \"edit list not starting at 0, \" \"a/v desync might occur, patch welcome\\n\"); } dprintf(c->fc, \"track[%i].edit_count = %i\\n\", c->fc->nb_streams-1, sc->edit_count); return 0; }"} {"target": 0, "idx": 25363, "func": "static void set_irq_level(void *opaque, int n, int level) { NVICState *s = opaque; VecInfo *vec; n += NVIC_FIRST_IRQ; assert(n >= NVIC_FIRST_IRQ && n < s->num_irq); trace_nvic_set_irq_level(n, level); /* The pending status of an external interrupt is * latched on rising edge and exception handler return. * * Pulsing the IRQ will always run the handler * once, and the handler will re-run until the * level is low when the handler completes. 
*/ vec = &s->vectors[n]; if (level != vec->level) { vec->level = level; if (level) { armv7m_nvic_set_pending(s, n); } } }"} {"target": 0, "idx": 25368, "func": "static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ int time_incr, time_increment; s->pict_type = get_bits(gb, 2) + I_TYPE; /* pict type: I = 0 , P = 1 */ if(s->pict_type==B_TYPE && s->low_delay && s->vol_control_parameters==0 && !(s->flags & CODEC_FLAG_LOW_DELAY)){ av_log(s->avctx, AV_LOG_ERROR, \"low_delay flag set, but shouldnt, clearing it\\n\"); s->low_delay=0; } s->partitioned_frame= s->data_partitioning && s->pict_type!=B_TYPE; if(s->partitioned_frame) s->decode_mb= mpeg4_decode_partitioned_mb; else s->decode_mb= ff_h263_decode_mb; if(s->time_increment_resolution==0){ s->time_increment_resolution=1; // fprintf(stderr, \"time_increment_resolution is illegal\\n\"); } time_incr=0; while (get_bits1(gb) != 0) time_incr++; check_marker(gb, \"before time_increment\"); if(s->time_increment_bits==0){ av_log(s->avctx, AV_LOG_ERROR, \"hmm, seems the headers arnt complete, trying to guess time_increment_bits\\n\"); for(s->time_increment_bits=1 ;s->time_increment_bits<16; s->time_increment_bits++){ if(show_bits(gb, s->time_increment_bits+1)&1) break; } av_log(s->avctx, AV_LOG_ERROR, \"my guess is %d bits ;)\\n\",s->time_increment_bits); } if(IS_3IV1) time_increment= get_bits1(gb); //FIXME investigate further else time_increment= get_bits(gb, s->time_increment_bits); // printf(\"%d %X\\n\", s->time_increment_bits, time_increment); //printf(\" type:%d modulo_time_base:%d increment:%d\\n\", s->pict_type, time_incr, time_increment); if(s->pict_type!=B_TYPE){ s->last_time_base= s->time_base; s->time_base+= time_incr; s->time= s->time_base*s->time_increment_resolution + time_increment; if(s->workaround_bugs&FF_BUG_UMP4){ if(s->time < s->last_non_b_time){ // fprintf(stderr, \"header is not mpeg4 compatible, broken encoder, trying to workaround\\n\"); s->time_base++; s->time+= s->time_increment_resolution; } } s->pp_time= s->time - s->last_non_b_time; s->last_non_b_time= s->time; }else{ s->time= (s->last_time_base + time_incr)*s->time_increment_resolution + time_increment; s->pb_time= s->pp_time - (s->last_non_b_time - s->time); if(s->pp_time <=s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time<=0){ // printf(\"messed up order, seeking?, skiping current b frame\\n\"); return FRAME_SKIPED; } if(s->t_frame==0) s->t_frame= s->time - s->last_time_base; if(s->t_frame==0) s->t_frame=1; // 1/0 protection //printf(\"%Ld %Ld %d %d\\n\", s->last_non_b_time, s->time, s->pp_time, s->t_frame); fflush(stdout); s->pp_field_time= ( ROUNDED_DIV(s->last_non_b_time, s->t_frame) - ROUNDED_DIV(s->last_non_b_time - s->pp_time, s->t_frame))*2; s->pb_field_time= ( ROUNDED_DIV(s->time, s->t_frame) - ROUNDED_DIV(s->last_non_b_time - s->pp_time, s->t_frame))*2; } s->current_picture_ptr->pts= s->time*1000LL*1000LL / s->time_increment_resolution; if(s->avctx->debug&FF_DEBUG_PTS) av_log(s->avctx, AV_LOG_DEBUG, \"MPEG4 PTS: %f\\n\", s->current_picture_ptr->pts/(1000.0*1000.0)); check_marker(gb, \"before vop_coded\"); /* vop coded */ if (get_bits1(gb) != 1){ av_log(s->avctx, AV_LOG_ERROR, \"vop not coded\\n\"); return FRAME_SKIPED; } //printf(\"time %d %d %d || %Ld %Ld %Ld\\n\", s->time_increment_bits, s->time_increment_resolution, s->time_base, //s->time, s->last_non_b_time, s->last_non_b_time - s->pp_time); if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == P_TYPE || (s->pict_type == S_TYPE && s->vol_sprite_usage==GMC_SPRITE))) { /* rounding 
type for motion estimation */ s->no_rounding = get_bits1(gb); } else { s->no_rounding = 0; } //FIXME reduced res stuff if (s->shape != RECT_SHAPE) { if (s->vol_sprite_usage != 1 || s->pict_type != I_TYPE) { int width, height, hor_spat_ref, ver_spat_ref; width = get_bits(gb, 13); skip_bits1(gb); /* marker */ height = get_bits(gb, 13); skip_bits1(gb); /* marker */ hor_spat_ref = get_bits(gb, 13); /* hor_spat_ref */ skip_bits1(gb); /* marker */ ver_spat_ref = get_bits(gb, 13); /* ver_spat_ref */ } skip_bits1(gb); /* change_CR_disable */ if (get_bits1(gb) != 0) { skip_bits(gb, 8); /* constant_alpha_value */ } } //FIXME complexity estimation stuff if (s->shape != BIN_ONLY_SHAPE) { s->intra_dc_threshold= mpeg4_dc_threshold[ get_bits(gb, 3) ]; if(!s->progressive_sequence){ s->top_field_first= get_bits1(gb); s->alternate_scan= get_bits1(gb); }else s->alternate_scan= 0; } if(s->alternate_scan){ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } else{ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } if(s->pict_type == S_TYPE && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){ mpeg4_decode_sprite_trajectory(s); if(s->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, \"sprite_brightness_change not supported\\n\"); if(s->vol_sprite_usage==STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, \"static sprite not supported\\n\"); } if (s->shape != BIN_ONLY_SHAPE) { s->qscale = get_bits(gb, s->quant_precision); if(s->qscale==0){ av_log(s->avctx, AV_LOG_ERROR, \"Error, header damaged or not MPEG4 header (qscale=0)\\n\"); return -1; // makes no sense to continue, as there is nothing left from the image then } if (s->pict_type != I_TYPE) { s->f_code = get_bits(gb, 3); /* fcode_for */ if(s->f_code==0){ av_log(s->avctx, AV_LOG_ERROR, \"Error, header damaged or not MPEG4 header (f_code=0)\\n\"); return -1; // makes no sense to continue, as the MV decoding will break very quickly } }else s->f_code=1; if (s->pict_type == B_TYPE) { s->b_code = get_bits(gb, 3); }else s->b_code=1; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ av_log(s->avctx, AV_LOG_DEBUG, \"qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d\\n\", s->qscale, s->f_code, s->b_code, s->pict_type == I_TYPE ? \"I\" : (s->pict_type == P_TYPE ? \"P\" : (s->pict_type == B_TYPE ? \"B\" : \"S\")), gb->size_in_bits,s->progressive_sequence, s->alternate_scan, s->top_field_first, s->quarter_sample ? \"q\" : \"h\", s->data_partitioning, s->resync_marker, s->num_sprite_warping_points, s->sprite_warping_accuracy, 1-s->no_rounding, s->vo_type, s->vol_control_parameters ? 
\" VOLC\" : \" \", s->intra_dc_threshold); } if(!s->scalability){ if (s->shape!=RECT_SHAPE && s->pict_type!=I_TYPE) { skip_bits1(gb); // vop shape coding type } }else{ if(s->enhancement_type){ int load_backward_shape= get_bits1(gb); if(load_backward_shape){ av_log(s->avctx, AV_LOG_ERROR, \"load backward shape isnt supported\\n\"); } } skip_bits(gb, 2); //ref_select_code } } /* detect buggy encoders which dont set the low_delay flag (divx4/xvid/opendivx)*/ // note we cannot detect divx5 without b-frames easyly (allthough its buggy too) if(s->vo_type==0 && s->vol_control_parameters==0 && s->divx_version==0 && s->picture_number==0){ av_log(s->avctx, AV_LOG_ERROR, \"looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\\n\"); s->low_delay=1; } s->picture_number++; // better than pic number==0 allways ;) s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table; //FIXME add short header support s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table; if(s->workaround_bugs&FF_BUG_EDGE){ s->h_edge_pos= s->width; s->v_edge_pos= s->height; } return 0; }"} {"target": 1, "idx": 25375, "func": "static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, long width, long height, long lumStride, long chromStride, long srcStride) { long y; const long chromWidth= width>>1; for(y=0; ypriv_data; int ret, i, minvariant = -1; if (c->first_packet) { recheck_discard_flags(s, 1); c->first_packet = 0; } start: c->end_of_segment = 0; for (i = 0; i < c->n_variants; i++) { struct variant *var = c->variants[i]; /* Make sure we've got one buffered packet from each open variant * stream */ if (var->needed && !var->pkt.data) { while (1) { int64_t ts_diff; AVStream *st; ret = av_read_frame(var->ctx, &var->pkt); if (ret < 0) { if (!var->pb.eof_reached) return ret; break; } else { if (c->first_timestamp == AV_NOPTS_VALUE && var->pkt.dts != AV_NOPTS_VALUE) c->first_timestamp = av_rescale_q(var->pkt.dts, var->ctx->streams[var->pkt.stream_index]->time_base, AV_TIME_BASE_Q); } if (c->seek_timestamp == AV_NOPTS_VALUE) break; if (var->pkt.dts == AV_NOPTS_VALUE) { c->seek_timestamp = AV_NOPTS_VALUE; break; } st = var->ctx->streams[var->pkt.stream_index]; ts_diff = av_rescale_rnd(var->pkt.dts, AV_TIME_BASE, st->time_base.den, AV_ROUND_DOWN) - c->seek_timestamp; if (ts_diff >= 0 && (c->seek_flags & AVSEEK_FLAG_ANY || var->pkt.flags & AV_PKT_FLAG_KEY)) { c->seek_timestamp = AV_NOPTS_VALUE; break; } } } /* Check if this stream still is on an earlier segment number, or * has the packet with the lowest dts */ if (var->pkt.data) { struct variant *minvar = c->variants[minvariant]; if (minvariant < 0 || var->cur_seq_no < minvar->cur_seq_no) { minvariant = i; } else if (var->cur_seq_no == minvar->cur_seq_no) { int64_t dts = var->pkt.dts; int64_t mindts = minvar->pkt.dts; AVStream *st = var->ctx->streams[var->pkt.stream_index]; AVStream *minst = minvar->ctx->streams[minvar->pkt.stream_index]; if (dts == AV_NOPTS_VALUE) { minvariant = i; } else if (mindts != AV_NOPTS_VALUE) { if (st->start_time != AV_NOPTS_VALUE) dts -= st->start_time; if (minst->start_time != AV_NOPTS_VALUE) mindts -= minst->start_time; if (av_compare_ts(dts, st->time_base, mindts, minst->time_base) < 0) minvariant = i; } } } } if (c->end_of_segment) { if (recheck_discard_flags(s, 0)) goto start; } /* If we got a packet, return it */ if (minvariant >= 0) { *pkt = c->variants[minvariant]->pkt; pkt->stream_index += c->variants[minvariant]->stream_offset; reset_packet(&c->variants[minvariant]->pkt); return 0; } return 
AVERROR_EOF; }"} {"target": 1, "idx": 25388, "func": "QObject *qmp_dispatch(QObject *request) { Error *err = NULL; QObject *ret; QDict *rsp; ret = do_qmp_dispatch(request, &err); rsp = qdict_new(); if (err) { qdict_put_obj(rsp, \"error\", error_get_qobject(err)); error_free(err); } else if (ret) { qdict_put_obj(rsp, \"return\", ret); } else { QDECREF(rsp); return NULL; } return QOBJECT(rsp); }"} {"target": 1, "idx": 25414, "func": "int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) { int i, r; hdev->started = true; r = vhost_dev_set_features(hdev, hdev->log_enabled); if (r < 0) { goto fail_features; } r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_MEM_TABLE, hdev->mem); if (r < 0) { r = -errno; goto fail_mem; } for (i = 0; i < hdev->nvqs; ++i) { r = vhost_virtqueue_start(hdev, vdev, hdev->vqs + i, hdev->vq_index + i); if (r < 0) { goto fail_vq; } } if (hdev->log_enabled) { hdev->log_size = vhost_get_log_size(hdev); hdev->log = hdev->log_size ? g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL; r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_LOG_BASE, hdev->log); if (r < 0) { r = -errno; goto fail_log; } } return 0; fail_log: fail_vq: while (--i >= 0) { vhost_virtqueue_stop(hdev, vdev, hdev->vqs + i, hdev->vq_index + i); } i = hdev->nvqs; fail_mem: fail_features: hdev->started = false; return r; }"} {"target": 1, "idx": 25460, "func": "static int jpeg2000_decode_packets(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile) { int layno, reslevelno, compno, precno, ok_reslevel, ret; uint8_t prog_order = tile->codsty[0].prog_order; uint16_t x; uint16_t y; s->bit_index = 8; switch (prog_order) { case JPEG2000_PGOD_LRCP: for (layno = 0; layno < tile->codsty[0].nlayers; layno++) { ok_reslevel = 1; for (reslevelno = 0; ok_reslevel; reslevelno++) { ok_reslevel = 0; for (compno = 0; compno < s->ncomponents; compno++) { Jpeg2000CodingStyle *codsty = tile->codsty + compno; Jpeg2000QuantStyle *qntsty = tile->qntsty + compno; if (reslevelno < codsty->nreslevels) { Jpeg2000ResLevel *rlevel = tile->comp[compno].reslevel + reslevelno; ok_reslevel = 1; for (precno = 0; precno < rlevel->num_precincts_x * rlevel->num_precincts_y; precno++) if ((ret = jpeg2000_decode_packet(s, codsty, rlevel, precno, layno, qntsty->expn + (reslevelno ? 3 * (reslevelno - 1) + 1 : 0), qntsty->nguardbits)) < 0) return ret; } } } } break; case JPEG2000_PGOD_CPRL: for (compno = 0; compno < s->ncomponents; compno++) { Jpeg2000CodingStyle *codsty = tile->codsty + compno; Jpeg2000QuantStyle *qntsty = tile->qntsty + compno; /* Set bit stream buffer address according to tile-part. * For DCinema one tile-part per component, so can be * indexed by component. */ s->buf = tile->tile_part[compno].tp_start_bstrm; /* Position loop (y axis) * TODO: Automate computing of step 256. * Fixed here, but to be computed before entering here. */ for (y = 0; y < s->height; y += 256) { /* Position loop (y axis) * TODO: automate computing of step 256. * Fixed here, but to be computed before entering here. 
*/ for (x = 0; x < s->width; x += 256) { for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++) { uint16_t prcx, prcy; uint8_t reducedresno = codsty->nreslevels - 1 -reslevelno; // ==> N_L - r Jpeg2000ResLevel *rlevel = tile->comp[compno].reslevel + reslevelno; if (!((y % (1 << (rlevel->log2_prec_height + reducedresno)) == 0) || (y == 0))) // TODO: 2nd condition simplified as try0 always =0 for dcinema continue; if (!((x % (1 << (rlevel->log2_prec_width + reducedresno)) == 0) || (x == 0))) // TODO: 2nd condition simplified as try0 always =0 for dcinema continue; // check if a precinct exists prcx = ff_jpeg2000_ceildivpow2(x, reducedresno) >> rlevel->log2_prec_width; prcy = ff_jpeg2000_ceildivpow2(y, reducedresno) >> rlevel->log2_prec_height; precno = prcx + rlevel->num_precincts_x * prcy; for (layno = 0; layno < tile->codsty[0].nlayers; layno++) { if ((ret = jpeg2000_decode_packet(s, codsty, rlevel, precno, layno, qntsty->expn + (reslevelno ? 3 * (reslevelno - 1) + 1 : 0), qntsty->nguardbits)) < 0) return ret; } } } } } break; default: break; } /* EOC marker reached */ s->buf += 2; return 0; }"} {"target": 1, "idx": 25466, "func": "static int io_open_default(AVFormatContext *s, AVIOContext **pb, const char *url, int flags, AVDictionary **options) { #if FF_API_OLD_OPEN_CALLBACKS FF_DISABLE_DEPRECATION_WARNINGS if (s->open_cb) return s->open_cb(s, pb, url, flags, &s->interrupt_callback, options); FF_ENABLE_DEPRECATION_WARNINGS #endif return ffio_open_whitelist(pb, url, flags, &s->interrupt_callback, options, s->protocol_whitelist, s->protocol_blacklist); }"} {"target": 1, "idx": 25468, "func": "static void mmubooke_dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env) { ppcemb_tlb_t *entry; int i; if (kvm_enabled() && !env->kvm_sw_tlb) { cpu_fprintf(f, \"Cannot access KVM TLB\\n\"); return; } cpu_fprintf(f, \"\\nTLB:\\n\"); cpu_fprintf(f, \"Effective Physical Size PID Prot \" \"Attr\\n\"); entry = &env->tlb.tlbe[0]; for (i = 0; i < env->nb_tlb; i++, entry++) { hwaddr ea, pa; target_ulong mask; uint64_t size = (uint64_t)entry->size; char size_buf[20]; /* Check valid flag */ if (!(entry->prot & PAGE_VALID)) { continue; } mask = ~(entry->size - 1); ea = entry->EPN & mask; pa = entry->RPN & mask; #if (TARGET_PHYS_ADDR_SPACE_BITS >= 36) /* Extend the physical address to 36 bits */ pa |= (hwaddr)(entry->RPN & 0xF) << 32; #endif size /= 1024; if (size >= 1024) { snprintf(size_buf, sizeof(size_buf), \"%3\" PRId64 \"M\", size / 1024); } else { snprintf(size_buf, sizeof(size_buf), \"%3\" PRId64 \"k\", size); } cpu_fprintf(f, \"0x%016\" PRIx64 \" 0x%016\" PRIx64 \" %s %-5u %08x %08x\\n\", (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID, entry->prot, entry->attr); } }"} {"target": 1, "idx": 25477, "func": "static void rstrip_spaces_buf(AVBPrint *buf) { while (buf->len > 0 && buf->str[buf->len - 1] == ' ') buf->str[--buf->len] = 0; }"} {"target": 0, "idx": 25481, "func": "static inline void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused) { #if COMPILE_TEMPLATE_MMX RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24); #else int i; for (i=0; i>RGB2YUV_SHIFT); } #endif }"} {"target": 1, "idx": 25489, "func": "static int compand_drain(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; CompandContext *s = ctx->priv; const int channels = outlink->channels; AVFrame *frame = NULL; int chan, i, dindex; /* 2048 is to limit output frame size during drain */ frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count)); if (!frame) 
return AVERROR(ENOMEM); frame->pts = s->pts; s->pts += av_rescale_q(frame->nb_samples, (AVRational){ 1, outlink->sample_rate }, outlink->time_base); for (chan = 0; chan < channels; chan++) { AVFrame *delay_frame = s->delay_frame; double *dbuf = (double *)delay_frame->extended_data[chan]; double *dst = (double *)frame->extended_data[chan]; ChanParam *cp = &s->channels[chan]; dindex = s->delay_index; for (i = 0; i < frame->nb_samples; i++) { dst[i] = av_clipd(dbuf[dindex] * get_volume(s, cp->volume), -1, 1); dindex = MOD(dindex + 1, s->delay_samples); } } s->delay_count -= frame->nb_samples; s->delay_index = dindex; return ff_filter_frame(outlink, frame); }"} {"target": 1, "idx": 25494, "func": "InetSocketAddress *inet_parse(const char *str, Error **errp) { InetSocketAddress *addr; const char *optstr, *h; char host[64]; char port[33]; int to; int pos; addr = g_new0(InetSocketAddress, 1); /* parse address */ if (str[0] == ':') { /* no host given */ host[0] = '\\0'; if (1 != sscanf(str, \":%32[^,]%n\", port, &pos)) { error_setg(errp, \"error parsing port in address '%s'\", str); goto fail; } } else if (str[0] == '[') { /* IPv6 addr */ if (2 != sscanf(str, \"[%64[^]]]:%32[^,]%n\", host, port, &pos)) { error_setg(errp, \"error parsing IPv6 address '%s'\", str); goto fail; } addr->ipv6 = addr->has_ipv6 = true; } else { /* hostname or IPv4 addr */ if (2 != sscanf(str, \"%64[^:]:%32[^,]%n\", host, port, &pos)) { error_setg(errp, \"error parsing address '%s'\", str); goto fail; } if (host[strspn(host, \"0123456789.\")] == '\\0') { addr->ipv4 = addr->has_ipv4 = true; } } addr->host = g_strdup(host); addr->port = g_strdup(port); /* parse options */ optstr = str + pos; h = strstr(optstr, \",to=\"); if (h) { h += 4; if (sscanf(h, \"%d%n\", &to, &pos) != 1 || (h[pos] != '\\0' && h[pos] != ',')) { error_setg(errp, \"error parsing to= argument\"); goto fail; } addr->has_to = true; addr->to = to; } if (strstr(optstr, \",ipv4\")) { addr->ipv4 = addr->has_ipv4 = true; } if (strstr(optstr, \",ipv6\")) { addr->ipv6 = addr->has_ipv6 = true; } return addr; fail: qapi_free_InetSocketAddress(addr); return NULL; }"} {"target": 1, "idx": 25498, "func": "static void pnv_chip_power9_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PnvChipClass *k = PNV_CHIP_CLASS(klass); k->cpu_model = \"POWER9\"; k->chip_type = PNV_CHIP_POWER9; k->chip_cfam_id = 0x100d104980000000ull; /* P9 Nimbus DD1.0 */ k->cores_mask = POWER9_CORE_MASK; k->core_pir = pnv_chip_core_pir_p9; dc->desc = \"PowerNV Chip POWER9\"; }"} {"target": 1, "idx": 25500, "func": "static int rtsp_read_packet(AVFormatContext *s, AVPacket *pkt) { RTSPState *rt = s->priv_data; RTSPStream *rtsp_st; int ret, len; uint8_t buf[RTP_MAX_PACKET_LENGTH]; /* get next frames from the same RTP packet */ if (rt->cur_rtp) { ret = rtp_parse_packet(rt->cur_rtp, pkt, NULL, 0); if (ret == 0) { rt->cur_rtp = NULL; return 0; } else if (ret == 1) { return 0; } else { rt->cur_rtp = NULL; } } /* read next RTP packet */ redo: switch(rt->protocol) { default: case RTSP_PROTOCOL_RTP_TCP: len = tcp_read_packet(s, &rtsp_st, buf, sizeof(buf)); break; case RTSP_PROTOCOL_RTP_UDP: case RTSP_PROTOCOL_RTP_UDP_MULTICAST: len = udp_read_packet(s, &rtsp_st, buf, sizeof(buf)); if (rtsp_st->rtp_ctx) rtp_check_and_send_back_rr(rtsp_st->rtp_ctx, len); break; } if (len < 0) return AVERROR_IO; ret = rtp_parse_packet(rtsp_st->rtp_ctx, pkt, buf, len); if (ret < 0) goto redo; if (ret == 1) { /* more packets may follow, so we save the RTP context */ rt->cur_rtp = 
rtsp_st->rtp_ctx; } return 0; }"} {"target": 0, "idx": 25506, "func": "static void omap_rfbi_transfer_start(struct omap_dss_s *s) { void *data; hwaddr len; hwaddr data_addr; int pitch; static void *bounce_buffer; static hwaddr bounce_len; if (!s->rfbi.enable || s->rfbi.busy) return; if (s->rfbi.control & (1 << 1)) { /* BYPASS */ /* TODO: in non-Bypass mode we probably need to just assert the * DRQ and wait for DMA to write the pixels. */ fprintf(stderr, \"%s: Bypass mode unimplemented\\n\", __FUNCTION__); return; } if (!(s->dispc.control & (1 << 11))) /* RFBIMODE */ return; /* TODO: check that LCD output is enabled in DISPC. */ s->rfbi.busy = 1; len = s->rfbi.pixels * 2; data_addr = s->dispc.l[0].addr[0]; data = cpu_physical_memory_map(data_addr, &len, 0); if (data && len != s->rfbi.pixels * 2) { cpu_physical_memory_unmap(data, len, 0, 0); data = NULL; len = s->rfbi.pixels * 2; } if (!data) { if (len > bounce_len) { bounce_buffer = g_realloc(bounce_buffer, len); } data = bounce_buffer; cpu_physical_memory_read(data_addr, data, len); } /* TODO bpp */ s->rfbi.pixels = 0; /* TODO: negative values */ pitch = s->dispc.l[0].nx + (s->dispc.l[0].rowinc - 1) / 2; if ((s->rfbi.control & (1 << 2)) && s->rfbi.chip[0]) s->rfbi.chip[0]->block(s->rfbi.chip[0]->opaque, 1, data, len, pitch); if ((s->rfbi.control & (1 << 3)) && s->rfbi.chip[1]) s->rfbi.chip[1]->block(s->rfbi.chip[1]->opaque, 1, data, len, pitch); if (data != bounce_buffer) { cpu_physical_memory_unmap(data, len, 0, len); } omap_rfbi_transfer_stop(s); /* TODO */ s->dispc.irqst |= 1; /* FRAMEDONE */ omap_dispc_interrupt_update(s); }"} {"target": 0, "idx": 25519, "func": "static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds, unsigned npfd, int64_t timeout) { return false; }"} {"target": 1, "idx": 25528, "func": "static int source_config_props(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; Frei0rContext *s = ctx->priv; if (av_image_check_size(s->w, s->h, 0, ctx) < 0) return AVERROR(EINVAL); outlink->w = s->w; outlink->h = s->h; outlink->time_base = s->time_base; if (!(s->instance = s->construct(outlink->w, outlink->h))) { av_log(ctx, AV_LOG_ERROR, \"Impossible to load frei0r instance\"); return AVERROR(EINVAL); } return set_params(ctx, s->params); }"} {"target": 0, "idx": 25551, "func": "void spapr_tce_reset(DMAContext *dma) { if (dma) { sPAPRTCETable *tcet = DO_UPCAST(sPAPRTCETable, dma, dma); size_t table_size = (tcet->window_size >> SPAPR_TCE_PAGE_SHIFT) * sizeof(sPAPRTCE); memset(tcet->table, 0, table_size); } }"} {"target": 1, "idx": 25555, "func": "static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target, const char *replaces, int64_t speed, uint32_t granularity, int64_t buf_size, BlockMirrorBackingMode backing_mode, BlockdevOnError on_source_error, BlockdevOnError on_target_error, bool unmap, BlockCompletionFunc *cb, void *opaque, Error **errp, const BlockJobDriver *driver, bool is_none_mode, BlockDriverState *base) { MirrorBlockJob *s; if (granularity == 0) { granularity = bdrv_get_default_bitmap_granularity(target); } assert ((granularity & (granularity - 1)) == 0); if (buf_size < 0) { error_setg(errp, \"Invalid parameter 'buf-size'\"); return; } if (buf_size == 0) { buf_size = DEFAULT_MIRROR_BUF_SIZE; } s = block_job_create(driver, bs, speed, cb, opaque, errp); if (!s) { return; } s->target = blk_new(); blk_insert_bs(s->target, target); s->replaces = g_strdup(replaces); s->on_source_error = on_source_error; s->on_target_error = on_target_error; s->is_none_mode = is_none_mode; s->backing_mode 
= backing_mode; s->base = base; s->granularity = granularity; s->buf_size = ROUND_UP(buf_size, granularity); s->unmap = unmap; s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); if (!s->dirty_bitmap) { g_free(s->replaces); blk_unref(s->target); block_job_unref(&s->common); return; } bdrv_op_block_all(target, s->common.blocker); s->common.co = qemu_coroutine_create(mirror_run); trace_mirror_start(bs, s, s->common.co, opaque); qemu_coroutine_enter(s->common.co, s); }"} {"target": 0, "idx": 25570, "func": "static inline int fp_reg_hi_offset(int regno) { return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]); }"} {"target": 0, "idx": 25576, "func": "void qemu_spice_display_init(DisplayState *ds) { assert(sdpy.ds == NULL); sdpy.ds = ds; sdpy.bufsize = (16 * 1024 * 1024); sdpy.buf = qemu_malloc(sdpy.bufsize); pthread_mutex_init(&sdpy.lock, NULL); register_displaychangelistener(ds, &display_listener); sdpy.qxl.base.sif = &dpy_interface.base; qemu_spice_add_interface(&sdpy.qxl.base); assert(sdpy.worker); qemu_add_vm_change_state_handler(qemu_spice_vm_change_state_handler, &sdpy); qemu_spice_create_host_memslot(&sdpy); qemu_spice_create_host_primary(&sdpy); }"} {"target": 0, "idx": 25577, "func": "const DVprofile* ff_dv_frame_profile2(AVCodecContext* codec, const DVprofile *sys, const uint8_t* frame, unsigned buf_size) { int i; int dsf = (frame[3] & 0x80) >> 7; int stype = frame[80*5 + 48 + 3] & 0x1f; /* 576i50 25Mbps 4:1:1 is a special case */ if (dsf == 1 && stype == 0 && frame[4] & 0x07 /* the APT field */) { return &dv_profiles[2]; } if(codec && codec->codec_tag==AV_RL32(\"dvsd\") && codec->width==720 && codec->height==576) return &dv_profiles[1]; for (i=0; iframe_size) return sys; return NULL; }"} {"target": 0, "idx": 25585, "func": "static void cryptodev_builtin_cleanup( CryptoDevBackend *backend, Error **errp) { CryptoDevBackendBuiltin *builtin = CRYPTODEV_BACKEND_BUILTIN(backend); size_t i; int queues = backend->conf.peers.queues; CryptoDevBackendClient *cc; for (i = 0; i < MAX_NUM_SESSIONS; i++) { if (builtin->sessions[i] != NULL) { cryptodev_builtin_sym_close_session( backend, i, 0, errp); } } assert(queues == 1); for (i = 0; i < queues; i++) { cc = backend->conf.peers.ccs[i]; if (cc) { cryptodev_backend_free_client(cc); backend->conf.peers.ccs[i] = NULL; } } cryptodev_backend_set_ready(backend, false); }"} {"target": 1, "idx": 25586, "func": "static void clear_commits(BDRVVVFATState* s) { int i; DLOG(fprintf(stderr, \"clear_commits (%d commits)\\n\", s->commits.next)); for (i = 0; i < s->commits.next; i++) { commit_t* commit = array_get(&(s->commits), i); assert(commit->path || commit->action == ACTION_WRITEOUT); if (commit->action != ACTION_WRITEOUT) { assert(commit->path); free(commit->path); } else assert(commit->path == NULL); } s->commits.next = 0; }"} {"target": 1, "idx": 25600, "func": "vmxnet3_dump_rx_descr(struct Vmxnet3_RxDesc *descr) { VMW_PKPRN(\"RX DESCR: addr %\" PRIx64 \", len: %d, gen: %d, rsvd: %d, \" \"dtype: %d, ext1: %d, btype: %d\", le64_to_cpu(descr->addr), descr->len, descr->gen, descr->rsvd, descr->dtype, descr->ext1, descr->btype); }"} {"target": 1, "idx": 25610, "func": "static int mxf_compute_sample_count(MXFContext *mxf, int stream_index, uint64_t *sample_count) { int i, total = 0, size = 0; AVStream *st = mxf->fc->streams[stream_index]; MXFTrack *track = st->priv_data; AVRational time_base = av_inv_q(track->edit_rate); AVRational sample_rate = av_inv_q(st->time_base); const MXFSamplesPerFrame *spf = NULL; if ((sample_rate.num / 
sample_rate.den) == 48000) spf = ff_mxf_get_samples_per_frame(mxf->fc, time_base); if (!spf) { int remainder = (sample_rate.num * time_base.num) % (time_base.den * sample_rate.den); *sample_count = av_q2d(av_mul_q((AVRational){mxf->current_edit_unit, 1}, av_mul_q(sample_rate, time_base))); if (remainder) av_log(mxf->fc, AV_LOG_WARNING, \"seeking detected on stream #%d with time base (%d/%d) and sample rate (%d/%d), audio pts won't be accurate.\\n\", stream_index, time_base.num, time_base.den, sample_rate.num, sample_rate.den); return 0; } while (spf->samples_per_frame[size]) { total += spf->samples_per_frame[size]; size++; } av_assert2(size); *sample_count = (mxf->current_edit_unit / size) * total; for (i = 0; i < mxf->current_edit_unit % size; i++) { *sample_count += spf->samples_per_frame[i]; } return 0; }"} {"target": 0, "idx": 25622, "func": "int do_netdev_del(Monitor *mon, const QDict *qdict, QObject **ret_data) { const char *id = qdict_get_str(qdict, \"id\"); VLANClientState *vc; vc = qemu_find_netdev(id); if (!vc || vc->info->type == NET_CLIENT_TYPE_NIC) { qerror_report(QERR_DEVICE_NOT_FOUND, id); return -1; } if (vc->peer) { qerror_report(QERR_DEVICE_IN_USE, id); return -1; } qemu_del_vlan_client(vc); qemu_opts_del(qemu_opts_find(&qemu_netdev_opts, id)); return 0; }"} {"target": 0, "idx": 25624, "func": "int qcrypto_pbkdf2(QCryptoHashAlgorithm hash G_GNUC_UNUSED, const uint8_t *key G_GNUC_UNUSED, size_t nkey G_GNUC_UNUSED, const uint8_t *salt G_GNUC_UNUSED, size_t nsalt G_GNUC_UNUSED, unsigned int iterations G_GNUC_UNUSED, uint8_t *out G_GNUC_UNUSED, size_t nout G_GNUC_UNUSED, Error **errp) { error_setg_errno(errp, ENOSYS, \"No crypto library supporting PBKDF in this build\"); return -1; }"} {"target": 0, "idx": 25625, "func": "static void qmp_chardev_open_socket(Chardev *chr, ChardevBackend *backend, bool *be_opened, Error **errp) { SocketChardev *s = SOCKET_CHARDEV(chr); ChardevSocket *sock = backend->u.socket.data; SocketAddress *addr = sock->addr; bool do_nodelay = sock->has_nodelay ? sock->nodelay : false; bool is_listen = sock->has_server ? sock->server : true; bool is_telnet = sock->has_telnet ? sock->telnet : false; bool is_tn3270 = sock->has_tn3270 ? sock->tn3270 : false; bool is_waitconnect = sock->has_wait ? sock->wait : false; int64_t reconnect = sock->has_reconnect ? 
sock->reconnect : 0; QIOChannelSocket *sioc = NULL; s->is_listen = is_listen; s->is_telnet = is_telnet; s->is_tn3270 = is_tn3270; s->do_nodelay = do_nodelay; if (sock->tls_creds) { Object *creds; creds = object_resolve_path_component( object_get_objects_root(), sock->tls_creds); if (!creds) { error_setg(errp, \"No TLS credentials with id '%s'\", sock->tls_creds); goto error; } s->tls_creds = (QCryptoTLSCreds *) object_dynamic_cast(creds, TYPE_QCRYPTO_TLS_CREDS); if (!s->tls_creds) { error_setg(errp, \"Object with id '%s' is not TLS credentials\", sock->tls_creds); goto error; } object_ref(OBJECT(s->tls_creds)); if (is_listen) { if (s->tls_creds->endpoint != QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) { error_setg(errp, \"%s\", \"Expected TLS credentials for server endpoint\"); goto error; } } else { if (s->tls_creds->endpoint != QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT) { error_setg(errp, \"%s\", \"Expected TLS credentials for client endpoint\"); goto error; } } } s->addr = QAPI_CLONE(SocketAddress, sock->addr); qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE); /* TODO SOCKET_ADDRESS_FD where fd has AF_UNIX */ if (addr->type == SOCKET_ADDRESS_KIND_UNIX) { qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_FD_PASS); } /* be isn't opened until we get a connection */ *be_opened = false; update_disconnected_filename(s); if (is_listen) { if (is_telnet || is_tn3270) { s->do_telnetopt = 1; } } else if (reconnect > 0) { s->reconnect_time = reconnect; } if (s->reconnect_time) { sioc = qio_channel_socket_new(); tcp_chr_set_client_ioc_name(chr, sioc); qio_channel_socket_connect_async(sioc, s->addr, qemu_chr_socket_connected, chr, NULL); } else { if (s->is_listen) { char *name; sioc = qio_channel_socket_new(); name = g_strdup_printf(\"chardev-tcp-listener-%s\", chr->label); qio_channel_set_name(QIO_CHANNEL(sioc), name); g_free(name); if (qio_channel_socket_listen_sync(sioc, s->addr, errp) < 0) { goto error; } qapi_free_SocketAddress(s->addr); s->addr = socket_local_address(sioc->fd, errp); update_disconnected_filename(s); s->listen_ioc = sioc; if (is_waitconnect && qemu_chr_wait_connected(chr, errp) < 0) { return; } if (!s->ioc) { s->listen_tag = qio_channel_add_watch( QIO_CHANNEL(s->listen_ioc), G_IO_IN, tcp_chr_accept, chr, NULL); } } else if (qemu_chr_wait_connected(chr, errp) < 0) { goto error; } } return; error: if (sioc) { object_unref(OBJECT(sioc)); } }"} {"target": 0, "idx": 25644, "func": "int mpeg4_decode_picture_header(MpegEncContext * s) { int time_incr, startcode, state, v; redo: /* search next start code */ align_get_bits(&s->gb); state = 0xff; for(;;) { v = get_bits(&s->gb, 8); if (state == 0x000001) { state = ((state << 8) | v) & 0xffffff; startcode = state; break; } state = ((state << 8) | v) & 0xffffff; if( get_bits_count(&s->gb) > s->gb.size*8){ printf(\"no VOP startcode found\\n\"); return -1; } } //printf(\"startcode %X %d\\n\", startcode, get_bits_count(&s->gb)); if (startcode == 0x120) { // Video Object Layer int width, height, vo_ver_id; /* vol header */ skip_bits(&s->gb, 1); /* random access */ skip_bits(&s->gb, 8); /* vo_type */ if (get_bits1(&s->gb) != 0) { /* is_ol_id */ vo_ver_id = get_bits(&s->gb, 4); /* vo_ver_id */ skip_bits(&s->gb, 3); /* vo_priority */ } else { vo_ver_id = 1; } s->aspect_ratio_info= get_bits(&s->gb, 4); if(s->aspect_ratio_info == EXTENDET_PAR){ skip_bits(&s->gb, 8); //par_width skip_bits(&s->gb, 8); // par_height } if(get_bits1(&s->gb)){ /* vol control parameter */ printf(\"vol control parameter not supported\\n\"); return -1; } s->shape = get_bits(&s->gb, 2); /* 
vol shape */ if(s->shape != RECT_SHAPE) printf(\"only rectangular vol supported\\n\"); if(s->shape == GRAY_SHAPE && vo_ver_id != 1){ printf(\"Gray shape not supported\\n\"); skip_bits(&s->gb, 4); //video_object_layer_shape_extension } skip_bits1(&s->gb); /* marker */ s->time_increment_resolution = get_bits(&s->gb, 16); s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1; if (s->time_increment_bits < 1) s->time_increment_bits = 1; skip_bits1(&s->gb); /* marker */ if (get_bits1(&s->gb) != 0) { /* fixed_vop_rate */ skip_bits(&s->gb, s->time_increment_bits); } if (s->shape != BIN_ONLY_SHAPE) { if (s->shape == RECT_SHAPE) { skip_bits1(&s->gb); /* marker */ width = get_bits(&s->gb, 13); skip_bits1(&s->gb); /* marker */ height = get_bits(&s->gb, 13); skip_bits1(&s->gb); /* marker */ if(width && height){ /* they should be non zero but who knows ... */ s->width = width; s->height = height; // printf(\"%d %d\\n\", width, height); } } if(get_bits1(&s->gb)) printf(\"interlaced not supported\\n\"); /* interlaced */ if(!get_bits1(&s->gb)) printf(\"OBMC not supported\\n\"); /* OBMC Disable */ if (vo_ver_id == 1) { s->vol_sprite_usage = get_bits1(&s->gb); /* vol_sprite_usage */ } else { s->vol_sprite_usage = get_bits(&s->gb, 2); /* vol_sprite_usage */ } if(s->vol_sprite_usage==STATIC_SPRITE) printf(\"Static Sprites not supported\\n\"); if(s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE){ if(s->vol_sprite_usage==STATIC_SPRITE){ s->sprite_width = get_bits(&s->gb, 13); skip_bits1(&s->gb); /* marker */ s->sprite_height= get_bits(&s->gb, 13); skip_bits1(&s->gb); /* marker */ s->sprite_left = get_bits(&s->gb, 13); skip_bits1(&s->gb); /* marker */ s->sprite_top = get_bits(&s->gb, 13); skip_bits1(&s->gb); /* marker */ } s->num_sprite_warping_points= get_bits(&s->gb, 6); s->sprite_warping_accuracy = get_bits(&s->gb, 2); s->sprite_brightness_change= get_bits1(&s->gb); if(s->vol_sprite_usage==STATIC_SPRITE) s->low_latency_sprite= get_bits1(&s->gb); } // FIXME sadct disable bit if verid!=1 && shape not rect if (get_bits1(&s->gb) == 1) { /* not_8_bit */ s->quant_precision = get_bits(&s->gb, 4); /* quant_precision */ if(get_bits(&s->gb, 4)!=8) printf(\"N-bit not supported\\n\"); /* bits_per_pixel */ } else { s->quant_precision = 5; } // FIXME a bunch of grayscale shape things if(get_bits1(&s->gb)) printf(\"Quant-Type not supported\\n\"); /* vol_quant_type */ //FIXME if(vo_ver_id != 1) s->quarter_sample= get_bits1(&s->gb); else s->quarter_sample=0; if(!get_bits1(&s->gb)) printf(\"Complexity estimation not supported\\n\"); #if 0 if(get_bits1(&s->gb)) printf(\"resync disable\\n\"); #else skip_bits1(&s->gb); /* resync_marker_disabled */ #endif s->data_partioning= get_bits1(&s->gb); if(s->data_partioning){ printf(\"data partitioning not supported\\n\"); skip_bits1(&s->gb); // reversible vlc } if(vo_ver_id != 1) { s->new_pred= get_bits1(&s->gb); if(s->new_pred){ printf(\"new pred not supported\\n\"); skip_bits(&s->gb, 2); /* requested upstream message type */ skip_bits1(&s->gb); /* newpred segment type */ } s->reduced_res_vop= get_bits1(&s->gb); if(s->reduced_res_vop) printf(\"reduced resolution VOP not supported\\n\"); } else{ s->new_pred=0; s->reduced_res_vop= 0; } s->scalability= get_bits1(&s->gb); if (s->scalability) { printf(\"bad scalability!!!\\n\"); return -1; } } //printf(\"end Data %X %d\\n\", show_bits(&s->gb, 32), get_bits_count(&s->gb)&0x7); goto redo; } else if (startcode == 0x1b2) { //userdata char buf[256]; int i; int e; int ver, build; //printf(\"user Data %X\\n\", 
show_bits(&s->gb, 32)); buf[0]= show_bits(&s->gb, 8); for(i=1; i<256; i++){ buf[i]= show_bits(&s->gb, 16)&0xFF; if(buf[i]==0) break; skip_bits(&s->gb, 8); } buf[255]=0; e=sscanf(buf, \"DivX%dBuild%d\", &ver, &build); if(e==2){ s->divx_version= ver; s->divx_build= build; if(s->picture_number==0){ printf(\"This file was encoded with DivX%d Build%d\\n\", ver, build); if(ver==500 && build==413){ //most likely all version are indeed totally buggy but i dunno for sure ... printf(\"WARNING: this version of DivX is not MPEG4 compatible, trying to workaround these bugs...\\n\"); }else{ printf(\"hmm, i havnt seen that version of divx yet, lets assume they fixed these bugs ...\\n\" \"using mpeg4 decoder, if it fails contact the developers (of ffmpeg)\\n\"); } } } //printf(\"User Data: %s\\n\", buf); goto redo; } else if (startcode != 0x1b6) { //VOP goto redo; } s->pict_type = get_bits(&s->gb, 2) + 1; /* pict type: I = 0 , P = 1 */ //printf(\"pic: %d\\n\", s->pict_type); time_incr=0; while (get_bits1(&s->gb) != 0) time_incr++; check_marker(&s->gb, \"before time_increment\"); s->time_increment= get_bits(&s->gb, s->time_increment_bits); if(s->pict_type!=B_TYPE){ s->time_base+= time_incr; s->last_non_b_time[1]= s->last_non_b_time[0]; s->last_non_b_time[0]= s->time_base*s->time_increment_resolution + s->time_increment; }else{ s->time= (s->last_non_b_time[1]/s->time_increment_resolution + time_incr)*s->time_increment_resolution; s->time+= s->time_increment; } if(check_marker(&s->gb, \"before vop_coded\")==0 && s->picture_number==0){ printf(\"hmm, seems the headers arnt complete, trying to guess time_increment_bits\\n\"); for(s->time_increment_bits++ ;s->time_increment_bits<16; s->time_increment_bits++){ if(get_bits1(&s->gb)) break; } printf(\"my guess is %d bits ;)\\n\",s->time_increment_bits); } /* vop coded */ if (get_bits1(&s->gb) != 1) goto redo; //printf(\"time %d %d %d || %d %d %d\\n\", s->time_increment_bits, s->time_increment, s->time_base, //s->time, s->last_non_b_time[0], s->last_non_b_time[1]); if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == P_TYPE || (s->pict_type == S_TYPE && s->vol_sprite_usage==GMC_SPRITE))) { /* rounding type for motion estimation */ s->no_rounding = get_bits1(&s->gb); } else { s->no_rounding = 0; } //FIXME reduced res stuff if (s->shape != RECT_SHAPE) { if (s->vol_sprite_usage != 1 || s->pict_type != I_TYPE) { int width, height, hor_spat_ref, ver_spat_ref; width = get_bits(&s->gb, 13); skip_bits1(&s->gb); /* marker */ height = get_bits(&s->gb, 13); skip_bits1(&s->gb); /* marker */ hor_spat_ref = get_bits(&s->gb, 13); /* hor_spat_ref */ skip_bits1(&s->gb); /* marker */ ver_spat_ref = get_bits(&s->gb, 13); /* ver_spat_ref */ } skip_bits1(&s->gb); /* change_CR_disable */ if (get_bits1(&s->gb) != 0) { skip_bits(&s->gb, 8); /* constant_alpha_value */ } } //FIXME complexity estimation stuff if (s->shape != BIN_ONLY_SHAPE) { skip_bits(&s->gb, 3); /* intra dc VLC threshold */ //FIXME interlaced specific bits } if(s->pict_type == S_TYPE && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){ if(s->num_sprite_warping_points){ mpeg4_decode_sprite_trajectory(s); } if(s->sprite_brightness_change) printf(\"sprite_brightness_change not supported\\n\"); if(s->vol_sprite_usage==STATIC_SPRITE) printf(\"static sprite not supported\\n\"); } if (s->shape != BIN_ONLY_SHAPE) { /* note: we do not use quant_precision to avoid problem if no MPEG4 vol header as it is found on some old opendivx movies */ s->qscale = get_bits(&s->gb, 5); if(s->qscale==0){ printf(\"Error, 
header damaged or not MPEG4 header (qscale=0)\\n\"); return -1; // makes no sense to continue, as there is nothing left from the image then } if (s->pict_type != I_TYPE) { s->f_code = get_bits(&s->gb, 3); /* fcode_for */ if(s->f_code==0){ printf(\"Error, header damaged or not MPEG4 header (f_code=0)\\n\"); return -1; // makes no sense to continue, as the MV decoding will break very quickly } } if (s->pict_type == B_TYPE) { s->b_code = get_bits(&s->gb, 3); //printf(\"b-code %d\\n\", s->b_code); } //printf(\"quant:%d fcode:%d\\n\", s->qscale, s->f_code); if(!s->scalability){ if (s->shape!=RECT_SHAPE && s->pict_type!=I_TYPE) { skip_bits1(&s->gb); // vop shape coding type } } } s->picture_number++; // better than pic number==0 allways ;) return 0; }"} {"target": 1, "idx": 25676, "func": "static inline void vmxnet3_ring_read_curr_cell(Vmxnet3Ring *ring, void *buff) { vmw_shmem_read(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size); }"} {"target": 1, "idx": 25682, "func": "static CharDriverState* create_eventfd_chr_device(void * opaque, EventNotifier *n, int vector) { /* create a event character device based on the passed eventfd */ IVShmemState *s = opaque; CharDriverState * chr; int eventfd = event_notifier_get_fd(n); chr = qemu_chr_open_eventfd(eventfd); if (chr == NULL) { fprintf(stderr, \"creating eventfd for eventfd %d failed\\n\", eventfd); exit(-1); } /* if MSI is supported we need multiple interrupts */ if (ivshmem_has_feature(s, IVSHMEM_MSI)) { s->eventfd_table[vector].pdev = &s->dev; s->eventfd_table[vector].vector = vector; qemu_chr_add_handlers(chr, ivshmem_can_receive, fake_irqfd, ivshmem_event, &s->eventfd_table[vector]); } else { qemu_chr_add_handlers(chr, ivshmem_can_receive, ivshmem_receive, ivshmem_event, s); } return chr; }"} {"target": 1, "idx": 25686, "func": "static void xics_realize(DeviceState *dev, Error **errp) { XICSState *icp = XICS(dev); Error *error = NULL; int i; if (!icp->nr_servers) { error_setg(errp, \"Number of servers needs to be greater 0\"); return; } /* Registration of global state belongs into realize */ spapr_rtas_register(\"ibm,set-xive\", rtas_set_xive); spapr_rtas_register(\"ibm,get-xive\", rtas_get_xive); spapr_rtas_register(\"ibm,int-off\", rtas_int_off); spapr_rtas_register(\"ibm,int-on\", rtas_int_on); spapr_register_hypercall(H_CPPR, h_cppr); spapr_register_hypercall(H_IPI, h_ipi); spapr_register_hypercall(H_XIRR, h_xirr); spapr_register_hypercall(H_XIRR_X, h_xirr_x); spapr_register_hypercall(H_EOI, h_eoi); spapr_register_hypercall(H_IPOLL, h_ipoll); object_property_set_bool(OBJECT(icp->ics), true, \"realized\", &error); if (error) { error_propagate(errp, error); return; } for (i = 0; i < icp->nr_servers; i++) { object_property_set_bool(OBJECT(&icp->ss[i]), true, \"realized\", &error); if (error) { error_propagate(errp, error); return; } } }"} {"target": 1, "idx": 25687, "func": "int nbd_trip(BlockDriverState *bs, int csock, off_t size, uint64_t dev_offset, uint32_t nbdflags, uint8_t *data) { struct nbd_request request; struct nbd_reply reply; int ret; TRACE(\"Reading request.\"); if (nbd_receive_request(csock, &request) == -1) return -1; if (request.len > NBD_BUFFER_SIZE) { LOG(\"len (%u) is larger than max len (%u)\", request.len, NBD_BUFFER_SIZE); errno = EINVAL; return -1; } if ((request.from + request.len) < request.from) { LOG(\"integer overflow detected! 
\" \"you're probably being attacked\"); errno = EINVAL; return -1; } if ((request.from + request.len) > size) { LOG(\"From: %\" PRIu64 \", Len: %u, Size: %\" PRIu64 \", Offset: %\" PRIu64 \"\\n\", request.from, request.len, (uint64_t)size, dev_offset); LOG(\"requested operation past EOF--bad client?\"); errno = EINVAL; return -1; } TRACE(\"Decoding type\"); reply.handle = request.handle; reply.error = 0; switch (request.type & NBD_CMD_MASK_COMMAND) { case NBD_CMD_READ: TRACE(\"Request type is READ\"); ret = bdrv_read(bs, (request.from + dev_offset) / 512, data, request.len / 512); if (ret < 0) { LOG(\"reading from file failed\"); reply.error = -ret; request.len = 0; } TRACE(\"Read %u byte(s)\", request.len); if (nbd_do_send_reply(csock, &reply, data, request.len) < 0) return -1; break; case NBD_CMD_WRITE: TRACE(\"Request type is WRITE\"); TRACE(\"Reading %u byte(s)\", request.len); if (read_sync(csock, data, request.len) != request.len) { LOG(\"reading from socket failed\"); errno = EINVAL; return -1; } if (nbdflags & NBD_FLAG_READ_ONLY) { TRACE(\"Server is read-only, return error\"); reply.error = 1; } else { TRACE(\"Writing to device\"); ret = bdrv_write(bs, (request.from + dev_offset) / 512, data, request.len / 512); if (ret < 0) { LOG(\"writing to file failed\"); reply.error = -ret; request.len = 0; } if (request.type & NBD_CMD_FLAG_FUA) { ret = bdrv_flush(bs); if (ret < 0) { LOG(\"flush failed\"); reply.error = -ret; } } } if (nbd_do_send_reply(csock, &reply, NULL, 0) < 0) return -1; break; case NBD_CMD_DISC: TRACE(\"Request type is DISCONNECT\"); errno = 0; return 1; case NBD_CMD_FLUSH: TRACE(\"Request type is FLUSH\"); ret = bdrv_flush(bs); if (ret < 0) { LOG(\"flush failed\"); reply.error = -ret; } if (nbd_do_send_reply(csock, &reply, NULL, 0) < 0) return -1; break; case NBD_CMD_TRIM: TRACE(\"Request type is TRIM\"); ret = bdrv_discard(bs, (request.from + dev_offset) / 512, request.len / 512); if (ret < 0) { LOG(\"discard failed\"); reply.error = -ret; } if (nbd_do_send_reply(csock, &reply, NULL, 0) < 0) return -1; break; default: LOG(\"invalid request type (%u) received\", request.type); errno = EINVAL; return -1; } TRACE(\"Request/Reply complete\"); return 0; }"} {"target": 0, "idx": 25694, "func": "static int resample(SwrContext *s, AudioData *out_param, int out_count, const AudioData * in_param, int in_count){ AudioData in, out, tmp; int ret_sum=0; int border=0; av_assert1(s->in_buffer.ch_count == in_param->ch_count); av_assert1(s->in_buffer.planar == in_param->planar); av_assert1(s->in_buffer.fmt == in_param->fmt); tmp=out=*out_param; in = *in_param; do{ int ret, size, consumed; if(!s->resample_in_constraint && s->in_buffer_count){ buf_set(&tmp, &s->in_buffer, s->in_buffer_index); ret= s->resampler->multiple_resample(s->resample, &out, out_count, &tmp, s->in_buffer_count, &consumed); out_count -= ret; ret_sum += ret; buf_set(&out, &out, ret); s->in_buffer_count -= consumed; s->in_buffer_index += consumed; if(!in_count) break; if(s->in_buffer_count <= border){ buf_set(&in, &in, -s->in_buffer_count); in_count += s->in_buffer_count; s->in_buffer_count=0; s->in_buffer_index=0; border = 0; } } if((s->flushed || in_count) && !s->in_buffer_count){ s->in_buffer_index=0; ret= s->resampler->multiple_resample(s->resample, &out, out_count, &in, in_count, &consumed); out_count -= ret; ret_sum += ret; buf_set(&out, &out, ret); in_count -= consumed; buf_set(&in, &in, consumed); } //TODO is this check sane considering the advanced copy avoidance below size= s->in_buffer_index + 
s->in_buffer_count + in_count; if( size > s->in_buffer.count && s->in_buffer_count + in_count <= s->in_buffer_index){ buf_set(&tmp, &s->in_buffer, s->in_buffer_index); copy(&s->in_buffer, &tmp, s->in_buffer_count); s->in_buffer_index=0; }else if((ret=swri_realloc_audio(&s->in_buffer, size)) < 0) return ret; if(in_count){ int count= in_count; if(s->in_buffer_count && s->in_buffer_count+2 < count && out_count) count= s->in_buffer_count+2; buf_set(&tmp, &s->in_buffer, s->in_buffer_index + s->in_buffer_count); copy(&tmp, &in, /*in_*/count); s->in_buffer_count += count; in_count -= count; border += count; buf_set(&in, &in, count); s->resample_in_constraint= 0; if(s->in_buffer_count != count || in_count) continue; } break; }while(1); s->resample_in_constraint= !!out_count; return ret_sum; }"} {"target": 0, "idx": 25706, "func": "static bool msi_is_masked(const PCIDevice *dev, unsigned int vector) { uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); uint32_t mask; assert(vector < PCI_MSI_VECTORS_MAX); if (!(flags & PCI_MSI_FLAGS_MASKBIT)) { return false; } mask = pci_get_long(dev->config + msi_mask_off(dev, flags & PCI_MSI_FLAGS_64BIT)); return mask & (1U << vector); }"} {"target": 0, "idx": 25710, "func": "static void ehci_opreg_write(void *ptr, hwaddr addr, uint64_t val, unsigned size) { EHCIState *s = ptr; uint32_t *mmio = s->opreg + (addr >> 2); uint32_t old = *mmio; int i; trace_usb_ehci_opreg_write(addr + s->opregbase, addr2str(addr), val); switch (addr) { case USBCMD: if (val & USBCMD_HCRESET) { ehci_reset(s); val = s->usbcmd; break; } /* not supporting dynamic frame list size at the moment */ if ((val & USBCMD_FLS) && !(s->usbcmd & USBCMD_FLS)) { fprintf(stderr, \"attempt to set frame list size -- value %d\\n\", (int)val & USBCMD_FLS); val &= ~USBCMD_FLS; } if (val & USBCMD_IAAD) { /* * Process IAAD immediately, otherwise the Linux IAAD watchdog may * trigger and re-use a qh without us seeing the unlink. 
*/ s->async_stepdown = 0; qemu_bh_schedule(s->async_bh); trace_usb_ehci_doorbell_ring(); } if (((USBCMD_RUNSTOP | USBCMD_PSE | USBCMD_ASE) & val) != ((USBCMD_RUNSTOP | USBCMD_PSE | USBCMD_ASE) & s->usbcmd)) { if (s->pstate == EST_INACTIVE) { SET_LAST_RUN_CLOCK(s); } s->usbcmd = val; /* Set usbcmd for ehci_update_halt() */ ehci_update_halt(s); s->async_stepdown = 0; qemu_bh_schedule(s->async_bh); } break; case USBSTS: val &= USBSTS_RO_MASK; // bits 6 through 31 are RO ehci_clear_usbsts(s, val); // bits 0 through 5 are R/WC val = s->usbsts; ehci_update_irq(s); break; case USBINTR: val &= USBINTR_MASK; if (ehci_enabled(s) && (USBSTS_FLR & val)) { qemu_bh_schedule(s->async_bh); } break; case FRINDEX: val &= 0x00003ff8; /* frindex is 14bits and always a multiple of 8 */ break; case CONFIGFLAG: val &= 0x1; if (val) { for(i = 0; i < NB_PORTS; i++) handle_port_owner_write(s, i, 0); } break; case PERIODICLISTBASE: if (ehci_periodic_enabled(s)) { fprintf(stderr, \"ehci: PERIODIC list base register set while periodic schedule\\n\" \" is enabled and HC is enabled\\n\"); } break; case ASYNCLISTADDR: if (ehci_async_enabled(s)) { fprintf(stderr, \"ehci: ASYNC list address register set while async schedule\\n\" \" is enabled and HC is enabled\\n\"); } break; } *mmio = val; trace_usb_ehci_opreg_change(addr + s->opregbase, addr2str(addr), *mmio, old); }"} {"target": 0, "idx": 25713, "func": "static void vfio_bar_quirk_teardown(VFIOPCIDevice *vdev, int nr) { VFIOBAR *bar = &vdev->bars[nr]; while (!QLIST_EMPTY(&bar->quirks)) { VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks); memory_region_del_subregion(&bar->region.mem, &quirk->mem); object_unparent(OBJECT(&quirk->mem)); QLIST_REMOVE(quirk, next); g_free(quirk); } }"} {"target": 1, "idx": 25714, "func": "static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVBlkdebugState *s = bs->opaque; QemuOpts *opts; Error *local_err = NULL; const char *config; uint64_t align; int ret; opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); if (error_is_set(&local_err)) { error_propagate(errp, local_err); ret = -EINVAL; goto fail; } /* Read rules from config file or command line options */ config = qemu_opt_get(opts, \"config\"); ret = read_config(s, config, options, errp); if (ret) { goto fail; } /* Set initial state */ s->state = 1; /* Open the backing file */ ret = bdrv_open_image(&bs->file, qemu_opt_get(opts, \"x-image\"), options, \"image\", flags, true, false, &local_err); if (ret < 0) { error_propagate(errp, local_err); goto fail; } /* Set request alignment */ align = qemu_opt_get_size(opts, \"align\", bs->request_alignment); if (align > 0 && align < INT_MAX && !(align & (align - 1))) { bs->request_alignment = align; } else { error_setg(errp, \"Invalid alignment\"); ret = -EINVAL; goto fail; } ret = 0; fail: qemu_opts_del(opts); return ret; }"} {"target": 1, "idx": 25717, "func": "void qemu_aio_init(void) { struct sigaction act; aio_initialized = 1; sigfillset(&act.sa_mask); act.sa_flags = 0; /* do not restart syscalls to interrupt select() */ act.sa_handler = aio_signal_handler; sigaction(aio_sig_num, &act, NULL); #if defined(__GLIBC__) && defined(__linux__) { /* XXX: aio thread exit seems to hang on RedHat 9 and this init seems to fix the problem. 
*/ struct aioinit ai; memset(&ai, 0, sizeof(ai)); ai.aio_threads = 1; ai.aio_num = 1; ai.aio_idle_time = 365 * 100000; aio_init(&ai); } #endif }"} {"target": 1, "idx": 25724, "func": "static void zynq_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; ObjectClass *cpu_oc; ARMCPU *cpu; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *ext_ram = g_new(MemoryRegion, 1); MemoryRegion *ocm_ram = g_new(MemoryRegion, 1); DeviceState *dev; SysBusDevice *busdev; qemu_irq pic[64]; Error *err = NULL; int n; if (!cpu_model) { cpu_model = \"cortex-a9\"; } cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model); cpu = ARM_CPU(object_new(object_class_get_name(cpu_oc))); /* By default A9 CPUs have EL3 enabled. This board does not * currently support EL3 so the CPU EL3 property is disabled before * realization. */ if (object_property_find(OBJECT(cpu), \"has_el3\", NULL)) { object_property_set_bool(OBJECT(cpu), false, \"has_el3\", &err); if (err) { error_report_err(err); exit(1); } } object_property_set_int(OBJECT(cpu), ZYNQ_BOARD_MIDR, \"midr\", &err); if (err) { error_report_err(err); exit(1); } object_property_set_int(OBJECT(cpu), MPCORE_PERIPHBASE, \"reset-cbar\", &err); if (err) { error_report_err(err); exit(1); } object_property_set_bool(OBJECT(cpu), true, \"realized\", &err); if (err) { error_report_err(err); exit(1); } /* max 2GB ram */ if (ram_size > 0x80000000) { ram_size = 0x80000000; } /* DDR remapped to address zero. */ memory_region_allocate_system_memory(ext_ram, NULL, \"zynq.ext_ram\", ram_size); memory_region_add_subregion(address_space_mem, 0, ext_ram); /* 256K of on-chip memory */ memory_region_init_ram(ocm_ram, NULL, \"zynq.ocm_ram\", 256 << 10, &error_abort); vmstate_register_ram_global(ocm_ram); memory_region_add_subregion(address_space_mem, 0xFFFC0000, ocm_ram); DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0); /* AMD */ pflash_cfi02_register(0xe2000000, NULL, \"zynq.pflash\", FLASH_SIZE, dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, FLASH_SECTOR_SIZE, FLASH_SIZE/FLASH_SECTOR_SIZE, 1, 1, 0x0066, 0x0022, 0x0000, 0x0000, 0x0555, 0x2aa, 0); dev = qdev_create(NULL, \"xilinx,zynq_slcr\"); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xF8000000); dev = qdev_create(NULL, \"a9mpcore_priv\"); qdev_prop_set_uint32(dev, \"num-cpu\", 1); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE); sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ)); for (n = 0; n < 64; n++) { pic[n] = qdev_get_gpio_in(dev, n); } zynq_init_spi_flashes(0xE0006000, pic[58-IRQ_OFFSET], false); zynq_init_spi_flashes(0xE0007000, pic[81-IRQ_OFFSET], false); zynq_init_spi_flashes(0xE000D000, pic[51-IRQ_OFFSET], true); sysbus_create_simple(\"xlnx,ps7-usb\", 0xE0002000, pic[53-IRQ_OFFSET]); sysbus_create_simple(\"xlnx,ps7-usb\", 0xE0003000, pic[76-IRQ_OFFSET]); sysbus_create_simple(\"cadence_uart\", 0xE0000000, pic[59-IRQ_OFFSET]); sysbus_create_simple(\"cadence_uart\", 0xE0001000, pic[82-IRQ_OFFSET]); sysbus_create_varargs(\"cadence_ttc\", 0xF8001000, pic[42-IRQ_OFFSET], pic[43-IRQ_OFFSET], pic[44-IRQ_OFFSET], NULL); sysbus_create_varargs(\"cadence_ttc\", 0xF8002000, pic[69-IRQ_OFFSET], pic[70-IRQ_OFFSET], pic[71-IRQ_OFFSET], NULL); gem_init(&nd_table[0], 0xE000B000, pic[54-IRQ_OFFSET]); gem_init(&nd_table[1], 0xE000C000, pic[77-IRQ_OFFSET]); dev = qdev_create(NULL, \"generic-sdhci\"); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xE0100000); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[56-IRQ_OFFSET]); dev = qdev_create(NULL, \"generic-sdhci\"); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xE0101000); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[79-IRQ_OFFSET]); dev = qdev_create(NULL, \"pl330\"); qdev_prop_set_uint8(dev, \"num_chnls\", 8); qdev_prop_set_uint8(dev, \"num_periph_req\", 4); qdev_prop_set_uint8(dev, \"num_events\", 16); qdev_prop_set_uint8(dev, \"data_width\", 64); qdev_prop_set_uint8(dev, \"wr_cap\", 8); qdev_prop_set_uint8(dev, \"wr_q_dep\", 16); qdev_prop_set_uint8(dev, \"rd_cap\", 8); qdev_prop_set_uint8(dev, \"rd_q_dep\", 16); qdev_prop_set_uint16(dev, \"data_buffer_dep\", 256); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, 0xF8003000); sysbus_connect_irq(busdev, 0, pic[45-IRQ_OFFSET]); /* abort irq line */ for (n = 0; n < 8; ++n) { /* event irqs */ sysbus_connect_irq(busdev, n + 1, pic[dma_irqs[n] - IRQ_OFFSET]); } zynq_binfo.ram_size = ram_size; zynq_binfo.kernel_filename = kernel_filename; zynq_binfo.kernel_cmdline = kernel_cmdline; zynq_binfo.initrd_filename = initrd_filename; zynq_binfo.nb_cpus = 1; zynq_binfo.board_id = 0xd32; zynq_binfo.loader_start = 0; arm_load_kernel(ARM_CPU(first_cpu), &zynq_binfo); }"} {"target": 1, "idx": 25726, "func": "static void spapr_phb_realize(DeviceState *dev, Error **errp) { sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); SysBusDevice *s = SYS_BUS_DEVICE(dev); sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s); PCIHostState *phb = PCI_HOST_BRIDGE(s); sPAPRPHBClass *info = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(s); char *namebuf; int i; PCIBus *bus; uint64_t msi_window_size = 4096; if (sphb->index != (uint32_t)-1) { hwaddr windows_base; if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn != (uint32_t)-1) || (sphb->mem_win_addr != (hwaddr)-1) || (sphb->io_win_addr != (hwaddr)-1)) { error_setg(errp, \"Either \\\"index\\\" or other parameters must\" \" be specified for PAPR PHB, not both\"); return; } if 
(sphb->index > SPAPR_PCI_MAX_INDEX) { error_setg(errp, \"\\\"index\\\" for PAPR PHB is too large (max %u)\", SPAPR_PCI_MAX_INDEX); return; } sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index; sphb->dma_liobn = SPAPR_PCI_LIOBN(sphb->index, 0); windows_base = SPAPR_PCI_WINDOW_BASE + sphb->index * SPAPR_PCI_WINDOW_SPACING; sphb->mem_win_addr = windows_base + SPAPR_PCI_MMIO_WIN_OFF; sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF; } if (sphb->buid == (uint64_t)-1) { error_setg(errp, \"BUID not specified for PHB\"); return; } if (sphb->dma_liobn == (uint32_t)-1) { error_setg(errp, \"LIOBN not specified for PHB\"); return; } if (sphb->mem_win_addr == (hwaddr)-1) { error_setg(errp, \"Memory window address not specified for PHB\"); return; } if (sphb->io_win_addr == (hwaddr)-1) { error_setg(errp, \"IO window address not specified for PHB\"); return; } if (spapr_pci_find_phb(spapr, sphb->buid)) { error_setg(errp, \"PCI host bridges must have unique BUIDs\"); return; } sphb->dtbusname = g_strdup_printf(\"pci@%\" PRIx64, sphb->buid); namebuf = alloca(strlen(sphb->dtbusname) + 32); /* Initialize memory regions */ sprintf(namebuf, \"%s.mmio\", sphb->dtbusname); memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX); sprintf(namebuf, \"%s.mmio-alias\", sphb->dtbusname); memory_region_init_alias(&sphb->memwindow, OBJECT(sphb), namebuf, &sphb->memspace, SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size); memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr, &sphb->memwindow); /* Initialize IO regions */ sprintf(namebuf, \"%s.io\", sphb->dtbusname); memory_region_init(&sphb->iospace, OBJECT(sphb), namebuf, SPAPR_PCI_IO_WIN_SIZE); sprintf(namebuf, \"%s.io-alias\", sphb->dtbusname); memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf, &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE); memory_region_add_subregion(get_system_memory(), sphb->io_win_addr, &sphb->iowindow); bus = pci_register_bus(dev, NULL, pci_spapr_set_irq, pci_spapr_map_irq, sphb, &sphb->memspace, &sphb->iospace, PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS); phb->bus = bus; qbus_set_hotplug_handler(BUS(phb->bus), DEVICE(sphb), NULL); /* * Initialize PHB address space. * By default there will be at least one subregion for default * 32bit DMA window. * Later the guest might want to create another DMA window * which will become another memory subregion. */ sprintf(namebuf, \"%s.iommu-root\", sphb->dtbusname); memory_region_init(&sphb->iommu_root, OBJECT(sphb), namebuf, UINT64_MAX); address_space_init(&sphb->iommu_as, &sphb->iommu_root, sphb->dtbusname); /* * As MSI/MSIX interrupts trigger by writing at MSI/MSIX vectors, * we need to allocate some memory to catch those writes coming * from msi_notify()/msix_notify(). * As MSIMessage:addr is going to be the same and MSIMessage:data * is going to be a VIRQ number, 4 bytes of the MSI MR will only * be used. * * For KVM we want to ensure that this memory is a full page so that * our memory slot is of page size granularity. 
*/ #ifdef CONFIG_KVM if (kvm_enabled()) { msi_window_size = getpagesize(); } #endif memory_region_init_io(&sphb->msiwindow, NULL, &spapr_msi_ops, spapr, \"msi\", msi_window_size); memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW, &sphb->msiwindow); pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb); pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq); QLIST_INSERT_HEAD(&spapr->phbs, sphb, list); /* Initialize the LSI table */ for (i = 0; i < PCI_NUM_PINS; i++) { uint32_t irq; irq = xics_alloc_block(spapr->icp, 0, 1, true, false); if (!irq) { error_setg(errp, \"spapr_allocate_lsi failed\"); return; } sphb->lsi_table[i].irq = irq; } /* allocate connectors for child PCI devices */ if (sphb->dr_enabled) { for (i = 0; i < PCI_SLOT_MAX * 8; i++) { spapr_dr_connector_new(OBJECT(phb), SPAPR_DR_CONNECTOR_TYPE_PCI, (sphb->index << 16) | i); } } if (!info->finish_realize) { error_setg(errp, \"finish_realize not defined\"); return; } info->finish_realize(sphb, errp); sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free); }"} {"target": 0, "idx": 25732, "func": "static int msrle_decode_8_16_24_32(AVCodecContext *avctx, AVPicture *pic, int depth, GetByteContext *gb) { uint8_t *output, *output_end; int p1, p2, line=avctx->height - 1, pos=0, i; uint16_t pix16; uint32_t pix32; unsigned int width= FFABS(pic->linesize[0]) / (depth >> 3); output = pic->data[0] + (avctx->height - 1) * pic->linesize[0]; output_end = pic->data[0] + avctx->height * pic->linesize[0]; while (bytestream2_get_bytes_left(gb) > 0) { p1 = bytestream2_get_byteu(gb); if(p1 == 0) { //Escape code p2 = bytestream2_get_byte(gb); if(p2 == 0) { //End-of-line if (--line < 0) { if (bytestream2_get_be16(gb) == 1) { // end-of-picture return 0; } else { av_log(avctx, AV_LOG_ERROR, \"Next line is beyond picture bounds (%d bytes left)\\n\", bytestream2_get_bytes_left(gb)); return AVERROR_INVALIDDATA; } } output = pic->data[0] + line * pic->linesize[0]; pos = 0; continue; } else if(p2 == 1) { //End-of-picture return 0; } else if(p2 == 2) { //Skip p1 = bytestream2_get_byte(gb); p2 = bytestream2_get_byte(gb); line -= p2; pos += p1; if (line < 0 || pos >= width){ av_log(avctx, AV_LOG_ERROR, \"Skip beyond picture bounds\\n\"); return -1; } output = pic->data[0] + line * pic->linesize[0] + pos * (depth >> 3); continue; } // Copy data if ((pic->linesize[0] > 0 && output + p2 * (depth >> 3) > output_end) || (pic->linesize[0] < 0 && output + p2 * (depth >> 3) < output_end)) { bytestream2_skip(gb, 2 * (depth >> 3)); continue; } else if (bytestream2_get_bytes_left(gb) < p2 * (depth >> 3)) { av_log(avctx, AV_LOG_ERROR, \"bytestream overrun\\n\"); return AVERROR_INVALIDDATA; } if ((depth == 8) || (depth == 24)) { for(i = 0; i < p2 * (depth >> 3); i++) { *output++ = bytestream2_get_byteu(gb); } // RLE8 copy is actually padded - and runs are not! 
if(depth == 8 && (p2 & 1)) { bytestream2_skip(gb, 1); } } else if (depth == 16) { for(i = 0; i < p2; i++) { *(uint16_t*)output = bytestream2_get_le16u(gb); output += 2; } } else if (depth == 32) { for(i = 0; i < p2; i++) { *(uint32_t*)output = bytestream2_get_le32u(gb); output += 4; } } pos += p2; } else { //run of pixels uint8_t pix[3]; //original pixel if ((pic->linesize[0] > 0 && output + p1 * (depth >> 3) > output_end) || (pic->linesize[0] < 0 && output + p1 * (depth >> 3) < output_end)) continue; switch(depth){ case 8: pix[0] = bytestream2_get_byte(gb); for(i = 0; i < p1; i++) *output++ = pix[0]; break; case 16: pix16 = bytestream2_get_le16(gb); for(i = 0; i < p1; i++) { *(uint16_t*)output = pix16; output += 2; } break; case 24: pix[0] = bytestream2_get_byte(gb); pix[1] = bytestream2_get_byte(gb); pix[2] = bytestream2_get_byte(gb); for(i = 0; i < p1; i++) { *output++ = pix[0]; *output++ = pix[1]; *output++ = pix[2]; } break; case 32: pix32 = bytestream2_get_le32(gb); for(i = 0; i < p1; i++) { *(uint32_t*)output = pix32; output += 4; } break; } pos += p1; } } av_log(avctx, AV_LOG_WARNING, \"MS RLE warning: no end-of-picture code\\n\"); return 0; }"} {"target": 1, "idx": 25743, "func": "static void RENAME(SwScale_YV12slice)(unsigned char* srcptr[],int stride[], int srcSliceY , int srcSliceH, uint8_t* dstptr[], int dststride, int dstbpp, int srcW, int srcH, int dstW, int dstH){ unsigned int lumXInc= (srcW << 16) / dstW; unsigned int lumYInc= (srcH << 16) / dstH; unsigned int chrXInc; unsigned int chrYInc; static int dstY; // used to detect a size change static int oldDstW= -1; static int oldSrcW= -1; static int oldDstH= -1; static int oldSrcH= -1; static int oldFlags=-1; static int lastInLumBuf; static int lastInChrBuf; int chrDstW, chrDstH; static int lumBufIndex=0; static int chrBufIndex=0; static int firstTime=1; int widthAlign= dstbpp==12 ? 16 : 8; if(((dstW + widthAlign-1)&(~(widthAlign-1))) > dststride) { dstW&= ~(widthAlign-1); if(firstTime) fprintf(stderr, \"SwScaler: Warning: dstStride is not a multiple of %d!\\n\" \"SwScaler: ->lowering width to compensate, new width=%d\\n\" \"SwScaler: ->cannot do aligned memory acesses anymore\\n\", widthAlign, dstW); } //printf(\"%d %d %d %d\\n\", srcW, srcH, dstW, dstH); //printf(\"%d %d %d %d\\n\", lumXInc, lumYInc, srcSliceY, srcSliceH); #ifdef HAVE_MMX2 canMMX2BeUsed= (lumXInc <= 0x10000 && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0; if(!canMMX2BeUsed && lumXInc <= 0x10000 && (srcW&15)==0 && sws_flags==SWS_FAST_BILINEAR) { if(firstTime) //FIXME only if verbose ? fprintf(stderr, \"SwScaler: output Width is not a multiple of 32 -> no MMX2 scaler\\n\"); } #endif if(firstTime) { #if defined (DITHER1XBPP) && defined (HAVE_MMX) char *dither= \" dithered\"; #else char *dither= \"\"; #endif if(sws_flags==SWS_FAST_BILINEAR) fprintf(stderr, \"SwScaler: FAST_BILINEAR scaler \"); else if(sws_flags==SWS_BILINEAR) fprintf(stderr, \"SwScaler: BILINEAR scaler \"); else if(sws_flags==SWS_BICUBIC) fprintf(stderr, \"SwScaler: BICUBIC scaler \"); else fprintf(stderr, \"SwScaler: ehh flags invalid?! 
\"); if(dstbpp==15) fprintf(stderr, \"with%s BGR15 output \", dither); else if(dstbpp==16) fprintf(stderr, \"with%s BGR16 output \", dither); else if(dstbpp==24) fprintf(stderr, \"with BGR24 output \"); else if(dstbpp==32) fprintf(stderr, \"with BGR32 output \"); else if(dstbpp==12) fprintf(stderr, \"with YV12 output \"); else fprintf(stderr, \"without output \"); #ifdef HAVE_MMX2 fprintf(stderr, \"using MMX2\\n\"); #elif defined (HAVE_3DNOW) fprintf(stderr, \"using 3DNOW\\n\"); #elif defined (HAVE_MMX) fprintf(stderr, \"using MMX\\n\"); #elif defined (ARCH_X86) fprintf(stderr, \"using X86 ASM2\\n\"); #else fprintf(stderr, \"using C\\n\"); #endif } // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst // n-2 is the last chrominance sample available // this is not perfect, but noone shuld notice the difference, the more correct variant // would be like the vertical one, but that would require some special code for the // first and last pixel if(sws_flags==SWS_FAST_BILINEAR) { if(canMMX2BeUsed) lumXInc+= 20; else lumXInc = ((srcW-2)<<16)/(dstW-2) - 20; } if(fullUVIpol && !(dstbpp==12)) chrXInc= lumXInc>>1, chrDstW= dstW; else chrXInc= lumXInc, chrDstW= dstW>>1; if(dstbpp==12) chrYInc= lumYInc, chrDstH= dstH>>1; else chrYInc= lumYInc>>1, chrDstH= dstH; // force calculation of the horizontal interpolation of the first line if(srcSliceY ==0){ // printf(\"dstW %d, srcw %d, mmx2 %d\\n\", dstW, srcW, canMMX2BeUsed); lumBufIndex=0; chrBufIndex=0; dstY=0; //precalculate horizontal scaler filter coefficients if(oldDstW!=dstW || oldSrcW!=srcW || oldFlags!=sws_flags) { #ifdef HAVE_MMX const int filterAlign=4; #else const int filterAlign=1; #endif oldDstW= dstW; oldSrcW= srcW; oldFlags= sws_flags; if(sws_flags != SWS_FAST_BILINEAR) { RENAME(initFilter)(hLumFilter, hLumFilterPos, &hLumFilterSize, lumXInc, srcW , dstW , filterAlign, 1<<14); RENAME(initFilter)(hChrFilter, hChrFilterPos, &hChrFilterSize, chrXInc, srcW>>1, chrDstW, filterAlign, 1<<14); } #ifdef HAVE_MMX2 // cant downscale !!! if(canMMX2BeUsed && sws_flags == SWS_FAST_BILINEAR) { initMMX2HScaler(dstW , lumXInc, funnyYCode); initMMX2HScaler(chrDstW, chrXInc, funnyUVCode); } #endif } // Init Horizontal stuff if(oldDstH!=dstH || oldSrcH!=srcH || oldFlags!=sws_flags) { int i; oldDstH= dstH; oldSrcH= srcH; oldFlags= sws_flags; //FIXME swsflags conflict with x check // deallocate pixbufs for(i=0; i>1, chrDstH, 1, (1<<12)-4); // Calculate Buffer Sizes so that they wont run out while handling these damn slices vLumBufSize= vLumFilterSize; vChrBufSize= vChrFilterSize; for(i=0; i>1)) vChrBufSize= (nextSlice>>1) - vChrFilterPos[chrI]; } // allocate pixbufs (we use dynamic allocation because otherwise we would need to // allocate several megabytes to handle all possible cases) for(i=0; i>1)*(dstY>>1); unsigned char *vDest=dstptr[2]+(dststride>>1)*(dstY>>1); const int chrDstY= dstbpp==12 ? 
(dstY>>1) : dstY; const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input if(sws_flags == SWS_FAST_BILINEAR) { //handle holes if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1; if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1; } ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1) ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1) // Do we have enough lines in this slice to output the dstY line if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < ((srcSliceY + srcSliceH)>>1)) { //Do horizontal scaling while(lastInLumBuf < lastLumSrcY) { uint8_t *src= srcptr[0]+(lastInLumBuf + 1 - srcSliceY)*stride[0]; lumBufIndex++; ASSERT(lumBufIndex < 2*vLumBufSize) ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) // printf(\"%d %d\\n\", lumBufIndex, vLumBufSize); RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, src, srcW, lumXInc); lastInLumBuf++; } while(lastInChrBuf < lastChrSrcY) { uint8_t *src1= srcptr[1]+(lastInChrBuf + 1 - (srcSliceY>>1))*stride[1]; uint8_t *src2= srcptr[2]+(lastInChrBuf + 1 - (srcSliceY>>1))*stride[2]; chrBufIndex++; ASSERT(chrBufIndex < 2*vChrBufSize) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) < (srcSliceH>>1)) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) >= 0) RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, srcW>>1, chrXInc); lastInChrBuf++; } //wrap buf index around to stay inside the ring buffer if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; } else // not enough lines left in this slice -> load the rest in the buffer { /* printf(\"%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\\n\", firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY, lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize, vChrBufSize, vLumBufSize); */ //Do horizontal scaling while(lastInLumBuf+1 < srcSliceY + srcSliceH) { uint8_t *src= srcptr[0]+(lastInLumBuf + 1 - srcSliceY)*stride[0]; lumBufIndex++; ASSERT(lumBufIndex < 2*vLumBufSize) ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, src, srcW, lumXInc); lastInLumBuf++; } while(lastInChrBuf+1 < ((srcSliceY + srcSliceH)>>1)) { uint8_t *src1= srcptr[1]+(lastInChrBuf + 1 - (srcSliceY>>1))*stride[1]; uint8_t *src2= srcptr[2]+(lastInChrBuf + 1 - (srcSliceY>>1))*stride[2]; chrBufIndex++; ASSERT(chrBufIndex < 2*vChrBufSize) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) < (srcSliceH>>1)) ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) >= 0) RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, srcW>>1, chrXInc); lastInChrBuf++; } //wrap buf index around to stay inside the ring buffer if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; break; //we cant output a dstY line so lets try with the next slice } #ifdef HAVE_MMX b5Dither= dither8[dstY&1]; g6Dither= dither4[dstY&1]; g5Dither= dither8[dstY&1]; r5Dither= dither8[(dstY+1)&1]; #endif if(dstbpp==12) //YV12 { if(dstY&1) uDest=vDest= NULL; //FIXME split functions in lumi / chromi if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12 { int16_t *lumBuf = lumPixBuf[0]; int16_t *chrBuf= 
chrPixBuf[0]; RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW); } else //General YV12 { int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; RENAME(yuv2yuvX)( vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, vChrFilter+(dstY>>1)*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, uDest, vDest, dstW, lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+(dstY>>1)*vChrFilterSize*4); } } else { int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB { int chrAlpha= vChrFilter[2*dstY+1]; RENAME(yuv2rgb1)(*lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1), dest, dstW, chrAlpha, dstbpp); } else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB { int lumAlpha= vLumFilter[2*dstY+1]; int chrAlpha= vChrFilter[2*dstY+1]; RENAME(yuv2rgb2)(*lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1), dest, dstW, lumAlpha, chrAlpha, dstbpp); } else //General RGB { RENAME(yuv2rgbX)( vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, dest, dstW, dstbpp, lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+dstY*vChrFilterSize*4); } } } #ifdef HAVE_MMX __asm __volatile(SFENCE:::\"memory\"); __asm __volatile(EMMS:::\"memory\"); #endif firstTime=0; }"} {"target": 1, "idx": 25744, "func": "void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number) { AVFrameSideData *side_data; mpeg1_encode_sequence_header(s); /* mpeg1 picture header */ put_header(s, PICTURE_START_CODE); /* temporal reference */ // RAL: s->picture_number instead of s->fake_picture_number put_bits(&s->pb, 10, (s->picture_number - s->gop_picture_number) & 0x3ff); put_bits(&s->pb, 3, s->pict_type); s->vbv_delay_ptr = s->pb.buf + put_bits_count(&s->pb) / 8; put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */ // RAL: Forward f_code also needed for B-frames if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) { put_bits(&s->pb, 1, 0); /* half pel coordinates */ if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO) put_bits(&s->pb, 3, s->f_code); /* forward_f_code */ else put_bits(&s->pb, 3, 7); /* forward_f_code */ } // RAL: Backward f_code necessary for B-frames if (s->pict_type == AV_PICTURE_TYPE_B) { put_bits(&s->pb, 1, 0); /* half pel coordinates */ if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO) put_bits(&s->pb, 3, s->b_code); /* backward_f_code */ else put_bits(&s->pb, 3, 7); /* backward_f_code */ } put_bits(&s->pb, 1, 0); /* extra bit picture */ s->frame_pred_frame_dct = 1; if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { put_header(s, EXT_START_CODE); put_bits(&s->pb, 4, 8); /* pic ext */ if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) { put_bits(&s->pb, 4, s->f_code); put_bits(&s->pb, 4, s->f_code); } else { put_bits(&s->pb, 8, 255); } if (s->pict_type == AV_PICTURE_TYPE_B) { put_bits(&s->pb, 4, s->b_code); put_bits(&s->pb, 4, s->b_code); } else { put_bits(&s->pb, 8, 255); } put_bits(&s->pb, 2, s->intra_dc_precision); assert(s->picture_structure == PICT_FRAME); put_bits(&s->pb, 2, s->picture_structure); if (s->progressive_sequence) put_bits(&s->pb, 1, 0); /* no repeat */ else 
put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first); /* XXX: optimize the generation of this flag with entropy measures */ s->frame_pred_frame_dct = s->progressive_sequence; put_bits(&s->pb, 1, s->frame_pred_frame_dct); put_bits(&s->pb, 1, s->concealment_motion_vectors); put_bits(&s->pb, 1, s->q_scale_type); put_bits(&s->pb, 1, s->intra_vlc_format); put_bits(&s->pb, 1, s->alternate_scan); put_bits(&s->pb, 1, s->repeat_first_field); s->progressive_frame = s->progressive_sequence; /* chroma_420_type */ put_bits(&s->pb, 1, s->chroma_format == CHROMA_420 ? s->progressive_frame : 0); put_bits(&s->pb, 1, s->progressive_frame); put_bits(&s->pb, 1, 0); /* composite_display_flag */ } if (s->scan_offset) { int i; put_header(s, USER_START_CODE); for (i = 0; i < sizeof(svcd_scan_offset_placeholder); i++) put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]); } side_data = av_frame_get_side_data(&s->current_picture_ptr->f, AV_FRAME_DATA_STEREO3D); if (side_data) { AVStereo3D *stereo = (AVStereo3D *)side_data->data; uint8_t fpa_type; switch (stereo->type) { case AV_STEREO3D_SIDEBYSIDE: fpa_type = 0x03; break; case AV_STEREO3D_TOPBOTTOM: fpa_type = 0x04; break; case AV_STEREO3D_2D: fpa_type = 0x08; break; case AV_STEREO3D_SIDEBYSIDE_QUINCUNX: fpa_type = 0x23; break; default: fpa_type = 0; break; } if (fpa_type != 0) { put_header(s, USER_START_CODE); put_bits(&s->pb, 8, 'J'); // S3D_video_format_signaling_identifier put_bits(&s->pb, 8, 'P'); put_bits(&s->pb, 8, '3'); put_bits(&s->pb, 8, 'D'); put_bits(&s->pb, 8, 0x03); // S3D_video_format_length put_bits(&s->pb, 1, 1); // reserved_bit put_bits(&s->pb, 7, fpa_type); // S3D_video_format_type put_bits(&s->pb, 8, 0x04); // reserved_data[0] put_bits(&s->pb, 8, 0xFF); // reserved_data[1] } } s->mb_y = 0; ff_mpeg1_encode_slice_header(s); }"} {"target": 0, "idx": 25769, "func": "static void qmp_input_type_any(Visitor *v, const char *name, QObject **obj, Error **errp) { QmpInputVisitor *qiv = to_qiv(v); QObject *qobj = qmp_input_get_object(qiv, name, true, errp); *obj = NULL; if (!qobj) { return; } qobject_incref(qobj); *obj = qobj; }"} {"target": 1, "idx": 25775, "func": "void *g_try_malloc(size_t n_bytes) { __coverity_negative_sink__(n_bytes); return malloc(n_bytes == 0 ? 
1 : n_bytes); }"} {"target": 0, "idx": 25777, "func": "static void FUNCC(pred8x8l_vertical_add)(uint8_t *_pix, const int16_t *_block, ptrdiff_t stride) { int i; pixel *pix = (pixel*)_pix; const dctcoef *block = (const dctcoef*)_block; stride >>= sizeof(pixel)-1; pix -= stride; for(i=0; i<8; i++){ pixel v = pix[0]; pix[1*stride]= v += block[0]; pix[2*stride]= v += block[8]; pix[3*stride]= v += block[16]; pix[4*stride]= v += block[24]; pix[5*stride]= v += block[32]; pix[6*stride]= v += block[40]; pix[7*stride]= v += block[48]; pix[8*stride]= v + block[56]; pix++; block++; } }"} {"target": 1, "idx": 25782, "func": "int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num) { uint32_t exit_at_level[33] = { 404 }; unsigned i, j, p, code; for (p = 0; (bits[p] == 0) && (p < num); ++p) ; if (p == num) return 0; codes[p] = 0; if (bits[p] > 32) return AVERROR_INVALIDDATA; for (i = 0; i < bits[p]; ++i) exit_at_level[i+1] = 1 << i; ++p; for (i = p; (bits[i] == 0) && (i < num); ++i) ; if (i == num) return 0; for (; p < num; ++p) { if (bits[p] > 32) return AVERROR_INVALIDDATA; if (bits[p] == 0) continue; // find corresponding exit(node which the tree can grow further from) for (i = bits[p]; i > 0; --i) if (exit_at_level[i]) break; if (!i) // overspecified tree return AVERROR_INVALIDDATA; code = exit_at_level[i]; exit_at_level[i] = 0; // construct code (append 0s to end) and introduce new exits for (j = i + 1 ;j <= bits[p]; ++j) exit_at_level[j] = code + (1 << (j - 1)); codes[p] = code; } //no exits should be left (underspecified tree - ie. unused valid vlcs - not allowed by SPEC) for (p = 1; p < 33; p++) if (exit_at_level[p]) return AVERROR_INVALIDDATA; return 0; }"} {"target": 1, "idx": 25785, "func": "static int tftp_session_allocate(Slirp *slirp, struct sockaddr_storage *srcsas, struct tftp_t *tp) { struct tftp_session *spt; int k; for (k = 0; k < TFTP_SESSIONS_MAX; k++) { spt = &slirp->tftp_sessions[k]; if (!tftp_session_in_use(spt)) goto found; /* sessions time out after 5 inactive seconds */ if ((int)(curtime - spt->timestamp) > 5000) { tftp_session_terminate(spt); goto found; } } return -1; found: memset(spt, 0, sizeof(*spt)); spt->client_addr = *srcsas; spt->fd = -1; spt->block_size = 512; spt->client_port = tp->udp.uh_sport; spt->slirp = slirp; tftp_session_update(spt); return k; }"} {"target": 1, "idx": 25839, "func": "static int tcp_get_msgfd(CharDriverState *chr) { TCPCharDriver *s = chr->opaque; return s->msgfd; }"} {"target": 1, "idx": 25856, "func": "static void decode_cdlms(WmallDecodeCtx *s) { int c, i; int cdlms_send_coef = get_bits1(&s->gb); for(c = 0; c < s->num_channels; c++) { s->cdlms_ttl[c] = get_bits(&s->gb, 3) + 1; for(i = 0; i < s->cdlms_ttl[c]; i++) { s->cdlms[c][i].order = (get_bits(&s->gb, 7) + 1) * 8; } for(i = 0; i < s->cdlms_ttl[c]; i++) { s->cdlms[c][i].scaling = get_bits(&s->gb, 4); } if(cdlms_send_coef) { for(i = 0; i < s->cdlms_ttl[c]; i++) { int cbits, shift_l, shift_r, j; cbits = av_log2(s->cdlms[c][i].order); if(1 << cbits < s->cdlms[c][i].order) cbits++; s->cdlms[c][i].coefsend = get_bits(&s->gb, cbits) + 1; cbits = av_log2(s->cdlms[c][i].scaling + 1); if(1 << cbits < s->cdlms[c][i].scaling + 1) cbits++; s->cdlms[c][i].bitsend = get_bits(&s->gb, cbits) + 2; shift_l = 32 - s->cdlms[c][i].bitsend; shift_r = 32 - 2 - s->cdlms[c][i].scaling; for(j = 0; j < s->cdlms[c][i].coefsend; j++) { s->cdlms[c][i].coefs[j] = (get_bits(&s->gb, s->cdlms[c][i].bitsend) << shift_l) >> shift_r; } } } } }"} {"target": 1, "idx": 25889, "func": "static void imdct12(int *out, 
int *in) { int in0, in1, in2, in3, in4, in5, t1, t2; in0= in[0*3]<<5; in1= (in[1*3] + in[0*3])<<5; in2= (in[2*3] + in[1*3])<<5; in3= (in[3*3] + in[2*3])<<5; in4= (in[4*3] + in[3*3])<<5; in5= (in[5*3] + in[4*3])<<5; in5 += in3; in3 += in1; in2= MULH(2*in2, C3); in3= MULH(2*in3, C3); t1 = in0 - in4; t2 = MULL(in1 - in5, icos36[4]); out[ 7]= out[10]= t1 + t2; out[ 1]= out[ 4]= t1 - t2; in0 += in4>>1; in4 = in0 + in2; in1 += in5>>1; in5 = MULL(in1 + in3, icos36[1]); out[ 8]= out[ 9]= in4 + in5; out[ 2]= out[ 3]= in4 - in5; in0 -= in2; in1 = MULL(in1 - in3, icos36[7]); out[ 0]= out[ 5]= in0 - in1; out[ 6]= out[11]= in0 + in1; }"} {"target": 1, "idx": 25913, "func": "static void lan9118_eeprom_cmd(lan9118_state *s, int cmd, int addr) { s->e2p_cmd = (s->e2p_cmd & 0x10) | (cmd << 28) | addr; switch (cmd) { case 0: s->e2p_data = s->eeprom[addr]; DPRINTF(\"EEPROM Read %d = 0x%02x\\n\", addr, s->e2p_data); case 1: s->eeprom_writable = 0; DPRINTF(\"EEPROM Write Disable\\n\"); case 2: /* EWEN */ s->eeprom_writable = 1; DPRINTF(\"EEPROM Write Enable\\n\"); case 3: /* WRITE */ if (s->eeprom_writable) { s->eeprom[addr] &= s->e2p_data; DPRINTF(\"EEPROM Write %d = 0x%02x\\n\", addr, s->e2p_data); } else { DPRINTF(\"EEPROM Write %d (ignored)\\n\", addr); } case 4: /* WRAL */ if (s->eeprom_writable) { for (addr = 0; addr < 128; addr++) { s->eeprom[addr] &= s->e2p_data; } DPRINTF(\"EEPROM Write All 0x%02x\\n\", s->e2p_data); } else { DPRINTF(\"EEPROM Write All (ignored)\\n\"); } case 5: /* ERASE */ if (s->eeprom_writable) { s->eeprom[addr] = 0xff; DPRINTF(\"EEPROM Erase %d\\n\", addr); } else { DPRINTF(\"EEPROM Erase %d (ignored)\\n\", addr); } case 6: /* ERAL */ if (s->eeprom_writable) { memset(s->eeprom, 0xff, 128); DPRINTF(\"EEPROM Erase All\\n\"); } else { DPRINTF(\"EEPROM Erase All (ignored)\\n\"); } case 7: /* RELOAD */ lan9118_reload_eeprom(s); } }"} {"target": 0, "idx": 25921, "func": "static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg.host_ptr; uint32_t address = si->param & 0x7ffffe00u; /* cpu has to be stopped */ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) { set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); return; } cpu_synchronize_state(cs); if (s390_store_status(cpu, address, false)) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; }"} {"target": 0, "idx": 25934, "func": "static void ppc_prep_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; const char *boot_device = machine->boot_order; MemoryRegion *sysmem = get_system_memory(); PowerPCCPU *cpu = NULL; CPUPPCState *env = NULL; Nvram *m48t59; #if 0 MemoryRegion *xcsr = g_new(MemoryRegion, 1); #endif int linux_boot, i, nb_nics1; MemoryRegion *ram = g_new(MemoryRegion, 1); uint32_t kernel_base, initrd_base; long kernel_size, initrd_size; DeviceState *dev; PCIHostState *pcihost; PCIBus *pci_bus; PCIDevice *pci; ISABus *isa_bus; ISADevice *isa; int ppc_boot_device; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; sysctrl = g_malloc0(sizeof(sysctrl_t)); linux_boot = (kernel_filename != NULL); /* init CPUs */ if (machine->cpu_model == NULL) machine->cpu_model = \"602\"; for (i = 0; i < smp_cpus; i++) { cpu = cpu_ppc_init(machine->cpu_model); if (cpu == NULL) { fprintf(stderr, \"Unable to find PowerPC CPU definition\\n\"); exit(1); } env = 
&cpu->env; if (env->flags & POWERPC_FLAG_RTC_CLK) { /* POWER / PowerPC 601 RTC clock frequency is 7.8125 MHz */ cpu_ppc_tb_init(env, 7812500UL); } else { /* Set time-base frequency to 100 Mhz */ cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL); } qemu_register_reset(ppc_prep_reset, cpu); } /* allocate RAM */ memory_region_allocate_system_memory(ram, NULL, \"ppc_prep.ram\", ram_size); memory_region_add_subregion(sysmem, 0, ram); if (linux_boot) { kernel_base = KERNEL_LOAD_ADDR; /* now we can load the kernel */ kernel_size = load_image_targphys(kernel_filename, kernel_base, ram_size - kernel_base); if (kernel_size < 0) { hw_error(\"qemu: could not load kernel '%s'\\n\", kernel_filename); exit(1); } /* load initrd */ if (initrd_filename) { initrd_base = INITRD_LOAD_ADDR; initrd_size = load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); if (initrd_size < 0) { hw_error(\"qemu: could not load initial ram disk '%s'\\n\", initrd_filename); } } else { initrd_base = 0; initrd_size = 0; } ppc_boot_device = 'm'; } else { kernel_base = 0; kernel_size = 0; initrd_base = 0; initrd_size = 0; ppc_boot_device = '\\0'; /* For now, OHW cannot boot from the network. */ for (i = 0; boot_device[i] != '\\0'; i++) { if (boot_device[i] >= 'a' && boot_device[i] <= 'f') { ppc_boot_device = boot_device[i]; break; } } if (ppc_boot_device == '\\0') { fprintf(stderr, \"No valid boot device for Mac99 machine\\n\"); exit(1); } } if (PPC_INPUT(env) != PPC_FLAGS_INPUT_6xx) { hw_error(\"Only 6xx bus is supported on PREP machine\\n\"); } dev = qdev_create(NULL, \"raven-pcihost\"); if (bios_name == NULL) { bios_name = BIOS_FILENAME; } qdev_prop_set_string(dev, \"bios-name\", bios_name); qdev_prop_set_uint32(dev, \"elf-machine\", ELF_MACHINE); pcihost = PCI_HOST_BRIDGE(dev); object_property_add_child(qdev_get_machine(), \"raven\", OBJECT(dev), NULL); qdev_init_nofail(dev); pci_bus = (PCIBus *)qdev_get_child_bus(dev, \"pci.0\"); if (pci_bus == NULL) { fprintf(stderr, \"Couldn't create PCI host controller.\\n\"); exit(1); } sysctrl->contiguous_map_irq = qdev_get_gpio_in(dev, 0); /* PCI -> ISA bridge */ pci = pci_create_simple(pci_bus, PCI_DEVFN(1, 0), \"i82378\"); cpu = POWERPC_CPU(first_cpu); qdev_connect_gpio_out(&pci->qdev, 0, cpu->env.irq_inputs[PPC6xx_INPUT_INT]); qdev_connect_gpio_out(&pci->qdev, 1, qemu_allocate_irq(cpu_request_exit, NULL, 0)); sysbus_connect_irq(&pcihost->busdev, 0, qdev_get_gpio_in(&pci->qdev, 9)); sysbus_connect_irq(&pcihost->busdev, 1, qdev_get_gpio_in(&pci->qdev, 11)); sysbus_connect_irq(&pcihost->busdev, 2, qdev_get_gpio_in(&pci->qdev, 9)); sysbus_connect_irq(&pcihost->busdev, 3, qdev_get_gpio_in(&pci->qdev, 11)); isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(pci), \"isa.0\")); /* Super I/O (parallel + serial ports) */ isa = isa_create(isa_bus, TYPE_PC87312); dev = DEVICE(isa); qdev_prop_set_uint8(dev, \"config\", 13); /* fdc, ser0, ser1, par0 */ qdev_init_nofail(dev); /* init basic PC hardware */ pci_vga_init(pci_bus); nb_nics1 = nb_nics; if (nb_nics1 > NE2000_NB_MAX) nb_nics1 = NE2000_NB_MAX; for(i = 0; i < nb_nics1; i++) { if (nd_table[i].model == NULL) { nd_table[i].model = g_strdup(\"ne2k_isa\"); } if (strcmp(nd_table[i].model, \"ne2k_isa\") == 0) { isa_ne2000_init(isa_bus, ne2000_io[i], ne2000_irq[i], &nd_table[i]); } else { pci_nic_init_nofail(&nd_table[i], pci_bus, \"ne2k_pci\", NULL); } } ide_drive_get(hd, ARRAY_SIZE(hd)); for(i = 0; i < MAX_IDE_BUS; i++) { isa_ide_init(isa_bus, ide_iobase[i], ide_iobase2[i], ide_irq[i], hd[2 * i], hd[2 * i + 1]); } 
isa_create_simple(isa_bus, \"i8042\"); cpu = POWERPC_CPU(first_cpu); sysctrl->reset_irq = cpu->env.irq_inputs[PPC6xx_INPUT_HRESET]; portio_list_init(&prep_port_list, NULL, prep_portio_list, sysctrl, \"prep\"); portio_list_add(&prep_port_list, isa_address_space_io(isa), 0x0); /* PowerPC control and status register group */ #if 0 memory_region_init_io(xcsr, NULL, &PPC_XCSR_ops, NULL, \"ppc-xcsr\", 0x1000); memory_region_add_subregion(sysmem, 0xFEFF0000, xcsr); #endif if (usb_enabled()) { pci_create_simple(pci_bus, -1, \"pci-ohci\"); } m48t59 = m48t59_init_isa(isa_bus, 0x0074, NVRAM_SIZE, 2000, 59); if (m48t59 == NULL) return; sysctrl->nvram = m48t59; /* Initialise NVRAM */ PPC_NVRAM_set_params(m48t59, NVRAM_SIZE, \"PREP\", ram_size, ppc_boot_device, kernel_base, kernel_size, kernel_cmdline, initrd_base, initrd_size, /* XXX: need an option to load a NVRAM image */ 0, graphic_width, graphic_height, graphic_depth); }"} {"target": 1, "idx": 25944, "func": "int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) { BDRVQcowState *s = bs->opaque; BdrvCheckResult pre_compare_res; int64_t size, highest_cluster, nb_clusters; uint16_t *refcount_table = NULL; bool rebuild = false; int ret; size = bdrv_getlength(bs->file); if (size < 0) { res->check_errors++; return size; nb_clusters = size_to_clusters(s, size); if (nb_clusters > INT_MAX) { res->check_errors++; return -EFBIG; res->bfi.total_clusters = size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE); ret = calculate_refcounts(bs, res, fix, &rebuild, &refcount_table, /* In case we don't need to rebuild the refcount structure (but want to fix * something), this function is immediately called again, in which case the * result should be ignored */ pre_compare_res = *res; compare_refcounts(bs, res, 0, &rebuild, &highest_cluster, refcount_table, nb_clusters); if (rebuild && (fix & BDRV_FIX_ERRORS)) { fprintf(stderr, \"Rebuilding refcount structure\\n\"); ret = rebuild_refcount_structure(bs, res, &refcount_table, } else if (fix) { fprintf(stderr, \"ERROR need to rebuild refcount structures\\n\"); res->check_errors++; ret = -EIO; if (res->leaks || res->corruptions) { *res = pre_compare_res; compare_refcounts(bs, res, fix, &rebuild, &highest_cluster, refcount_table, nb_clusters); /* check OFLAG_COPIED */ ret = check_oflag_copied(bs, res, fix); res->image_end_offset = (highest_cluster + 1) * s->cluster_size; ret = 0; fail: g_free(refcount_table); return ret;"} {"target": 1, "idx": 25951, "func": "static void pmac_ide_atapi_transfer_cb(void *opaque, int ret) { DBDMA_io *io = opaque; MACIOIDEState *m = io->opaque; IDEState *s = idebus_active_if(&m->bus); int64_t offset; MACIO_DPRINTF(\"pmac_ide_atapi_transfer_cb\\n\"); if (ret < 0) { MACIO_DPRINTF(\"DMA error: %d\\n\", ret); ide_atapi_io_error(s, ret); goto done; } if (!m->dma_active) { MACIO_DPRINTF(\"waiting for data (%#x - %#x - %x)\\n\", s->nsector, io->len, s->status); /* data not ready yet, wait for the channel to get restarted */ io->processing = false; return; } if (s->io_buffer_size <= 0) { MACIO_DPRINTF(\"End of IDE transfer\\n\"); ide_atapi_cmd_ok(s); m->dma_active = false; goto done; } if (io->len == 0) { MACIO_DPRINTF(\"End of DMA transfer\\n\"); goto done; } if (s->lba == -1) { /* Non-block ATAPI transfer - just copy to RAM */ s->io_buffer_size = MIN(s->io_buffer_size, io->len); cpu_physical_memory_write(io->addr, s->io_buffer, s->io_buffer_size); ide_atapi_cmd_ok(s); m->dma_active = false; goto done; } /* Calculate current offset */ offset = (int64_t)(s->lba 
<< 11) + s->io_buffer_index; pmac_dma_read(s->blk, offset, io->len, pmac_ide_atapi_transfer_cb, io); return; done: if (ret < 0) { block_acct_failed(blk_get_stats(s->blk), &s->acct); } else { block_acct_done(blk_get_stats(s->blk), &s->acct); } io->dma_end(opaque); }"} {"target": 0, "idx": 25962, "func": "static void qemu_chr_parse_spice_port(QemuOpts *opts, ChardevBackend *backend, Error **errp) { const char *name = qemu_opt_get(opts, \"name\"); ChardevSpicePort *spiceport; if (name == NULL) { error_setg(errp, \"chardev: spice port: no name given\"); return; } spiceport = backend->u.spiceport = g_new0(ChardevSpicePort, 1); qemu_chr_parse_common(opts, qapi_ChardevSpicePort_base(spiceport)); spiceport->fqdn = g_strdup(name); }"} {"target": 0, "idx": 25975, "func": "void qemu_fd_register(int fd) { WSAEventSelect(fd, qemu_event_handle, FD_READ | FD_ACCEPT | FD_CLOSE | FD_CONNECT | FD_WRITE | FD_OOB); }"} {"target": 1, "idx": 25980, "func": "static struct pxa2xx_i2s_s *pxa2xx_i2s_init(target_phys_addr_t base, qemu_irq irq, struct pxa2xx_dma_state_s *dma) { int iomemtype; struct pxa2xx_i2s_s *s = (struct pxa2xx_i2s_s *) qemu_mallocz(sizeof(struct pxa2xx_i2s_s)); s->base = base; s->irq = irq; s->dma = dma; s->data_req = pxa2xx_i2s_data_req; pxa2xx_i2s_reset(s); iomemtype = cpu_register_io_memory(0, pxa2xx_i2s_readfn, pxa2xx_i2s_writefn, s); cpu_register_physical_memory(s->base & 0xfff00000, 0xfffff, iomemtype); register_savevm(\"pxa2xx_i2s\", base, 0, pxa2xx_i2s_save, pxa2xx_i2s_load, s); return s; }"} {"target": 1, "idx": 25982, "func": "static void perf_nesting(void) { unsigned int i, maxcycles, maxnesting; double duration; maxcycles = 10000; maxnesting = 1000; Coroutine *root; g_test_timer_start(); for (i = 0; i < maxcycles; i++) { NestData nd = { .n_enter = 0, .n_return = 0, .max = maxnesting, }; root = qemu_coroutine_create(nest); qemu_coroutine_enter(root, &nd); } duration = g_test_timer_elapsed(); g_test_message(\"Nesting %u iterations of %u depth each: %f s\\n\", maxcycles, maxnesting, duration); }"} {"target": 1, "idx": 25986, "func": "void hpet_init(qemu_irq *irq) { int i, iomemtype; HPETState *s; DPRINTF (\"hpet_init\\n\"); s = qemu_mallocz(sizeof(HPETState)); hpet_statep = s; s->irqs = irq; for (i=0; itimer[i]; timer->qemu_timer = qemu_new_timer(vm_clock, hpet_timer, timer); } vmstate_register(-1, &vmstate_hpet, s); qemu_register_reset(hpet_reset, s); /* HPET Area */ iomemtype = cpu_register_io_memory(hpet_ram_read, hpet_ram_write, s); cpu_register_physical_memory(HPET_BASE, 0x400, iomemtype); }"} {"target": 0, "idx": 26007, "func": "void slavio_serial_ms_kbd_init(target_phys_addr_t base, qemu_irq irq, int disabled, int clock, int it_shift) { DeviceState *dev; SysBusDevice *s; dev = qdev_create(NULL, \"escc\"); qdev_prop_set_uint32(dev, \"disabled\", disabled); qdev_prop_set_uint32(dev, \"frequency\", clock); qdev_prop_set_uint32(dev, \"it_shift\", it_shift); qdev_prop_set_chr(dev, \"chrB\", NULL); qdev_prop_set_chr(dev, \"chrA\", NULL); qdev_prop_set_uint32(dev, \"chnBtype\", mouse); qdev_prop_set_uint32(dev, \"chnAtype\", kbd); qdev_init_nofail(dev); s = sysbus_from_qdev(dev); sysbus_connect_irq(s, 0, irq); sysbus_connect_irq(s, 1, irq); sysbus_mmio_map(s, 0, base); }"} {"target": 0, "idx": 26018, "func": "static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc) { S390PCIBusDevice *pbdev; uint32_t res_code, initial_l2, g_l2, finish; int rc, idx; uint64_t resume_token; rc = 0; if (lduw_p(&rrb->request.hdr.len) != 32) { res_code = CLP_RC_LEN; rc = -EINVAL; goto out; } if 
((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) { res_code = CLP_RC_FMT; rc = -EINVAL; goto out; } if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 || ldq_p(&rrb->request.reserved1) != 0 || ldq_p(&rrb->request.reserved2) != 0) { res_code = CLP_RC_RESNOT0; rc = -EINVAL; goto out; } resume_token = ldq_p(&rrb->request.resume_token); if (resume_token) { pbdev = s390_pci_find_dev_by_idx(resume_token); if (!pbdev) { res_code = CLP_RC_LISTPCI_BADRT; rc = -EINVAL; goto out; } } if (lduw_p(&rrb->response.hdr.len) < 48) { res_code = CLP_RC_8K; rc = -EINVAL; goto out; } initial_l2 = lduw_p(&rrb->response.hdr.len); if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry) != 0) { res_code = CLP_RC_LEN; rc = -EINVAL; *cc = 3; goto out; } stl_p(&rrb->response.fmt, 0); stq_p(&rrb->response.reserved1, 0); stq_p(&rrb->response.reserved2, 0); stl_p(&rrb->response.mdd, FH_MASK_SHM); stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS); rrb->response.entry_size = sizeof(ClpFhListEntry); finish = 0; idx = resume_token; g_l2 = LIST_PCI_HDR_LEN; do { pbdev = s390_pci_find_dev_by_idx(idx); if (!pbdev) { finish = 1; break; } stw_p(&rrb->response.fh_list[idx - resume_token].device_id, pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID)); stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id, pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID)); /* Ignore RESERVED devices. */ stl_p(&rrb->response.fh_list[idx - resume_token].config, pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31); stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid); stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh); g_l2 += sizeof(ClpFhListEntry); /* Add endian check for DPRINTF? */ DPRINTF(\"g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\\n\", g_l2, lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id), lduw_p(&rrb->response.fh_list[idx - resume_token].device_id), ldl_p(&rrb->response.fh_list[idx - resume_token].fid), ldl_p(&rrb->response.fh_list[idx - resume_token].fh)); idx++; } while (g_l2 < initial_l2); if (finish == 1) { resume_token = 0; } else { resume_token = idx; } stq_p(&rrb->response.resume_token, resume_token); stw_p(&rrb->response.hdr.len, g_l2); stw_p(&rrb->response.hdr.rsp, CLP_RC_OK); out: if (rc) { DPRINTF(\"list pci failed rc 0x%x\\n\", rc); stw_p(&rrb->response.hdr.rsp, res_code); } return rc; }"} {"target": 0, "idx": 26027, "func": "abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp) { int ret; struct target_sigaltstack oss; /* XXX: test errors */ if(uoss_addr) { __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp); __put_user(target_sigaltstack_used.ss_size, &oss.ss_size); __put_user(sas_ss_flags(sp), &oss.ss_flags); } if(uss_addr) { struct target_sigaltstack *uss; struct target_sigaltstack ss; ret = -TARGET_EFAULT; if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1) || __get_user(ss.ss_sp, &uss->ss_sp) || __get_user(ss.ss_size, &uss->ss_size) || __get_user(ss.ss_flags, &uss->ss_flags)) goto out; unlock_user_struct(uss, uss_addr, 0); ret = -TARGET_EPERM; if (on_sig_stack(sp)) goto out; ret = -TARGET_EINVAL; if (ss.ss_flags != TARGET_SS_DISABLE && ss.ss_flags != TARGET_SS_ONSTACK && ss.ss_flags != 0) goto out; if (ss.ss_flags == TARGET_SS_DISABLE) { ss.ss_size = 0; ss.ss_sp = 0; } else { ret = -TARGET_ENOMEM; if (ss.ss_size < MINSIGSTKSZ) goto out; } target_sigaltstack_used.ss_sp = ss.ss_sp; target_sigaltstack_used.ss_size = ss.ss_size; } if (uoss_addr) { ret = -TARGET_EFAULT; if (copy_to_user(uoss_addr, &oss, sizeof(oss))) goto out; } ret = 0; out: return 
ret; }"} {"target": 1, "idx": 26037, "func": "void ff_mov_close_hinting(MOVTrack *track) { AVFormatContext* rtp_ctx = track->rtp_ctx; uint8_t *ptr; av_freep(&track->enc); sample_queue_free(&track->sample_queue); if (!rtp_ctx) return; if (rtp_ctx->pb) { av_write_trailer(rtp_ctx); url_close_dyn_buf(rtp_ctx->pb, &ptr); av_free(ptr); } av_metadata_free(&rtp_ctx->streams[0]->metadata); av_metadata_free(&rtp_ctx->metadata); av_free(rtp_ctx->streams[0]); av_freep(&rtp_ctx); }"} {"target": 1, "idx": 26049, "func": "void laio_cleanup(void *s_) { struct qemu_laio_state *s = s_; event_notifier_cleanup(&s->e); g_free(s);"} {"target": 0, "idx": 26114, "func": "static void mxf_write_cdci_common(AVFormatContext *s, AVStream *st, const UID key, unsigned size) { MXFStreamContext *sc = st->priv_data; AVIOContext *pb = s->pb; int stored_height = (st->codec->height+15)/16*16; int display_height; int f1, f2; unsigned desc_size = size+8+8+8+8+8+8+5+16+sc->interlaced*4+12+20; if (sc->interlaced && sc->field_dominance) desc_size += 5; mxf_write_generic_desc(s, st, key, desc_size); mxf_write_local_tag(pb, 4, 0x3203); avio_wb32(pb, st->codec->width); mxf_write_local_tag(pb, 4, 0x3202); avio_wb32(pb, stored_height>>sc->interlaced); mxf_write_local_tag(pb, 4, 0x3209); avio_wb32(pb, st->codec->width); if (st->codec->height == 608) // PAL + VBI display_height = 576; else if (st->codec->height == 512) // NTSC + VBI display_height = 486; else display_height = st->codec->height; mxf_write_local_tag(pb, 4, 0x3208); avio_wb32(pb, display_height>>sc->interlaced); // component depth mxf_write_local_tag(pb, 4, 0x3301); avio_wb32(pb, sc->component_depth); // horizontal subsampling mxf_write_local_tag(pb, 4, 0x3302); avio_wb32(pb, 2); // frame layout mxf_write_local_tag(pb, 1, 0x320C); avio_w8(pb, sc->interlaced); // video line map switch (st->codec->height) { case 576: f1 = 23; f2 = st->codec->codec_id == AV_CODEC_ID_DVVIDEO ? 335 : 336; break; case 608: f1 = 7; f2 = 320; break; case 480: f1 = 20; f2 = st->codec->codec_id == AV_CODEC_ID_DVVIDEO ? 285 : 283; break; case 512: f1 = 7; f2 = 270; break; case 720: f1 = 26; f2 = 0; break; // progressive case 1080: f1 = 21; f2 = 584; break; default: f1 = 0; f2 = 0; break; } if (!sc->interlaced) { f2 = 0; f1 *= 2; } mxf_write_local_tag(pb, 12+sc->interlaced*4, 0x320D); avio_wb32(pb, sc->interlaced ? 
2 : 1); avio_wb32(pb, 4); avio_wb32(pb, f1); if (sc->interlaced) avio_wb32(pb, f2); mxf_write_local_tag(pb, 8, 0x320E); avio_wb32(pb, sc->aspect_ratio.num); avio_wb32(pb, sc->aspect_ratio.den); mxf_write_local_tag(pb, 16, 0x3201); avio_write(pb, *sc->codec_ul, 16); if (sc->interlaced && sc->field_dominance) { mxf_write_local_tag(pb, 1, 0x3212); avio_w8(pb, sc->field_dominance); } }"} {"target": 0, "idx": 26118, "func": "static void pc_dimm_init(Object *obj) { PCDIMMDevice *dimm = PC_DIMM(obj); object_property_add(obj, PC_DIMM_SIZE_PROP, \"int\", pc_dimm_get_size, NULL, NULL, NULL, &error_abort); object_property_add_link(obj, PC_DIMM_MEMDEV_PROP, TYPE_MEMORY_BACKEND, (Object **)&dimm->hostmem, qdev_prop_allow_set_link_before_realize, OBJ_PROP_LINK_UNREF_ON_RELEASE, &error_abort); }"} {"target": 0, "idx": 26122, "func": "static always_inline int find_pte (CPUState *env, mmu_ctx_t *ctx, int h, int rw) { #if defined(TARGET_PPC64) if (env->mmu_model == POWERPC_MMU_64B) return find_pte64(ctx, h, rw); #endif return find_pte32(ctx, h, rw); }"} {"target": 1, "idx": 26141, "func": "static void tcp_chr_accept(void *opaque) { CharDriverState *chr = opaque; TCPCharDriver *s = chr->opaque; struct sockaddr_in saddr; #ifndef _WIN32 struct sockaddr_un uaddr; #endif struct sockaddr *addr; socklen_t len; int fd; for(;;) { #ifndef _WIN32 if (s->is_unix) { len = sizeof(uaddr); addr = (struct sockaddr *)&uaddr; } else #endif { len = sizeof(saddr); addr = (struct sockaddr *)&saddr; } fd = accept(s->listen_fd, addr, &len); if (fd < 0 && errno != EINTR) { return; } else if (fd >= 0) { if (s->do_telnetopt) tcp_chr_telnet_init(fd); break; } } socket_set_nonblock(fd); if (s->do_nodelay) socket_set_nodelay(fd); s->fd = fd; qemu_set_fd_handler(s->listen_fd, NULL, NULL, NULL); tcp_chr_connect(chr); }"} {"target": 0, "idx": 26149, "func": "static void test_visitor_out_native_list_int8(TestOutputVisitorData *data, const void *unused) { test_native_list(data, unused, USER_DEF_NATIVE_LIST_UNION_KIND_S8); }"} {"target": 0, "idx": 26156, "func": "static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def, const TCGArg *args, uint16_t dead_args, uint8_t sync_args) { TCGRegSet allocated_regs; TCGTemp *ts, *ots; TCGType otype, itype; tcg_regset_set(allocated_regs, s->reserved_regs); ots = &s->temps[args[0]]; ts = &s->temps[args[1]]; /* Note that otype != itype for no-op truncation. */ otype = ots->type; itype = ts->type; /* If the source value is not in a register, and we're going to be forced to have it in a register in order to perform the copy, then copy the SOURCE value into its own register first. That way we don't have to reload SOURCE the next time it is used. */ if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG) || ts->val_type == TEMP_VAL_MEM) { temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs); } if (IS_DEAD_ARG(0) && !ots->fixed_reg) { /* mov to a non-saved dead register makes no sense (even with liveness analysis disabled). */ assert(NEED_SYNC_ARG(0)); /* The code above should have moved the temp to a register. 
*/ assert(ts->val_type == TEMP_VAL_REG); if (!ots->mem_allocated) { temp_allocate_frame(s, args[0]); } if (ots->indirect_reg) { tcg_regset_set_reg(allocated_regs, ts->reg); temp_load(s, ots->mem_base, tcg_target_available_regs[TCG_TYPE_PTR], allocated_regs); } tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset); if (IS_DEAD_ARG(1)) { temp_dead(s, ts); } temp_dead(s, ots); } else if (ts->val_type == TEMP_VAL_CONST) { /* propagate constant */ if (ots->val_type == TEMP_VAL_REG) { s->reg_to_temp[ots->reg] = NULL; } ots->val_type = TEMP_VAL_CONST; ots->val = ts->val; if (IS_DEAD_ARG(1)) { temp_dead(s, ts); } } else { /* The code in the first if block should have moved the temp to a register. */ assert(ts->val_type == TEMP_VAL_REG); if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) { /* the mov can be suppressed */ if (ots->val_type == TEMP_VAL_REG) { s->reg_to_temp[ots->reg] = NULL; } ots->reg = ts->reg; temp_dead(s, ts); } else { if (ots->val_type != TEMP_VAL_REG) { /* When allocating a new register, make sure to not spill the input one. */ tcg_regset_set_reg(allocated_regs, ts->reg); ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype], allocated_regs, ots->indirect_base); } tcg_out_mov(s, otype, ots->reg, ts->reg); } ots->val_type = TEMP_VAL_REG; ots->mem_coherent = 0; s->reg_to_temp[ots->reg] = ots; if (NEED_SYNC_ARG(0)) { tcg_reg_sync(s, ots->reg, allocated_regs); } } }"} {"target": 1, "idx": 26174, "func": "static void start_children(FFStream *feed) { if (no_launch) return; for (; feed; feed = feed->next) { if (feed->child_argv && !feed->pid) { feed->pid_start = time(0); feed->pid = fork(); if (feed->pid < 0) { fprintf(stderr, \"Unable to create children\\n\"); exit(1); } if (!feed->pid) { /* In child */ char pathname[1024]; char *slash; int i; for (i = 3; i < 256; i++) { close(i); } if (!ffserver_debug) { i = open(\"/dev/null\", O_RDWR); if (i) dup2(i, 0); dup2(i, 1); dup2(i, 2); if (i) close(i); } pstrcpy(pathname, sizeof(pathname), my_program_name); slash = strrchr(pathname, '/'); if (!slash) { slash = pathname; } else { slash++; } strcpy(slash, \"ffmpeg\"); /* This is needed to make relative pathnames work */ chdir(my_program_dir); execvp(pathname, feed->child_argv); _exit(1); } } } }"} {"target": 0, "idx": 26180, "func": "int ff_h264_check_intra_pred_mode(H264Context *h, int mode){ MpegEncContext * const s = &h->s; static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1}; static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8}; if(mode > 6U) { av_log(h->s.avctx, AV_LOG_ERROR, \"out of range intra chroma pred mode at %d %d\\n\", s->mb_x, s->mb_y); return -1; } if(!(h->top_samples_available&0x8000)){ mode= top[ mode ]; if(mode<0){ av_log(h->s.avctx, AV_LOG_ERROR, \"top block unavailable for requested intra mode at %d %d\\n\", s->mb_x, s->mb_y); return -1; } } if((h->left_samples_available&0x8080) != 0x8080){ mode= left[ mode ]; if(h->left_samples_available&0x8080){ //mad cow disease mode, aka MBAFF + constrained_intra_pred mode= ALZHEIMER_DC_L0T_PRED8x8 + (!(h->left_samples_available&0x8000)) + 2*(mode == DC_128_PRED8x8); } if(mode<0){ av_log(h->s.avctx, AV_LOG_ERROR, \"left block unavailable for requested intra mode at %d %d\\n\", s->mb_x, s->mb_y); return -1; } } return mode; }"} {"target": 0, "idx": 26199, "func": "void replay_finish_event(void) { replay_has_unread_data = 0; replay_fetch_data_kind(); }"} {"target": 0, "idx": 26202, "func": "static int64_t coroutine_fn qcow2_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int 
nb_sectors, int *pnum, BlockDriverState **file) { BDRVQcow2State *s = bs->opaque; uint64_t cluster_offset; int index_in_cluster, ret; unsigned int bytes; int64_t status = 0; bytes = MIN(INT_MAX, nb_sectors * BDRV_SECTOR_SIZE); qemu_co_mutex_lock(&s->lock); ret = qcow2_get_cluster_offset(bs, sector_num << 9, &bytes, &cluster_offset); qemu_co_mutex_unlock(&s->lock); if (ret < 0) { return ret; } *pnum = bytes >> BDRV_SECTOR_BITS; if (cluster_offset != 0 && ret != QCOW2_CLUSTER_COMPRESSED && !s->cipher) { index_in_cluster = sector_num & (s->cluster_sectors - 1); cluster_offset |= (index_in_cluster << BDRV_SECTOR_BITS); *file = bs->file->bs; status |= BDRV_BLOCK_OFFSET_VALID | cluster_offset; } if (ret == QCOW2_CLUSTER_ZERO_PLAIN || ret == QCOW2_CLUSTER_ZERO_ALLOC) { status |= BDRV_BLOCK_ZERO; } else if (ret != QCOW2_CLUSTER_UNALLOCATED) { status |= BDRV_BLOCK_DATA; } return status; }"} {"target": 0, "idx": 26203, "func": "static void vfio_iommu_map_notify(Notifier *n, void *data) { VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); VFIOContainer *container = giommu->container; IOMMUTLBEntry *iotlb = data; hwaddr iova = iotlb->iova + giommu->iommu_offset; MemoryRegion *mr; hwaddr xlat; hwaddr len = iotlb->addr_mask + 1; void *vaddr; int ret; trace_vfio_iommu_map_notify(iova, iova + iotlb->addr_mask); if (iotlb->target_as != &address_space_memory) { error_report(\"Wrong target AS \\\"%s\\\", only system memory is allowed\", iotlb->target_as->name ? iotlb->target_as->name : \"none\"); return; } /* * The IOMMU TLB entry we have just covers translation through * this IOMMU to its immediate target. We need to translate * it the rest of the way through to memory. */ rcu_read_lock(); mr = address_space_translate(&address_space_memory, iotlb->translated_addr, &xlat, &len, iotlb->perm & IOMMU_WO); if (!memory_region_is_ram(mr)) { error_report(\"iommu map to non memory area %\"HWADDR_PRIx\"\", xlat); goto out; } /* * Translation truncates length to the IOMMU page size, * check that it did not truncate too much. 
*/ if (len & iotlb->addr_mask) { error_report(\"iommu has granularity incompatible with target AS\"); goto out; } if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) { vaddr = memory_region_get_ram_ptr(mr) + xlat; ret = vfio_dma_map(container, iova, iotlb->addr_mask + 1, vaddr, !(iotlb->perm & IOMMU_WO) || mr->readonly); if (ret) { error_report(\"vfio_dma_map(%p, 0x%\"HWADDR_PRIx\", \" \"0x%\"HWADDR_PRIx\", %p) = %d (%m)\", container, iova, iotlb->addr_mask + 1, vaddr, ret); } } else { ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1); if (ret) { error_report(\"vfio_dma_unmap(%p, 0x%\"HWADDR_PRIx\", \" \"0x%\"HWADDR_PRIx\") = %d (%m)\", container, iova, iotlb->addr_mask + 1, ret); } } out: rcu_read_unlock(); }"} {"target": 0, "idx": 26223, "func": "static int local_truncate(FsContext *ctx, V9fsPath *fs_path, off_t size) { char buffer[PATH_MAX]; char *path = fs_path->data; return truncate(rpath(ctx, path, buffer), size); }"} {"target": 0, "idx": 26227, "func": "static bool is_zero_sectors(BlockDriverState *bs, int64_t start, uint32_t count) { int nr; BlockDriverState *file; int64_t res; if (start + count > bs->total_sectors) { count = bs->total_sectors - start; } if (!count) { return true; } res = bdrv_get_block_status_above(bs, NULL, start, count, &nr, &file); return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == count; }"} {"target": 0, "idx": 26230, "func": "static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5], int x, int y, int ref, int plane) { Plane *p = &s->plane[plane]; uint8_t **ref_hpel = s->ref_pics[ref]->hpel[plane]; int motion_x = block->u.mv[ref][0]; int motion_y = block->u.mv[ref][1]; int mx, my, i, epel, nplanes = 0; if (plane) { motion_x >>= s->chroma_x_shift; motion_y >>= s->chroma_y_shift; } mx = motion_x & ~(-1 << s->mv_precision); my = motion_y & ~(-1 << s->mv_precision); motion_x >>= s->mv_precision; motion_y >>= s->mv_precision; /* normalize subpel coordinates to epel */ /* TODO: template this function? */ mx <<= 3 - s->mv_precision; my <<= 3 - s->mv_precision; x += motion_x; y += motion_y; epel = (mx|my)&1; /* hpel position */ if (!((mx|my)&3)) { nplanes = 1; src[0] = ref_hpel[(my>>1)+(mx>>2)] + y*p->stride + x; } else { /* qpel or epel */ nplanes = 4; for (i = 0; i < 4; i++) src[i] = ref_hpel[i] + y*p->stride + x; /* if we're interpolating in the right/bottom halves, adjust the planes as needed we increment x/y because the edge changes for half of the pixels */ if (mx > 4) { src[0] += 1; src[2] += 1; x++; } if (my > 4) { src[0] += p->stride; src[1] += p->stride; y++; } /* hpel planes are: [0]: F [1]: H [2]: V [3]: C */ if (!epel) { /* check if we really only need 2 planes since either mx or my is a hpel position. 
(epel weights of 0 handle this there) */ if (!(mx&3)) { /* mx == 0: average [0] and [2] mx == 4: average [1] and [3] */ src[!mx] = src[2 + !!mx]; nplanes = 2; } else if (!(my&3)) { src[0] = src[(my>>1) ]; src[1] = src[(my>>1)+1]; nplanes = 2; } } else { /* adjust the ordering if needed so the weights work */ if (mx > 4) { FFSWAP(const uint8_t *, src[0], src[1]); FFSWAP(const uint8_t *, src[2], src[3]); } if (my > 4) { FFSWAP(const uint8_t *, src[0], src[2]); FFSWAP(const uint8_t *, src[1], src[3]); } src[4] = epel_weights[my&3][mx&3]; } } /* fixme: v/h _edge_pos */ if ((unsigned)x > p->width +EDGE_WIDTH/2 - p->xblen || (unsigned)y > p->height+EDGE_WIDTH/2 - p->yblen) { for (i = 0; i < nplanes; i++) { ff_emulated_edge_mc(s->edge_emu_buffer[i], src[i], p->stride, p->xblen, p->yblen, x, y, p->width+EDGE_WIDTH/2, p->height+EDGE_WIDTH/2); src[i] = s->edge_emu_buffer[i]; } } return (nplanes>>1) + epel; }"} {"target": 1, "idx": 26237, "func": "static void iscsi_nop_timed_event(void *opaque) { IscsiLun *iscsilun = opaque; if (iscsi_get_nops_in_flight(iscsilun->iscsi) > MAX_NOP_FAILURES) { error_report(\"iSCSI: NOP timeout. Reconnecting...\"); iscsi_reconnect(iscsilun->iscsi); } if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) { error_report(\"iSCSI: failed to sent NOP-Out. Disabling NOP messages.\"); return; } timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL); iscsi_set_events(iscsilun); }"} {"target": 0, "idx": 26275, "func": "void qemu_mutex_lock_iothread(void) { if (kvm_enabled()) { qemu_mutex_lock(&qemu_fair_mutex); qemu_mutex_lock(&qemu_global_mutex); qemu_mutex_unlock(&qemu_fair_mutex); } else qemu_signal_lock(100); }"} {"target": 1, "idx": 26296, "func": "static bool is_special_wait_psw(CPUState *cs) { /* signal quiesce */ return cs->kvm_run->psw_addr == 0xfffUL; }"} {"target": 1, "idx": 26300, "func": "static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field) { int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1; for (i = 0; i < 2; i++) { sl->luma_weight_flag[i] = 0; sl->chroma_weight_flag[i] = 0; } if (field < 0) { if (h->picture_structure == PICT_FRAME) { cur_poc = h->cur_pic_ptr->poc; } else { cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1]; } if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) && sl->ref_list[0][0].poc + sl->ref_list[1][0].poc == 2 * cur_poc) { sl->use_weight = 0; sl->use_weight_chroma = 0; return; } ref_start = 0; ref_count0 = sl->ref_count[0]; ref_count1 = sl->ref_count[1]; } else { cur_poc = h->cur_pic_ptr->field_poc[field]; ref_start = 16; ref_count0 = 16 + 2 * sl->ref_count[0]; ref_count1 = 16 + 2 * sl->ref_count[1]; } sl->use_weight = 2; sl->use_weight_chroma = 2; sl->luma_log2_weight_denom = 5; sl->chroma_log2_weight_denom = 5; for (ref0 = ref_start; ref0 < ref_count0; ref0++) { int poc0 = sl->ref_list[0][ref0].poc; for (ref1 = ref_start; ref1 < ref_count1; ref1++) { int w = 32; if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) { int poc1 = sl->ref_list[1][ref1].poc; int td = av_clip_int8(poc1 - poc0); if (td) { int tb = av_clip_int8(cur_poc - poc0); int tx = (16384 + (FFABS(td) >> 1)) / td; int dist_scale_factor = (tb * tx + 32) >> 8; if (dist_scale_factor >= -64 && dist_scale_factor <= 128) w = 64 - dist_scale_factor; } } if (field < 0) { sl->implicit_weight[ref0][ref1][0] = sl->implicit_weight[ref0][ref1][1] = w; } else { sl->implicit_weight[ref0][ref1][field] = w; } } } }"} {"target": 1, "idx": 
26321, "func": "int inet_connect_opts(QemuOpts *opts) { struct addrinfo ai,*res,*e; const char *addr; const char *port; char uaddr[INET6_ADDRSTRLEN+1]; char uport[33]; int sock,rc; memset(&ai,0, sizeof(ai)); ai.ai_flags = AI_CANONNAME | AI_ADDRCONFIG; ai.ai_family = PF_UNSPEC; ai.ai_socktype = SOCK_STREAM; addr = qemu_opt_get(opts, \"host\"); port = qemu_opt_get(opts, \"port\"); if (addr == NULL || port == NULL) { fprintf(stderr, \"inet_connect: host and/or port not specified\\n\"); return -1; } if (qemu_opt_get_bool(opts, \"ipv4\", 0)) ai.ai_family = PF_INET; if (qemu_opt_get_bool(opts, \"ipv6\", 0)) ai.ai_family = PF_INET6; /* lookup */ if (0 != (rc = getaddrinfo(addr, port, &ai, &res))) { fprintf(stderr,\"getaddrinfo(%s,%s): %s\\n\", addr, port, gai_strerror(rc)); return -1; } if (sockets_debug) inet_print_addrinfo(__FUNCTION__, res); for (e = res; e != NULL; e = e->ai_next) { if (getnameinfo((struct sockaddr*)e->ai_addr,e->ai_addrlen, uaddr,INET6_ADDRSTRLEN,uport,32, NI_NUMERICHOST | NI_NUMERICSERV) != 0) { fprintf(stderr,\"%s: getnameinfo: oops\\n\", __FUNCTION__); continue; } sock = socket(e->ai_family, e->ai_socktype, e->ai_protocol); if (sock < 0) { fprintf(stderr,\"%s: socket(%s): %s\\n\", __FUNCTION__, inet_strfamily(e->ai_family), strerror(errno)); continue; } setsockopt(sock,SOL_SOCKET,SO_REUSEADDR,(void*)&on,sizeof(on)); /* connect to peer */ if (connect(sock,e->ai_addr,e->ai_addrlen) < 0) { if (sockets_debug || NULL == e->ai_next) fprintf(stderr, \"%s: connect(%s,%s,%s,%s): %s\\n\", __FUNCTION__, inet_strfamily(e->ai_family), e->ai_canonname, uaddr, uport, strerror(errno)); closesocket(sock); continue; } if (sockets_debug) fprintf(stderr, \"%s: connect(%s,%s,%s,%s): OK\\n\", __FUNCTION__, inet_strfamily(e->ai_family), e->ai_canonname, uaddr, uport); freeaddrinfo(res); return sock; } freeaddrinfo(res); return -1; }"} {"target": 1, "idx": 26331, "func": "static void vhost_commit(MemoryListener *listener) { }"} {"target": 1, "idx": 26346, "func": "static struct dirent *local_readdir(FsContext *ctx, V9fsFidOpenState *fs) { struct dirent *entry; again: entry = readdir(fs->dir.stream); if (!entry) { return NULL; } if (ctx->export_flags & V9FS_SM_MAPPED) { entry->d_type = DT_UNKNOWN; } else if (ctx->export_flags & V9FS_SM_MAPPED_FILE) { if (!strcmp(entry->d_name, VIRTFS_META_DIR)) { /* skp the meta data directory */ goto again; } entry->d_type = DT_UNKNOWN; } return entry; }"} {"target": 1, "idx": 26368, "func": "void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb) { int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0; int max_xfer_len = 0; int64_t sector_num = 0; if (mrb->num_reqs == 1) { submit_requests(blk, mrb, 0, 1, -1); mrb->num_reqs = 0; return; } max_xfer_len = blk_get_max_transfer_length(mrb->reqs[0]->dev->blk); max_xfer_len = MIN_NON_ZERO(max_xfer_len, BDRV_REQUEST_MAX_SECTORS); qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs), &multireq_compare); for (i = 0; i < mrb->num_reqs; i++) { VirtIOBlockReq *req = mrb->reqs[i]; if (num_reqs > 0) { bool merge = true; /* merge would exceed maximum number of IOVs */ if (niov + req->qiov.niov > IOV_MAX) { merge = false; } /* merge would exceed maximum transfer length of backend device */ if (req->qiov.size / BDRV_SECTOR_SIZE + nb_sectors > max_xfer_len) { merge = false; } /* requests are not sequential */ if (sector_num + nb_sectors != req->sector_num) { merge = false; } if (!merge) { submit_requests(blk, mrb, start, num_reqs, niov); num_reqs = 0; } } if (num_reqs == 0) { sector_num = req->sector_num; 
nb_sectors = niov = 0; start = i; } nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE; niov += req->qiov.niov; num_reqs++; } submit_requests(blk, mrb, start, num_reqs, niov); mrb->num_reqs = 0; }"} {"target": 1, "idx": 26372, "func": "static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, uint32_t *value, uint32_t valid_mask) { XenPTRegInfo *reg = cfg_entry->reg; uint32_t valid_emu_mask = 0; uint32_t bar_emu_mask = 0; int index; /* get BAR index */ index = xen_pt_bar_offset_to_index(reg->offset); if (index < 0 || index >= PCI_NUM_REGIONS) { XEN_PT_ERR(&s->dev, \"Internal error: Invalid BAR index [%d].\\n\", index); return -1; } /* use fixed-up value from kernel sysfs */ *value = base_address_with_flags(&s->real_device.io_regions[index]); /* set emulate mask depend on BAR flag */ switch (s->bases[index].bar_flag) { case XEN_PT_BAR_FLAG_MEM: bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; break; case XEN_PT_BAR_FLAG_IO: bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; break; case XEN_PT_BAR_FLAG_UPPER: bar_emu_mask = XEN_PT_BAR_ALLF; break; default: break; } /* emulate BAR */ valid_emu_mask = bar_emu_mask & valid_mask; *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); return 0; }"} {"target": 1, "idx": 26379, "func": "static int encode_apng(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet) { PNGEncContext *s = avctx->priv_data; int ret; int enc_row_size; size_t max_packet_size; APNGFctlChunk fctl_chunk = {0}; if (pict && avctx->codec_id == AV_CODEC_ID_APNG && s->color_type == PNG_COLOR_TYPE_PALETTE) { uint32_t checksum = ~av_crc(av_crc_get_table(AV_CRC_32_IEEE_LE), ~0U, pict->data[1], 256 * sizeof(uint32_t)); if (avctx->frame_number == 0) { s->palette_checksum = checksum; } else if (checksum != s->palette_checksum) { av_log(avctx, AV_LOG_ERROR, \"Input contains more than one unique palette. 
APNG does not support multiple palettes.\\n\"); return -1; } } enc_row_size = deflateBound(&s->zstream, (avctx->width * s->bits_per_pixel + 7) >> 3); max_packet_size = AV_INPUT_BUFFER_MIN_SIZE + // headers avctx->height * ( enc_row_size + (4 + 12) * (((int64_t)enc_row_size + IOBUF_SIZE - 1) / IOBUF_SIZE) // fdAT * ceil(enc_row_size / IOBUF_SIZE) ); if (max_packet_size > INT_MAX) return AVERROR(ENOMEM); if (avctx->frame_number == 0) { s->bytestream = avctx->extradata = av_malloc(FF_MIN_BUFFER_SIZE); if (!avctx->extradata) return AVERROR(ENOMEM); ret = encode_headers(avctx, pict); if (ret < 0) return ret; avctx->extradata_size = s->bytestream - avctx->extradata; s->last_frame_packet = av_malloc(max_packet_size); if (!s->last_frame_packet) return AVERROR(ENOMEM); } else if (s->last_frame) { ret = ff_alloc_packet2(avctx, pkt, max_packet_size, 0); if (ret < 0) return ret; memcpy(pkt->data, s->last_frame_packet, s->last_frame_packet_size); pkt->size = s->last_frame_packet_size; pkt->pts = pkt->dts = s->last_frame->pts; } if (pict) { s->bytestream_start = s->bytestream = s->last_frame_packet; s->bytestream_end = s->bytestream + max_packet_size; // We're encoding the frame first, so we have to do a bit of shuffling around // to have the image data write to the correct place in the buffer fctl_chunk.sequence_number = s->sequence_number; ++s->sequence_number; s->bytestream += 26 + 12; ret = apng_encode_frame(avctx, pict, &fctl_chunk, &s->last_frame_fctl); if (ret < 0) return ret; fctl_chunk.delay_num = 0; // delay filled in during muxing fctl_chunk.delay_den = 0; } else { s->last_frame_fctl.dispose_op = APNG_DISPOSE_OP_NONE; } if (s->last_frame) { uint8_t* last_fctl_chunk_start = pkt->data; uint8_t buf[26]; AV_WB32(buf + 0, s->last_frame_fctl.sequence_number); AV_WB32(buf + 4, s->last_frame_fctl.width); AV_WB32(buf + 8, s->last_frame_fctl.height); AV_WB32(buf + 12, s->last_frame_fctl.x_offset); AV_WB32(buf + 16, s->last_frame_fctl.y_offset); AV_WB16(buf + 20, s->last_frame_fctl.delay_num); AV_WB16(buf + 22, s->last_frame_fctl.delay_den); buf[24] = s->last_frame_fctl.dispose_op; buf[25] = s->last_frame_fctl.blend_op; png_write_chunk(&last_fctl_chunk_start, MKTAG('f', 'c', 'T', 'L'), buf, 26); *got_packet = 1; } if (pict) { if (!s->last_frame) { s->last_frame = av_frame_alloc(); if (!s->last_frame) return AVERROR(ENOMEM); } else if (s->last_frame_fctl.dispose_op != APNG_DISPOSE_OP_PREVIOUS) { if (!s->prev_frame) { s->prev_frame = av_frame_alloc(); if (!s->prev_frame) return AVERROR(ENOMEM); s->prev_frame->format = pict->format; s->prev_frame->width = pict->width; s->prev_frame->height = pict->height; if ((ret = av_frame_get_buffer(s->prev_frame, 32)) < 0) return ret; } // Do disposal, but not blending memcpy(s->prev_frame->data[0], s->last_frame->data[0], s->last_frame->linesize[0] * s->last_frame->height); if (s->last_frame_fctl.dispose_op == APNG_DISPOSE_OP_BACKGROUND) { uint32_t y; uint8_t bpp = (s->bits_per_pixel + 7) >> 3; for (y = s->last_frame_fctl.y_offset; y < s->last_frame_fctl.y_offset + s->last_frame_fctl.height; ++y) { size_t row_start = s->last_frame->linesize[0] * y + bpp * s->last_frame_fctl.x_offset; memset(s->prev_frame->data[0] + row_start, 0, bpp * s->last_frame_fctl.width); } } } av_frame_unref(s->last_frame); ret = av_frame_ref(s->last_frame, (AVFrame*)pict); if (ret < 0) return ret; s->last_frame_fctl = fctl_chunk; s->last_frame_packet_size = s->bytestream - s->bytestream_start; } else { av_frame_free(&s->last_frame); } return 0; }"} {"target": 1, "idx": 26381, "func": "void 
decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref) { VP56RangeCoder *c = &s->c; if (s->segmentation.update_map) *segment = vp8_rac_get_tree(c, vp8_segmentid_tree, s->prob->segmentid); else *segment = ref ? *ref : *segment; s->segment = *segment; mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0; if (s->keyframe) { mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra, vp8_pred16x16_prob_intra); if (mb->mode == MODE_I4x4) { decode_intra4x4_modes(s, c, mb_x, 1); } else { const uint32_t modes = vp8_pred4x4_mode[mb->mode] * 0x01010101u; AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes); AV_WN32A(s->intra4x4_pred_mode_left, modes); } s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, vp8_pred8x8c_prob_intra); mb->ref_frame = VP56_FRAME_CURRENT; } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) { // inter MB, 16.2 if (vp56_rac_get_prob_branchy(c, s->prob->last)) mb->ref_frame = vp56_rac_get_prob(c, s->prob->golden) ? VP56_FRAME_GOLDEN2 /* altref */ : VP56_FRAME_GOLDEN; else mb->ref_frame = VP56_FRAME_PREVIOUS; s->ref_count[mb->ref_frame-1]++; // motion vectors, 16.3 decode_mvs(s, mb, mb_x, mb_y); } else { // intra MB, 16.1 mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16); if (mb->mode == MODE_I4x4) decode_intra4x4_modes(s, c, mb_x, 0); s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, s->prob->pred8x8c); mb->ref_frame = VP56_FRAME_CURRENT; mb->partitioning = VP8_SPLITMVMODE_NONE; AV_ZERO32(&mb->bmv[0]); } }"} {"target": 1, "idx": 26389, "func": "static av_cold int join_init(AVFilterContext *ctx) { JoinContext *s = ctx->priv; int ret, i; if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) { av_log(ctx, AV_LOG_ERROR, \"Error parsing channel layout '%s'.\\n\", s->channel_layout_str); return AVERROR(EINVAL); } s->nb_channels = av_get_channel_layout_nb_channels(s->channel_layout); s->channels = av_mallocz_array(s->nb_channels, sizeof(*s->channels)); s->buffers = av_mallocz_array(s->nb_channels, sizeof(*s->buffers)); s->input_frames = av_mallocz_array(s->inputs, sizeof(*s->input_frames)); if (!s->channels || !s->buffers|| !s->input_frames) return AVERROR(ENOMEM); for (i = 0; i < s->nb_channels; i++) { s->channels[i].out_channel = av_channel_layout_extract_channel(s->channel_layout, i); s->channels[i].input = -1; } if ((ret = parse_maps(ctx)) < 0) return ret; for (i = 0; i < s->inputs; i++) { char name[32]; AVFilterPad pad = { 0 }; snprintf(name, sizeof(name), \"input%d\", i); pad.type = AVMEDIA_TYPE_AUDIO; pad.name = av_strdup(name); if (!pad.name) return AVERROR(ENOMEM); pad.filter_frame = filter_frame; pad.needs_fifo = 1; ff_insert_inpad(ctx, i, &pad); } return 0; }"} {"target": 0, "idx": 26394, "func": "static int virtio_net_can_receive(VLANClientState *vc) { VirtIONet *n = vc->opaque; return do_virtio_net_can_receive(n, VIRTIO_NET_MAX_BUFSIZE); }"} {"target": 1, "idx": 26414, "func": "static av_cold int v4l2_encode_init(AVCodecContext *avctx) { V4L2m2mContext *s = avctx->priv_data; V4L2Context *capture = &s->capture; V4L2Context *output = &s->output; int ret; /* common settings output/capture */ output->height = capture->height = avctx->height; output->width = capture->width = avctx->width; /* output context */ output->av_codec_id = AV_CODEC_ID_RAWVIDEO; output->av_pix_fmt = avctx->pix_fmt; /* capture context */ capture->av_codec_id = avctx->codec_id; capture->av_pix_fmt = AV_PIX_FMT_NONE; ret = ff_v4l2_m2m_codec_init(avctx); if (ret) { 
av_log(avctx, AV_LOG_ERROR, \"can't configure encoder\\n\"); return ret; } return v4l2_prepare_encoder(s); }"} {"target": 0, "idx": 26423, "func": "static int nvdec_vp9_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) { NVDECContext *ctx = avctx->internal->hwaccel_priv_data; void *tmp; tmp = av_fast_realloc(ctx->slice_offsets, &ctx->slice_offsets_allocated, (ctx->nb_slices + 1) * sizeof(*ctx->slice_offsets)); if (!tmp) return AVERROR(ENOMEM); ctx->slice_offsets = tmp; if (!ctx->bitstream) ctx->bitstream = (uint8_t*)buffer; ctx->slice_offsets[ctx->nb_slices] = buffer - ctx->bitstream; ctx->bitstream_len += size; ctx->nb_slices++; return 0; }"} {"target": 1, "idx": 26426, "func": "bool visit_start_union(Visitor *v, bool data_present, Error **errp) { if (v->start_union) { return v->start_union(v, data_present, errp); } return true; }"} {"target": 0, "idx": 26430, "func": "static int mxf_read_header(AVFormatContext *s) { MXFContext *mxf = s->priv_data; KLVPacket klv; int64_t essence_offset = 0; int ret; mxf->last_forward_tell = INT64_MAX; mxf->edit_units_per_packet = 1; if (!mxf_read_sync(s->pb, mxf_header_partition_pack_key, 14)) { av_log(s, AV_LOG_ERROR, \"could not find header partition pack key\\n\"); return AVERROR_INVALIDDATA; } avio_seek(s->pb, -14, SEEK_CUR); mxf->fc = s; mxf->run_in = avio_tell(s->pb); mxf_read_random_index_pack(s); while (!url_feof(s->pb)) { const MXFMetadataReadTableEntry *metadata; if (klv_read_packet(&klv, s->pb) < 0) { /* EOF - seek to previous partition or stop */ if(mxf_parse_handle_partition_or_eof(mxf) <= 0) break; else continue; } PRINT_KEY(s, \"read header\", klv.key); av_dlog(s, \"size %\"PRIu64\" offset %#\"PRIx64\"\\n\", klv.length, klv.offset); if (IS_KLV_KEY(klv.key, mxf_encrypted_triplet_key) || IS_KLV_KEY(klv.key, mxf_essence_element_key) || IS_KLV_KEY(klv.key, mxf_avid_essence_element_key) || IS_KLV_KEY(klv.key, mxf_system_item_key)) { if (!mxf->current_partition) { av_log(mxf->fc, AV_LOG_ERROR, \"found essence prior to first PartitionPack\\n\"); return AVERROR_INVALIDDATA; } if (!mxf->current_partition->essence_offset) { /* for OP1a we compute essence_offset * for OPAtom we point essence_offset after the KL (usually op1a_essence_offset + 20 or 25) * TODO: for OP1a we could eliminate this entire if statement, always stopping parsing at op1a_essence_offset * for OPAtom we still need the actual essence_offset though (the KL's length can vary) */ int64_t op1a_essence_offset = round_to_kag(mxf->current_partition->this_partition + mxf->current_partition->pack_length, mxf->current_partition->kag_size) + round_to_kag(mxf->current_partition->header_byte_count, mxf->current_partition->kag_size) + round_to_kag(mxf->current_partition->index_byte_count, mxf->current_partition->kag_size); if (mxf->op == OPAtom) { /* point essence_offset to the actual data * OPAtom has all the essence in one big KLV */ mxf->current_partition->essence_offset = avio_tell(s->pb); mxf->current_partition->essence_length = klv.length; } else { /* NOTE: op1a_essence_offset may be less than to klv.offset (C0023S01.mxf) */ mxf->current_partition->essence_offset = op1a_essence_offset; } } if (!essence_offset) essence_offset = klv.offset; /* seek to footer, previous partition or stop */ if (mxf_parse_handle_essence(mxf) <= 0) break; continue; } else if (!memcmp(klv.key, mxf_header_partition_pack_key, 13) && klv.key[13] >= 2 && klv.key[13] <= 4 && mxf->current_partition) { /* next partition pack - keep going, seek to previous partition or stop */ 
if(mxf_parse_handle_partition_or_eof(mxf) <= 0) break; else if (mxf->parsing_backward) continue; /* we're still parsing forward. proceed to parsing this partition pack */ } for (metadata = mxf_metadata_read_table; metadata->read; metadata++) { if (IS_KLV_KEY(klv.key, metadata->key)) { int res; if (klv.key[5] == 0x53) { res = mxf_read_local_tags(mxf, &klv, metadata->read, metadata->ctx_size, metadata->type); } else { uint64_t next = avio_tell(s->pb) + klv.length; res = metadata->read(mxf, s->pb, 0, klv.length, klv.key, klv.offset); /* only seek forward, else this can loop for a long time */ if (avio_tell(s->pb) > next) { av_log(s, AV_LOG_ERROR, \"read past end of KLV @ %#\"PRIx64\"\\n\", klv.offset); return AVERROR_INVALIDDATA; } avio_seek(s->pb, next, SEEK_SET); } if (res < 0) { av_log(s, AV_LOG_ERROR, \"error reading header metadata\\n\"); return res; } break; } } if (!metadata->read) avio_skip(s->pb, klv.length); } /* FIXME avoid seek */ if (!essence_offset) { av_log(s, AV_LOG_ERROR, \"no essence\\n\"); return AVERROR_INVALIDDATA; } avio_seek(s->pb, essence_offset, SEEK_SET); mxf_compute_essence_containers(mxf); /* we need to do this before computing the index tables * to be able to fill in zero IndexDurations with st->duration */ if ((ret = mxf_parse_structural_metadata(mxf)) < 0) goto fail; if ((ret = mxf_compute_index_tables(mxf)) < 0) goto fail; if (mxf->nb_index_tables > 1) { /* TODO: look up which IndexSID to use via EssenceContainerData */ av_log(mxf->fc, AV_LOG_INFO, \"got %i index tables - only the first one (IndexSID %i) will be used\\n\", mxf->nb_index_tables, mxf->index_tables[0].index_sid); } else if (mxf->nb_index_tables == 0 && mxf->op == OPAtom) { av_log(mxf->fc, AV_LOG_ERROR, \"cannot demux OPAtom without an index\\n\"); ret = AVERROR_INVALIDDATA; goto fail; } mxf_handle_small_eubc(s); return 0; fail: mxf_read_close(s); return ret; }"} {"target": 1, "idx": 26445, "func": "static bool arm_cpu_has_work(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); return !cpu->powered_off && cs->interrupt_request & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_EXITTB); }"} {"target": 0, "idx": 26452, "func": "exynos4210_combiner_read(void *opaque, target_phys_addr_t offset, unsigned size) { struct Exynos4210CombinerState *s = (struct Exynos4210CombinerState *)opaque; uint32_t req_quad_base_n; /* Base of registers quad. 
Multiply it by 4 and get a start of corresponding group quad */ uint32_t grp_quad_base_n; /* Base of group quad */ uint32_t reg_n; /* Register number inside the quad */ uint32_t val; req_quad_base_n = offset >> 4; grp_quad_base_n = req_quad_base_n << 2; reg_n = (offset - (req_quad_base_n << 4)) >> 2; if (req_quad_base_n >= IIC_NGRP) { /* Read of ICIPSR register */ return s->icipsr[reg_n]; } val = 0; switch (reg_n) { /* IISTR */ case 2: val |= s->group[grp_quad_base_n].src_pending; val |= s->group[grp_quad_base_n + 1].src_pending << 8; val |= s->group[grp_quad_base_n + 2].src_pending << 16; val |= s->group[grp_quad_base_n + 3].src_pending << 24; break; /* IIMSR */ case 3: val |= s->group[grp_quad_base_n].src_mask & s->group[grp_quad_base_n].src_pending; val |= (s->group[grp_quad_base_n + 1].src_mask & s->group[grp_quad_base_n + 1].src_pending) << 8; val |= (s->group[grp_quad_base_n + 2].src_mask & s->group[grp_quad_base_n + 2].src_pending) << 16; val |= (s->group[grp_quad_base_n + 3].src_mask & s->group[grp_quad_base_n + 3].src_pending) << 24; break; default: if (offset >> 2 >= IIC_REGSET_SIZE) { hw_error(\"exynos4210.combiner: overflow of reg_set by 0x\" TARGET_FMT_plx \"offset\\n\", offset); } val = s->reg_set[offset >> 2]; return 0; } return val; }"} {"target": 0, "idx": 26459, "func": "static void int8x8_fmul_int32_c(float *dst, const int8_t *src, int scale) { float fscale = scale / 16.0; int i; for (i = 0; i < 8; i++) dst[i] = src[i] * fscale; }"} {"target": 0, "idx": 26463, "func": "static inline void gen_movcf_d (DisasContext *ctx, int fs, int fd, int cc, int tf) { int cond; TCGv_i32 t0 = tcg_temp_new_i32(); TCGv_i64 fp0; int l1 = gen_new_label(); if (tf) cond = TCG_COND_EQ; else cond = TCG_COND_NE; tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc)); tcg_gen_brcondi_i32(cond, t0, 0, l1); tcg_temp_free_i32(t0); fp0 = tcg_temp_new_i64(); gen_load_fpr64(ctx, fp0, fs); gen_store_fpr64(ctx, fp0, fd); tcg_temp_free_i64(fp0); gen_set_label(l1); }"} {"target": 0, "idx": 26464, "func": "int kvm_irqchip_remove_irqfd(KVMState *s, int fd, int virq) { return -ENOSYS; }"} {"target": 0, "idx": 26465, "func": "static void noop_conv (st_sample_t *dst, const void *src, int samples, volume_t *vol) { (void) src; (void) dst; (void) samples; (void) vol; }"} {"target": 0, "idx": 26479, "func": "static void vga_ioport_write(void *opaque, uint32_t addr, uint32_t val) { CirrusVGAState *s = opaque; int index; /* check port range access depending on color/monochrome mode */ if ((addr >= 0x3b0 && addr <= 0x3bf && (s->msr & MSR_COLOR_EMULATION)) || (addr >= 0x3d0 && addr <= 0x3df && !(s->msr & MSR_COLOR_EMULATION))) return; #ifdef DEBUG_VGA printf(\"VGA: write addr=0x%04x data=0x%02x\\n\", addr, val); #endif switch (addr) { case 0x3c0: if (s->ar_flip_flop == 0) { val &= 0x3f; s->ar_index = val; } else { index = s->ar_index & 0x1f; switch (index) { case 0x00 ... 
0x0f: s->ar[index] = val & 0x3f; break; case 0x10: s->ar[index] = val & ~0x10; break; case 0x11: s->ar[index] = val; break; case 0x12: s->ar[index] = val & ~0xc0; break; case 0x13: s->ar[index] = val & ~0xf0; break; case 0x14: s->ar[index] = val & ~0xf0; break; default: break; } } s->ar_flip_flop ^= 1; break; case 0x3c2: s->msr = val & ~0x10; break; case 0x3c4: s->sr_index = val; break; case 0x3c5: if (cirrus_hook_write_sr(s, s->sr_index, val)) break; #ifdef DEBUG_VGA_REG printf(\"vga: write SR%x = 0x%02x\\n\", s->sr_index, val); #endif s->sr[s->sr_index] = val & sr_mask[s->sr_index]; break; case 0x3c6: cirrus_write_hidden_dac(s, val); break; case 0x3c7: s->dac_read_index = val; s->dac_sub_index = 0; s->dac_state = 3; break; case 0x3c8: s->dac_write_index = val; s->dac_sub_index = 0; s->dac_state = 0; break; case 0x3c9: if (cirrus_hook_write_palette(s, val)) break; s->dac_cache[s->dac_sub_index] = val; if (++s->dac_sub_index == 3) { memcpy(&s->palette[s->dac_write_index * 3], s->dac_cache, 3); s->dac_sub_index = 0; s->dac_write_index++; } break; case 0x3ce: s->gr_index = val; break; case 0x3cf: if (cirrus_hook_write_gr(s, s->gr_index, val)) break; #ifdef DEBUG_VGA_REG printf(\"vga: write GR%x = 0x%02x\\n\", s->gr_index, val); #endif s->gr[s->gr_index] = val & gr_mask[s->gr_index]; break; case 0x3b4: case 0x3d4: s->cr_index = val; break; case 0x3b5: case 0x3d5: if (cirrus_hook_write_cr(s, s->cr_index, val)) break; #ifdef DEBUG_VGA_REG printf(\"vga: write CR%x = 0x%02x\\n\", s->cr_index, val); #endif /* handle CR0-7 protection */ if ((s->cr[11] & 0x80) && s->cr_index <= 7) { /* can always write bit 4 of CR7 */ if (s->cr_index == 7) s->cr[7] = (s->cr[7] & ~0x10) | (val & 0x10); return; } switch (s->cr_index) { case 0x01: /* horizontal display end */ case 0x07: case 0x09: case 0x0c: case 0x0d: case 0x12: /* veritcal display end */ s->cr[s->cr_index] = val; break; default: s->cr[s->cr_index] = val; break; } break; case 0x3ba: case 0x3da: s->fcr = val & 0x10; break; } }"} {"target": 1, "idx": 26509, "func": "static int xen_pt_cmd_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, uint16_t *value, uint16_t valid_mask) { XenPTRegInfo *reg = cfg_entry->reg; uint16_t valid_emu_mask = 0; uint16_t emu_mask = reg->emu_mask; if (s->is_virtfn) { emu_mask |= PCI_COMMAND_MEMORY; } /* emulate word register */ valid_emu_mask = emu_mask & valid_mask; *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); return 0; }"} {"target": 1, "idx": 26516, "func": "static int virtio_net_has_buffers(VirtIONet *n, int bufsize) { if (virtio_queue_empty(n->rx_vq) || (n->mergeable_rx_bufs && !virtqueue_avail_bytes(n->rx_vq, bufsize, 0))) { virtio_queue_set_notification(n->rx_vq, 1); return 0; } virtio_queue_set_notification(n->rx_vq, 0); return 1; }"} {"target": 0, "idx": 26521, "func": "static int mov_read_chap(MOVContext *c, AVIOContext *pb, MOVAtom atom) { c->chapter_track = avio_rb32(pb); return 0; }"} {"target": 1, "idx": 26522, "func": "static void qbus_initfn(Object *obj) { BusState *bus = BUS(obj); QTAILQ_INIT(&bus->children); object_property_add_link(obj, QDEV_HOTPLUG_HANDLER_PROPERTY, TYPE_HOTPLUG_HANDLER, (Object **)&bus->hotplug_handler, NULL); object_property_add_bool(obj, \"realized\", bus_get_realized, bus_set_realized, NULL); }"} {"target": 0, "idx": 26525, "func": "static void final(Real144_internal *glob, short *i1, short *i2, void *out, int *statbuf, int len) { int x, sum; int buffer[10]; short *ptr; short *ptr2; memcpy(glob->work, statbuf,20); memcpy(glob->work + 10, i2, len * 2); 
buffer[9] = i1[0]; buffer[8] = i1[1]; buffer[7] = i1[2]; buffer[6] = i1[3]; buffer[5] = i1[4]; buffer[4] = i1[5]; buffer[3] = i1[6]; buffer[2] = i1[7]; buffer[1] = i1[8]; buffer[0] = i1[9]; ptr2 = (ptr = glob->work) + len; while (ptr < ptr2) { for(sum=0, x=0; x<=9; x++) sum += buffer[x] * (ptr[x]); sum = sum >> 12; x = ptr[10] - sum; if (x<-32768 || x>32767) { memset(out, 0, len * 2); memset(statbuf, 0, 20); return; } ptr[10] = x; ptr++; } memcpy(out, ptr+10 - len, len * 2); memcpy(statbuf, ptr, 20); }"} {"target": 1, "idx": 26527, "func": "static ssize_t mipsnet_receive(NetClientState *nc, const uint8_t *buf, size_t size) { MIPSnetState *s = qemu_get_nic_opaque(nc); trace_mipsnet_receive(size); if (!mipsnet_can_receive(nc)) s->busy = 1; /* Just accept everything. */ /* Write packet data. */ memcpy(s->rx_buffer, buf, size); s->rx_count = size; s->rx_read = 0; /* Now we can signal we have received something. */ s->intctl |= MIPSNET_INTCTL_RXDONE; mipsnet_update_irq(s); return size;"} {"target": 0, "idx": 26537, "func": "static AVBufferRef *vaapi_encode_alloc_output_buffer(void *opaque, int size) { AVCodecContext *avctx = opaque; VAAPIEncodeContext *ctx = avctx->priv_data; VABufferID buffer_id; VAStatus vas; AVBufferRef *ref; // The output buffer size is fixed, so it needs to be large enough // to hold the largest possible compressed frame. We assume here // that the uncompressed frame plus some header data is an upper // bound on that. vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context, VAEncCodedBufferType, 3 * ctx->aligned_width * ctx->aligned_height + (1 << 16), 1, 0, &buffer_id); if (vas != VA_STATUS_SUCCESS) { av_log(avctx, AV_LOG_ERROR, \"Failed to create bitstream \" \"output buffer: %d (%s).\\n\", vas, vaErrorStr(vas)); return NULL; } av_log(avctx, AV_LOG_DEBUG, \"Allocated output buffer %#x\\n\", buffer_id); ref = av_buffer_create((uint8_t*)(uintptr_t)buffer_id, sizeof(buffer_id), &vaapi_encode_free_output_buffer, avctx, AV_BUFFER_FLAG_READONLY); if (!ref) { vaDestroyBuffer(ctx->hwctx->display, buffer_id); return NULL; } return ref; }"} {"target": 0, "idx": 26566, "func": "static struct omap_tipb_bridge_s *omap_tipb_bridge_init( MemoryRegion *memory, target_phys_addr_t base, qemu_irq abort_irq, omap_clk clk) { struct omap_tipb_bridge_s *s = (struct omap_tipb_bridge_s *) g_malloc0(sizeof(struct omap_tipb_bridge_s)); s->abort = abort_irq; omap_tipb_bridge_reset(s); memory_region_init_io(&s->iomem, &omap_tipb_bridge_ops, s, \"omap-tipb-bridge\", 0x100); memory_region_add_subregion(memory, base, &s->iomem); return s; }"} {"target": 0, "idx": 26569, "func": "void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg) { thread_pool_submit_aio(pool, func, arg, NULL, NULL); }"} {"target": 0, "idx": 26573, "func": "int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) { BlockDriver *drv = bs->drv; if (!drv) { return -ENOMEDIUM; } else if (drv->bdrv_save_vmstate) { return drv->bdrv_save_vmstate(bs, qiov, pos); } else if (bs->file) { return bdrv_writev_vmstate(bs->file, qiov, pos); } return -ENOTSUP; }"} {"target": 0, "idx": 26576, "func": "static int local_statfs(FsContext *s, V9fsPath *fs_path, struct statfs *stbuf) { char buffer[PATH_MAX]; char *path = fs_path->data; return statfs(rpath(s, path, buffer), stbuf); }"} {"target": 1, "idx": 26584, "func": "static void init_multbl2(uint8_t tbl[1024], const int c[4], const uint8_t *log8, const uint8_t *alog8, const uint8_t *sbox) { int i, j; for (i = 0; i < 1024; i++) { int x = sbox[i >> 2]; if (x) 
tbl[i] = alog8[log8[x] + log8[c[i & 3]]]; } #if !CONFIG_SMALL for (j = 256; j < 1024; j++) for (i = 0; i < 4; i++) tbl[4*j + i] = tbl[4*j + ((i - 1) & 3) - 1024]; #endif }"} {"target": 0, "idx": 26600, "func": "static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n) { struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs); int r; kmsrs->nmsrs = n; memcpy(kmsrs->entries, msrs, n * sizeof *msrs); r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs); memcpy(msrs, kmsrs->entries, n * sizeof *msrs); free(kmsrs); return r; }"} {"target": 0, "idx": 26606, "func": "static void do_balloon(Monitor *mon, const QDict *qdict, QObject **ret_data) { int value = qdict_get_int(qdict, \"value\"); ram_addr_t target = value; qemu_balloon(target << 20); }"} {"target": 0, "idx": 26614, "func": "static void blur(uint8_t *dst, const int dst_linesize, const uint8_t *src, const int src_linesize, const int w, const int h, FilterParam *fp) { int x, y; FilterParam f = *fp; const int radius = f.dist_width/2; const uint8_t * const src2[NB_PLANES] = { src }; int src2_linesize[NB_PLANES] = { src_linesize }; uint8_t *dst2[NB_PLANES] = { f.pre_filter_buf }; int dst2_linesize[NB_PLANES] = { f.pre_filter_linesize }; sws_scale(f.pre_filter_context, src2, src2_linesize, 0, h, dst2, dst2_linesize); #define UPDATE_FACTOR do { \\ int factor; \\ factor = f.color_diff_coeff[COLOR_DIFF_COEFF_SIZE/2 + pre_val - \\ f.pre_filter_buf[ix + iy*f.pre_filter_linesize]] * f.dist_coeff[dx + dy*f.dist_linesize]; \\ sum += src[ix + iy*src_linesize] * factor; \\ div += factor; \\ } while (0) for (y = 0; y < h; y++) { for (x = 0; x < w; x++) { int sum = 0; int div = 0; int dy; const int pre_val = f.pre_filter_buf[x + y*f.pre_filter_linesize]; if (x >= radius && x < w - radius) { for (dy = 0; dy < radius*2 + 1; dy++) { int dx; int iy = y+dy - radius; if (iy < 0) iy = -iy; else if (iy >= h) iy = h+h-iy-1; for (dx = 0; dx < radius*2 + 1; dx++) { const int ix = x+dx - radius; UPDATE_FACTOR; } } } else { for (dy = 0; dy < radius*2+1; dy++) { int dx; int iy = y+dy - radius; if (iy < 0) iy = -iy; else if (iy >= h) iy = h+h-iy-1; for (dx = 0; dx < radius*2 + 1; dx++) { int ix = x+dx - radius; if (ix < 0) ix = -ix; else if (ix >= w) ix = w+w-ix-1; UPDATE_FACTOR; } } } dst[x + y*dst_linesize] = (sum + div/2) / div; } } }"} {"target": 1, "idx": 26616, "func": "FWCfgState *pc_memory_init(PCMachineState *pcms, MemoryRegion *system_memory, MemoryRegion *rom_memory, MemoryRegion **ram_memory, PcGuestInfo *guest_info) { int linux_boot, i; MemoryRegion *ram, *option_rom_mr; MemoryRegion *ram_below_4g, *ram_above_4g; FWCfgState *fw_cfg; MachineState *machine = MACHINE(pcms); assert(machine->ram_size == pcms->below_4g_mem_size + pcms->above_4g_mem_size); linux_boot = (machine->kernel_filename != NULL); /* Allocate RAM. We allocate it as a single memory region and use * aliases to address portions of it, mostly for backwards compatibility * with older qemus that used qemu_ram_alloc(). 
*/ ram = g_malloc(sizeof(*ram)); memory_region_allocate_system_memory(ram, NULL, \"pc.ram\", machine->ram_size); *ram_memory = ram; ram_below_4g = g_malloc(sizeof(*ram_below_4g)); memory_region_init_alias(ram_below_4g, NULL, \"ram-below-4g\", ram, 0, pcms->below_4g_mem_size); memory_region_add_subregion(system_memory, 0, ram_below_4g); e820_add_entry(0, pcms->below_4g_mem_size, E820_RAM); if (pcms->above_4g_mem_size > 0) { ram_above_4g = g_malloc(sizeof(*ram_above_4g)); memory_region_init_alias(ram_above_4g, NULL, \"ram-above-4g\", ram, pcms->below_4g_mem_size, pcms->above_4g_mem_size); memory_region_add_subregion(system_memory, 0x100000000ULL, ram_above_4g); e820_add_entry(0x100000000ULL, pcms->above_4g_mem_size, E820_RAM); } if (!guest_info->has_reserved_memory && (machine->ram_slots || (machine->maxram_size > machine->ram_size))) { MachineClass *mc = MACHINE_GET_CLASS(machine); error_report(\"\\\"-memory 'slots|maxmem'\\\" is not supported by: %s\", mc->name); exit(EXIT_FAILURE); } /* initialize hotplug memory address space */ if (guest_info->has_reserved_memory && (machine->ram_size < machine->maxram_size)) { ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size; if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) { error_report(\"unsupported amount of memory slots: %\"PRIu64, machine->ram_slots); exit(EXIT_FAILURE); } if (QEMU_ALIGN_UP(machine->maxram_size, TARGET_PAGE_SIZE) != machine->maxram_size) { error_report(\"maximum memory size must by aligned to multiple of \" \"%d bytes\", TARGET_PAGE_SIZE); exit(EXIT_FAILURE); } pcms->hotplug_memory.base = ROUND_UP(0x100000000ULL + pcms->above_4g_mem_size, 1ULL << 30); if (pcms->enforce_aligned_dimm) { /* size hotplug region assuming 1G page max alignment per slot */ hotplug_mem_size += (1ULL << 30) * machine->ram_slots; } if ((pcms->hotplug_memory.base + hotplug_mem_size) < hotplug_mem_size) { error_report(\"unsupported amount of maximum memory: \" RAM_ADDR_FMT, machine->maxram_size); exit(EXIT_FAILURE); } memory_region_init(&pcms->hotplug_memory.mr, OBJECT(pcms), \"hotplug-memory\", hotplug_mem_size); memory_region_add_subregion(system_memory, pcms->hotplug_memory.base, &pcms->hotplug_memory.mr); } /* Initialize PC system firmware */ pc_system_firmware_init(rom_memory, guest_info->isapc_ram_fw); option_rom_mr = g_malloc(sizeof(*option_rom_mr)); memory_region_init_ram(option_rom_mr, NULL, \"pc.rom\", PC_ROM_SIZE, &error_abort); vmstate_register_ram_global(option_rom_mr); memory_region_add_subregion_overlap(rom_memory, PC_ROM_MIN_VGA, option_rom_mr, 1); fw_cfg = bochs_bios_init(); rom_set_fw(fw_cfg); if (guest_info->has_reserved_memory && pcms->hotplug_memory.base) { uint64_t *val = g_malloc(sizeof(*val)); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); uint64_t res_mem_end = pcms->hotplug_memory.base; if (!pcmc->broken_reserved_end) { res_mem_end += memory_region_size(&pcms->hotplug_memory.mr); } *val = cpu_to_le64(ROUND_UP(res_mem_end, 0x1ULL << 30)); fw_cfg_add_file(fw_cfg, \"etc/reserved-memory-end\", val, sizeof(*val)); } if (linux_boot) { load_linux(pcms, fw_cfg); } for (i = 0; i < nb_option_roms; i++) { rom_add_option(option_rom[i].name, option_rom[i].bootindex); } guest_info->fw_cfg = fw_cfg; return fw_cfg; }"} {"target": 1, "idx": 26619, "func": "static void migrate_fd_cancel(MigrationState *s) { int old_state ; QEMUFile *f = migrate_get_current()->to_dst_file; trace_migrate_fd_cancel(); if (s->rp_state.from_dst_file) { /* shutdown the rp socket, so causing the rp thread to shutdown */ 
qemu_file_shutdown(s->rp_state.from_dst_file); do { old_state = s->state; if (!migration_is_setup_or_active(old_state)) { break; migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING); } while (s->state != MIGRATION_STATUS_CANCELLING); /* * If we're unlucky the migration code might be stuck somewhere in a * send/write while the network has failed and is waiting to timeout; * if we've got shutdown(2) available then we can force it to quit. * The outgoing qemu file gets closed in migrate_fd_cleanup that is * called in a bh, so there is no race against this cancel. */ if (s->state == MIGRATION_STATUS_CANCELLING && f) { qemu_file_shutdown(f);"} {"target": 0, "idx": 26629, "func": "static int decode_init_thread_copy(AVCodecContext *avctx) { H264Context *h = avctx->priv_data; int ret; if (!avctx->internal->is_copy) return 0; memset(h->sps_buffers, 0, sizeof(h->sps_buffers)); memset(h->pps_buffers, 0, sizeof(h->pps_buffers)); ret = h264_init_context(avctx, h); if (ret < 0) return ret; h->context_initialized = 0; return 0; }"} {"target": 0, "idx": 26646, "func": "static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr, bool is_write) { VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); IntelIOMMUState *s = vtd_as->iommu_state; IOMMUTLBEntry ret = { .target_as = &address_space_memory, .iova = addr, .translated_addr = 0, .addr_mask = ~(hwaddr)0, .perm = IOMMU_NONE, }; if (!s->dmar_enabled) { /* DMAR disabled, passthrough, use 4k-page*/ ret.iova = addr & VTD_PAGE_MASK_4K; ret.translated_addr = addr & VTD_PAGE_MASK_4K; ret.addr_mask = ~VTD_PAGE_MASK_4K; ret.perm = IOMMU_RW; return ret; } vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, addr, is_write, &ret); VTD_DPRINTF(MMU, \"bus %\"PRIu8 \" slot %\"PRIu8 \" func %\"PRIu8 \" devfn %\"PRIu8 \" iova 0x%\"PRIx64 \" hpa 0x%\"PRIx64, pci_bus_num(vtd_as->bus), VTD_PCI_SLOT(vtd_as->devfn), VTD_PCI_FUNC(vtd_as->devfn), vtd_as->devfn, addr, ret.translated_addr); return ret; }"} {"target": 0, "idx": 26647, "func": "static gboolean io_watch_poll_check(GSource *source) { IOWatchPoll *iwp = io_watch_poll_from_source(source); if (iwp->max_size == 0) { return FALSE; } return g_io_watch_funcs.check(source); }"} {"target": 0, "idx": 26664, "func": "static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in) { AVFilterContext *last_filter; const AVFilter *abuffer_filt = avfilter_get_by_name(\"abuffer\"); InputStream *ist = ifilter->ist; InputFile *f = input_files[ist->file_index]; char args[255], name[255]; int ret, pad_idx = 0; snprintf(args, sizeof(args), \"time_base=%d/%d:sample_rate=%d:sample_fmt=%s\" \":channel_layout=0x%\"PRIx64, 1, ist->st->codec->sample_rate, ist->st->codec->sample_rate, av_get_sample_fmt_name(ist->st->codec->sample_fmt), ist->st->codec->channel_layout); snprintf(name, sizeof(name), \"graph %d input from stream %d:%d\", fg->index, ist->file_index, ist->st->index); if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt, name, args, NULL, fg->graph)) < 0) return ret; last_filter = ifilter->filter; if (audio_sync_method > 0) { AVFilterContext *async; int len = 0; av_log(NULL, AV_LOG_WARNING, \"-async has been deprecated. 
Used the \" \"asyncts audio filter instead.\\n\"); if (audio_sync_method > 1) len += snprintf(args + len, sizeof(args) - len, \"compensate=1:\" \"max_comp=%d:\", audio_sync_method); snprintf(args + len, sizeof(args) - len, \"min_delta=%f\", audio_drift_threshold); snprintf(name, sizeof(name), \"graph %d audio sync for input stream %d:%d\", fg->index, ist->file_index, ist->st->index); ret = avfilter_graph_create_filter(&async, avfilter_get_by_name(\"asyncts\"), name, args, NULL, fg->graph); if (ret < 0) return ret; ret = avfilter_link(last_filter, 0, async, 0); if (ret < 0) return ret; last_filter = async; } if (audio_volume != 256) { AVFilterContext *volume; av_log(NULL, AV_LOG_WARNING, \"-vol has been deprecated. Use the volume \" \"audio filter instead.\\n\"); snprintf(args, sizeof(args), \"volume=%f\", audio_volume / 256.0); snprintf(name, sizeof(name), \"graph %d volume for input stream %d:%d\", fg->index, ist->file_index, ist->st->index); ret = avfilter_graph_create_filter(&volume, avfilter_get_by_name(\"volume\"), name, args, NULL, fg->graph); if (ret < 0) return ret; ret = avfilter_link(last_filter, 0, volume, 0); if (ret < 0) return ret; last_filter = volume; } snprintf(name, sizeof(name), \"trim for input stream %d:%d\", ist->file_index, ist->st->index); ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ? AV_NOPTS_VALUE : 0, INT64_MAX, &last_filter, &pad_idx, name); if (ret < 0) return ret; if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0) return ret; return 0; }"} {"target": 0, "idx": 26665, "func": "static int mpegts_write_packet_internal(AVFormatContext *s, AVPacket *pkt) { AVStream *st = s->streams[pkt->stream_index]; int size = pkt->size; uint8_t *buf = pkt->data; uint8_t *data = NULL; MpegTSWrite *ts = s->priv_data; MpegTSWriteStream *ts_st = st->priv_data; const uint64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE) * 2; int64_t dts = AV_NOPTS_VALUE, pts = AV_NOPTS_VALUE; if (ts->reemit_pat_pmt) { av_log(s, AV_LOG_WARNING, \"resend_headers option is deprecated, use -mpegts_flags resend_headers\\n\"); ts->reemit_pat_pmt = 0; ts->flags |= MPEGTS_FLAG_REEMIT_PAT_PMT; } if (ts->flags & MPEGTS_FLAG_REEMIT_PAT_PMT) { ts->pat_packet_count = ts->pat_packet_period - 1; ts->sdt_packet_count = ts->sdt_packet_period - 1; ts->flags &= ~MPEGTS_FLAG_REEMIT_PAT_PMT; } if (pkt->pts != AV_NOPTS_VALUE) pts = pkt->pts + delay; if (pkt->dts != AV_NOPTS_VALUE) dts = pkt->dts + delay; if (ts_st->first_pts_check && pts == AV_NOPTS_VALUE) { av_log(s, AV_LOG_ERROR, \"first pts value must set\\n\"); return AVERROR(EINVAL); } ts_st->first_pts_check = 0; if (st->codec->codec_id == AV_CODEC_ID_H264) { const uint8_t *p = buf, *buf_end = p + size; uint32_t state = -1; if (pkt->size < 5 || AV_RB32(pkt->data) != 0x0000001) { av_log(s, AV_LOG_ERROR, \"H.264 bitstream malformed, \" \"no startcode found, use -bsf h264_mp4toannexb\\n\"); return AVERROR(EINVAL); } do { p = avpriv_find_start_code(p, buf_end, &state); av_dlog(s, \"nal %d\\n\", state & 0x1f); } while (p < buf_end && (state & 0x1f) != 9 && (state & 0x1f) != 5 && (state & 0x1f) != 1); if ((state & 0x1f) != 9) { // AUD NAL data = av_malloc(pkt->size + 6); if (!data) return AVERROR(ENOMEM); memcpy(data + 6, pkt->data, pkt->size); AV_WB32(data, 0x00000001); data[4] = 0x09; data[5] = 0xf0; // any slice type (0xe) + rbsp stop one bit buf = data; size = pkt->size + 6; } } else if (st->codec->codec_id == AV_CODEC_ID_AAC) { if (pkt->size < 2) { av_log(s, AV_LOG_ERROR, \"AAC packet too short\\n\"); 
return AVERROR(EINVAL); } if ((AV_RB16(pkt->data) & 0xfff0) != 0xfff0) { int ret; AVPacket pkt2; if (!ts_st->amux) { av_log(s, AV_LOG_ERROR, \"AAC bitstream not in ADTS format \" \"and extradata missing\\n\"); return AVERROR(EINVAL); } av_init_packet(&pkt2); pkt2.data = pkt->data; pkt2.size = pkt->size; ret = avio_open_dyn_buf(&ts_st->amux->pb); if (ret < 0) return AVERROR(ENOMEM); ret = av_write_frame(ts_st->amux, &pkt2); if (ret < 0) { avio_close_dyn_buf(ts_st->amux->pb, &data); ts_st->amux->pb = NULL; av_free(data); return ret; } size = avio_close_dyn_buf(ts_st->amux->pb, &data); ts_st->amux->pb = NULL; buf = data; } } if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO) { // for video and subtitle, write a single pes packet mpegts_write_pes(s, st, buf, size, pts, dts, pkt->flags & AV_PKT_FLAG_KEY); av_free(data); return 0; } if (ts_st->payload_size + size > ts->pes_payload_size) { if (ts_st->payload_size) { mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_size, ts_st->payload_pts, ts_st->payload_dts, ts_st->payload_flags & AV_PKT_FLAG_KEY); ts_st->payload_size = 0; } if (size > ts->pes_payload_size) { mpegts_write_pes(s, st, buf, size, pts, dts, pkt->flags & AV_PKT_FLAG_KEY); av_free(data); return 0; } } if (!ts_st->payload_size) { ts_st->payload_pts = pts; ts_st->payload_dts = dts; ts_st->payload_flags = pkt->flags; } memcpy(ts_st->payload + ts_st->payload_size, buf, size); ts_st->payload_size += size; av_free(data); return 0; }"} {"target": 0, "idx": 26676, "func": "void qmp_object_add(const char *type, const char *id, bool has_props, QObject *props, Error **errp) { const QDict *pdict = NULL; QmpInputVisitor *qiv; Object *obj; if (props) { pdict = qobject_to_qdict(props); if (!pdict) { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, \"props\", \"dict\"); return; } } qiv = qmp_input_visitor_new(props, false); obj = user_creatable_add_type(type, id, pdict, qmp_input_get_visitor(qiv), errp); qmp_input_visitor_cleanup(qiv); if (obj) { object_unref(obj); } }"} {"target": 1, "idx": 26715, "func": "static inline TranslationBlock *tb_find(CPUState *cpu, TranslationBlock *last_tb, int tb_exit) { CPUArchState *env = (CPUArchState *)cpu->env_ptr; TranslationBlock *tb; target_ulong cs_base, pc; uint32_t flags; bool acquired_tb_lock = false; /* we record a subset of the CPU state. It will always be the same before a given translated block is executed. */ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]); if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base || tb->flags != flags || tb->trace_vcpu_dstate != *cpu->trace_dstate)) { tb = tb_htable_lookup(cpu, pc, cs_base, flags); if (!tb) { /* mmap_lock is needed by tb_gen_code, and mmap_lock must be * taken outside tb_lock. As system emulation is currently * single threaded the locks are NOPs. */ mmap_lock(); tb_lock(); acquired_tb_lock = true; /* There's a chance that our desired tb has been translated while * taking the locks so we check again inside the lock. */ tb = tb_htable_lookup(cpu, pc, cs_base, flags); if (!tb) { /* if no translated code available, then translate it now */ tb = tb_gen_code(cpu, pc, cs_base, flags, 0); } mmap_unlock(); } /* We add the TB in the virtual pc hash table for the fast lookup */ atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); } #ifndef CONFIG_USER_ONLY /* We don't take care of direct jumps when address mapping changes in * system emulation. 
So it's not safe to make a direct jump to a TB * spanning two pages because the mapping for the second page can change. */ if (tb->page_addr[1] != -1) { last_tb = NULL; } #endif /* See if we can patch the calling TB. */ if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { if (!acquired_tb_lock) { tb_lock(); acquired_tb_lock = true; } if (!tb->invalid) { tb_add_jump(last_tb, tb_exit, tb); } } if (acquired_tb_lock) { tb_unlock(); } return tb; }"} {"target": 1, "idx": 26727, "func": "static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc) { #if ARCH_X86 #if COMPILE_TEMPLATE_MMX2 int32_t *filterPos = c->hLumFilterPos; int16_t *filter = c->hLumFilter; int canMMX2BeUsed = c->canMMX2BeUsed; void *mmx2FilterCode= c->lumMmx2FilterCode; int i; #if defined(PIC) DECLARE_ALIGNED(8, uint64_t, ebxsave); #endif if (canMMX2BeUsed) { __asm__ volatile( #if defined(PIC) \"mov %%\"REG_b\", %5 \\n\\t\" #endif \"pxor %%mm7, %%mm7 \\n\\t\" \"mov %0, %%\"REG_c\" \\n\\t\" \"mov %1, %%\"REG_D\" \\n\\t\" \"mov %2, %%\"REG_d\" \\n\\t\" \"mov %3, %%\"REG_b\" \\n\\t\" \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\" // i PREFETCH\" (%%\"REG_c\") \\n\\t\" PREFETCH\" 32(%%\"REG_c\") \\n\\t\" PREFETCH\" 64(%%\"REG_c\") \\n\\t\" #if ARCH_X86_64 #define CALL_MMX2_FILTER_CODE \\ \"movl (%%\"REG_b\"), %%esi \\n\\t\"\\ \"call *%4 \\n\\t\"\\ \"movl (%%\"REG_b\", %%\"REG_a\"), %%esi \\n\\t\"\\ \"add %%\"REG_S\", %%\"REG_c\" \\n\\t\"\\ \"add %%\"REG_a\", %%\"REG_D\" \\n\\t\"\\ \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\"\\ #else #define CALL_MMX2_FILTER_CODE \\ \"movl (%%\"REG_b\"), %%esi \\n\\t\"\\ \"call *%4 \\n\\t\"\\ \"addl (%%\"REG_b\", %%\"REG_a\"), %%\"REG_c\" \\n\\t\"\\ \"add %%\"REG_a\", %%\"REG_D\" \\n\\t\"\\ \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\"\\ #endif /* ARCH_X86_64 */ CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE #if defined(PIC) \"mov %5, %%\"REG_b\" \\n\\t\" #endif :: \"m\" (src), \"m\" (dst), \"m\" (filter), \"m\" (filterPos), \"m\" (mmx2FilterCode) #if defined(PIC) ,\"m\" (ebxsave) #endif : \"%\"REG_a, \"%\"REG_c, \"%\"REG_d, \"%\"REG_S, \"%\"REG_D #if !defined(PIC) ,\"%\"REG_b #endif ); for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128; } else { #endif /* COMPILE_TEMPLATE_MMX2 */ x86_reg dstWidth_reg = dstWidth; x86_reg xInc_shr16 = xInc >> 16; uint16_t xInc_mask = xInc & 0xffff; //NO MMX just normal asm ... 
__asm__ volatile( \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\" // i \"xor %%\"REG_d\", %%\"REG_d\" \\n\\t\" // xx \"xorl %%ecx, %%ecx \\n\\t\" // xalpha ASMALIGN(4) \"1: \\n\\t\" \"movzbl (%0, %%\"REG_d\"), %%edi \\n\\t\" //src[xx] \"movzbl 1(%0, %%\"REG_d\"), %%esi \\n\\t\" //src[xx+1] FAST_BILINEAR_X86 \"movw %%si, (%%\"REG_D\", %%\"REG_a\", 2) \\n\\t\" \"addw %4, %%cx \\n\\t\" //xalpha += xInc&0xFFFF \"adc %3, %%\"REG_d\" \\n\\t\" //xx+= xInc>>16 + carry \"movzbl (%0, %%\"REG_d\"), %%edi \\n\\t\" //src[xx] \"movzbl 1(%0, %%\"REG_d\"), %%esi \\n\\t\" //src[xx+1] FAST_BILINEAR_X86 \"movw %%si, 2(%%\"REG_D\", %%\"REG_a\", 2) \\n\\t\" \"addw %4, %%cx \\n\\t\" //xalpha += xInc&0xFFFF \"adc %3, %%\"REG_d\" \\n\\t\" //xx+= xInc>>16 + carry \"add $2, %%\"REG_a\" \\n\\t\" \"cmp %2, %%\"REG_a\" \\n\\t\" \" jb 1b \\n\\t\" :: \"r\" (src), \"m\" (dst), \"m\" (dstWidth_reg), \"m\" (xInc_shr16), \"m\" (xInc_mask) : \"%\"REG_a, \"%\"REG_d, \"%ecx\", \"%\"REG_D, \"%esi\" ); #if COMPILE_TEMPLATE_MMX2 } //if MMX2 can't be used #endif #else int i; unsigned int xpos=0; for (i=0;i>16; register unsigned int xalpha=(xpos&0xFFFF)>>9; dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha; xpos+=xInc; } #endif /* ARCH_X86 */ }"} {"target": 1, "idx": 26728, "func": "static int decrypt_init(AVFormatContext *s, ID3v2ExtraMeta *em, uint8_t *header) { OMAContext *oc = s->priv_data; ID3v2ExtraMetaGEOB *geob = NULL; uint8_t *gdata; oc->encrypted = 1; av_log(s, AV_LOG_INFO, \"File is encrypted\\n\"); /* find GEOB metadata */ while (em) { if (!strcmp(em->tag, \"GEOB\") && (geob = em->data) && (!strcmp(geob->description, \"OMG_LSI\") || !strcmp(geob->description, \"OMG_BKLSI\"))) { break; } em = em->next; } if (!em) { av_log(s, AV_LOG_ERROR, \"No encryption header found\\n\"); return AVERROR_INVALIDDATA; } if (geob->datasize < 64) { av_log(s, AV_LOG_ERROR, \"Invalid GEOB data size: %u\\n\", geob->datasize); return AVERROR_INVALIDDATA; } gdata = geob->data; if (AV_RB16(gdata) != 1) av_log(s, AV_LOG_WARNING, \"Unknown version in encryption header\\n\"); oc->k_size = AV_RB16(&gdata[2]); oc->e_size = AV_RB16(&gdata[4]); oc->i_size = AV_RB16(&gdata[6]); oc->s_size = AV_RB16(&gdata[8]); if (memcmp(&gdata[OMA_ENC_HEADER_SIZE], \"KEYRING \", 12)) { av_log(s, AV_LOG_ERROR, \"Invalid encryption header\\n\"); return AVERROR_INVALIDDATA; } oc->rid = AV_RB32(&gdata[OMA_ENC_HEADER_SIZE + 28]); av_log(s, AV_LOG_DEBUG, \"RID: %.8x\\n\", oc->rid); memcpy(oc->iv, &header[0x58], 8); hex_log(s, AV_LOG_DEBUG, \"IV\", oc->iv, 8); hex_log(s, AV_LOG_DEBUG, \"CBC-MAC\", &gdata[OMA_ENC_HEADER_SIZE + oc->k_size + oc->e_size + oc->i_size], 8); if (s->keylen > 0) { kset(s, s->key, s->key, s->keylen); } if (!memcmp(oc->r_val, (const uint8_t[8]){0}, 8) || rprobe(s, gdata, oc->r_val) < 0 && nprobe(s, gdata, geob->datasize, oc->n_val) < 0) { int i; for (i = 0; i < FF_ARRAY_ELEMS(leaf_table); i += 2) { uint8_t buf[16]; AV_WL64(buf, leaf_table[i]); AV_WL64(&buf[8], leaf_table[i + 1]); kset(s, buf, buf, 16); if (!rprobe(s, gdata, oc->r_val) || !nprobe(s, gdata, geob->datasize, oc->n_val)) break; } if (i >= sizeof(leaf_table)) { av_log(s, AV_LOG_ERROR, \"Invalid key\\n\"); return AVERROR_INVALIDDATA; } } /* e_val */ av_des_init(&oc->av_des, oc->m_val, 64, 0); av_des_crypt(&oc->av_des, oc->e_val, &gdata[OMA_ENC_HEADER_SIZE + 40], 1, NULL, 0); hex_log(s, AV_LOG_DEBUG, \"EK\", oc->e_val, 8); /* init e_val */ av_des_init(&oc->av_des, oc->e_val, 64, 1); return 0; }"} {"target": 1, "idx": 26739, "func": "void superh_cpu_do_interrupt(CPUState *cs) { SuperHCPU *cpu = 
SUPERH_CPU(cs); CPUSH4State *env = &cpu->env; int do_irq = cs->interrupt_request & CPU_INTERRUPT_HARD; int do_exp, irq_vector = cs->exception_index; /* prioritize exceptions over interrupts */ do_exp = cs->exception_index != -1; do_irq = do_irq && (cs->exception_index == -1); if (env->sr & (1u << SR_BL)) { if (do_exp && cs->exception_index != 0x1e0) { cs->exception_index = 0x000; /* masked exception -> reset */ } if (do_irq && !env->in_sleep) { return; /* masked */ } } env->in_sleep = 0; if (do_irq) { irq_vector = sh_intc_get_pending_vector(env->intc_handle, (env->sr >> 4) & 0xf); if (irq_vector == -1) { return; /* masked */ } } if (qemu_loglevel_mask(CPU_LOG_INT)) { const char *expname; switch (cs->exception_index) { case 0x0e0: expname = \"addr_error\"; break; case 0x040: expname = \"tlb_miss\"; break; case 0x0a0: expname = \"tlb_violation\"; break; case 0x180: expname = \"illegal_instruction\"; break; case 0x1a0: expname = \"slot_illegal_instruction\"; break; case 0x800: expname = \"fpu_disable\"; break; case 0x820: expname = \"slot_fpu\"; break; case 0x100: expname = \"data_write\"; break; case 0x060: expname = \"dtlb_miss_write\"; break; case 0x0c0: expname = \"dtlb_violation_write\"; break; case 0x120: expname = \"fpu_exception\"; break; case 0x080: expname = \"initial_page_write\"; break; case 0x160: expname = \"trapa\"; break; default: expname = do_irq ? \"interrupt\" : \"???\"; break; } qemu_log(\"exception 0x%03x [%s] raised\\n\", irq_vector, expname); log_cpu_state(cs, 0); } env->ssr = cpu_read_sr(env); env->spc = env->pc; env->sgr = env->gregs[15]; env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB); if (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { /* Branch instruction should be executed again before delay slot. */ env->spc -= 2; /* Clear flags for exception/interrupt routine. 
*/ env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); } if (do_exp) { env->expevt = cs->exception_index; switch (cs->exception_index) { case 0x000: case 0x020: case 0x140: env->sr &= ~(1u << SR_FD); env->sr |= 0xf << 4; /* IMASK */ env->pc = 0xa0000000; break; case 0x040: case 0x060: env->pc = env->vbr + 0x400; break; case 0x160: env->spc += 2; /* special case for TRAPA */ /* fall through */ default: env->pc = env->vbr + 0x100; break; } return; } if (do_irq) { env->intevt = irq_vector; env->pc = env->vbr + 0x600; return; } }"} {"target": 0, "idx": 26746, "func": "static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt) { SmackerContext *smk = s->priv_data; int flags; int ret; int i; int frame_size = 0; int palchange = 0; if (s->pb->eof_reached || smk->cur_frame >= smk->frames) return AVERROR_EOF; /* if we demuxed all streams, pass another frame */ if(smk->curstream < 0) { avio_seek(s->pb, smk->nextpos, 0); frame_size = smk->frm_size[smk->cur_frame] & (~3); flags = smk->frm_flags[smk->cur_frame]; /* handle palette change event */ if(flags & SMACKER_PAL){ int size, sz, t, off, j, pos; uint8_t *pal = smk->pal; uint8_t oldpal[768]; memcpy(oldpal, pal, 768); size = avio_r8(s->pb); size = size * 4 - 1; frame_size -= size; frame_size--; sz = 0; pos = avio_tell(s->pb) + size; while(sz < 256){ t = avio_r8(s->pb); if(t & 0x80){ /* skip palette entries */ sz += (t & 0x7F) + 1; pal += ((t & 0x7F) + 1) * 3; } else if(t & 0x40){ /* copy with offset */ off = avio_r8(s->pb); j = (t & 0x3F) + 1; if (off + j > 0x100) { av_log(s, AV_LOG_ERROR, \"Invalid palette update, offset=%d length=%d extends beyond palette size\\n\", off, j); return AVERROR_INVALIDDATA; } off *= 3; while(j-- && sz < 256) { *pal++ = oldpal[off + 0]; *pal++ = oldpal[off + 1]; *pal++ = oldpal[off + 2]; sz++; off += 3; } } else { /* new entries */ *pal++ = smk_pal[t]; *pal++ = smk_pal[avio_r8(s->pb) & 0x3F]; *pal++ = smk_pal[avio_r8(s->pb) & 0x3F]; sz++; } } avio_seek(s->pb, pos, 0); palchange |= 1; } flags >>= 1; smk->curstream = -1; /* if audio chunks are present, put them to stack and retrieve later */ for(i = 0; i < 7; i++) { if(flags & 1) { int size; uint8_t *tmpbuf; size = avio_rl32(s->pb) - 4; frame_size -= size; frame_size -= 4; smk->curstream++; tmpbuf = av_realloc(smk->bufs[smk->curstream], size); if (!tmpbuf) return AVERROR(ENOMEM); smk->bufs[smk->curstream] = tmpbuf; smk->buf_sizes[smk->curstream] = size; ret = avio_read(s->pb, smk->bufs[smk->curstream], size); if(ret != size) return AVERROR(EIO); smk->stream_id[smk->curstream] = smk->indexes[i]; } flags >>= 1; } if (frame_size < 0) return AVERROR_INVALIDDATA; if (av_new_packet(pkt, frame_size + 769)) return AVERROR(ENOMEM); if(smk->frm_size[smk->cur_frame] & 1) palchange |= 2; pkt->data[0] = palchange; memcpy(pkt->data + 1, smk->pal, 768); ret = avio_read(s->pb, pkt->data + 769, frame_size); if(ret != frame_size) return AVERROR(EIO); pkt->stream_index = smk->videoindex; pkt->pts = smk->cur_frame; pkt->size = ret + 769; smk->cur_frame++; smk->nextpos = avio_tell(s->pb); } else { if (av_new_packet(pkt, smk->buf_sizes[smk->curstream])) return AVERROR(ENOMEM); memcpy(pkt->data, smk->bufs[smk->curstream], smk->buf_sizes[smk->curstream]); pkt->size = smk->buf_sizes[smk->curstream]; pkt->stream_index = smk->stream_id[smk->curstream]; pkt->pts = smk->aud_pts[smk->curstream]; smk->aud_pts[smk->curstream] += AV_RL32(pkt->data); smk->curstream--; } return 0; }"} {"target": 1, "idx": 26754, "func": "static void parse_type_size(Visitor *v, const char *name, uint64_t *obj, Error 
**errp) { StringInputVisitor *siv = to_siv(v); Error *err = NULL; uint64_t val; if (siv->string) { parse_option_size(name, siv->string, &val, &err); } else { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : \"null\", \"size\"); return; } if (err) { error_propagate(errp, err); return; } *obj = val; }"} {"target": 0, "idx": 26762, "func": "static int parse_fragment(AVFormatContext *s, const char *filename, int64_t *start_ts, int64_t *duration, int64_t *moof_size, int64_t size) { AVIOContext *in; int ret; uint32_t len; if ((ret = avio_open2(&in, filename, AVIO_FLAG_READ, &s->interrupt_callback, NULL)) < 0) return ret; ret = AVERROR(EIO); *moof_size = avio_rb32(in); if (*moof_size < 8 || *moof_size > size) goto fail; if (avio_rl32(in) != MKTAG('m','o','o','f')) goto fail; len = avio_rb32(in); if (len > *moof_size) goto fail; if (avio_rl32(in) != MKTAG('m','f','h','d')) goto fail; avio_seek(in, len - 8, SEEK_CUR); avio_rb32(in); /* traf size */ if (avio_rl32(in) != MKTAG('t','r','a','f')) goto fail; while (avio_tell(in) < *moof_size) { uint32_t len = avio_rb32(in); uint32_t tag = avio_rl32(in); int64_t end = avio_tell(in) + len - 8; if (len < 8 || len >= *moof_size) goto fail; if (tag == MKTAG('u','u','i','d')) { const uint8_t tfxd[] = { 0x6d, 0x1d, 0x9b, 0x05, 0x42, 0xd5, 0x44, 0xe6, 0x80, 0xe2, 0x14, 0x1d, 0xaf, 0xf7, 0x57, 0xb2 }; uint8_t uuid[16]; avio_read(in, uuid, 16); if (!memcmp(uuid, tfxd, 16) && len >= 8 + 16 + 4 + 16) { avio_seek(in, 4, SEEK_CUR); *start_ts = avio_rb64(in); *duration = avio_rb64(in); ret = 0; break; } } avio_seek(in, end, SEEK_SET); } fail: avio_close(in); return ret; }"} {"target": 0, "idx": 26791, "func": "void acpi_pm1_cnt_init(ACPIREGS *ar, qemu_irq cmos_s3) { ar->pm1.cnt.cmos_s3 = cmos_s3; }"} {"target": 0, "idx": 26807, "func": "static void console_refresh(QemuConsole *s) { DisplaySurface *surface = qemu_console_surface(s); TextCell *c; int x, y, y1; if (s->ds->have_text) { s->text_x[0] = 0; s->text_y[0] = 0; s->text_x[1] = s->width - 1; s->text_y[1] = s->height - 1; s->cursor_invalidate = 1; } vga_fill_rect(s, 0, 0, surface_width(surface), surface_height(surface), color_table_rgb[0][COLOR_BLACK]); y1 = s->y_displayed; for (y = 0; y < s->height; y++) { c = s->cells + y1 * s->width; for (x = 0; x < s->width; x++) { vga_putcharxy(s, x, y, c->ch, &(c->t_attrib)); c++; } if (++y1 == s->total_height) { y1 = 0; } } console_show_cursor(s, 1); dpy_gfx_update(s, 0, 0, surface_width(surface), surface_height(surface)); }"} {"target": 0, "idx": 26808, "func": "static void qemu_init_child_watch(void) { struct sigaction act; sigchld_bh = qemu_bh_new(sigchld_bh_handler, NULL); memset(&act, 0, sizeof(act)); act.sa_handler = sigchld_handler; act.sa_flags = SA_NOCLDSTOP; sigaction(SIGCHLD, &act, NULL); }"} {"target": 0, "idx": 26818, "func": "int av_packet_split_side_data(AVPacket *pkt){ if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){ int i; unsigned int size, orig_pktsize = pkt->size; uint8_t *p; p = pkt->data + pkt->size - 8 - 5; for (i=1; ; i++){ size = AV_RB32(p); if (size>INT_MAX || p - pkt->data < size) return 0; if (p[4]&128) break; p-= size+5; } pkt->side_data = av_malloc(i * sizeof(*pkt->side_data)); if (!pkt->side_data) return AVERROR(ENOMEM); p= pkt->data + pkt->size - 8 - 5; for (i=0; ; i++){ size= AV_RB32(p); av_assert0(size<=INT_MAX && p - pkt->data >= size); pkt->side_data[i].data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); pkt->side_data[i].size = size; pkt->side_data[i].type = p[4]&127; if 
(!pkt->side_data[i].data) return AVERROR(ENOMEM); memcpy(pkt->side_data[i].data, p-size, size); pkt->size -= size + 5; if(p[4]&128) break; p-= size+5; } pkt->size -= 8; /* FFMIN() prevents overflow in case the packet wasn't allocated with * proper padding. * If the side data is smaller than the buffer padding size, the * remaining bytes should have already been filled with zeros by the * original packet allocation anyway. */ memset(pkt->data + pkt->size, 0, FFMIN(orig_pktsize - pkt->size, FF_INPUT_BUFFER_PADDING_SIZE)); pkt->side_data_elems = i+1; return 1; } return 0; }"} {"target": 1, "idx": 26856, "func": "int vnc_tls_client_setup(VncState *vs, int needX509Creds) { VNC_DEBUG(\"Do TLS setup\\n\"); if (vnc_tls_initialize() < 0) { VNC_DEBUG(\"Failed to init TLS\\n\"); vnc_client_error(vs); return -1; } if (vs->tls.session == NULL) { if (gnutls_init(&vs->tls.session, GNUTLS_SERVER) < 0) { vnc_client_error(vs); return -1; } if (gnutls_set_default_priority(vs->tls.session) < 0) { gnutls_deinit(vs->tls.session); vs->tls.session = NULL; vnc_client_error(vs); return -1; } if (vnc_set_gnutls_priority(vs->tls.session, needX509Creds) < 0) { gnutls_deinit(vs->tls.session); vs->tls.session = NULL; vnc_client_error(vs); return -1; } if (needX509Creds) { gnutls_certificate_server_credentials x509_cred = vnc_tls_initialize_x509_cred(vs->vd); if (!x509_cred) { gnutls_deinit(vs->tls.session); vs->tls.session = NULL; vnc_client_error(vs); return -1; } if (gnutls_credentials_set(vs->tls.session, GNUTLS_CRD_CERTIFICATE, x509_cred) < 0) { gnutls_deinit(vs->tls.session); vs->tls.session = NULL; gnutls_certificate_free_credentials(x509_cred); vnc_client_error(vs); return -1; } if (vs->vd->tls.x509verify) { VNC_DEBUG(\"Requesting a client certificate\\n\"); gnutls_certificate_server_set_request(vs->tls.session, GNUTLS_CERT_REQUEST); } } else { gnutls_anon_server_credentials_t anon_cred = vnc_tls_initialize_anon_cred(); if (!anon_cred) { gnutls_deinit(vs->tls.session); vs->tls.session = NULL; vnc_client_error(vs); return -1; } if (gnutls_credentials_set(vs->tls.session, GNUTLS_CRD_ANON, anon_cred) < 0) { gnutls_deinit(vs->tls.session); vs->tls.session = NULL; gnutls_anon_free_server_credentials(anon_cred); vnc_client_error(vs); return -1; } } gnutls_transport_set_ptr(vs->tls.session, (gnutls_transport_ptr_t)vs); gnutls_transport_set_push_function(vs->tls.session, vnc_tls_push); gnutls_transport_set_pull_function(vs->tls.session, vnc_tls_pull); } return 0; }"} {"target": 0, "idx": 26870, "func": "static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h, int intra ) { const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 || !intra ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]+1; tc[1] = tc0_table[index_a][bS[1]]+1; tc[2] = tc0_table[index_a][bS[2]]+1; tc[3] = tc0_table[index_a][bS[3]]+1; h->h264dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta); } }"} {"target": 0, "idx": 26875, "func": "static inline void bt_hci_lmp_acl_data(struct bt_hci_s *hci, uint16_t handle, const uint8_t *data, int start, int len) { struct hci_acl_hdr *pkt = (void *) hci->acl_buf; /* TODO: packet flags */ /* TODO: avoid memcpy'ing */ if (len + 
HCI_ACL_HDR_SIZE > sizeof(hci->acl_buf)) { fprintf(stderr, \"%s: can't take ACL packets %i bytes long\\n\", __FUNCTION__, len); return; } memcpy(hci->acl_buf + HCI_ACL_HDR_SIZE, data, len); pkt->handle = cpu_to_le16( acl_handle_pack(handle, start ? ACL_START : ACL_CONT)); pkt->dlen = cpu_to_le16(len); hci->info.acl_recv(hci->info.opaque, hci->acl_buf, len + HCI_ACL_HDR_SIZE); }"} {"target": 0, "idx": 26892, "func": "static void xilinx_enet_init(Object *obj) { XilinxAXIEnet *s = XILINX_AXI_ENET(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); Error *errp = NULL; object_property_add_link(obj, \"axistream-connected\", TYPE_STREAM_SLAVE, (Object **) &s->tx_dev, &errp); assert_no_error(errp); object_initialize(&s->rx_data_dev, TYPE_XILINX_AXI_ENET_DATA_STREAM); object_property_add_child(OBJECT(s), \"axistream-connected-target\", (Object *)&s->rx_data_dev, &errp); assert_no_error(errp); sysbus_init_irq(sbd, &s->irq); memory_region_init_io(&s->iomem, &enet_ops, s, \"enet\", 0x40000); sysbus_init_mmio(sbd, &s->iomem); }"} {"target": 0, "idx": 26893, "func": "bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, int len, bool is_write) { hwaddr l; uint8_t *ptr; uint64_t val; hwaddr addr1; MemoryRegion *mr; bool error = false; while (len > 0) { l = len; mr = address_space_translate(as, addr, &addr1, &l, is_write); if (is_write) { if (!memory_access_is_direct(mr, is_write)) { l = memory_access_size(mr, l, addr1); /* XXX: could force current_cpu to NULL to avoid potential bugs */ if (l == 4) { /* 32 bit write access */ val = ldl_p(buf); error |= io_mem_write(mr, addr1, val, 4); } else if (l == 2) { /* 16 bit write access */ val = lduw_p(buf); error |= io_mem_write(mr, addr1, val, 2); } else { /* 8 bit write access */ val = ldub_p(buf); error |= io_mem_write(mr, addr1, val, 1); } } else { addr1 += memory_region_get_ram_addr(mr); /* RAM case */ ptr = qemu_get_ram_ptr(addr1); memcpy(ptr, buf, l); invalidate_and_set_dirty(addr1, l); } } else { if (!memory_access_is_direct(mr, is_write)) { /* I/O case */ l = memory_access_size(mr, l, addr1); if (l == 4) { /* 32 bit read access */ error |= io_mem_read(mr, addr1, &val, 4); stl_p(buf, val); } else if (l == 2) { /* 16 bit read access */ error |= io_mem_read(mr, addr1, &val, 2); stw_p(buf, val); } else { /* 8 bit read access */ error |= io_mem_read(mr, addr1, &val, 1); stb_p(buf, val); } } else { /* RAM case */ ptr = qemu_get_ram_ptr(mr->ram_addr + addr1); memcpy(buf, ptr, l); } } len -= l; buf += l; addr += l; } return error; }"} {"target": 0, "idx": 26906, "func": "static int create_ppc_opcodes (CPUPPCState *env, const ppc_def_t *def) { opcode_t *opc, *start, *end; fill_new_table(env->opcodes, 0x40); if (&opc_start < &opc_end) { start = &opc_start; end = &opc_end; } else { start = &opc_end; end = &opc_start; } for (opc = start + 1; opc != end; opc++) { if ((opc->handler.type & def->insns_flags) != 0) { if (register_insn(env->opcodes, opc) < 0) { printf(\"*** ERROR initializing PowerPC instruction \" \"0x%02x 0x%02x 0x%02x\\n\", opc->opc1, opc->opc2, opc->opc3); return -1; } } } fix_opcode_tables(env->opcodes); fflush(stdout); fflush(stderr); return 0; }"} {"target": 1, "idx": 26933, "func": "Object *object_dynamic_cast_assert(Object *obj, const char *typename) { Object *inst; inst = object_dynamic_cast(obj, typename); if (!inst) { fprintf(stderr, \"Object %p is not an instance of type %s\\n\", obj, typename); abort(); } return inst; }"} {"target": 1, "idx": 26934, "func": "static inline int gen_intermediate_code_internal(CPUState *env, 
TranslationBlock *tb, int search_pc) { DisasContext dc1, *dc = &dc1; uint16_t *gen_opc_end; int j, lj; target_ulong pc_start; uint32_t next_page_start; /* generate intermediate code */ pc_start = tb->pc; dc->tb = tb; gen_opc_ptr = gen_opc_buf; gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; gen_opparam_ptr = gen_opparam_buf; dc->is_jmp = DISAS_NEXT; dc->pc = pc_start; dc->singlestep_enabled = env->singlestep_enabled; dc->condjmp = 0; dc->thumb = env->thumb; #if !defined(CONFIG_USER_ONLY) dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR; #endif next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; nb_gen_labels = 0; lj = -1; do { if (env->nb_breakpoints > 0) { for(j = 0; j < env->nb_breakpoints; j++) { if (env->breakpoints[j] == dc->pc) { gen_op_movl_T0_im((long)dc->pc); gen_op_movl_reg_TN[0][15](); gen_op_debug(); dc->is_jmp = DISAS_JUMP; break; } } } if (search_pc) { j = gen_opc_ptr - gen_opc_buf; if (lj < j) { lj++; while (lj < j) gen_opc_instr_start[lj++] = 0; } gen_opc_pc[lj] = dc->pc; gen_opc_instr_start[lj] = 1; } if (env->thumb) disas_thumb_insn(dc); else disas_arm_insn(env, dc); if (dc->condjmp && !dc->is_jmp) { gen_set_label(dc->condlabel); dc->condjmp = 0; } /* Terminate the TB on memory ops if watchpoints are present. */ /* FIXME: This should be replacd by the deterministic execution * IRQ raising bits. */ if (dc->is_mem && env->nb_watchpoints) break; /* Translation stops when a conditional branch is enoutered. * Otherwise the subsequent code could get translated several times. * Also stop translation when a page boundary is reached. This * ensures prefech aborts occur at the right place. */ } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && !env->singlestep_enabled && dc->pc < next_page_start); /* At this stage dc->condjmp will only be set when the skipped * instruction was a conditional branch, and the PC has already been * written. */ if (__builtin_expect(env->singlestep_enabled, 0)) { /* Make sure the pc is updated, and raise a debug exception. 
*/ if (dc->condjmp) { gen_op_debug(); gen_set_label(dc->condlabel); } if (dc->condjmp || !dc->is_jmp) { gen_op_movl_T0_im((long)dc->pc); gen_op_movl_reg_TN[0][15](); dc->condjmp = 0; } gen_op_debug(); } else { switch(dc->is_jmp) { case DISAS_NEXT: gen_goto_tb(dc, 1, dc->pc); break; default: case DISAS_JUMP: case DISAS_UPDATE: /* indicate that the hash table must be used to find the next TB */ gen_op_movl_T0_0(); gen_op_exit_tb(); break; case DISAS_TB_JUMP: /* nothing more to generate */ break; } if (dc->condjmp) { gen_set_label(dc->condlabel); gen_goto_tb(dc, 1, dc->pc); dc->condjmp = 0; } } *gen_opc_ptr = INDEX_op_end; #ifdef DEBUG_DISAS if (loglevel & CPU_LOG_TB_IN_ASM) { fprintf(logfile, \"----------------\\n\"); fprintf(logfile, \"IN: %s\\n\", lookup_symbol(pc_start)); target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb); fprintf(logfile, \"\\n\"); if (loglevel & (CPU_LOG_TB_OP)) { fprintf(logfile, \"OP:\\n\"); dump_ops(gen_opc_buf, gen_opparam_buf); fprintf(logfile, \"\\n\"); } } #endif if (search_pc) { j = gen_opc_ptr - gen_opc_buf; lj++; while (lj <= j) gen_opc_instr_start[lj++] = 0; tb->size = 0; } else { tb->size = dc->pc - pc_start; } return 0; }"} {"target": 0, "idx": 26953, "func": "static void qcow_aio_read_cb(void *opaque, int ret) { QCowAIOCB *acb = opaque; BlockDriverState *bs = acb->common.bs; BDRVQcowState *s = bs->opaque; int index_in_cluster, n1; acb->hd_aiocb = NULL; if (ret < 0) goto done; /* post process the read buffer */ if (!acb->cluster_offset) { /* nothing to do */ } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) { /* nothing to do */ } else { if (s->crypt_method) { qcow2_encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf, acb->cur_nr_sectors, 0, &s->aes_decrypt_key); } } acb->remaining_sectors -= acb->cur_nr_sectors; acb->sector_num += acb->cur_nr_sectors; acb->buf += acb->cur_nr_sectors * 512; if (acb->remaining_sectors == 0) { /* request completed */ ret = 0; goto done; } /* prepare next AIO request */ acb->cur_nr_sectors = acb->remaining_sectors; acb->cluster_offset = qcow2_get_cluster_offset(bs, acb->sector_num << 9, &acb->cur_nr_sectors); index_in_cluster = acb->sector_num & (s->cluster_sectors - 1); if (!acb->cluster_offset) { if (bs->backing_hd) { /* read from the base image */ n1 = qcow2_backing_read1(bs->backing_hd, acb->sector_num, acb->buf, acb->cur_nr_sectors); if (n1 > 0) { acb->hd_iov.iov_base = (void *)acb->buf; acb->hd_iov.iov_len = acb->cur_nr_sectors * 512; qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1); BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num, &acb->hd_qiov, acb->cur_nr_sectors, qcow_aio_read_cb, acb); if (acb->hd_aiocb == NULL) goto done; } else { ret = qcow_schedule_bh(qcow_aio_read_bh, acb); if (ret < 0) goto done; } } else { /* Note: in this case, no need to wait */ memset(acb->buf, 0, 512 * acb->cur_nr_sectors); ret = qcow_schedule_bh(qcow_aio_read_bh, acb); if (ret < 0) goto done; } } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) { /* add AIO support for compressed blocks ? 
*/ if (qcow2_decompress_cluster(bs, acb->cluster_offset) < 0) goto done; memcpy(acb->buf, s->cluster_cache + index_in_cluster * 512, 512 * acb->cur_nr_sectors); ret = qcow_schedule_bh(qcow_aio_read_bh, acb); if (ret < 0) goto done; } else { if ((acb->cluster_offset & 511) != 0) { ret = -EIO; goto done; } acb->hd_iov.iov_base = (void *)acb->buf; acb->hd_iov.iov_len = acb->cur_nr_sectors * 512; qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1); BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); acb->hd_aiocb = bdrv_aio_readv(bs->file, (acb->cluster_offset >> 9) + index_in_cluster, &acb->hd_qiov, acb->cur_nr_sectors, qcow_aio_read_cb, acb); if (acb->hd_aiocb == NULL) { ret = -EIO; goto done; } } return; done: if (acb->qiov->niov > 1) { qemu_iovec_from_buffer(acb->qiov, acb->orig_buf, acb->qiov->size); qemu_vfree(acb->orig_buf); } acb->common.cb(acb->common.opaque, ret); qemu_aio_release(acb); }"} {"target": 0, "idx": 26965, "func": "opts_end_list(Visitor *v, Error **errp) { OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v); assert(ov->list_mode == LM_STARTED || ov->list_mode == LM_IN_PROGRESS); ov->repeated_opts = NULL; ov->list_mode = LM_NONE; }"} {"target": 0, "idx": 26969, "func": "static CaptureVoiceOut *audio_pcm_capture_find_specific ( AudioState *s, audsettings_t *as ) { CaptureVoiceOut *cap; for (cap = s->cap_head.lh_first; cap; cap = cap->entries.le_next) { if (audio_pcm_info_eq (&cap->hw.info, as)) { return cap; } } return NULL; }"} {"target": 1, "idx": 26983, "func": "AVBufferRef *av_buffer_pool_get(AVBufferPool *pool) { AVBufferRef *ret; BufferPoolEntry *buf; /* check whether the pool is empty */ buf = get_pool(pool); if (!buf) return pool_alloc_buffer(pool); /* keep the first entry, return the rest of the list to the pool */ add_to_pool(buf->next); buf->next = NULL; ret = av_buffer_create(buf->data, pool->size, pool_release_buffer, buf, 0); if (!ret) { add_to_pool(buf); return NULL; } avpriv_atomic_int_add_and_fetch(&pool->refcount, 1); return ret; }"} {"target": 1, "idx": 26984, "func": "static void QEMU_NORETURN force_sig(int target_sig) { CPUState *cpu = thread_cpu; CPUArchState *env = cpu->env_ptr; TaskState *ts = (TaskState *)cpu->opaque; int host_sig, core_dumped = 0; struct sigaction act; host_sig = target_to_host_signal(target_sig); trace_user_force_sig(env, target_sig, host_sig); gdb_signalled(env, target_sig); /* dump core if supported by target binary format */ if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) { stop_all_tasks(); core_dumped = ((*ts->bprm->core_dump)(target_sig, env) == 0); } if (core_dumped) { /* we already dumped the core of target process, we don't want * a coredump of qemu itself */ struct rlimit nodump; getrlimit(RLIMIT_CORE, &nodump); nodump.rlim_cur=0; setrlimit(RLIMIT_CORE, &nodump); (void) fprintf(stderr, \"qemu: uncaught target signal %d (%s) - %s\\n\", target_sig, strsignal(host_sig), \"core dumped\" ); } /* The proper exit code for dying from an uncaught signal is * -<signal>. The kernel doesn't allow exit() or _exit() to pass * a negative value. To get the proper exit code we need to * actually die from an uncaught signal. Here the default signal * handler is installed, we send ourself a signal and we wait for * it to arrive. */ sigfillset(&act.sa_mask); act.sa_handler = SIG_DFL; act.sa_flags = 0; sigaction(host_sig, &act, NULL); /* For some reason raise(host_sig) doesn't send the signal when * statically linked on x86-64. 
kill(getpid(), host_sig); /* Make sure the signal isn't masked (just reuse the mask inside of act) */ sigdelset(&act.sa_mask, host_sig); sigsuspend(&act.sa_mask); /* unreachable */ abort(); }"} {"target": 1, "idx": 26997, "func": "static int scale_vector(int16_t *dst, const int16_t *vector, int length) { int bits, max = 0; int i; for (i = 0; i < length; i++) max |= FFABS(vector[i]); bits = normalize_bits(max, 15); if (bits == 15) for (i = 0; i < length; i++) dst[i] = vector[i] * 0x7fff >> 3; else for (i = 0; i < length; i++) dst[i] = vector[i] << bits >> 3; return bits - 3; }"} {"target": 0, "idx": 27008, "func": "static int virtio_ccw_hcall_notify(const uint64_t *args) { uint64_t subch_id = args[0]; uint64_t queue = args[1]; SubchDev *sch; int cssid, ssid, schid, m; if (ioinst_disassemble_sch_ident(subch_id, &m, &cssid, &ssid, &schid)) { return -EINVAL; } sch = css_find_subch(m, cssid, ssid, schid); if (!sch || !css_subch_visible(sch)) { return -EINVAL; } if (queue >= VIRTIO_PCI_QUEUE_MAX) { return -EINVAL; } virtio_queue_notify(virtio_ccw_get_vdev(sch), queue); return 0; }"} {"target": 0, "idx": 27025, "func": "void prepare_grab(void) { int has_video, has_audio, i, j; AVFormatContext *oc; AVFormatContext *ic; AVFormatParameters ap1, *ap = &ap1; /* see if audio/video inputs are needed */ has_video = 0; has_audio = 0; memset(ap, 0, sizeof(*ap)); for(j=0;j<nb_output_files;j++) { oc = output_files[j]; for(i=0;i<oc->nb_streams;i++) { AVCodecContext *enc = &oc->streams[i]->codec; switch(enc->codec_type) { case CODEC_TYPE_AUDIO: if (enc->sample_rate > ap->sample_rate) ap->sample_rate = enc->sample_rate; if (enc->channels > ap->channels) ap->channels = enc->channels; has_audio = 1; break; case CODEC_TYPE_VIDEO: if (enc->width > ap->width) ap->width = enc->width; if (enc->height > ap->height) ap->height = enc->height; if (enc->frame_rate > ap->frame_rate) ap->frame_rate = enc->frame_rate; has_video = 1; break; default: abort(); } } } if (has_video == 0 && has_audio == 0) { fprintf(stderr, \"Output file must have at least one audio or video stream\\n\"); exit(1); } if (has_video) { AVInputFormat *fmt1; fmt1 = av_find_input_format(\"video_grab_device\"); if (av_open_input_file(&ic, \"\", fmt1, 0, ap) < 0) { fprintf(stderr, \"Could not find video grab device\\n\"); exit(1); } /* by now video grab has one stream */ ic->streams[0]->r_frame_rate = ap->frame_rate; input_files[nb_input_files] = ic; dump_format(ic, nb_input_files, v4l_device, 0); nb_input_files++; } if (has_audio) { AVInputFormat *fmt1; fmt1 = av_find_input_format(\"audio_device\"); if (av_open_input_file(&ic, \"\", fmt1, 0, ap) < 0) { fprintf(stderr, \"Could not find audio grab device\\n\"); exit(1); } input_files[nb_input_files] = ic; dump_format(ic, nb_input_files, audio_device, 0); nb_input_files++; } }"} {"target": 0, "idx": 27026, "func": "static void ipvideo_decode_opcodes(IpvideoContext *s) { int x, y; unsigned char opcode; int ret; static int frame = 0; GetBitContext gb; debug_interplay(\"------------------ frame %d\\n\", frame); frame++; /* this is PAL8, so make the palette available */ memcpy(s->current_frame.data[1], s->avctx->palctrl->palette, PALETTE_COUNT * 4); s->stride = s->current_frame.linesize[0]; s->stream_ptr = s->buf + 14; /* data starts 14 bytes in */ s->stream_end = s->buf + s->size; s->line_inc = s->stride - 8; s->upper_motion_limit_offset = (s->avctx->height - 8) * s->stride + s->avctx->width - 8; init_get_bits(&gb, s->decoding_map, s->decoding_map_size * 8); for (y = 0; y < (s->stride * s->avctx->height); y += s->stride * 8) { for (x = y; x < y + s->avctx->width; x 
+= 8) { opcode = get_bits(&gb, 4); debug_interplay(\" block @ (%3d, %3d): encoding 0x%X, data ptr @ %p\\n\", x - y, y / s->stride, opcode, s->stream_ptr); s->pixel_ptr = s->current_frame.data[0] + x; ret = ipvideo_decode_block[opcode](s); if (ret != 0) { av_log(s->avctx, AV_LOG_ERROR, \" Interplay video: decode problem on frame %d, @ block (%d, %d)\\n\", frame, x - y, y / s->stride); return; } } } if (s->stream_end - s->stream_ptr > 1) { av_log(s->avctx, AV_LOG_ERROR, \" Interplay video: decode finished with %td bytes left over\\n\", s->stream_end - s->stream_ptr); } }"} {"target": 1, "idx": 27036, "func": "static inline void cris_fidx_i(unsigned int x) { register unsigned int v asm(\"$r10\") = x; asm (\"fidxi\\t[%0]\\n\" : : \"r\" (v) ); }"} {"target": 0, "idx": 27060, "func": "static void test_nested_struct_list(gconstpointer opaque) { TestArgs *args = (TestArgs *) opaque; const SerializeOps *ops = args->ops; UserDefNestedList *listp = NULL, *tmp, *tmp_copy, *listp_copy = NULL; Error *err = NULL; void *serialize_data; int i = 0; for (i = 0; i < 8; i++) { tmp = g_malloc0(sizeof(UserDefNestedList)); tmp->value = nested_struct_create(); tmp->next = listp; listp = tmp; } ops->serialize(listp, &serialize_data, visit_nested_struct_list, &err); ops->deserialize((void **)&listp_copy, serialize_data, visit_nested_struct_list, &err); g_assert(err == NULL); tmp = listp; tmp_copy = listp_copy; while (listp_copy) { g_assert(listp); nested_struct_compare(listp->value, listp_copy->value); listp = listp->next; listp_copy = listp_copy->next; } qapi_free_UserDefNestedList(tmp); qapi_free_UserDefNestedList(tmp_copy); ops->cleanup(serialize_data); g_free(args); }"} {"target": 0, "idx": 27067, "func": "static void handle_char(CCaptionSubContext *ctx, char hi, char lo, int64_t pts) { struct Screen *screen = get_writing_screen(ctx); char *row = screen->characters[ctx->cursor_row]; int ret; SET_FLAG(screen->row_used,ctx->cursor_row); ret = write_char(ctx, row, ctx->cursor_column, hi); if( ret == 0 ) ctx->cursor_column++; if(lo) { ret = write_char(ctx, row, ctx->cursor_column, lo); if ( ret == 0 ) ctx->cursor_column++; } write_char(ctx, row, ctx->cursor_column, 0); /* reset prev command since character can repeat */ ctx->prev_cmd[0] = 0; ctx->prev_cmd[1] = 0; if (lo) av_dlog(ctx, \"(%c,%c)\\n\",hi,lo); else av_dlog(ctx, \"(%c)\\n\",hi); }"} {"target": 1, "idx": 27069, "func": "int qemu_cpu_is_self(void *env) { return 1; }"} {"target": 1, "idx": 27072, "func": "BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *iov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { return bdrv_aio_rw_vector(bs, sector_num, iov, nb_sectors, cb, opaque, 1); }"} {"target": 0, "idx": 27095, "func": "static void omap_mcbsp_writeh(void *opaque, target_phys_addr_t addr, uint32_t value) { struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) opaque; int offset = addr & OMAP_MPUI_REG_MASK; switch (offset) { case 0x00: /* DRR2 */ case 0x02: /* DRR1 */ OMAP_RO_REG(addr); return; case 0x04: /* DXR2 */ if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */ return; /* Fall through. 
*/ case 0x06: /* DXR1 */ if (s->tx_req > 1) { s->tx_req -= 2; if (s->codec && s->codec->cts) { s->codec->out.fifo[s->codec->out.len ++] = (value >> 8) & 0xff; s->codec->out.fifo[s->codec->out.len ++] = (value >> 0) & 0xff; } if (s->tx_req < 2) omap_mcbsp_tx_done(s); } else printf(\"%s: Tx FIFO overrun\\n\", __FUNCTION__); return; case 0x08: /* SPCR2 */ s->spcr[1] &= 0x0002; s->spcr[1] |= 0x03f9 & value; s->spcr[1] |= 0x0004 & (value << 2); /* XEMPTY := XRST */ if (~value & 1) /* XRST */ s->spcr[1] &= ~6; omap_mcbsp_req_update(s); return; case 0x0a: /* SPCR1 */ s->spcr[0] &= 0x0006; s->spcr[0] |= 0xf8f9 & value; if (value & (1 << 15)) /* DLB */ printf(\"%s: Digital Loopback mode enable attempt\\n\", __FUNCTION__); if (~value & 1) { /* RRST */ s->spcr[0] &= ~6; s->rx_req = 0; omap_mcbsp_rx_done(s); } omap_mcbsp_req_update(s); return; case 0x0c: /* RCR2 */ s->rcr[1] = value & 0xffff; return; case 0x0e: /* RCR1 */ s->rcr[0] = value & 0x7fe0; return; case 0x10: /* XCR2 */ s->xcr[1] = value & 0xffff; return; case 0x12: /* XCR1 */ s->xcr[0] = value & 0x7fe0; return; case 0x14: /* SRGR2 */ s->srgr[1] = value & 0xffff; omap_mcbsp_req_update(s); return; case 0x16: /* SRGR1 */ s->srgr[0] = value & 0xffff; omap_mcbsp_req_update(s); return; case 0x18: /* MCR2 */ s->mcr[1] = value & 0x03e3; if (value & 3) /* XMCM */ printf(\"%s: Tx channel selection mode enable attempt\\n\", __FUNCTION__); return; case 0x1a: /* MCR1 */ s->mcr[0] = value & 0x03e1; if (value & 1) /* RMCM */ printf(\"%s: Rx channel selection mode enable attempt\\n\", __FUNCTION__); return; case 0x1c: /* RCERA */ s->rcer[0] = value & 0xffff; return; case 0x1e: /* RCERB */ s->rcer[1] = value & 0xffff; return; case 0x20: /* XCERA */ s->xcer[0] = value & 0xffff; return; case 0x22: /* XCERB */ s->xcer[1] = value & 0xffff; return; case 0x24: /* PCR0 */ s->pcr = value & 0x7faf; return; case 0x26: /* RCERC */ s->rcer[2] = value & 0xffff; return; case 0x28: /* RCERD */ s->rcer[3] = value & 0xffff; return; case 0x2a: /* XCERC */ s->xcer[2] = value & 0xffff; return; case 0x2c: /* XCERD */ s->xcer[3] = value & 0xffff; return; case 0x2e: /* RCERE */ s->rcer[4] = value & 0xffff; return; case 0x30: /* RCERF */ s->rcer[5] = value & 0xffff; return; case 0x32: /* XCERE */ s->xcer[4] = value & 0xffff; return; case 0x34: /* XCERF */ s->xcer[5] = value & 0xffff; return; case 0x36: /* RCERG */ s->rcer[6] = value & 0xffff; return; case 0x38: /* RCERH */ s->rcer[7] = value & 0xffff; return; case 0x3a: /* XCERG */ s->xcer[6] = value & 0xffff; return; case 0x3c: /* XCERH */ s->xcer[7] = value & 0xffff; return; } OMAP_BAD_REG(addr); }"} {"target": 0, "idx": 27102, "func": "static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0) { DECLARE_ALIGNED_8(uint64_t, tmp0[2]); __asm__ volatile( \"movq (%1,%3), %%mm0 \\n\\t\" //p1 \"movq (%1,%3,2), %%mm1 \\n\\t\" //p0 \"movq (%2), %%mm2 \\n\\t\" //q0 \"movq (%2,%3), %%mm3 \\n\\t\" //q1 H264_DEBLOCK_MASK(%6, %7) \"movd %5, %%mm4 \\n\\t\" \"punpcklbw %%mm4, %%mm4 \\n\\t\" \"punpcklwd %%mm4, %%mm4 \\n\\t\" \"pcmpeqb %%mm3, %%mm3 \\n\\t\" \"movq %%mm4, %%mm6 \\n\\t\" \"pcmpgtb %%mm3, %%mm4 \\n\\t\" \"movq %%mm6, 8+%0 \\n\\t\" \"pand %%mm4, %%mm7 \\n\\t\" \"movq %%mm7, %0 \\n\\t\" /* filter p1 */ \"movq (%1), %%mm3 \\n\\t\" //p2 DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1 \"pand %%mm7, %%mm6 \\n\\t\" // mask & |p2-p0|beta-1 \"pand %0, %%mm6 \\n\\t\" \"movq 8+%0, %%mm5 \\n\\t\" // can be merged with the and below but is slower then \"pand %%mm6, %%mm5 \\n\\t\" 
\"psubb %%mm6, %%mm7 \\n\\t\" \"movq (%2,%3), %%mm3 \\n\\t\" H264_DEBLOCK_Q1(%%mm3, %%mm4, \"(%2,%3,2)\", \"(%2,%3)\", %%mm5, %%mm6) /* filter p0, q0 */ H264_DEBLOCK_P0_Q0(%8, unused) \"movq %%mm1, (%1,%3,2) \\n\\t\" \"movq %%mm2, (%2) \\n\\t\" : \"=m\"(*tmp0) : \"r\"(pix-3*stride), \"r\"(pix), \"r\"((x86_reg)stride), \"m\"(*tmp0/*unused*/), \"m\"(*(uint32_t*)tc0), \"m\"(alpha1), \"m\"(beta1), \"m\"(ff_bone) ); }"} {"target": 0, "idx": 27104, "func": "static int filter_frame(AVFilterLink *inlink, AVFrame *buf) { AVFilterContext *ctx = inlink->dst; AVFilterLink *outlink = ctx->outputs[0]; DeflickerContext *s = ctx->priv; AVDictionary **metadata; AVFrame *out, *in; float f; int y; if (s->q.available < s->size && !s->eof) { s->luminance[s->available] = s->calc_avgy(ctx, buf); ff_bufqueue_add(ctx, &s->q, buf); s->available++; return 0; } in = ff_bufqueue_peek(&s->q, 0); out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&buf); return AVERROR(ENOMEM); } s->get_factor(ctx, &f); s->deflicker(ctx, in->data[0], in->linesize[0], out->data[0], out->linesize[0], outlink->w, outlink->h, f); for (y = 1; y < s->nb_planes; y++) { av_image_copy_plane(out->data[y], out->linesize[y], in->data[y], in->linesize[y], s->planewidth[y] * (1 + (s->depth > 8)), s->planeheight[y]); } av_frame_copy_props(out, in); metadata = &out->metadata; if (metadata) { uint8_t value[128]; snprintf(value, sizeof(value), \"%f\", s->luminance[0]); av_dict_set(metadata, \"lavfi.deflicker.luminance\", value, 0); snprintf(value, sizeof(value), \"%f\", s->luminance[0] * f); av_dict_set(metadata, \"lavfi.deflicker.new_luminance\", value, 0); snprintf(value, sizeof(value), \"%f\", f - 1.0f); av_dict_set(metadata, \"lavfi.deflicker.relative_change\", value, 0); } in = ff_bufqueue_get(&s->q); av_frame_free(&in); memmove(&s->luminance[0], &s->luminance[1], sizeof(*s->luminance) * (s->size - 1)); s->luminance[s->available - 1] = s->calc_avgy(ctx, buf); ff_bufqueue_add(ctx, &s->q, buf); return ff_filter_frame(outlink, out); }"} {"target": 1, "idx": 27115, "func": "static DriveInfo *blockdev_init(QDict *bs_opts, BlockInterfaceType type, Error **errp) { const char *buf; const char *file = NULL; const char *serial; int ro = 0; int bdrv_flags = 0; int on_read_error, on_write_error; DriveInfo *dinfo; ThrottleConfig cfg; int snapshot = 0; bool copy_on_read; int ret; Error *error = NULL; QemuOpts *opts; const char *id; bool has_driver_specific_opts; BlockDriver *drv = NULL; /* Check common options by copying from bs_opts to opts, all other options * stay in bs_opts for processing by bdrv_open(). 
*/ id = qdict_get_try_str(bs_opts, \"id\"); opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, &error); if (error_is_set(&error)) { error_propagate(errp, error); return NULL; } qemu_opts_absorb_qdict(opts, bs_opts, &error); if (error_is_set(&error)) { error_propagate(errp, error); return NULL; } if (id) { qdict_del(bs_opts, \"id\"); } has_driver_specific_opts = !!qdict_size(bs_opts); /* extract parameters */ snapshot = qemu_opt_get_bool(opts, \"snapshot\", 0); ro = qemu_opt_get_bool(opts, \"read-only\", 0); copy_on_read = qemu_opt_get_bool(opts, \"copy-on-read\", false); file = qemu_opt_get(opts, \"file\"); serial = qemu_opt_get(opts, \"serial\"); if ((buf = qemu_opt_get(opts, \"discard\")) != NULL) { if (bdrv_parse_discard_flags(buf, &bdrv_flags) != 0) { error_setg(errp, \"invalid discard option\"); return NULL; } } if (qemu_opt_get_bool(opts, \"cache.writeback\", true)) { bdrv_flags |= BDRV_O_CACHE_WB; } if (qemu_opt_get_bool(opts, \"cache.direct\", false)) { bdrv_flags |= BDRV_O_NOCACHE; } if (qemu_opt_get_bool(opts, \"cache.no-flush\", false)) { bdrv_flags |= BDRV_O_NO_FLUSH; } #ifdef CONFIG_LINUX_AIO if ((buf = qemu_opt_get(opts, \"aio\")) != NULL) { if (!strcmp(buf, \"native\")) { bdrv_flags |= BDRV_O_NATIVE_AIO; } else if (!strcmp(buf, \"threads\")) { /* this is the default */ } else { error_setg(errp, \"invalid aio option\"); return NULL; } } #endif if ((buf = qemu_opt_get(opts, \"format\")) != NULL) { if (is_help_option(buf)) { error_printf(\"Supported formats:\"); bdrv_iterate_format(bdrv_format_print, NULL); error_printf(\"\\n\"); return NULL; } drv = bdrv_find_format(buf); if (!drv) { error_setg(errp, \"'%s' invalid format\", buf); return NULL; } } /* disk I/O throttling */ memset(&cfg, 0, sizeof(cfg)); cfg.buckets[THROTTLE_BPS_TOTAL].avg = qemu_opt_get_number(opts, \"throttling.bps-total\", 0); cfg.buckets[THROTTLE_BPS_READ].avg = qemu_opt_get_number(opts, \"throttling.bps-read\", 0); cfg.buckets[THROTTLE_BPS_WRITE].avg = qemu_opt_get_number(opts, \"throttling.bps-write\", 0); cfg.buckets[THROTTLE_OPS_TOTAL].avg = qemu_opt_get_number(opts, \"throttling.iops-total\", 0); cfg.buckets[THROTTLE_OPS_READ].avg = qemu_opt_get_number(opts, \"throttling.iops-read\", 0); cfg.buckets[THROTTLE_OPS_WRITE].avg = qemu_opt_get_number(opts, \"throttling.iops-write\", 0); cfg.buckets[THROTTLE_BPS_TOTAL].max = qemu_opt_get_number(opts, \"throttling.bps-total-max\", 0); cfg.buckets[THROTTLE_BPS_READ].max = qemu_opt_get_number(opts, \"throttling.bps-read-max\", 0); cfg.buckets[THROTTLE_BPS_WRITE].max = qemu_opt_get_number(opts, \"throttling.bps-write-max\", 0); cfg.buckets[THROTTLE_OPS_TOTAL].max = qemu_opt_get_number(opts, \"throttling.iops-total-max\", 0); cfg.buckets[THROTTLE_OPS_READ].max = qemu_opt_get_number(opts, \"throttling.iops-read-max\", 0); cfg.buckets[THROTTLE_OPS_WRITE].max = qemu_opt_get_number(opts, \"throttling.iops-write-max\", 0); cfg.op_size = qemu_opt_get_number(opts, \"throttling.iops-size\", 0); if (!check_throttle_config(&cfg, &error)) { error_propagate(errp, error); return NULL; } on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; if ((buf = qemu_opt_get(opts, \"werror\")) != NULL) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO && type != IF_NONE) { error_setg(errp, \"werror is not supported by this bus type\"); return NULL; } on_write_error = parse_block_error_action(buf, 0, &error); if (error_is_set(&error)) { error_propagate(errp, error); return NULL; } } on_read_error = BLOCKDEV_ON_ERROR_REPORT; if ((buf = qemu_opt_get(opts, \"rerror\")) != NULL) { if (type 
!= IF_IDE && type != IF_VIRTIO && type != IF_SCSI && type != IF_NONE) { error_report(\"rerror is not supported by this bus type\"); return NULL; } on_read_error = parse_block_error_action(buf, 1, &error); if (error_is_set(&error)) { error_propagate(errp, error); return NULL; } } /* init */ dinfo = g_malloc0(sizeof(*dinfo)); dinfo->id = g_strdup(qemu_opts_id(opts)); dinfo->bdrv = bdrv_new(dinfo->id); dinfo->bdrv->open_flags = snapshot ? BDRV_O_SNAPSHOT : 0; dinfo->bdrv->read_only = ro; dinfo->type = type; dinfo->refcount = 1; if (serial != NULL) { dinfo->serial = g_strdup(serial); } QTAILQ_INSERT_TAIL(&drives, dinfo, next); bdrv_set_on_error(dinfo->bdrv, on_read_error, on_write_error); /* disk I/O throttling */ if (throttle_enabled(&cfg)) { bdrv_io_limits_enable(dinfo->bdrv); bdrv_set_io_limits(dinfo->bdrv, &cfg); } if (!file || !*file) { if (has_driver_specific_opts) { file = NULL; } else { return dinfo; } } if (snapshot) { /* always use cache=unsafe with snapshot */ bdrv_flags &= ~BDRV_O_CACHE_MASK; bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH); } if (copy_on_read) { bdrv_flags |= BDRV_O_COPY_ON_READ; } if (runstate_check(RUN_STATE_INMIGRATE)) { bdrv_flags |= BDRV_O_INCOMING; } bdrv_flags |= ro ? 0 : BDRV_O_RDWR; QINCREF(bs_opts); ret = bdrv_open(dinfo->bdrv, file, bs_opts, bdrv_flags, drv, &error); if (ret < 0) { error_setg(errp, \"could not open disk image %s: %s\", file ?: dinfo->id, error_get_pretty(error)); error_free(error); goto err; } if (bdrv_key_required(dinfo->bdrv)) autostart = 0; QDECREF(bs_opts); qemu_opts_del(opts); return dinfo; err: qemu_opts_del(opts); QDECREF(bs_opts); bdrv_unref(dinfo->bdrv); g_free(dinfo->id); QTAILQ_REMOVE(&drives, dinfo, next); g_free(dinfo); return NULL; }"} {"target": 0, "idx": 27131, "func": "inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2, int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter, int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode, int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter, int32_t *mmx2FilterPos) { if(srcFormat==IMGFMT_YUY2) { RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); src1= formatConvBuffer; src2= formatConvBuffer+2048; } else if(srcFormat==IMGFMT_UYVY) { RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); src1= formatConvBuffer; src2= formatConvBuffer+2048; } else if(srcFormat==IMGFMT_BGR32) { RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); src1= formatConvBuffer; src2= formatConvBuffer+2048; } else if(srcFormat==IMGFMT_BGR24) { RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); src1= formatConvBuffer; src2= formatConvBuffer+2048; } else if(srcFormat==IMGFMT_BGR16) { RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); src1= formatConvBuffer; src2= formatConvBuffer+2048; } else if(srcFormat==IMGFMT_BGR15) { RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); src1= formatConvBuffer; src2= formatConvBuffer+2048; } else if(srcFormat==IMGFMT_RGB32) { RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); src1= formatConvBuffer; src2= formatConvBuffer+2048; } else if(srcFormat==IMGFMT_RGB24) { RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); src1= formatConvBuffer; src2= formatConvBuffer+2048; } else if(isGray(srcFormat)) { return; } #ifdef HAVE_MMX // use the new MMX scaler if the mmx2 can't be used (its 
faster than the x86asm one) if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed)) #else if(!(flags&SWS_FAST_BILINEAR)) #endif { RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); } else // Fast Bilinear upscale / crap downscale { #if defined(ARCH_X86) || defined(ARCH_X86_64) #ifdef HAVE_MMX2 int i; if(canMMX2BeUsed) { asm volatile( \"pxor %%mm7, %%mm7 \\n\\t\" \"mov %0, %%\"REG_c\" \\n\\t\" \"mov %1, %%\"REG_D\" \\n\\t\" \"mov %2, %%\"REG_d\" \\n\\t\" \"mov %3, %%\"REG_b\" \\n\\t\" \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\" // i PREFETCH\" (%%\"REG_c\") \\n\\t\" PREFETCH\" 32(%%\"REG_c\") \\n\\t\" PREFETCH\" 64(%%\"REG_c\") \\n\\t\" #ifdef ARCH_X86_64 #define FUNNY_UV_CODE \\ \"movl (%%\"REG_b\"), %%esi \\n\\t\"\\ \"call *%4 \\n\\t\"\\ \"movl (%%\"REG_b\", %%\"REG_a\"), %%esi\\n\\t\"\\ \"add %%\"REG_S\", %%\"REG_c\" \\n\\t\"\\ \"add %%\"REG_a\", %%\"REG_D\" \\n\\t\"\\ \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\"\\ #else #define FUNNY_UV_CODE \\ \"movl (%%\"REG_b\"), %%esi \\n\\t\"\\ \"call *%4 \\n\\t\"\\ \"addl (%%\"REG_b\", %%\"REG_a\"), %%\"REG_c\"\\n\\t\"\\ \"add %%\"REG_a\", %%\"REG_D\" \\n\\t\"\\ \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\"\\ #endif FUNNY_UV_CODE FUNNY_UV_CODE FUNNY_UV_CODE FUNNY_UV_CODE \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\" // i \"mov %5, %%\"REG_c\" \\n\\t\" // src \"mov %1, %%\"REG_D\" \\n\\t\" // buf1 \"add $4096, %%\"REG_D\" \\n\\t\" PREFETCH\" (%%\"REG_c\") \\n\\t\" PREFETCH\" 32(%%\"REG_c\") \\n\\t\" PREFETCH\" 64(%%\"REG_c\") \\n\\t\" FUNNY_UV_CODE FUNNY_UV_CODE FUNNY_UV_CODE FUNNY_UV_CODE :: \"m\" (src1), \"m\" (dst), \"m\" (mmx2Filter), \"m\" (mmx2FilterPos), \"m\" (funnyUVCode), \"m\" (src2) : \"%\"REG_a, \"%\"REG_b, \"%\"REG_c, \"%\"REG_d, \"%\"REG_S, \"%\"REG_D ); for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) { // printf(\"%d %d %d\\n\", dstWidth, i, srcW); dst[i] = src1[srcW-1]*128; dst[i+2048] = src2[srcW-1]*128; } } else { #endif long xInc_shr16 = (long) (xInc >> 16); uint16_t xInc_mask = xInc & 0xffff; asm volatile( \"xor %%\"REG_a\", %%\"REG_a\" \\n\\t\" // i \"xor %%\"REG_b\", %%\"REG_b\" \\n\\t\" // xx \"xorl %%ecx, %%ecx \\n\\t\" // 2*xalpha ASMALIGN16 \"1: \\n\\t\" \"mov %0, %%\"REG_S\" \\n\\t\" \"movzbl (%%\"REG_S\", %%\"REG_b\"), %%edi \\n\\t\" //src[xx] \"movzbl 1(%%\"REG_S\", %%\"REG_b\"), %%esi \\n\\t\" //src[xx+1] \"subl %%edi, %%esi \\n\\t\" //src[xx+1] - src[xx] \"imull %%ecx, %%esi \\n\\t\" //(src[xx+1] - src[xx])*2*xalpha \"shll $16, %%edi \\n\\t\" \"addl %%edi, %%esi \\n\\t\" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) \"mov %1, %%\"REG_D\" \\n\\t\" \"shrl $9, %%esi \\n\\t\" \"movw %%si, (%%\"REG_D\", %%\"REG_a\", 2)\\n\\t\" \"movzbl (%5, %%\"REG_b\"), %%edi \\n\\t\" //src[xx] \"movzbl 1(%5, %%\"REG_b\"), %%esi \\n\\t\" //src[xx+1] \"subl %%edi, %%esi \\n\\t\" //src[xx+1] - src[xx] \"imull %%ecx, %%esi \\n\\t\" //(src[xx+1] - src[xx])*2*xalpha \"shll $16, %%edi \\n\\t\" \"addl %%edi, %%esi \\n\\t\" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) \"mov %1, %%\"REG_D\" \\n\\t\" \"shrl $9, %%esi \\n\\t\" \"movw %%si, 4096(%%\"REG_D\", %%\"REG_a\", 2)\\n\\t\" \"addw %4, %%cx \\n\\t\" //2*xalpha += xInc&0xFF \"adc %3, %%\"REG_b\" \\n\\t\" //xx+= xInc>>8 + carry \"add $1, %%\"REG_a\" \\n\\t\" \"cmp %2, %%\"REG_a\" \\n\\t\" \" jb 1b \\n\\t\" /* GCC-3.3 makes MPlayer crash on IA-32 machines when using \"g\" operand here, which is needed to support GCC-4.0 */ #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && 
__GNUC_MINOR__ >= 4)) :: \"m\" (src1), \"m\" (dst), \"g\" ((long)dstWidth), \"m\" (xInc_shr16), \"m\" (xInc_mask), #else :: \"m\" (src1), \"m\" (dst), \"m\" ((long)dstWidth), \"m\" (xInc_shr16), \"m\" (xInc_mask), #endif \"r\" (src2) : \"%\"REG_a, \"%\"REG_b, \"%ecx\", \"%\"REG_D, \"%esi\" ); #ifdef HAVE_MMX2 } //if MMX2 can't be used #endif #else int i; unsigned int xpos=0; for(i=0;i<dstWidth;i++) { register unsigned int xx=xpos>>16; register unsigned int xalpha=(xpos&0xFFFF)>>9; dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha); dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha); /* slower dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha; dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha; */ xpos+=xInc; } #endif } }"} {"target": 1, "idx": 27135, "func": "static void dequantization_int_97(int x, int y, Jpeg2000Cblk *cblk, Jpeg2000Component *comp, Jpeg2000T1Context *t1, Jpeg2000Band *band) { int i, j; int w = cblk->coord[0][1] - cblk->coord[0][0]; for (j = 0; j < (cblk->coord[1][1] - cblk->coord[1][0]); ++j) { int32_t *datap = &comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * (y + j) + x]; int *src = t1->data[j]; for (i = 0; i < w; ++i) datap[i] = (src[i] * band->i_stepsize + (1<<14)) >> 15; } }"} {"target": 0, "idx": 27137, "func": "yuv2rgb_full_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha) { const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0]; int i; int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4; int err[4] = {0}; if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8) step = 1; if (uvalpha < 2048) { int A = 0; //init to silence warning for (i = 0; i < dstW; i++) { int Y = buf0[i] << 2; int U = (ubuf0[i] - (128<<7)) << 2; int V = (vbuf0[i] - (128<<7)) << 2; if (hasAlpha) { A = (abuf0[i] + 64) >> 7; if (A & 0x100) A = av_clip_uint8(A); } yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err); dest += step; } } else { const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1]; int A = 0; //init to silence warning for (i = 0; i < dstW; i++) { int Y = buf0[i] << 2; int U = (ubuf0[i] + ubuf1[i] - (128<<8)) << 1; int V = (vbuf0[i] + vbuf1[i] - (128<<8)) << 1; if (hasAlpha) { A = (abuf0[i] + 64) >> 7; if (A & 0x100) A = av_clip_uint8(A); } yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err); dest += step; } } c->dither_error[0][i] = err[0]; c->dither_error[1][i] = err[1]; c->dither_error[2][i] = err[2]; }"} {"target": 1, "idx": 27142, "func": "void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts) { int err = 0; ContextInfo *ci = (ContextInfo *) ctx; AVPicture picture1; AVPicture picture2; AVPicture *pict = picture; int out_width; int out_height; int i; uint8_t *ptr = NULL; FILE *in = rwpipe_reader( ci->rw ); FILE *out = rwpipe_writer( ci->rw ); /* Check that we have a pipe to talk to. 
*/ if ( in == NULL || out == NULL ) err = 1; /* Convert to RGB24 if necessary */ if ( !err && pix_fmt != PIX_FMT_RGB24 ) { int size = avpicture_get_size(PIX_FMT_RGB24, width, height); if ( size != ci->size1 ) { av_free( ci->buf1 ); ci->buf1 = av_malloc(size); ci->size1 = size; err = ci->buf1 == NULL; } if ( !err ) { avpicture_fill(&picture1, ci->buf1, PIX_FMT_RGB24, width, height); // if we already got a SWS context, let's realloc if is not re-useable ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx, width, height, pix_fmt, width, height, PIX_FMT_RGB24, sws_flags, NULL, NULL, NULL); if (ci->toRGB_convert_ctx == NULL) { av_log(NULL, AV_LOG_ERROR, \"Cannot initialize the toRGB conversion context\\n\"); return; } // img_convert parameters are 2 first destination, then 4 source // sws_scale parameters are context, 4 first source, then 2 destination sws_scale(ci->toRGB_convert_ctx, picture->data, picture->linesize, 0, height, picture1.data, picture1.linesize); pict = &picture1; } } /* Write out the PPM */ if ( !err ) { ptr = pict->data[ 0 ]; fprintf( out, \"P6\\n%d %d\\n255\\n\", width, height ); for ( i = 0; !err && i < height; i ++ ) { err = !fwrite( ptr, width * 3, 1, out ); ptr += pict->linesize[ 0 ]; } if ( !err ) err = fflush( out ); } /* Read the PPM returned. */ if ( !err && !rwpipe_read_ppm_header( ci->rw, &out_width, &out_height ) ) { int size = avpicture_get_size(PIX_FMT_RGB24, out_width, out_height); if ( size != ci->size2 ) { av_free( ci->buf2 ); ci->buf2 = av_malloc(size); ci->size2 = size; err = ci->buf2 == NULL; } if ( !err ) { avpicture_fill(&picture2, ci->buf2, PIX_FMT_RGB24, out_width, out_height); ptr = picture2.data[ 0 ]; for ( i = 0; !err && i < out_height; i ++ ) { err = !fread( ptr, out_width * 3, 1, in ); ptr += picture2.linesize[ 0 ]; } } } /* Convert the returned PPM back to the input format */ if ( !err ) { /* The out_width/out_height returned from the PPM * filter won't necessarily be the same as width and height * but it will be scaled anyway to width/height. 
*/ av_log(NULL, AV_LOG_DEBUG, \"PPM vhook: Input dimensions: %d x %d Output dimensions: %d x %d\\n\", width, height, out_width, out_height); ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx, out_width, out_height, PIX_FMT_RGB24, width, height, pix_fmt, sws_flags, NULL, NULL, NULL); if (ci->fromRGB_convert_ctx == NULL) { av_log(NULL, AV_LOG_ERROR, \"Cannot initialize the fromRGB conversion context\\n\"); return; } // img_convert parameters are 2 first destination, then 4 source // sws_scale parameters are context, 4 first source, then 2 destination sws_scale(ci->fromRGB_convert_ctx, picture2.data, picture2.linesize, 0, out_height, picture->data, picture->linesize); } }"} {"target": 0, "idx": 27164, "func": "static int v4l2_set_parameters(AVFormatContext *s1, AVFormatParameters *ap) { struct video_data *s = s1->priv_data; struct v4l2_input input; struct v4l2_standard standard; struct v4l2_streamparm streamparm = { 0 }; struct v4l2_fract *tpf = &streamparm.parm.capture.timeperframe; int i, ret; AVRational framerate_q; streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; if (s->framerate && (ret = av_parse_video_rate(&framerate_q, s->framerate)) < 0) { av_log(s1, AV_LOG_ERROR, \"Could not parse framerate '%s'.\\n\", s->framerate); return ret; } /* set tv video input */ memset (&input, 0, sizeof (input)); input.index = s->channel; if (ioctl(s->fd, VIDIOC_ENUMINPUT, &input) < 0) { av_log(s1, AV_LOG_ERROR, \"The V4L2 driver ioctl enum input failed:\\n\"); return AVERROR(EIO); } av_log(s1, AV_LOG_DEBUG, \"The V4L2 driver set input_id: %d, input: %s\\n\", s->channel, input.name); if (ioctl(s->fd, VIDIOC_S_INPUT, &input.index) < 0) { av_log(s1, AV_LOG_ERROR, \"The V4L2 driver ioctl set input(%d) failed\\n\", s->channel); return AVERROR(EIO); } if (s->standard) { av_log(s1, AV_LOG_DEBUG, \"The V4L2 driver set standard: %s\\n\", s->standard); /* set tv standard */ memset (&standard, 0, sizeof (standard)); for(i=0;;i++) { standard.index = i; if (ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) { av_log(s1, AV_LOG_ERROR, \"The V4L2 driver ioctl set standard(%s) failed\\n\", s->standard); return AVERROR(EIO); } if (!av_strcasecmp(standard.name, s->standard)) { break; } } av_log(s1, AV_LOG_DEBUG, \"The V4L2 driver set standard: %s, id: %\"PRIu64\"\\n\", s->standard, (uint64_t)standard.id); if (ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) { av_log(s1, AV_LOG_ERROR, \"The V4L2 driver ioctl set standard(%s) failed\\n\", s->standard); return AVERROR(EIO); } } if (framerate_q.num && framerate_q.den) { av_log(s1, AV_LOG_DEBUG, \"Setting time per frame to %d/%d\\n\", framerate_q.den, framerate_q.num); tpf->numerator = framerate_q.den; tpf->denominator = framerate_q.num; if (ioctl(s->fd, VIDIOC_S_PARM, &streamparm) != 0) { av_log(s1, AV_LOG_ERROR, \"ioctl set time per frame(%d/%d) failed\\n\", framerate_q.den, framerate_q.num); return AVERROR(EIO); } if (framerate_q.num != tpf->denominator || framerate_q.den != tpf->numerator) { av_log(s1, AV_LOG_INFO, \"The driver changed the time per frame from \" \"%d/%d to %d/%d\\n\", framerate_q.den, framerate_q.num, tpf->numerator, tpf->denominator); } } else { if (ioctl(s->fd, VIDIOC_G_PARM, &streamparm) != 0) { av_log(s1, AV_LOG_ERROR, \"ioctl(VIDIOC_G_PARM): %s\\n\", strerror(errno)); return AVERROR(errno); } } s1->streams[0]->codec->time_base.den = tpf->denominator; s1->streams[0]->codec->time_base.num = tpf->numerator; s->timeout = 100 + av_rescale_q(1, s1->streams[0]->codec->time_base, (AVRational){1, 1000}); return 0; }"} {"target": 0, "idx": 27183, 
"func": "static int mpegts_read_packet(AVFormatContext *s, AVPacket *pkt) { MpegTSContext *ts = s->priv_data; if (!ts->mpeg2ts_raw) { ts->pkt = pkt; return handle_packets(ts, 0); } else { return mpegts_raw_read_packet(s, pkt); } }"} {"target": 0, "idx": 27187, "func": "static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr, int selector) { SegmentCache *dt; int index; uint8_t *ptr; if (selector & 0x4) dt = &env->ldt; else dt = &env->gdt; index = selector & ~7; if ((index + 7) > dt->limit) return -1; ptr = dt->base + index; *e1_ptr = ldl_kernel(ptr); *e2_ptr = ldl_kernel(ptr + 4); return 0; }"} {"target": 0, "idx": 27194, "func": "static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) { AVFilterContext *ctx = link->dst; YADIFContext *yadif = ctx->priv; if (yadif->frame_pending) return_frame(ctx, 1); if (yadif->prev) avfilter_unref_buffer(yadif->prev); yadif->prev = yadif->cur; yadif->cur = yadif->next; yadif->next = picref; if (!yadif->cur) return 0; if (yadif->auto_enable && !yadif->cur->video->interlaced) { yadif->out = avfilter_ref_buffer(yadif->cur, AV_PERM_READ); avfilter_unref_bufferp(&yadif->prev); if (yadif->out->pts != AV_NOPTS_VALUE) yadif->out->pts *= 2; return ff_start_frame(ctx->outputs[0], yadif->out); } if (!yadif->prev) yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ); yadif->out = ff_get_video_buffer(ctx->outputs[0], AV_PERM_WRITE | AV_PERM_PRESERVE | AV_PERM_REUSE, link->w, link->h); avfilter_copy_buffer_ref_props(yadif->out, yadif->cur); yadif->out->video->interlaced = 0; if (yadif->out->pts != AV_NOPTS_VALUE) yadif->out->pts *= 2; return ff_start_frame(ctx->outputs[0], yadif->out); }"} {"target": 0, "idx": 27199, "func": "static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h, int intra ) { const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 || !intra ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]; tc[1] = tc0_table[index_a][bS[1]]; tc[2] = tc0_table[index_a][bS[2]]; tc[3] = tc0_table[index_a][bS[3]]; h->h264dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta); } }"} {"target": 1, "idx": 27216, "func": "static void calc_thr_3gpp(const FFPsyWindowInfo *wi, const int num_bands, AacPsyChannel *pch, const uint8_t *band_sizes, const float *coefs) { int i, w, g; int start = 0; for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; float form_factor = 0.0f; float Temp; band->energy = 0.0f; for (i = 0; i < band_sizes[g]; i++) { band->energy += coefs[start+i] * coefs[start+i]; form_factor += sqrtf(fabs(coefs[start+i])); } Temp = band->energy > 0 ? 
sqrtf((float)band_sizes[g] / band->energy) : 0; band->thr = band->energy * 0.001258925f; band->nz_lines = form_factor * sqrtf(Temp); start += band_sizes[g]; } } }"} {"target": 0, "idx": 27242, "func": "void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque) { QEMUBH *bh; bh = g_new(QEMUBH, 1); *bh = (QEMUBH){ .ctx = ctx, .cb = cb, .opaque = opaque, }; qemu_lockcnt_lock(&ctx->list_lock); bh->next = ctx->first_bh; bh->scheduled = 1; bh->deleted = 1; /* Make sure that the members are ready before putting bh into list */ smp_wmb(); ctx->first_bh = bh; qemu_lockcnt_unlock(&ctx->list_lock); aio_notify(ctx); }"} {"target": 0, "idx": 27247, "func": "static int32_t virtio_net_flush_tx(VirtIONetQueue *q) { VirtIONet *n = q->n; VirtIODevice *vdev = VIRTIO_DEVICE(n); VirtQueueElement elem; int32_t num_packets = 0; int queue_index = vq2q(virtio_get_queue_index(q->tx_vq)); if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { return num_packets; } if (q->async_tx.elem.out_num) { virtio_queue_set_notification(q->tx_vq, 0); return num_packets; } while (virtqueue_pop(q->tx_vq, &elem)) { ssize_t ret; unsigned int out_num = elem.out_num; struct iovec *out_sg = &elem.out_sg[0]; struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1]; struct virtio_net_hdr_mrg_rxbuf mhdr; if (out_num < 1) { error_report(\"virtio-net header not in first element\"); exit(1); } if (n->has_vnet_hdr) { if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) < n->guest_hdr_len) { error_report(\"virtio-net header incorrect\"); exit(1); } if (virtio_needs_swap(vdev)) { virtio_net_hdr_swap(vdev, (void *) &mhdr); sg2[0].iov_base = &mhdr; sg2[0].iov_len = n->guest_hdr_len; out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1, out_sg, out_num, n->guest_hdr_len, -1); if (out_num == VIRTQUEUE_MAX_SIZE) { goto drop; } out_num += 1; out_sg = sg2; } } /* * If host wants to see the guest header as is, we can * pass it on unchanged. Otherwise, copy just the parts * that host is interested in. 
*/ assert(n->host_hdr_len <= n->guest_hdr_len); if (n->host_hdr_len != n->guest_hdr_len) { unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg), out_sg, out_num, 0, n->host_hdr_len); sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num, out_sg, out_num, n->guest_hdr_len, -1); out_num = sg_num; out_sg = sg; } ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index), out_sg, out_num, virtio_net_tx_complete); if (ret == 0) { virtio_queue_set_notification(q->tx_vq, 0); q->async_tx.elem = elem; return -EBUSY; } drop: virtqueue_push(q->tx_vq, &elem, 0); virtio_notify(vdev, q->tx_vq); if (++num_packets >= n->tx_burst) { break; } } return num_packets; }"} {"target": 1, "idx": 27259, "func": "static void sdhci_data_transfer(void *opaque) { SDHCIState *s = (SDHCIState *)opaque; if (s->trnmod & SDHC_TRNS_DMA) { switch (SDHC_DMA_TYPE(s->hostctl)) { case SDHC_CTRL_SDMA: if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) { sdhci_sdma_transfer_single_block(s); } else { sdhci_sdma_transfer_multi_blocks(s); } break; case SDHC_CTRL_ADMA1_32: if (!(s->capareg & SDHC_CAN_DO_ADMA1)) { ERRPRINT(\"ADMA1 not supported\\n\"); break; } sdhci_do_adma(s); break; case SDHC_CTRL_ADMA2_32: if (!(s->capareg & SDHC_CAN_DO_ADMA2)) { ERRPRINT(\"ADMA2 not supported\\n\"); break; } sdhci_do_adma(s); break; case SDHC_CTRL_ADMA2_64: if (!(s->capareg & SDHC_CAN_DO_ADMA2) || !(s->capareg & SDHC_64_BIT_BUS_SUPPORT)) { ERRPRINT(\"64 bit ADMA not supported\\n\"); break; } sdhci_do_adma(s); break; default: ERRPRINT(\"Unsupported DMA type\\n\"); break; } } else { if ((s->trnmod & SDHC_TRNS_READ) && sdbus_data_ready(&s->sdbus)) { s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE; sdhci_read_block_from_card(s); } else { s->prnsts |= SDHC_DOING_WRITE | SDHC_DAT_LINE_ACTIVE | SDHC_SPACE_AVAILABLE | SDHC_DATA_INHIBIT; sdhci_write_block_to_card(s); } } }"} {"target": 0, "idx": 27264, "func": "void cpu_dump_state(CPUState *env, FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...), int flags) { int eflags, i, nb; char cc_op_name[32]; static const char *seg_name[6] = { \"ES\", \"CS\", \"SS\", \"DS\", \"FS\", \"GS\" }; if (kvm_enabled()) kvm_arch_get_registers(env); eflags = env->eflags; #ifdef TARGET_X86_64 if (env->hflags & HF_CS64_MASK) { cpu_fprintf(f, \"RAX=%016\" PRIx64 \" RBX=%016\" PRIx64 \" RCX=%016\" PRIx64 \" RDX=%016\" PRIx64 \"\\n\" \"RSI=%016\" PRIx64 \" RDI=%016\" PRIx64 \" RBP=%016\" PRIx64 \" RSP=%016\" PRIx64 \"\\n\" \"R8 =%016\" PRIx64 \" R9 =%016\" PRIx64 \" R10=%016\" PRIx64 \" R11=%016\" PRIx64 \"\\n\" \"R12=%016\" PRIx64 \" R13=%016\" PRIx64 \" R14=%016\" PRIx64 \" R15=%016\" PRIx64 \"\\n\" \"RIP=%016\" PRIx64 \" RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\\n\", env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX], env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP], env->regs[8], env->regs[9], env->regs[10], env->regs[11], env->regs[12], env->regs[13], env->regs[14], env->regs[15], env->eip, eflags, eflags & DF_MASK ? 'D' : '-', eflags & CC_O ? 'O' : '-', eflags & CC_S ? 'S' : '-', eflags & CC_Z ? 'Z' : '-', eflags & CC_A ? 'A' : '-', eflags & CC_P ? 'P' : '-', eflags & CC_C ? 
'C' : '-', env->hflags & HF_CPL_MASK, (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, (int)(env->a20_mask >> 20) & 1, (env->hflags >> HF_SMM_SHIFT) & 1, env->halted); } else #endif { cpu_fprintf(f, \"EAX=%08x EBX=%08x ECX=%08x EDX=%08x\\n\" \"ESI=%08x EDI=%08x EBP=%08x ESP=%08x\\n\" \"EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\\n\", (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_EBX], (uint32_t)env->regs[R_ECX], (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI], (uint32_t)env->regs[R_EBP], (uint32_t)env->regs[R_ESP], (uint32_t)env->eip, eflags, eflags & DF_MASK ? 'D' : '-', eflags & CC_O ? 'O' : '-', eflags & CC_S ? 'S' : '-', eflags & CC_Z ? 'Z' : '-', eflags & CC_A ? 'A' : '-', eflags & CC_P ? 'P' : '-', eflags & CC_C ? 'C' : '-', env->hflags & HF_CPL_MASK, (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, (int)(env->a20_mask >> 20) & 1, (env->hflags >> HF_SMM_SHIFT) & 1, env->halted); } for(i = 0; i < 6; i++) { cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i], &env->segs[i]); } cpu_x86_dump_seg_cache(env, f, cpu_fprintf, \"LDT\", &env->ldt); cpu_x86_dump_seg_cache(env, f, cpu_fprintf, \"TR\", &env->tr); #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { cpu_fprintf(f, \"GDT= %016\" PRIx64 \" %08x\\n\", env->gdt.base, env->gdt.limit); cpu_fprintf(f, \"IDT= %016\" PRIx64 \" %08x\\n\", env->idt.base, env->idt.limit); cpu_fprintf(f, \"CR0=%08x CR2=%016\" PRIx64 \" CR3=%016\" PRIx64 \" CR4=%08x\\n\", (uint32_t)env->cr[0], env->cr[2], env->cr[3], (uint32_t)env->cr[4]); for(i = 0; i < 4; i++) cpu_fprintf(f, \"DR%d=%016\" PRIx64 \" \", i, env->dr[i]); cpu_fprintf(f, \"\\nDR6=%016\" PRIx64 \" DR7=%016\" PRIx64 \"\\n\", env->dr[6], env->dr[7]); } else #endif { cpu_fprintf(f, \"GDT= %08x %08x\\n\", (uint32_t)env->gdt.base, env->gdt.limit); cpu_fprintf(f, \"IDT= %08x %08x\\n\", (uint32_t)env->idt.base, env->idt.limit); cpu_fprintf(f, \"CR0=%08x CR2=%08x CR3=%08x CR4=%08x\\n\", (uint32_t)env->cr[0], (uint32_t)env->cr[2], (uint32_t)env->cr[3], (uint32_t)env->cr[4]); for(i = 0; i < 4; i++) cpu_fprintf(f, \"DR%d=%08x \", i, env->dr[i]); cpu_fprintf(f, \"\\nDR6=%08x DR7=%08x\\n\", env->dr[6], env->dr[7]); } if (flags & X86_DUMP_CCOP) { if ((unsigned)env->cc_op < CC_OP_NB) snprintf(cc_op_name, sizeof(cc_op_name), \"%s\", cc_op_str[env->cc_op]); else snprintf(cc_op_name, sizeof(cc_op_name), \"[%d]\", env->cc_op); #ifdef TARGET_X86_64 if (env->hflags & HF_CS64_MASK) { cpu_fprintf(f, \"CCS=%016\" PRIx64 \" CCD=%016\" PRIx64 \" CCO=%-8s\\n\", env->cc_src, env->cc_dst, cc_op_name); } else #endif { cpu_fprintf(f, \"CCS=%08x CCD=%08x CCO=%-8s\\n\", (uint32_t)env->cc_src, (uint32_t)env->cc_dst, cc_op_name); } } if (flags & X86_DUMP_FPU) { int fptag; fptag = 0; for(i = 0; i < 8; i++) { fptag |= ((!env->fptags[i]) << i); } cpu_fprintf(f, \"FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\\n\", env->fpuc, (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11, env->fpstt, fptag, env->mxcsr); for(i=0;i<8;i++) { #if defined(USE_X86LDOUBLE) union { long double d; struct { uint64_t lower; uint16_t upper; } l; } tmp; tmp.d = env->fpregs[i].d; cpu_fprintf(f, \"FPR%d=%016\" PRIx64 \" %04x\", i, tmp.l.lower, tmp.l.upper); #else cpu_fprintf(f, \"FPR%d=%016\" PRIx64, i, env->fpregs[i].mmx.q); #endif if ((i & 1) == 1) cpu_fprintf(f, \"\\n\"); else cpu_fprintf(f, \" \"); } if (env->hflags & HF_CS64_MASK) nb = 16; else nb = 8; for(i=0;i<nb;i++) { cpu_fprintf(f, \"XMM%02d=%08x%08x%08x%08x\", i, env->xmm_regs[i].XMM_L(3), env->xmm_regs[i].XMM_L(2), env->xmm_regs[i].XMM_L(1), env->xmm_regs[i].XMM_L(0)); if ((i & 1) == 1) cpu_fprintf(f, 
\"\\n\"); else cpu_fprintf(f, \" \"); } } }"} {"target": 0, "idx": 27269, "func": "static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPRMachineState *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { target_ulong id, start, r3; PowerPCCPU *cpu; if (nargs != 3 || nret != 1) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } id = rtas_ld(args, 0); start = rtas_ld(args, 1); r3 = rtas_ld(args, 2); cpu = spapr_find_cpu(id); if (cpu != NULL) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); Error *local_err = NULL; if (!cs->halted) { rtas_st(rets, 0, RTAS_OUT_HW_ERROR); return; } /* This will make sure qemu state is up to date with kvm, and * mark it dirty so our changes get flushed back before the * new cpu enters */ kvm_cpu_synchronize_state(cs); /* Set compatibility mode to match existing cpus */ ppc_set_compat(cpu, POWERPC_CPU(first_cpu)->compat_pvr, &local_err); if (local_err) { error_report_err(local_err); rtas_st(rets, 0, RTAS_OUT_HW_ERROR); return; } env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME); /* Enable Power-saving mode Exit Cause exceptions for the new CPU */ env->spr[SPR_LPCR] |= pcc->lpcr_pm; env->nip = start; env->gpr[3] = r3; cs->halted = 0; spapr_cpu_set_endianness(cpu); spapr_cpu_update_tb_offset(cpu); qemu_cpu_kick(cs); rtas_st(rets, 0, RTAS_OUT_SUCCESS); return; } /* Didn't find a matching cpu */ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); }"} {"target": 0, "idx": 27275, "func": "void net_slirp_redir(Monitor *mon, const char *redir_str, const char *redir_opt2) { struct slirp_config_str *config; if (!slirp_inited) { if (mon) { monitor_printf(mon, \"user mode network stack not in use\\n\"); } else { config = qemu_malloc(sizeof(*config)); config->str = redir_str; config->next = slirp_redirs; slirp_redirs = config; } return; } if (!strcmp(redir_str, \"remove\")) { net_slirp_redir_rm(mon, redir_opt2); return; } slirp_redirection(mon, redir_str); }"} {"target": 0, "idx": 27276, "func": "int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; ppc_slb_t *slb; unsigned apshift; hwaddr ptex; ppc_hash_pte64_t pte; int pp_prot, amr_prot, prot; uint64_t new_pte1, dsisr; const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; hwaddr raddr; assert((rwx == 0) || (rwx == 1) || (rwx == 2)); /* Note on LPCR usage: 970 uses HID4, but our special variant * of store_spr copies relevant fields into env->spr[SPR_LPCR]. * Similarily we filter unimplemented bits when storing into * LPCR depending on the MMU version. This code can thus just * use the LPCR \"as-is\". */ /* 1. Handle real mode accesses */ if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { /* Translation is supposedly \"off\" */ /* In real mode the top 4 effective address bits are (mostly) ignored */ raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; /* In HV mode, add HRMOR if top EA bit is clear */ if (msr_hv || !env->has_hv_mode) { if (!(eaddr >> 63)) { raddr |= env->spr[SPR_HRMOR]; } } else { /* Otherwise, check VPM for RMA vs VRMA */ if (env->spr[SPR_LPCR] & LPCR_VPM0) { slb = &env->vrma_slb; if (slb->sps) { goto skip_slb_search; } /* Not much else to do here */ cs->exception_index = POWERPC_EXCP_MCHECK; env->error_code = 0; return 1; } else if (raddr < env->rmls) { /* RMA. 
Check bounds in RMLS */ raddr |= env->spr[SPR_RMOR]; } else { /* The access failed, generate the approriate interrupt */ if (rwx == 2) { ppc_hash64_set_isi(cs, env, 0x08000000); } else { dsisr = 0x08000000; if (rwx == 1) { dsisr |= 0x02000000; } ppc_hash64_set_dsi(cs, env, eaddr, dsisr); } return 1; } } tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE); return 0; } /* 2. Translation is on, so look up the SLB */ slb = slb_lookup(cpu, eaddr); if (!slb) { /* No entry found, check if in-memory segment tables are in use */ if ((env->mmu_model & POWERPC_MMU_V3) && ppc64_use_proc_tbl(cpu)) { /* TODO - Unsupported */ error_report(\"Segment Table Support Unimplemented\"); exit(1); } /* Segment still not found, generate the appropriate interrupt */ if (rwx == 2) { cs->exception_index = POWERPC_EXCP_ISEG; env->error_code = 0; } else { cs->exception_index = POWERPC_EXCP_DSEG; env->error_code = 0; env->spr[SPR_DAR] = eaddr; } return 1; } skip_slb_search: /* 3. Check for segment level no-execute violation */ if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) { ppc_hash64_set_isi(cs, env, 0x10000000); return 1; } /* 4. Locate the PTE in the hash table */ ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift); if (ptex == -1) { dsisr = 0x40000000; if (rwx == 2) { ppc_hash64_set_isi(cs, env, dsisr); } else { if (rwx == 1) { dsisr |= 0x02000000; } ppc_hash64_set_dsi(cs, env, eaddr, dsisr); } return 1; } qemu_log_mask(CPU_LOG_MMU, \"found PTE at index %08\" HWADDR_PRIx \"\\n\", ptex); /* 5. Check access permissions */ pp_prot = ppc_hash64_pte_prot(cpu, slb, pte); amr_prot = ppc_hash64_amr_prot(cpu, pte); prot = pp_prot & amr_prot; if ((need_prot[rwx] & ~prot) != 0) { /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, \"PTE access rejected\\n\"); if (rwx == 2) { ppc_hash64_set_isi(cs, env, 0x08000000); } else { dsisr = 0; if (need_prot[rwx] & ~pp_prot) { dsisr |= 0x08000000; } if (rwx == 1) { dsisr |= 0x02000000; } if (need_prot[rwx] & ~amr_prot) { dsisr |= 0x00200000; } ppc_hash64_set_dsi(cs, env, eaddr, dsisr); } return 1; } qemu_log_mask(CPU_LOG_MMU, \"PTE access granted !\\n\"); /* 6. Update PTE referenced and changed bits if necessary */ new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */ if (rwx == 1) { new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */ } else { /* Treat the page as read-only for now, so that a later write * will pass through this function again to set the C bit */ prot &= ~PAGE_WRITE; } if (new_pte1 != pte.pte1) { ppc_hash64_store_hpte(cpu, ptex, pte.pte0, new_pte1); } /* 7. Determine the real address from the PTE */ raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr); tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, prot, mmu_idx, 1ULL << apshift); return 0; }"} {"target": 1, "idx": 27290, "func": "static void qxl_reset_surfaces(PCIQXLDevice *d) { dprint(d, 1, \"%s:\\n\", __FUNCTION__); d->mode = QXL_MODE_UNDEFINED; qemu_mutex_unlock_iothread(); d->ssd.worker->destroy_surfaces(d->ssd.worker); qemu_mutex_lock_iothread(); memset(&d->guest_surfaces.cmds, 0, sizeof(d->guest_surfaces.cmds)); }"}