Dataset schema (each record is four fields, in this order):

  target            int64   0 or 1
  func              string  lengths 7 to 484k
  func_no_comments  string  lengths 7 to 484k
  idx               int64   1 to 368k
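Comparing the two string fields within a record (see, e.g., record 903, where the ADD-IP protocol comments appear in func but not in func_no_comments) suggests that func_no_comments is simply func with // and /* */ comments stripped. As an illustration only (a sketch, not the tool that actually produced this column), a minimal single-pass stripper in C could look like the following. It assumes no trigraphs and no backslash line-continuations inside comment markers, and it emits one space per block comment so adjacent tokens do not fuse.

#include <stdio.h>

/*
 * Sketch: copy C source from stdin to stdout with // and block comments
 * removed, leaving string and character literals intact.
 */
int main(void)
{
    int c, prev = 0;   /* pending byte in CODE state, last byte otherwise */
    enum { CODE, LINE_COMMENT, BLOCK_COMMENT, STRING, CHARLIT } state = CODE;

    while ((c = getchar()) != EOF) {
        switch (state) {
        case CODE:
            if (prev == '/' && c == '/') { state = LINE_COMMENT;  prev = 0; break; }
            if (prev == '/' && c == '*') { state = BLOCK_COMMENT; prev = 0; break; }
            if (prev) putchar(prev);           /* deferred '/' was a division sign */
            if (c == '/') { prev = c; break; } /* defer: may open a comment */
            prev = 0;
            putchar(c);
            if (c == '"')  state = STRING;
            if (c == '\'') state = CHARLIT;
            break;
        case LINE_COMMENT:                     /* drop until end of line, keep the newline */
            if (c == '\n') { putchar(c); state = CODE; }
            break;
        case BLOCK_COMMENT:                    /* drop until the closing star-slash */
            if (prev == '*' && c == '/') { putchar(' '); state = CODE; prev = 0; }
            else prev = c;
            break;
        case STRING:
        case CHARLIT:                          /* copy literals verbatim, honoring escapes */
            putchar(c);
            if (prev != '\\' && c == (state == STRING ? '"' : '\'')) {
                state = CODE;
                prev = 0;
            } else {
                prev = (prev == '\\') ? 0 : c; /* so "\\\\" cannot hide a closing quote */
            }
            break;
        }
    }
    if (state == CODE && prev)
        putchar(prev);                         /* flush a trailing '/' */
    return 0;
}

Piping a record's func field through this program should roughly reproduce the corresponding func_no_comments field, up to the whitespace left behind where comment lines were removed.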
target: 1
static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce, const float lambda) { int q, w, w2, g, start = 0; int i, j; int idx; TrellisPath paths[TRELLIS_STAGES][TRELLIS_STATES]; int bandaddr[TRELLIS_STAGES]; int minq; float mincost; float q0f = FLT_MAX, q1f = 0.0f, qnrgf = 0.0f; int q0, q1, qcnt = 0; for (i = 0; i < 1024; i++) { float t = fabsf(sce->coeffs[i]); if (t > 0.0f) { q0f = FFMIN(q0f, t); q1f = FFMAX(q1f, t); qnrgf += t*t; qcnt++; } } if (!qcnt) { memset(sce->sf_idx, 0, sizeof(sce->sf_idx)); memset(sce->zeroes, 1, sizeof(sce->zeroes)); return; } //minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped q0 = coef2minsf(q0f); //maximum scalefactor index is when maximum coefficient after quantizing is still not zero q1 = coef2maxsf(q1f); if (q1 - q0 > 60) { int q0low = q0; int q1high = q1; //minimum scalefactor index is when maximum nonzero coefficient after quantizing is not clipped int qnrg = av_clip_uint8(log2f(sqrtf(qnrgf/qcnt))*4 - 31 + SCALE_ONE_POS - SCALE_DIV_512); q1 = qnrg + 30; q0 = qnrg - 30; if (q0 < q0low) { q1 += q0low - q0; q0 = q0low; } else if (q1 > q1high) { q0 -= q1 - q1high; q1 = q1high; } } for (i = 0; i < TRELLIS_STATES; i++) { paths[0][i].cost = 0.0f; paths[0][i].prev = -1; } for (j = 1; j < TRELLIS_STAGES; j++) { for (i = 0; i < TRELLIS_STATES; i++) { paths[j][i].cost = INFINITY; paths[j][i].prev = -2; } } idx = 1; abs_pow34_v(s->scoefs, sce->coeffs, 1024); for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { const float *coefs = &sce->coeffs[start]; float qmin, qmax; int nz = 0; bandaddr[idx] = w * 16 + g; qmin = INT_MAX; qmax = 0.0f; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g]; if (band->energy <= band->threshold || band->threshold == 0.0f) { sce->zeroes[(w+w2)*16+g] = 1; continue; } sce->zeroes[(w+w2)*16+g] = 0; nz = 1; for (i = 0; i < sce->ics.swb_sizes[g]; i++) { float t = fabsf(coefs[w2*128+i]); if (t > 0.0f) qmin = FFMIN(qmin, t); qmax = FFMAX(qmax, t); } } if (nz) { int minscale, maxscale; float minrd = INFINITY; float maxval; //minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped minscale = coef2minsf(qmin); //maximum scalefactor index is when maximum coefficient after quantizing is still not zero maxscale = coef2maxsf(qmax); minscale = av_clip(minscale - q0, 0, TRELLIS_STATES - 1); maxscale = av_clip(maxscale - q0, 0, TRELLIS_STATES); maxval = find_max_val(sce->ics.group_len[w], sce->ics.swb_sizes[g], s->scoefs+start); for (q = minscale; q < maxscale; q++) { float dist = 0; int cb = find_min_book(maxval, sce->sf_idx[w*16+g]); for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g]; dist += quantize_band_cost(s, coefs + w2*128, s->scoefs + start + w2*128, sce->ics.swb_sizes[g], q + q0, cb, lambda / band->threshold, INFINITY, NULL, NULL, 0); } minrd = FFMIN(minrd, dist); for (i = 0; i < q1 - q0; i++) { float cost; cost = paths[idx - 1][i].cost + dist + ff_aac_scalefactor_bits[q - i + SCALE_DIFF_ZERO]; if (cost < paths[idx][q].cost) { paths[idx][q].cost = cost; paths[idx][q].prev = i; } } } } else { for (q = 0; q < q1 - q0; q++) { paths[idx][q].cost = paths[idx - 1][q].cost + 1; paths[idx][q].prev = q; } } sce->zeroes[w*16+g] = !nz; start += sce->ics.swb_sizes[g]; idx++; } } idx--; mincost = paths[idx][0].cost; minq = 0; for 
(i = 1; i < TRELLIS_STATES; i++) { if (paths[idx][i].cost < mincost) { mincost = paths[idx][i].cost; minq = i; } } while (idx) { sce->sf_idx[bandaddr[idx]] = minq + q0; minq = paths[idx][minq].prev; idx--; } //set the same quantizers inside window groups for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) for (g = 0; g < sce->ics.num_swb; g++) for (w2 = 1; w2 < sce->ics.group_len[w]; w2++) sce->sf_idx[(w+w2)*16+g] = sce->sf_idx[w*16+g]; }
static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce, const float lambda) { int q, w, w2, g, start = 0; int i, j; int idx; TrellisPath paths[TRELLIS_STAGES][TRELLIS_STATES]; int bandaddr[TRELLIS_STAGES]; int minq; float mincost; float q0f = FLT_MAX, q1f = 0.0f, qnrgf = 0.0f; int q0, q1, qcnt = 0; for (i = 0; i < 1024; i++) { float t = fabsf(sce->coeffs[i]); if (t > 0.0f) { q0f = FFMIN(q0f, t); q1f = FFMAX(q1f, t); qnrgf += t*t; qcnt++; } } if (!qcnt) { memset(sce->sf_idx, 0, sizeof(sce->sf_idx)); memset(sce->zeroes, 1, sizeof(sce->zeroes)); return; }
idx: 892
target: 0
CURLcode Curl_http_done ( struct connectdata * conn , CURLcode status , bool premature ) { struct Curl_easy * data = conn -> data ; struct HTTP * http = data -> req . protop ; infof ( data , "Curl_http_done: called premature == %d\n" , premature ) ; Curl_unencode_cleanup ( conn ) ; # ifdef USE_SPNEGO if ( data -> state . proxyneg . state == GSS_AUTHSENT || data -> state . negotiate . state == GSS_AUTHSENT ) { if ( ( data -> req . httpcode != 401 ) && ( data -> req . httpcode != 407 ) && ! data -> set . connect_only ) streamclose ( conn , "Negotiate transfer completed" ) ; Curl_cleanup_negotiate ( data ) ; } # endif conn -> seek_func = data -> set . seek_func ; conn -> seek_client = data -> set . seek_client ; if ( ! http ) return CURLE_OK ; if ( http -> send_buffer ) { Curl_add_buffer_free ( http -> send_buffer ) ; http -> send_buffer = NULL ; } Curl_http2_done ( conn , premature ) ; if ( HTTPREQ_POST_FORM == data -> set . httpreq ) { data -> req . bytecount = http -> readbytecount + http -> writebytecount ; Curl_formclean ( & http -> sendit ) ; if ( http -> form . fp ) { fclose ( http -> form . fp ) ; http -> form . fp = NULL ; } } else if ( HTTPREQ_PUT == data -> set . httpreq ) data -> req . bytecount = http -> readbytecount + http -> writebytecount ; if ( status ) return status ; if ( ! premature && ! conn -> bits . retry && ! data -> set . connect_only && ( http -> readbytecount + data -> req . headerbytecount - data -> req . deductheadercount ) <= 0 ) { failf ( data , "Empty reply from server" ) ; return CURLE_GOT_NOTHING ; } return CURLE_OK ; }
CURLcode Curl_http_done ( struct connectdata * conn , CURLcode status , bool premature ) { struct Curl_easy * data = conn -> data ; struct HTTP * http = data -> req . protop ; infof ( data , "Curl_http_done: called premature == %d\n" , premature ) ; Curl_unencode_cleanup ( conn ) ; # ifdef USE_SPNEGO if ( data -> state . proxyneg . state == GSS_AUTHSENT || data -> state . negotiate . state == GSS_AUTHSENT ) { if ( ( data -> req . httpcode != 401 ) && ( data -> req . httpcode != 407 ) && ! data -> set . connect_only ) streamclose ( conn , "Negotiate transfer completed" ) ; Curl_cleanup_negotiate ( data ) ; } # endif conn -> seek_func = data -> set . seek_func ; conn -> seek_client = data -> set . seek_client ; if ( ! http ) return CURLE_OK ; if ( http -> send_buffer ) { Curl_add_buffer_free ( http -> send_buffer ) ; http -> send_buffer = NULL ; } Curl_http2_done ( conn , premature ) ; if ( HTTPREQ_POST_FORM == data -> set . httpreq ) { data -> req . bytecount = http -> readbytecount + http -> writebytecount ; Curl_formclean ( & http -> sendit ) ; if ( http -> form . fp ) { fclose ( http -> form . fp ) ; http -> form . fp = NULL ; } } else if ( HTTPREQ_PUT == data -> set . httpreq ) data -> req . bytecount = http -> readbytecount + http -> writebytecount ; if ( status ) return status ; if ( ! premature && ! conn -> bits . retry && ! data -> set . connect_only && ( http -> readbytecount + data -> req . headerbytecount - data -> req . deductheadercount ) <= 0 ) { failf ( data , "Empty reply from server" ) ; return CURLE_GOT_NOTHING ; } return CURLE_OK ; }
idx: 893
target: 1
QDict *qdict_get_qdict(const QDict *qdict, const char *key) { return qobject_to_qdict(qdict_get_obj(qdict, key, QTYPE_QDICT)); }
QDict *qdict_get_qdict(const QDict *qdict, const char *key) { return qobject_to_qdict(qdict_get_obj(qdict, key, QTYPE_QDICT)); }
idx: 894
target: 1
gss_accept_sec_context (minor_status, context_handle, verifier_cred_handle, input_token_buffer, input_chan_bindings, src_name, mech_type, output_token, ret_flags, time_rec, d_cred) OM_uint32 * minor_status; gss_ctx_id_t * context_handle; gss_cred_id_t verifier_cred_handle; gss_buffer_t input_token_buffer; gss_channel_bindings_t input_chan_bindings; gss_name_t * src_name; gss_OID * mech_type; gss_buffer_t output_token; OM_uint32 * ret_flags; OM_uint32 * time_rec; gss_cred_id_t * d_cred; { OM_uint32 status, temp_status, temp_minor_status; OM_uint32 temp_ret_flags = 0; gss_union_ctx_id_t union_ctx_id = NULL; gss_cred_id_t input_cred_handle = GSS_C_NO_CREDENTIAL; gss_cred_id_t tmp_d_cred = GSS_C_NO_CREDENTIAL; gss_name_t internal_name = GSS_C_NO_NAME; gss_name_t tmp_src_name = GSS_C_NO_NAME; gss_OID_desc token_mech_type_desc; gss_OID token_mech_type = &token_mech_type_desc; gss_OID actual_mech = GSS_C_NO_OID; gss_OID selected_mech = GSS_C_NO_OID; gss_OID public_mech; gss_mechanism mech = NULL; gss_union_cred_t uc; int i; status = val_acc_sec_ctx_args(minor_status, context_handle, verifier_cred_handle, input_token_buffer, input_chan_bindings, src_name, mech_type, output_token, ret_flags, time_rec, d_cred); if (status != GSS_S_COMPLETE) return (status); /* * if context_handle is GSS_C_NO_CONTEXT, allocate a union context * descriptor to hold the mech type information as well as the * underlying mechanism context handle. Otherwise, cast the * value of *context_handle to the union context variable. */ if(*context_handle == GSS_C_NO_CONTEXT) { if (input_token_buffer == GSS_C_NO_BUFFER) return (GSS_S_CALL_INACCESSIBLE_READ); /* Get the token mech type */ status = gssint_get_mech_type(token_mech_type, input_token_buffer); if (status) return status; /* * An interposer calling back into the mechglue can't pass in a special * mech, so we have to recognize it using verifier_cred_handle. Use * the mechanism for which we have matching creds, if available. */ if (verifier_cred_handle != GSS_C_NO_CREDENTIAL) { uc = (gss_union_cred_t)verifier_cred_handle; for (i = 0; i < uc->count; i++) { public_mech = gssint_get_public_oid(&uc->mechs_array[i]); if (public_mech && g_OID_equal(token_mech_type, public_mech)) { selected_mech = &uc->mechs_array[i]; break; } } } if (selected_mech == GSS_C_NO_OID) { status = gssint_select_mech_type(minor_status, token_mech_type, &selected_mech); if (status) return status; } } else { union_ctx_id = (gss_union_ctx_id_t)*context_handle; selected_mech = union_ctx_id->mech_type; } /* Now create a new context if we didn't get one. */ if (*context_handle == GSS_C_NO_CONTEXT) { status = GSS_S_FAILURE; union_ctx_id = (gss_union_ctx_id_t) malloc(sizeof(gss_union_ctx_id_desc)); if (!union_ctx_id) return (GSS_S_FAILURE); union_ctx_id->loopback = union_ctx_id; union_ctx_id->internal_ctx_id = GSS_C_NO_CONTEXT; status = generic_gss_copy_oid(&temp_minor_status, selected_mech, &union_ctx_id->mech_type); if (status != GSS_S_COMPLETE) { free(union_ctx_id); return (status); } /* set the new context handle to caller's data */ *context_handle = (gss_ctx_id_t)union_ctx_id; } /* * get the appropriate cred handle from the union cred struct. 
*/ if (verifier_cred_handle != GSS_C_NO_CREDENTIAL) { input_cred_handle = gssint_get_mechanism_cred((gss_union_cred_t)verifier_cred_handle, selected_mech); if (input_cred_handle == GSS_C_NO_CREDENTIAL) { /* verifier credential specified but no acceptor credential found */ status = GSS_S_NO_CRED; goto error_out; } } else if (!allow_mech_by_default(selected_mech)) { status = GSS_S_NO_CRED; goto error_out; } /* * now select the approprate underlying mechanism routine and * call it. */ mech = gssint_get_mechanism(selected_mech); if (mech && mech->gss_accept_sec_context) { status = mech->gss_accept_sec_context(minor_status, &union_ctx_id->internal_ctx_id, input_cred_handle, input_token_buffer, input_chan_bindings, src_name ? &internal_name : NULL, &actual_mech, output_token, &temp_ret_flags, time_rec, d_cred ? &tmp_d_cred : NULL); /* If there's more work to do, keep going... */ if (status == GSS_S_CONTINUE_NEEDED) return GSS_S_CONTINUE_NEEDED; /* if the call failed, return with failure */ if (status != GSS_S_COMPLETE) { map_error(minor_status, mech); goto error_out; } /* * if src_name is non-NULL, * convert internal_name into a union name equivalent * First call the mechanism specific display_name() * then call gss_import_name() to create * the union name struct cast to src_name */ if (src_name != NULL) { if (internal_name != GSS_C_NO_NAME) { /* consumes internal_name regardless of success */ temp_status = gssint_convert_name_to_union_name( &temp_minor_status, mech, internal_name, &tmp_src_name); if (temp_status != GSS_S_COMPLETE) { status = temp_status; *minor_status = temp_minor_status; map_error(minor_status, mech); if (output_token->length) (void) gss_release_buffer(&temp_minor_status, output_token); goto error_out; } *src_name = tmp_src_name; } else *src_name = GSS_C_NO_NAME; } #define g_OID_prefix_equal(o1, o2) \ (((o1)->length >= (o2)->length) && \ (memcmp((o1)->elements, (o2)->elements, (o2)->length) == 0)) /* Ensure we're returning correct creds format */ if ((temp_ret_flags & GSS_C_DELEG_FLAG) && tmp_d_cred != GSS_C_NO_CREDENTIAL) { public_mech = gssint_get_public_oid(selected_mech); if (actual_mech != GSS_C_NO_OID && public_mech != GSS_C_NO_OID && !g_OID_prefix_equal(actual_mech, public_mech)) { *d_cred = tmp_d_cred; /* unwrapped pseudo-mech */ } else { gss_union_cred_t d_u_cred = NULL; d_u_cred = malloc(sizeof (gss_union_cred_desc)); if (d_u_cred == NULL) { status = GSS_S_FAILURE; goto error_out; } (void) memset(d_u_cred, 0, sizeof (gss_union_cred_desc)); d_u_cred->count = 1; status = generic_gss_copy_oid(&temp_minor_status, selected_mech, &d_u_cred->mechs_array); if (status != GSS_S_COMPLETE) { free(d_u_cred); goto error_out; } d_u_cred->cred_array = malloc(sizeof(gss_cred_id_t)); if (d_u_cred->cred_array != NULL) { d_u_cred->cred_array[0] = tmp_d_cred; } else { free(d_u_cred); status = GSS_S_FAILURE; goto error_out; } d_u_cred->loopback = d_u_cred; *d_cred = (gss_cred_id_t)d_u_cred; } } if (mech_type != NULL) *mech_type = gssint_get_public_oid(actual_mech); if (ret_flags != NULL) *ret_flags = temp_ret_flags; return (status); } else { status = GSS_S_BAD_MECH; } error_out: if (union_ctx_id) { if (union_ctx_id->mech_type) { if (union_ctx_id->mech_type->elements) free(union_ctx_id->mech_type->elements); free(union_ctx_id->mech_type); } if (union_ctx_id->internal_ctx_id && mech && mech->gss_delete_sec_context) { mech->gss_delete_sec_context(&temp_minor_status, &union_ctx_id->internal_ctx_id, GSS_C_NO_BUFFER); } free(union_ctx_id); *context_handle = GSS_C_NO_CONTEXT; } if (src_name) 
*src_name = GSS_C_NO_NAME; if (tmp_src_name != GSS_C_NO_NAME) (void) gss_release_buffer(&temp_minor_status, (gss_buffer_t)tmp_src_name); return (status); }
gss_accept_sec_context (minor_status, context_handle, verifier_cred_handle, input_token_buffer, input_chan_bindings, src_name, mech_type, output_token, ret_flags, time_rec, d_cred) OM_uint32 * minor_status; gss_ctx_id_t * context_handle; gss_cred_id_t verifier_cred_handle; gss_buffer_t input_token_buffer; gss_channel_bindings_t input_chan_bindings; gss_name_t * src_name; gss_OID * mech_type; gss_buffer_t output_token; OM_uint32 * ret_flags; OM_uint32 * time_rec; gss_cred_id_t * d_cred; { OM_uint32 status, temp_status, temp_minor_status; OM_uint32 temp_ret_flags = 0; gss_union_ctx_id_t union_ctx_id = NULL; gss_cred_id_t input_cred_handle = GSS_C_NO_CREDENTIAL; gss_cred_id_t tmp_d_cred = GSS_C_NO_CREDENTIAL; gss_name_t internal_name = GSS_C_NO_NAME; gss_name_t tmp_src_name = GSS_C_NO_NAME; gss_OID_desc token_mech_type_desc; gss_OID token_mech_type = &token_mech_type_desc; gss_OID actual_mech = GSS_C_NO_OID; gss_OID selected_mech = GSS_C_NO_OID; gss_OID public_mech; gss_mechanism mech = NULL; gss_union_cred_t uc; int i; status = val_acc_sec_ctx_args(minor_status, context_handle, verifier_cred_handle, input_token_buffer, input_chan_bindings, src_name, mech_type, output_token, ret_flags, time_rec, d_cred); if (status != GSS_S_COMPLETE) return (status); if(*context_handle == GSS_C_NO_CONTEXT) { if (input_token_buffer == GSS_C_NO_BUFFER) return (GSS_S_CALL_INACCESSIBLE_READ); status = gssint_get_mech_type(token_mech_type, input_token_buffer); if (status) return status; if (verifier_cred_handle != GSS_C_NO_CREDENTIAL) { uc = (gss_union_cred_t)verifier_cred_handle; for (i = 0; i < uc->count; i++) { public_mech = gssint_get_public_oid(&uc->mechs_array[i]); if (public_mech && g_OID_equal(token_mech_type, public_mech)) { selected_mech = &uc->mechs_array[i]; break; } } } if (selected_mech == GSS_C_NO_OID) { status = gssint_select_mech_type(minor_status, token_mech_type, &selected_mech); if (status) return status; } } else { union_ctx_id = (gss_union_ctx_id_t)*context_handle; selected_mech = union_ctx_id->mech_type; } if (*context_handle == GSS_C_NO_CONTEXT) { status = GSS_S_FAILURE; union_ctx_id = (gss_union_ctx_id_t) malloc(sizeof(gss_union_ctx_id_desc)); if (!union_ctx_id) return (GSS_S_FAILURE); union_ctx_id->loopback = union_ctx_id; union_ctx_id->internal_ctx_id = GSS_C_NO_CONTEXT; status = generic_gss_copy_oid(&temp_minor_status, selected_mech, &union_ctx_id->mech_type); if (status != GSS_S_COMPLETE) { free(union_ctx_id); return (status); } *context_handle = (gss_ctx_id_t)union_ctx_id; } if (verifier_cred_handle != GSS_C_NO_CREDENTIAL) { input_cred_handle = gssint_get_mechanism_cred((gss_union_cred_t)verifier_cred_handle, selected_mech); if (input_cred_handle == GSS_C_NO_CREDENTIAL) { status = GSS_S_NO_CRED; goto error_out; } } else if (!allow_mech_by_default(selected_mech)) { status = GSS_S_NO_CRED; goto error_out; } mech = gssint_get_mechanism(selected_mech); if (mech && mech->gss_accept_sec_context) { status = mech->gss_accept_sec_context(minor_status, &union_ctx_id->internal_ctx_id, input_cred_handle, input_token_buffer, input_chan_bindings, src_name ? &internal_name : NULL, &actual_mech, output_token, &temp_ret_flags, time_rec, d_cred ? 
&tmp_d_cred : NULL); if (status == GSS_S_CONTINUE_NEEDED) return GSS_S_CONTINUE_NEEDED; if (status != GSS_S_COMPLETE) { map_error(minor_status, mech); goto error_out; } if (src_name != NULL) { if (internal_name != GSS_C_NO_NAME) { temp_status = gssint_convert_name_to_union_name( &temp_minor_status, mech, internal_name, &tmp_src_name); if (temp_status != GSS_S_COMPLETE) { status = temp_status; *minor_status = temp_minor_status; map_error(minor_status, mech); if (output_token->length) (void) gss_release_buffer(&temp_minor_status, output_token); goto error_out; } *src_name = tmp_src_name; } else *src_name = GSS_C_NO_NAME; } #define g_OID_prefix_equal(o1, o2) \ (((o1)->length >= (o2)->length) && \ (memcmp((o1)->elements, (o2)->elements, (o2)->length) == 0)) if ((temp_ret_flags & GSS_C_DELEG_FLAG) && tmp_d_cred != GSS_C_NO_CREDENTIAL) { public_mech = gssint_get_public_oid(selected_mech); if (actual_mech != GSS_C_NO_OID && public_mech != GSS_C_NO_OID && !g_OID_prefix_equal(actual_mech, public_mech)) { *d_cred = tmp_d_cred; } else { gss_union_cred_t d_u_cred = NULL; d_u_cred = malloc(sizeof (gss_union_cred_desc)); if (d_u_cred == NULL) { status = GSS_S_FAILURE; goto error_out; } (void) memset(d_u_cred, 0, sizeof (gss_union_cred_desc)); d_u_cred->count = 1; status = generic_gss_copy_oid(&temp_minor_status, selected_mech, &d_u_cred->mechs_array); if (status != GSS_S_COMPLETE) { free(d_u_cred); goto error_out; } d_u_cred->cred_array = malloc(sizeof(gss_cred_id_t)); if (d_u_cred->cred_array != NULL) { d_u_cred->cred_array[0] = tmp_d_cred; } else { free(d_u_cred); status = GSS_S_FAILURE; goto error_out; } d_u_cred->loopback = d_u_cred; *d_cred = (gss_cred_id_t)d_u_cred; } } if (mech_type != NULL) *mech_type = gssint_get_public_oid(actual_mech); if (ret_flags != NULL) *ret_flags = temp_ret_flags; return (status); } else { status = GSS_S_BAD_MECH; } error_out: if (union_ctx_id) { if (union_ctx_id->mech_type) { if (union_ctx_id->mech_type->elements) free(union_ctx_id->mech_type->elements); free(union_ctx_id->mech_type); } if (union_ctx_id->internal_ctx_id && mech && mech->gss_delete_sec_context) { mech->gss_delete_sec_context(&temp_minor_status, &union_ctx_id->internal_ctx_id, GSS_C_NO_BUFFER); } free(union_ctx_id); *context_handle = GSS_C_NO_CONTEXT; } if (src_name) *src_name = GSS_C_NO_NAME; if (tmp_src_name != GSS_C_NO_NAME) (void) gss_release_buffer(&temp_minor_status, (gss_buffer_t)tmp_src_name); return (status); }
idx: 895
target: 1
static sctp_disposition_t sctp_sf_violation_paramlen( const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { static const char err_str[] = "The following parameter had invalid length:"; return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, sizeof(err_str)); }
static sctp_disposition_t sctp_sf_violation_paramlen( const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { static const char err_str[] = "The following parameter had invalid length:"; return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, sizeof(err_str)); }
idx: 896
target: 1
static int sctp_process_inv_paramlength(const struct sctp_association *asoc, struct sctp_paramhdr *param, const struct sctp_chunk *chunk, struct sctp_chunk **errp) { static const char error[] = "The following parameter had invalid length:"; size_t payload_len = WORD_ROUND(sizeof(error)) + sizeof(sctp_paramhdr_t); /* This is a fatal error. Any accumulated non-fatal errors are * not reported. */ if (*errp) sctp_chunk_free(*errp); /* Create an error chunk and fill it in with our payload. */ *errp = sctp_make_op_error_space(asoc, chunk, payload_len); if (*errp) { sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, sizeof(error) + sizeof(sctp_paramhdr_t)); sctp_addto_chunk(*errp, sizeof(error), error); sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param); } return 0; }
static int sctp_process_inv_paramlength(const struct sctp_association *asoc, struct sctp_paramhdr *param, const struct sctp_chunk *chunk, struct sctp_chunk **errp) { static const char error[] = "The following parameter had invalid length:"; size_t payload_len = WORD_ROUND(sizeof(error)) + sizeof(sctp_paramhdr_t); if (*errp) sctp_chunk_free(*errp); *errp = sctp_make_op_error_space(asoc, chunk, payload_len); if (*errp) { sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, sizeof(error) + sizeof(sctp_paramhdr_t)); sctp_addto_chunk(*errp, sizeof(error), error); sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param); } return 0; }
idx: 897
target: 0
VALUE rb_dlhandle_initialize ( int argc , VALUE argv [ ] , VALUE self ) { void * ptr ; struct dl_handle * dlhandle ; VALUE lib , flag ; char * clib ; int cflag ; const char * err ; switch ( rb_scan_args ( argc , argv , "02" , & lib , & flag ) ) { case 0 : clib = NULL ; cflag = RTLD_LAZY | RTLD_GLOBAL ; break ; case 1 : clib = NIL_P ( lib ) ? NULL : SafeStringValuePtr ( lib ) ; cflag = RTLD_LAZY | RTLD_GLOBAL ; break ; case 2 : clib = NIL_P ( lib ) ? NULL : SafeStringValuePtr ( lib ) ; cflag = NUM2INT ( flag ) ; break ; default : rb_bug ( "rb_dlhandle_new" ) ; } ptr = dlopen ( clib , cflag ) ; # if defined ( HAVE_DLERROR ) if ( ! ptr && ( err = dlerror ( ) ) ) { rb_raise ( rb_eDLError , "%s" , err ) ; } # else if ( ! ptr ) { err = dlerror ( ) ; rb_raise ( rb_eDLError , "%s" , err ) ; } # endif Data_Get_Struct ( self , struct dl_handle , dlhandle ) ; if ( dlhandle -> ptr && dlhandle -> open && dlhandle -> enable_close ) { dlclose ( dlhandle -> ptr ) ; } dlhandle -> ptr = ptr ; dlhandle -> open = 1 ; dlhandle -> enable_close = 0 ; if ( rb_block_given_p ( ) ) { rb_ensure ( rb_yield , self , rb_dlhandle_close , self ) ; } return Qnil ; }
VALUE rb_dlhandle_initialize ( int argc , VALUE argv [ ] , VALUE self ) { void * ptr ; struct dl_handle * dlhandle ; VALUE lib , flag ; char * clib ; int cflag ; const char * err ; switch ( rb_scan_args ( argc , argv , "02" , & lib , & flag ) ) { case 0 : clib = NULL ; cflag = RTLD_LAZY | RTLD_GLOBAL ; break ; case 1 : clib = NIL_P ( lib ) ? NULL : SafeStringValuePtr ( lib ) ; cflag = RTLD_LAZY | RTLD_GLOBAL ; break ; case 2 : clib = NIL_P ( lib ) ? NULL : SafeStringValuePtr ( lib ) ; cflag = NUM2INT ( flag ) ; break ; default : rb_bug ( "rb_dlhandle_new" ) ; } ptr = dlopen ( clib , cflag ) ; # if defined ( HAVE_DLERROR ) if ( ! ptr && ( err = dlerror ( ) ) ) { rb_raise ( rb_eDLError , "%s" , err ) ; } # else if ( ! ptr ) { err = dlerror ( ) ; rb_raise ( rb_eDLError , "%s" , err ) ; } # endif Data_Get_Struct ( self , struct dl_handle , dlhandle ) ; if ( dlhandle -> ptr && dlhandle -> open && dlhandle -> enable_close ) { dlclose ( dlhandle -> ptr ) ; } dlhandle -> ptr = ptr ; dlhandle -> open = 1 ; dlhandle -> enable_close = 0 ; if ( rb_block_given_p ( ) ) { rb_ensure ( rb_yield , self , rb_dlhandle_close , self ) ; } return Qnil ; }
idx: 900
target: 0
gss_context_time (minor_status, context_handle, time_rec) OM_uint32 * minor_status; gss_ctx_id_t context_handle; OM_uint32 * time_rec; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; if (minor_status == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); *minor_status = 0; if (time_rec == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); if (context_handle == GSS_C_NO_CONTEXT) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_context_time) { status = mech->gss_context_time( minor_status, ctx->internal_ctx_id, time_rec); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
gss_context_time (minor_status, context_handle, time_rec) OM_uint32 * minor_status; gss_ctx_id_t context_handle; OM_uint32 * time_rec; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; if (minor_status == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); *minor_status = 0; if (time_rec == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); if (context_handle == GSS_C_NO_CONTEXT) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_context_time) { status = mech->gss_context_time( minor_status, ctx->internal_ctx_id, time_rec); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
idx: 901
target: 1
static int nut_write_trailer(AVFormatContext *s) { NUTContext *nut = s->priv_data; AVIOContext *bc = s->pb, *dyn_bc; int i, ret; while (nut->header_count < 3) write_headers(s, bc); ret = avio_open_dyn_buf(&dyn_bc); if (ret >= 0 && nut->sp_count) { av_assert1(nut->write_index); write_index(nut, dyn_bc); put_packet(nut, bc, dyn_bc, 1, INDEX_STARTCODE); } ff_nut_free_sp(nut); for (i=0; i<s->nb_streams; i++) av_freep(&nut->stream[i].keyframe_pts); av_freep(&nut->stream); av_freep(&nut->chapter); av_freep(&nut->time_base); return 0; }
static int nut_write_trailer(AVFormatContext *s) { NUTContext *nut = s->priv_data; AVIOContext *bc = s->pb, *dyn_bc; int i, ret; while (nut->header_count < 3) write_headers(s, bc); ret = avio_open_dyn_buf(&dyn_bc); if (ret >= 0 && nut->sp_count) { av_assert1(nut->write_index); write_index(nut, dyn_bc); put_packet(nut, bc, dyn_bc, 1, INDEX_STARTCODE); } ff_nut_free_sp(nut); for (i=0; i<s->nb_streams; i++) av_freep(&nut->stream[i].keyframe_pts); av_freep(&nut->stream); av_freep(&nut->chapter); av_freep(&nut->time_base); return 0; }
idx: 902
target: 1
sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_chunk *asconf_ack = NULL; struct sctp_paramhdr *err_param = NULL; sctp_addiphdr_t *hdr; union sctp_addr_param *addr_param; __u32 serial; int length; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(ep, asoc, type, arg, commands); } /* ADD-IP: Section 4.1.1 * This chunk MUST be sent in an authenticated way by using * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk * is received unauthenticated it MUST be silently discarded as * described in [I-D.ietf-tsvwg-sctp-auth]. */ if (!sctp_addip_noauth && !chunk->auth) return sctp_sf_discard_chunk(ep, asoc, type, arg, commands); /* Make sure that the ASCONF ADDIP chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t))) return sctp_sf_violation_chunklen(ep, asoc, type, arg, commands); hdr = (sctp_addiphdr_t *)chunk->skb->data; serial = ntohl(hdr->serial); addr_param = (union sctp_addr_param *)hdr->params; length = ntohs(addr_param->p.length); if (length < sizeof(sctp_paramhdr_t)) return sctp_sf_violation_paramlen(ep, asoc, type, (void *)addr_param, commands); /* Verify the ASCONF chunk before processing it. */ if (!sctp_verify_asconf(asoc, (sctp_paramhdr_t *)((void *)addr_param + length), (void *)chunk->chunk_end, &err_param)) return sctp_sf_violation_paramlen(ep, asoc, type, (void *)&err_param, commands); /* ADDIP 5.2 E1) Compare the value of the serial number to the value * the endpoint stored in a new association variable * 'Peer-Serial-Number'. */ if (serial == asoc->peer.addip_serial + 1) { /* If this is the first instance of ASCONF in the packet, * we can clean our old ASCONF-ACKs. */ if (!chunk->has_asconf) sctp_assoc_clean_asconf_ack_cache(asoc); /* ADDIP 5.2 E4) When the Sequence Number matches the next one * expected, process the ASCONF as described below and after * processing the ASCONF Chunk, append an ASCONF-ACK Chunk to * the response packet and cache a copy of it (in the event it * later needs to be retransmitted). * * Essentially, do V1-V5. */ asconf_ack = sctp_process_asconf((struct sctp_association *) asoc, chunk); if (!asconf_ack) return SCTP_DISPOSITION_NOMEM; } else if (serial < asoc->peer.addip_serial + 1) { /* ADDIP 5.2 E2) * If the value found in the Sequence Number is less than the * ('Peer- Sequence-Number' + 1), simply skip to the next * ASCONF, and include in the outbound response packet * any previously cached ASCONF-ACK response that was * sent and saved that matches the Sequence Number of the * ASCONF. Note: It is possible that no cached ASCONF-ACK * Chunk exists. This will occur when an older ASCONF * arrives out of order. In such a case, the receiver * should skip the ASCONF Chunk and not include ASCONF-ACK * Chunk for that chunk. */ asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial); if (!asconf_ack) return SCTP_DISPOSITION_DISCARD; } else { /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since * it must be either a stale packet or from an attacker. */ return SCTP_DISPOSITION_DISCARD; } /* ADDIP 5.2 E6) The destination address of the SCTP packet * containing the ASCONF-ACK Chunks MUST be the source address of * the SCTP packet that held the ASCONF Chunks. 
* * To do this properly, we'll set the destination address of the chunk * and at the transmit time, will try look up the transport to use. * Since ASCONFs may be bundled, the correct transport may not be * created untill we process the entire packet, thus this workaround. */ asconf_ack->dest = chunk->source; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack)); return SCTP_DISPOSITION_CONSUME; }
sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; struct sctp_chunk *asconf_ack = NULL; struct sctp_paramhdr *err_param = NULL; sctp_addiphdr_t *hdr; union sctp_addr_param *addr_param; __u32 serial; int length; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, SCTP_NULL()); return sctp_sf_pdiscard(ep, asoc, type, arg, commands); } if (!sctp_addip_noauth && !chunk->auth) return sctp_sf_discard_chunk(ep, asoc, type, arg, commands); if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t))) return sctp_sf_violation_chunklen(ep, asoc, type, arg, commands); hdr = (sctp_addiphdr_t *)chunk->skb->data; serial = ntohl(hdr->serial); addr_param = (union sctp_addr_param *)hdr->params; length = ntohs(addr_param->p.length); if (length < sizeof(sctp_paramhdr_t)) return sctp_sf_violation_paramlen(ep, asoc, type, (void *)addr_param, commands); if (!sctp_verify_asconf(asoc, (sctp_paramhdr_t *)((void *)addr_param + length), (void *)chunk->chunk_end, &err_param)) return sctp_sf_violation_paramlen(ep, asoc, type, (void *)&err_param, commands); if (serial == asoc->peer.addip_serial + 1) { if (!chunk->has_asconf) sctp_assoc_clean_asconf_ack_cache(asoc); asconf_ack = sctp_process_asconf((struct sctp_association *) asoc, chunk); if (!asconf_ack) return SCTP_DISPOSITION_NOMEM; } else if (serial < asoc->peer.addip_serial + 1) { asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial); if (!asconf_ack) return SCTP_DISPOSITION_DISCARD; } else { return SCTP_DISPOSITION_DISCARD; } asconf_ack->dest = chunk->source; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack)); return SCTP_DISPOSITION_CONSUME; }
idx: 903
target: 0
char get_typtype ( Oid typid ) { HeapTuple tp ; tp = SearchSysCache1 ( TYPEOID , ObjectIdGetDatum ( typid ) ) ; if ( HeapTupleIsValid ( tp ) ) { Form_pg_type typtup = ( Form_pg_type ) GETSTRUCT ( tp ) ; char result ; result = typtup -> typtype ; ReleaseSysCache ( tp ) ; return result ; } else return '\0' ; }
char get_typtype ( Oid typid ) { HeapTuple tp ; tp = SearchSysCache1 ( TYPEOID , ObjectIdGetDatum ( typid ) ) ; if ( HeapTupleIsValid ( tp ) ) { Form_pg_type typtup = ( Form_pg_type ) GETSTRUCT ( tp ) ; char result ; result = typtup -> typtype ; ReleaseSysCache ( tp ) ; return result ; } else return '\0' ; }
idx: 904
target: 1
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; MemTxAttrs attrs = {}; AddressSpace *as; attrs.secure = is_secure; as = arm_addressspace(cs, attrs); addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); if (fi->s1ptw) { return 0; } if (regime_translation_big_endian(env, mmu_idx)) { return address_space_ldl_be(as, addr, attrs, NULL); } else { return address_space_ldl_le(as, addr, attrs, NULL); } }
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; MemTxAttrs attrs = {}; AddressSpace *as; attrs.secure = is_secure; as = arm_addressspace(cs, attrs); addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); if (fi->s1ptw) { return 0; } if (regime_translation_big_endian(env, mmu_idx)) { return address_space_ldl_be(as, addr, attrs, NULL); } else { return address_space_ldl_le(as, addr, attrs, NULL); } }
idx: 906
target: 0
void dissect_geographical_description ( tvbuff_t * tvb , packet_info * pinfo _U_ , proto_tree * tree ) { proto_item * lat_item , * long_item , * major_item , * minor_item , * alt_item , * uncer_item ; guint8 type_of_shape ; int offset = 0 ; int length ; guint8 value ; guint32 uvalue32 ; gint32 svalue32 ; length = tvb_reported_length_remaining ( tvb , 0 ) ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_type_of_shape , tvb , 0 , 1 , ENC_BIG_ENDIAN ) ; if ( length < 2 ) return ; type_of_shape = tvb_get_guint8 ( tvb , offset ) >> 4 ; switch ( type_of_shape ) { case ELLIPSOID_POINT : case ELLIPSOID_POINT_WITH_UNCERT_CIRC : case ELLIPSOID_POINT_WITH_UNCERT_ELLIPSE : case ELLIPSOID_POINT_WITH_ALT : case ELLIPSOID_POINT_WITH_ALT_AND_UNCERT_ELLIPSOID : case ELLIPSOID_ARC : offset ++ ; if ( length < 4 ) return ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_sign_of_lat , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; uvalue32 = tvb_get_ntoh24 ( tvb , offset ) ; lat_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_deg_of_lat , tvb , offset , 3 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( lat_item , " (%s%.5f degrees)" , ( uvalue32 & 0x00800000 ) ? "-" : "" , ( ( double ) ( uvalue32 & 0x7fffff ) / 8388607.0 ) * 90 ) ; if ( length < 7 ) return ; offset = offset + 3 ; svalue32 = tvb_get_ntoh24 ( tvb , offset ) ; svalue32 |= ( svalue32 & 0x800000 ) ? 0xff000000 : 0x00000000 ; long_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_deg_of_long , tvb , offset , 3 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( long_item , " (%.5f degrees)" , ( ( double ) svalue32 / 16777215.0 ) * 360 ) ; offset = offset + 3 ; if ( type_of_shape == ELLIPSOID_POINT_WITH_UNCERT_CIRC ) { if ( length < 8 ) return ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; uncer_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_code , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( uncer_item , " (%.1f m)" , 10 * ( pow ( 1.1 , ( double ) value ) - 1 ) ) ; } else if ( type_of_shape == ELLIPSOID_POINT_WITH_UNCERT_ELLIPSE ) { value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; major_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_semi_major , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( major_item , " (%.1f m)" , 10 * ( pow ( 1.1 , ( double ) value ) - 1 ) ) ; offset ++ ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; minor_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_semi_minor , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( minor_item , " (%.1f m)" , 10 * ( pow ( 1.1 , ( double ) value ) - 1 ) ) ; offset ++ ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; proto_tree_add_uint ( tree , hf_gsm_a_geo_loc_orientation_of_major_axis , tvb , offset , 1 , value * 2 ) ; offset ++ ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_confidence , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; offset ++ ; } else if ( type_of_shape == ELLIPSOID_POINT_WITH_ALT ) { proto_tree_add_item ( tree , hf_gsm_a_geo_loc_D , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_altitude , tvb , offset , 2 , ENC_BIG_ENDIAN ) ; } else if ( type_of_shape == ELLIPSOID_POINT_WITH_ALT_AND_UNCERT_ELLIPSOID ) { proto_tree_add_item ( tree , hf_gsm_a_geo_loc_D , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_altitude , tvb , offset , 2 , ENC_BIG_ENDIAN ) ; offset = offset + 2 ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; major_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_semi_major , tvb , offset , 1 
, ENC_BIG_ENDIAN ) ; proto_item_append_text ( major_item , " (%.1f m)" , 10 * ( pow ( 1.1 , ( double ) value ) - 1 ) ) ; offset ++ ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; minor_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_semi_minor , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( minor_item , " (%.1f m)" , 10 * ( pow ( 1.1 , ( double ) value ) - 1 ) ) ; offset ++ ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; proto_tree_add_uint ( tree , hf_gsm_a_geo_loc_orientation_of_major_axis , tvb , offset , 1 , value * 2 ) ; offset ++ ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; alt_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_altitude , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( alt_item , " (%.1f m)" , 45 * ( pow ( 1.025 , ( double ) value ) - 1 ) ) ; offset ++ ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_confidence , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; } else if ( type_of_shape == ELLIPSOID_ARC ) { proto_tree_add_item ( tree , hf_gsm_a_geo_loc_inner_radius , tvb , offset , 2 , ENC_BIG_ENDIAN ) ; offset = offset + 2 ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_radius , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; offset ++ ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_offset_angle , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; offset ++ ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_included_angle , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; offset ++ ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_confidence , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; } break ; case POLYGON : proto_tree_add_item ( tree , hf_gsm_a_geo_loc_no_of_points , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; # if 0 no_of_points = tvb_get_guint8 ( tvb , offset ) & 0x0f ; while ( no_of_points > 0 ) { offset ++ ; no_of_points -- ; } # endif break ; default : break ; } }
void dissect_geographical_description ( tvbuff_t * tvb , packet_info * pinfo _U_ , proto_tree * tree ) { proto_item * lat_item , * long_item , * major_item , * minor_item , * alt_item , * uncer_item ; guint8 type_of_shape ; int offset = 0 ; int length ; guint8 value ; guint32 uvalue32 ; gint32 svalue32 ; length = tvb_reported_length_remaining ( tvb , 0 ) ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_type_of_shape , tvb , 0 , 1 , ENC_BIG_ENDIAN ) ; if ( length < 2 ) return ; type_of_shape = tvb_get_guint8 ( tvb , offset ) >> 4 ; switch ( type_of_shape ) { case ELLIPSOID_POINT : case ELLIPSOID_POINT_WITH_UNCERT_CIRC : case ELLIPSOID_POINT_WITH_UNCERT_ELLIPSE : case ELLIPSOID_POINT_WITH_ALT : case ELLIPSOID_POINT_WITH_ALT_AND_UNCERT_ELLIPSOID : case ELLIPSOID_ARC : offset ++ ; if ( length < 4 ) return ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_sign_of_lat , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; uvalue32 = tvb_get_ntoh24 ( tvb , offset ) ; lat_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_deg_of_lat , tvb , offset , 3 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( lat_item , " (%s%.5f degrees)" , ( uvalue32 & 0x00800000 ) ? "-" : "" , ( ( double ) ( uvalue32 & 0x7fffff ) / 8388607.0 ) * 90 ) ; if ( length < 7 ) return ; offset = offset + 3 ; svalue32 = tvb_get_ntoh24 ( tvb , offset ) ; svalue32 |= ( svalue32 & 0x800000 ) ? 0xff000000 : 0x00000000 ; long_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_deg_of_long , tvb , offset , 3 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( long_item , " (%.5f degrees)" , ( ( double ) svalue32 / 16777215.0 ) * 360 ) ; offset = offset + 3 ; if ( type_of_shape == ELLIPSOID_POINT_WITH_UNCERT_CIRC ) { if ( length < 8 ) return ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; uncer_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_code , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( uncer_item , " (%.1f m)" , 10 * ( pow ( 1.1 , ( double ) value ) - 1 ) ) ; } else if ( type_of_shape == ELLIPSOID_POINT_WITH_UNCERT_ELLIPSE ) { value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; major_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_semi_major , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( major_item , " (%.1f m)" , 10 * ( pow ( 1.1 , ( double ) value ) - 1 ) ) ; offset ++ ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; minor_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_semi_minor , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( minor_item , " (%.1f m)" , 10 * ( pow ( 1.1 , ( double ) value ) - 1 ) ) ; offset ++ ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; proto_tree_add_uint ( tree , hf_gsm_a_geo_loc_orientation_of_major_axis , tvb , offset , 1 , value * 2 ) ; offset ++ ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_confidence , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; offset ++ ; } else if ( type_of_shape == ELLIPSOID_POINT_WITH_ALT ) { proto_tree_add_item ( tree , hf_gsm_a_geo_loc_D , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_altitude , tvb , offset , 2 , ENC_BIG_ENDIAN ) ; } else if ( type_of_shape == ELLIPSOID_POINT_WITH_ALT_AND_UNCERT_ELLIPSOID ) { proto_tree_add_item ( tree , hf_gsm_a_geo_loc_D , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_altitude , tvb , offset , 2 , ENC_BIG_ENDIAN ) ; offset = offset + 2 ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; major_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_semi_major , tvb , offset , 1 
, ENC_BIG_ENDIAN ) ; proto_item_append_text ( major_item , " (%.1f m)" , 10 * ( pow ( 1.1 , ( double ) value ) - 1 ) ) ; offset ++ ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; minor_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_semi_minor , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( minor_item , " (%.1f m)" , 10 * ( pow ( 1.1 , ( double ) value ) - 1 ) ) ; offset ++ ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; proto_tree_add_uint ( tree , hf_gsm_a_geo_loc_orientation_of_major_axis , tvb , offset , 1 , value * 2 ) ; offset ++ ; value = tvb_get_guint8 ( tvb , offset ) & 0x7f ; alt_item = proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_altitude , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; proto_item_append_text ( alt_item , " (%.1f m)" , 45 * ( pow ( 1.025 , ( double ) value ) - 1 ) ) ; offset ++ ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_confidence , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; } else if ( type_of_shape == ELLIPSOID_ARC ) { proto_tree_add_item ( tree , hf_gsm_a_geo_loc_inner_radius , tvb , offset , 2 , ENC_BIG_ENDIAN ) ; offset = offset + 2 ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_uncertainty_radius , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; offset ++ ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_offset_angle , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; offset ++ ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_included_angle , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; offset ++ ; proto_tree_add_item ( tree , hf_gsm_a_geo_loc_confidence , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; } break ; case POLYGON : proto_tree_add_item ( tree , hf_gsm_a_geo_loc_no_of_points , tvb , offset , 1 , ENC_BIG_ENDIAN ) ; # if 0 no_of_points = tvb_get_guint8 ( tvb , offset ) & 0x0f ; while ( no_of_points > 0 ) { offset ++ ; no_of_points -- ; } # endif break ; default : break ; } }
idx: 907
target: 1
void LibRaw::identify_process_dng_fields() { if (!dng_version) return; int c; { /* copy DNG data from per-IFD field to color.dng */ int iifd = find_ifd_by_offset(data_offset); int pifd = find_ifd_by_offset(thumb_offset); #define CFAROUND(value, filters) \ filters ? (filters >= 1000 ? ((value + 1) / 2) * 2 : ((value + 5) / 6) * 6) \ : value #define IFDCOLORINDEX(ifd, subset, bit) \ (tiff_ifd[ifd].dng_color[subset].parsedfields & bit) \ ? ifd \ : ((tiff_ifd[0].dng_color[subset].parsedfields & bit) ? 0 : -1) #define IFDLEVELINDEX(ifd, bit) \ (tiff_ifd[ifd].dng_levels.parsedfields & bit) \ ? ifd \ : ((tiff_ifd[0].dng_levels.parsedfields & bit) ? 0 : -1) #define COPYARR(to, from) memmove(&to, &from, sizeof(from)) if (iifd < (int)tiff_nifds && iifd >= 0) { int sidx; // Per field, not per structure if (!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_DONT_CHECK_DNG_ILLUMINANT)) { int illidx[2], cmidx[2], calidx[2], abidx; for (int i = 0; i < 2; i++) { illidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_ILLUMINANT); cmidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_COLORMATRIX); calidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_CALIBRATION); } abidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ANALOGBALANCE); // Data found, all in same ifd, illuminants are inited if (illidx[0] >= 0 && illidx[0] < (int)tiff_nifds && illidx[0] == illidx[1] && illidx[0] == cmidx[0] && illidx[0] == cmidx[1] && tiff_ifd[illidx[0]].dng_color[0].illuminant > 0 && tiff_ifd[illidx[0]].dng_color[1].illuminant > 0) { sidx = illidx[0]; // => selected IFD double cc[4][4], cm[4][3], cam_xyz[4][3]; // CM -> Color Matrix // CC -> Camera calibration for (int j = 0; j < 4; j++) for (int i = 0; i < 4; i++) cc[j][i] = i == j; int colidx = -1; // IS D65 here? for (int i = 0; i < 2; i++) { if (tiff_ifd[sidx].dng_color[i].illuminant == LIBRAW_WBI_D65) { colidx = i; break; } } // Other daylight-type ill if (colidx < 0) for (int i = 0; i < 2; i++) { int ill = tiff_ifd[sidx].dng_color[i].illuminant; if (ill == LIBRAW_WBI_Daylight || ill == LIBRAW_WBI_D55 || ill == LIBRAW_WBI_D75 || ill == LIBRAW_WBI_D50 || ill == LIBRAW_WBI_Flash) { colidx = i; break; } } if (colidx >= 0) // Selected { // Init camera matrix from DNG FORCC for (int j = 0; j < 3; j++) cm[c][j] = tiff_ifd[sidx].dng_color[colidx].colormatrix[c][j]; if (calidx[colidx] == sidx) { for (int i = 0; i < colors; i++) FORCC cc[i][c] = tiff_ifd[sidx].dng_color[colidx].calibration[i][c]; } if (abidx == sidx) for (int i = 0; i < colors; i++) FORCC cc[i][c] *= tiff_ifd[sidx].dng_levels.analogbalance[i]; int j; FORCC for (int i = 0; i < 3; i++) for (cam_xyz[c][i] = j = 0; j < colors; j++) cam_xyz[c][i] += cc[c][j] * cm[j][i]; // add AsShotXY later * xyz[i]; cam_xyz_coeff(cmatrix, cam_xyz); } } } bool noFujiDNGCrop = makeIs(LIBRAW_CAMERAMAKER_Fujifilm) && (!strcmp(normalized_model, "S3Pro") || !strcmp(normalized_model, "S5Pro") || !strcmp(normalized_model, "S2Pro")); if (!noFujiDNGCrop && (imgdata.params.raw_processing_options &LIBRAW_PROCESSING_USE_DNG_DEFAULT_CROP)) { sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_CROPORIGIN); int sidx2 = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_CROPSIZE); if (sidx >= 0 && sidx == sidx2 && tiff_ifd[sidx].dng_levels.default_crop[2] > 0 && tiff_ifd[sidx].dng_levels.default_crop[3] > 0) { int lm = tiff_ifd[sidx].dng_levels.default_crop[0]; int lmm = CFAROUND(lm, filters); int tm = tiff_ifd[sidx].dng_levels.default_crop[1]; int tmm = CFAROUND(tm, filters); int ww = tiff_ifd[sidx].dng_levels.default_crop[2]; int hh = tiff_ifd[sidx].dng_levels.default_crop[3]; if (lmm > lm) ww -= 
(lmm - lm); if (tmm > tm) hh -= (tmm - tm); if (left_margin + lm + ww <= raw_width && top_margin + tm + hh <= raw_height) { left_margin += lmm; top_margin += tmm; width = ww; height = hh; } } } if (!(imgdata.color.dng_color[0].parsedfields & LIBRAW_DNGFM_FORWARDMATRIX)) // Not set already (Leica makernotes) { sidx = IFDCOLORINDEX(iifd, 0, LIBRAW_DNGFM_FORWARDMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[0].forwardmatrix, tiff_ifd[sidx].dng_color[0].forwardmatrix); } if (!(imgdata.color.dng_color[1].parsedfields & LIBRAW_DNGFM_FORWARDMATRIX)) // Not set already (Leica makernotes) { sidx = IFDCOLORINDEX(iifd, 1, LIBRAW_DNGFM_FORWARDMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[1].forwardmatrix, tiff_ifd[sidx].dng_color[1].forwardmatrix); } for (int ss = 0; ss < 2; ss++) { sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_COLORMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[ss].colormatrix, tiff_ifd[sidx].dng_color[ss].colormatrix); sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_CALIBRATION); if (sidx >= 0) COPYARR(imgdata.color.dng_color[ss].calibration, tiff_ifd[sidx].dng_color[ss].calibration); sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_ILLUMINANT); if (sidx >= 0) imgdata.color.dng_color[ss].illuminant = tiff_ifd[sidx].dng_color[ss].illuminant; } // Levels sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ANALOGBALANCE); if (sidx >= 0) COPYARR(imgdata.color.dng_levels.analogbalance, tiff_ifd[sidx].dng_levels.analogbalance); sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_BASELINEEXPOSURE); if (sidx >= 0) imgdata.color.dng_levels.baseline_exposure = tiff_ifd[sidx].dng_levels.baseline_exposure; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_WHITE); if (sidx >= 0 && tiff_ifd[sidx].dng_levels.dng_whitelevel[0]) COPYARR(imgdata.color.dng_levels.dng_whitelevel, tiff_ifd[sidx].dng_levels.dng_whitelevel); else if (tiff_ifd[iifd].sample_format <= 2 && tiff_ifd[iifd].bps > 0 && tiff_ifd[iifd].bps < 32) FORC4 imgdata.color.dng_levels.dng_whitelevel[c] = (1 << tiff_ifd[iifd].bps) - 1; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ASSHOTNEUTRAL); if (sidx >= 0) { COPYARR(imgdata.color.dng_levels.asshotneutral, tiff_ifd[sidx].dng_levels.asshotneutral); if (imgdata.color.dng_levels.asshotneutral[0]) { cam_mul[3] = 0; FORCC if (fabs(imgdata.color.dng_levels.asshotneutral[c]) > 0.0001) cam_mul[c] = 1 / imgdata.color.dng_levels.asshotneutral[c]; } } sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_BLACK); if (sidx >= 0) { imgdata.color.dng_levels.dng_fblack = tiff_ifd[sidx].dng_levels.dng_fblack; imgdata.color.dng_levels.dng_black = tiff_ifd[sidx].dng_levels.dng_black; COPYARR(imgdata.color.dng_levels.dng_cblack, tiff_ifd[sidx].dng_levels.dng_cblack); COPYARR(imgdata.color.dng_levels.dng_fcblack, tiff_ifd[sidx].dng_levels.dng_fcblack); } if (pifd >= 0) { sidx = IFDLEVELINDEX(pifd, LIBRAW_DNGFM_PREVIEWCS); if (sidx >= 0) imgdata.color.dng_levels.preview_colorspace = tiff_ifd[sidx].dng_levels.preview_colorspace; } sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_OPCODE2); if (sidx >= 0) meta_offset = tiff_ifd[sidx].opcode2_offset; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_LINTABLE); INT64 linoff = -1; int linlen = 0; if (sidx >= 0) { linoff = tiff_ifd[sidx].lineartable_offset; linlen = tiff_ifd[sidx].lineartable_len; } if (linoff >= 0 && linlen > 0) { INT64 pos = ftell(ifp); fseek(ifp, linoff, SEEK_SET); linear_table(linlen); fseek(ifp, pos, SEEK_SET); } // Need to add curve too } /* Copy DNG black level to LibRaw's */ if (load_raw == &LibRaw::lossy_dng_load_raw) { maximum = 0xffff; FORC4 imgdata.color.linear_max[c] = 
imgdata.color.dng_levels.dng_whitelevel[c] = 0xffff; } else { maximum = imgdata.color.dng_levels.dng_whitelevel[0]; } black = imgdata.color.dng_levels.dng_black; if (tiff_samples == 2 && imgdata.color.dng_levels.dng_cblack[4] * imgdata.color.dng_levels.dng_cblack[5] * tiff_samples == imgdata.color.dng_levels.dng_cblack[LIBRAW_CBLACK_SIZE - 1]) { unsigned ff = filters; if (filters > 999 && colors == 3) filters |= ((filters >> 2 & 0x22222222) | (filters << 2 & 0x88888888)) & filters << 1; /* Special case, Fuji SuperCCD dng */ int csum[4] = { 0,0,0,0 }, ccount[4] = { 0,0,0,0 }; int i = 6 + shot_select; for (unsigned row = 0; row < imgdata.color.dng_levels.dng_cblack[4]; row++) for (unsigned col = 0; col < imgdata.color.dng_levels.dng_cblack[5]; col++) { csum[FC(row, col)] += imgdata.color.dng_levels.dng_cblack[i]; ccount[FC(row, col)]++; i += tiff_samples; } for (int c = 0; c < 4; c++) if (ccount[c]) imgdata.color.dng_levels.dng_cblack[c] += csum[c] / ccount[c]; imgdata.color.dng_levels.dng_cblack[4] = imgdata.color.dng_levels.dng_cblack[5] = 0; filters = ff; } else if (tiff_samples > 2 && tiff_samples <= 4 && imgdata.color.dng_levels.dng_cblack[4] * imgdata.color.dng_levels.dng_cblack[5] * tiff_samples == imgdata.color.dng_levels.dng_cblack[LIBRAW_CBLACK_SIZE - 1]) { /* Special case, per_channel blacks in RepeatDim, average for per-channel */ int csum[4] = { 0,0,0,0 }, ccount[4] = { 0,0,0,0 }; int i = 6; for (unsigned row = 0; row < imgdata.color.dng_levels.dng_cblack[4]; row++) for (unsigned col = 0; col < imgdata.color.dng_levels.dng_cblack[5]; col++) for (unsigned c = 0; c < tiff_samples; c++) { csum[c] += imgdata.color.dng_levels.dng_cblack[i]; ccount[c]++; i++; } for (int c = 0; c < 4; c++) if (ccount[c]) imgdata.color.dng_levels.dng_cblack[c] += csum[c] / ccount[c]; imgdata.color.dng_levels.dng_cblack[4] = imgdata.color.dng_levels.dng_cblack[5] = 0; } memmove(cblack, imgdata.color.dng_levels.dng_cblack, sizeof(cblack)); if (iifd < (int)tiff_nifds && iifd >= 0) { int sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_LINEARRESPONSELIMIT); if (sidx >= 0) { imgdata.color.dng_levels.LinearResponseLimit = tiff_ifd[sidx].dng_levels.LinearResponseLimit; if (imgdata.color.dng_levels.LinearResponseLimit > 0.1 && imgdata.color.dng_levels.LinearResponseLimit <= 1.0) { // And approx promote it to linear_max: int bl4 = 0, bl64 = 0; for (int chan = 0; chan < colors && chan < 4; chan++) bl4 += cblack[chan]; bl4 /= LIM(colors, 1, 4); if (cblack[4] * cblack[5] > 0) { unsigned cnt = 0; for (unsigned c = 0; c < 4096 && c < cblack[4] * cblack[5]; c++) { bl64 += cblack[c + 6]; cnt++; } bl64 /= LIM(cnt, 1, 4096); } int rblack = black + bl4 + bl64; for (int chan = 0; chan < colors && chan < 4; chan++) imgdata.color.linear_max[chan] = (maximum - rblack) * imgdata.color.dng_levels.LinearResponseLimit + rblack; } } } } }
void LibRaw::identify_process_dng_fields() { if (!dng_version) return; int c; { int iifd = find_ifd_by_offset(data_offset); int pifd = find_ifd_by_offset(thumb_offset); #define CFAROUND(value, filters) \ filters ? (filters >= 1000 ? ((value + 1) / 2) * 2 : ((value + 5) / 6) * 6) \ : value #define IFDCOLORINDEX(ifd, subset, bit) \ (tiff_ifd[ifd].dng_color[subset].parsedfields & bit) \ ? ifd \ : ((tiff_ifd[0].dng_color[subset].parsedfields & bit) ? 0 : -1) #define IFDLEVELINDEX(ifd, bit) \ (tiff_ifd[ifd].dng_levels.parsedfields & bit) \ ? ifd \ : ((tiff_ifd[0].dng_levels.parsedfields & bit) ? 0 : -1) #define COPYARR(to, from) memmove(&to, &from, sizeof(from)) if (iifd < (int)tiff_nifds && iifd >= 0) { int sidx; if (!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_DONT_CHECK_DNG_ILLUMINANT)) { int illidx[2], cmidx[2], calidx[2], abidx; for (int i = 0; i < 2; i++) { illidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_ILLUMINANT); cmidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_COLORMATRIX); calidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_CALIBRATION); } abidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ANALOGBALANCE); if (illidx[0] >= 0 && illidx[0] < (int)tiff_nifds && illidx[0] == illidx[1] && illidx[0] == cmidx[0] && illidx[0] == cmidx[1] && tiff_ifd[illidx[0]].dng_color[0].illuminant > 0 && tiff_ifd[illidx[0]].dng_color[1].illuminant > 0) { sidx = illidx[0]; double cc[4][4], cm[4][3], cam_xyz[4][3]; for (int j = 0; j < 4; j++) for (int i = 0; i < 4; i++) cc[j][i] = i == j; int colidx = -1; for (int i = 0; i < 2; i++) { if (tiff_ifd[sidx].dng_color[i].illuminant == LIBRAW_WBI_D65) { colidx = i; break; } } if (colidx < 0) for (int i = 0; i < 2; i++) { int ill = tiff_ifd[sidx].dng_color[i].illuminant; if (ill == LIBRAW_WBI_Daylight || ill == LIBRAW_WBI_D55 || ill == LIBRAW_WBI_D75 || ill == LIBRAW_WBI_D50 || ill == LIBRAW_WBI_Flash) { colidx = i; break; } } if (colidx >= 0) { FORCC for (int j = 0; j < 3; j++) cm[c][j] = tiff_ifd[sidx].dng_color[colidx].colormatrix[c][j]; if (calidx[colidx] == sidx) { for (int i = 0; i < colors; i++) FORCC cc[i][c] = tiff_ifd[sidx].dng_color[colidx].calibration[i][c]; } if (abidx == sidx) for (int i = 0; i < colors; i++) FORCC cc[i][c] *= tiff_ifd[sidx].dng_levels.analogbalance[i]; int j; FORCC for (int i = 0; i < 3; i++) for (cam_xyz[c][i] = j = 0; j < colors; j++) cam_xyz[c][i] += cc[c][j] * cm[j][i]; cam_xyz_coeff(cmatrix, cam_xyz); } } } bool noFujiDNGCrop = makeIs(LIBRAW_CAMERAMAKER_Fujifilm) && (!strcmp(normalized_model, "S3Pro") || !strcmp(normalized_model, "S5Pro") || !strcmp(normalized_model, "S2Pro")); if (!noFujiDNGCrop && (imgdata.params.raw_processing_options &LIBRAW_PROCESSING_USE_DNG_DEFAULT_CROP)) { sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_CROPORIGIN); int sidx2 = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_CROPSIZE); if (sidx >= 0 && sidx == sidx2 && tiff_ifd[sidx].dng_levels.default_crop[2] > 0 && tiff_ifd[sidx].dng_levels.default_crop[3] > 0) { int lm = tiff_ifd[sidx].dng_levels.default_crop[0]; int lmm = CFAROUND(lm, filters); int tm = tiff_ifd[sidx].dng_levels.default_crop[1]; int tmm = CFAROUND(tm, filters); int ww = tiff_ifd[sidx].dng_levels.default_crop[2]; int hh = tiff_ifd[sidx].dng_levels.default_crop[3]; if (lmm > lm) ww -= (lmm - lm); if (tmm > tm) hh -= (tmm - tm); if (left_margin + lm + ww <= raw_width && top_margin + tm + hh <= raw_height) { left_margin += lmm; top_margin += tmm; width = ww; height = hh; } } } if (!(imgdata.color.dng_color[0].parsedfields & LIBRAW_DNGFM_FORWARDMATRIX)) { sidx = IFDCOLORINDEX(iifd, 0, 
LIBRAW_DNGFM_FORWARDMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[0].forwardmatrix, tiff_ifd[sidx].dng_color[0].forwardmatrix); } if (!(imgdata.color.dng_color[1].parsedfields & LIBRAW_DNGFM_FORWARDMATRIX)) { sidx = IFDCOLORINDEX(iifd, 1, LIBRAW_DNGFM_FORWARDMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[1].forwardmatrix, tiff_ifd[sidx].dng_color[1].forwardmatrix); } for (int ss = 0; ss < 2; ss++) { sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_COLORMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[ss].colormatrix, tiff_ifd[sidx].dng_color[ss].colormatrix); sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_CALIBRATION); if (sidx >= 0) COPYARR(imgdata.color.dng_color[ss].calibration, tiff_ifd[sidx].dng_color[ss].calibration); sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_ILLUMINANT); if (sidx >= 0) imgdata.color.dng_color[ss].illuminant = tiff_ifd[sidx].dng_color[ss].illuminant; } sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ANALOGBALANCE); if (sidx >= 0) COPYARR(imgdata.color.dng_levels.analogbalance, tiff_ifd[sidx].dng_levels.analogbalance); sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_BASELINEEXPOSURE); if (sidx >= 0) imgdata.color.dng_levels.baseline_exposure = tiff_ifd[sidx].dng_levels.baseline_exposure; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_WHITE); if (sidx >= 0 && tiff_ifd[sidx].dng_levels.dng_whitelevel[0]) COPYARR(imgdata.color.dng_levels.dng_whitelevel, tiff_ifd[sidx].dng_levels.dng_whitelevel); else if (tiff_ifd[iifd].sample_format <= 2 && tiff_ifd[iifd].bps > 0 && tiff_ifd[iifd].bps < 32) FORC4 imgdata.color.dng_levels.dng_whitelevel[c] = (1 << tiff_ifd[iifd].bps) - 1; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ASSHOTNEUTRAL); if (sidx >= 0) { COPYARR(imgdata.color.dng_levels.asshotneutral, tiff_ifd[sidx].dng_levels.asshotneutral); if (imgdata.color.dng_levels.asshotneutral[0]) { cam_mul[3] = 0; FORCC if (fabs(imgdata.color.dng_levels.asshotneutral[c]) > 0.0001) cam_mul[c] = 1 / imgdata.color.dng_levels.asshotneutral[c]; } } sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_BLACK); if (sidx >= 0) { imgdata.color.dng_levels.dng_fblack = tiff_ifd[sidx].dng_levels.dng_fblack; imgdata.color.dng_levels.dng_black = tiff_ifd[sidx].dng_levels.dng_black; COPYARR(imgdata.color.dng_levels.dng_cblack, tiff_ifd[sidx].dng_levels.dng_cblack); COPYARR(imgdata.color.dng_levels.dng_fcblack, tiff_ifd[sidx].dng_levels.dng_fcblack); } if (pifd >= 0) { sidx = IFDLEVELINDEX(pifd, LIBRAW_DNGFM_PREVIEWCS); if (sidx >= 0) imgdata.color.dng_levels.preview_colorspace = tiff_ifd[sidx].dng_levels.preview_colorspace; } sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_OPCODE2); if (sidx >= 0) meta_offset = tiff_ifd[sidx].opcode2_offset; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_LINTABLE); INT64 linoff = -1; int linlen = 0; if (sidx >= 0) { linoff = tiff_ifd[sidx].lineartable_offset; linlen = tiff_ifd[sidx].lineartable_len; } if (linoff >= 0 && linlen > 0) { INT64 pos = ftell(ifp); fseek(ifp, linoff, SEEK_SET); linear_table(linlen); fseek(ifp, pos, SEEK_SET); } } if (load_raw == &LibRaw::lossy_dng_load_raw) { maximum = 0xffff; FORC4 imgdata.color.linear_max[c] = imgdata.color.dng_levels.dng_whitelevel[c] = 0xffff; } else { maximum = imgdata.color.dng_levels.dng_whitelevel[0]; } black = imgdata.color.dng_levels.dng_black; if (tiff_samples == 2 && imgdata.color.dng_levels.dng_cblack[4] * imgdata.color.dng_levels.dng_cblack[5] * tiff_samples == imgdata.color.dng_levels.dng_cblack[LIBRAW_CBLACK_SIZE - 1]) { unsigned ff = filters; if (filters > 999 && colors == 3) filters |= ((filters >> 2 & 0x22222222) | (filters << 2 & 
0x88888888)) & filters << 1; int csum[4] = { 0,0,0,0 }, ccount[4] = { 0,0,0,0 }; int i = 6 + shot_select; for (unsigned row = 0; row < imgdata.color.dng_levels.dng_cblack[4]; row++) for (unsigned col = 0; col < imgdata.color.dng_levels.dng_cblack[5]; col++) { csum[FC(row, col)] += imgdata.color.dng_levels.dng_cblack[i]; ccount[FC(row, col)]++; i += tiff_samples; } for (int c = 0; c < 4; c++) if (ccount[c]) imgdata.color.dng_levels.dng_cblack[c] += csum[c] / ccount[c]; imgdata.color.dng_levels.dng_cblack[4] = imgdata.color.dng_levels.dng_cblack[5] = 0; filters = ff; } else if (tiff_samples > 2 && tiff_samples <= 4 && imgdata.color.dng_levels.dng_cblack[4] * imgdata.color.dng_levels.dng_cblack[5] * tiff_samples == imgdata.color.dng_levels.dng_cblack[LIBRAW_CBLACK_SIZE - 1]) { int csum[4] = { 0,0,0,0 }, ccount[4] = { 0,0,0,0 }; int i = 6; for (unsigned row = 0; row < imgdata.color.dng_levels.dng_cblack[4]; row++) for (unsigned col = 0; col < imgdata.color.dng_levels.dng_cblack[5]; col++) for (unsigned c = 0; c < tiff_samples; c++) { csum[c] += imgdata.color.dng_levels.dng_cblack[i]; ccount[c]++; i++; } for (int c = 0; c < 4; c++) if (ccount[c]) imgdata.color.dng_levels.dng_cblack[c] += csum[c] / ccount[c]; imgdata.color.dng_levels.dng_cblack[4] = imgdata.color.dng_levels.dng_cblack[5] = 0; } memmove(cblack, imgdata.color.dng_levels.dng_cblack, sizeof(cblack)); if (iifd < (int)tiff_nifds && iifd >= 0) { int sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_LINEARRESPONSELIMIT); if (sidx >= 0) { imgdata.color.dng_levels.LinearResponseLimit = tiff_ifd[sidx].dng_levels.LinearResponseLimit; if (imgdata.color.dng_levels.LinearResponseLimit > 0.1 && imgdata.color.dng_levels.LinearResponseLimit <= 1.0) { int bl4 = 0, bl64 = 0; for (int chan = 0; chan < colors && chan < 4; chan++) bl4 += cblack[chan]; bl4 /= LIM(colors, 1, 4); if (cblack[4] * cblack[5] > 0) { unsigned cnt = 0; for (unsigned c = 0; c < 4096 && c < cblack[4] * cblack[5]; c++) { bl64 += cblack[c + 6]; cnt++; } bl64 /= LIM(cnt, 1, 4096); } int rblack = black + bl4 + bl64; for (int chan = 0; chan < colors && chan < 4; chan++) imgdata.color.linear_max[chan] = (maximum - rblack) * imgdata.color.dng_levels.LinearResponseLimit + rblack; } } } } }
909
1
static int rtc_dev_fasync(int fd, struct file *file, int on) { struct rtc_device *rtc = file->private_data; return fasync_helper(fd, file, on, &rtc->async_queue); }
static int rtc_dev_fasync(int fd, struct file *file, int on) { struct rtc_device *rtc = file->private_data; return fasync_helper(fd, file, on, &rtc->async_queue); }
910
0
static int gs_heap_register_root(gs_memory_t *mem, gs_gc_root_t *rp, gs_ptr_type_t ptype, void **up, client_name_t cname) { return 0; }
static int gs_heap_register_root(gs_memory_t *mem, gs_gc_root_t *rp, gs_ptr_type_t ptype, void **up, client_name_t cname) { return 0; }
911
0
void aio_context_acquire(AioContext *ctx) { qemu_rec_mutex_lock(&ctx->lock); }
void aio_context_acquire(AioContext *ctx) { qemu_rec_mutex_lock(&ctx->lock); }
912
1
static inline char *parse_ip_address_ex(const char *str, size_t str_len, int *portno, int get_err, zend_string **err) { char *colon; char *host = NULL; #ifdef HAVE_IPV6 char *p; if (*(str) == '[' && str_len > 1) { /* IPV6 notation to specify raw address with port (i.e. [fe80::1]:80) */ p = memchr(str + 1, ']', str_len - 2); if (!p || *(p + 1) != ':') { if (get_err) { *err = strpprintf(0, "Failed to parse IPv6 address \"%s\"", str); } return NULL; } *portno = atoi(p + 2); return estrndup(str + 1, p - str - 1); } #endif if (str_len) { colon = memchr(str, ':', str_len - 1); } else { colon = NULL; } if (colon) { *portno = atoi(colon + 1); host = estrndup(str, colon - str); } else { if (get_err) { *err = strpprintf(0, "Failed to parse address \"%s\"", str); } return NULL; } return host; }
static inline char *parse_ip_address_ex(const char *str, size_t str_len, int *portno, int get_err, zend_string **err) { char *colon; char *host = NULL; #ifdef HAVE_IPV6 char *p; if (*(str) == '[' && str_len > 1) { p = memchr(str + 1, ']', str_len - 2); if (!p || *(p + 1) != ':') { if (get_err) { *err = strpprintf(0, "Failed to parse IPv6 address \"%s\"", str); } return NULL; } *portno = atoi(p + 2); return estrndup(str + 1, p - str - 1); } #endif if (str_len) { colon = memchr(str, ':', str_len - 1); } else { colon = NULL; } if (colon) { *portno = atoi(colon + 1); host = estrndup(str, colon - str); } else { if (get_err) { *err = strpprintf(0, "Failed to parse address \"%s\"", str); } return NULL; } return host; }
914
0
static void qjson_finalizefn(Object *obj) { QJSON *json = QJSON(obj); qobject_decref(QOBJECT(json->str)); }
static void qjson_finalizefn(Object *obj) { QJSON *json = QJSON(obj); qobject_decref(QOBJECT(json->str)); }
915
0
gss_export_sec_context(minor_status, context_handle, interprocess_token) OM_uint32 * minor_status; gss_ctx_id_t * context_handle; gss_buffer_t interprocess_token; { OM_uint32 status; OM_uint32 length; gss_union_ctx_id_t ctx = NULL; gss_mechanism mech; gss_buffer_desc token = GSS_C_EMPTY_BUFFER; char *buf; status = val_exp_sec_ctx_args(minor_status, context_handle, interprocess_token); if (status != GSS_S_COMPLETE) return (status); /* * select the appropriate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) *context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (!mech) return GSS_S_BAD_MECH; if (!mech->gss_export_sec_context) return (GSS_S_UNAVAILABLE); status = mech->gss_export_sec_context(minor_status, &ctx->internal_ctx_id, &token); if (status != GSS_S_COMPLETE) { map_error(minor_status, mech); goto cleanup; } length = token.length + 4 + ctx->mech_type->length; interprocess_token->length = length; interprocess_token->value = malloc(length); if (interprocess_token->value == 0) { *minor_status = ENOMEM; status = GSS_S_FAILURE; goto cleanup; } buf = interprocess_token->value; length = ctx->mech_type->length; buf[3] = (unsigned char) (length & 0xFF); length >>= 8; buf[2] = (unsigned char) (length & 0xFF); length >>= 8; buf[1] = (unsigned char) (length & 0xFF); length >>= 8; buf[0] = (unsigned char) (length & 0xFF); memcpy(buf+4, ctx->mech_type->elements, (size_t) ctx->mech_type->length); memcpy(buf+4+ctx->mech_type->length, token.value, token.length); status = GSS_S_COMPLETE; cleanup: (void) gss_release_buffer(minor_status, &token); if (ctx != NULL && ctx->internal_ctx_id == GSS_C_NO_CONTEXT) { /* If the mech deleted its context, delete the union context. */ free(ctx->mech_type->elements); free(ctx->mech_type); free(ctx); *context_handle = GSS_C_NO_CONTEXT; } return status; }
gss_export_sec_context(minor_status, context_handle, interprocess_token) OM_uint32 * minor_status; gss_ctx_id_t * context_handle; gss_buffer_t interprocess_token; { OM_uint32 status; OM_uint32 length; gss_union_ctx_id_t ctx = NULL; gss_mechanism mech; gss_buffer_desc token = GSS_C_EMPTY_BUFFER; char *buf; status = val_exp_sec_ctx_args(minor_status, context_handle, interprocess_token); if (status != GSS_S_COMPLETE) return (status); ctx = (gss_union_ctx_id_t) *context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (!mech) return GSS_S_BAD_MECH; if (!mech->gss_export_sec_context) return (GSS_S_UNAVAILABLE); status = mech->gss_export_sec_context(minor_status, &ctx->internal_ctx_id, &token); if (status != GSS_S_COMPLETE) { map_error(minor_status, mech); goto cleanup; } length = token.length + 4 + ctx->mech_type->length; interprocess_token->length = length; interprocess_token->value = malloc(length); if (interprocess_token->value == 0) { *minor_status = ENOMEM; status = GSS_S_FAILURE; goto cleanup; } buf = interprocess_token->value; length = ctx->mech_type->length; buf[3] = (unsigned char) (length & 0xFF); length >>= 8; buf[2] = (unsigned char) (length & 0xFF); length >>= 8; buf[1] = (unsigned char) (length & 0xFF); length >>= 8; buf[0] = (unsigned char) (length & 0xFF); memcpy(buf+4, ctx->mech_type->elements, (size_t) ctx->mech_type->length); memcpy(buf+4+ctx->mech_type->length, token.value, token.length); status = GSS_S_COMPLETE; cleanup: (void) gss_release_buffer(minor_status, &token); if (ctx != NULL && ctx->internal_ctx_id == GSS_C_NO_CONTEXT) { free(ctx->mech_type->elements); free(ctx->mech_type); free(ctx); *context_handle = GSS_C_NO_CONTEXT; } return status; }
916
1
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, const union sctp_addr *addr, const gfp_t gfp, const int peer_state) { struct sctp_transport *peer; struct sctp_sock *sp; unsigned short port; sp = sctp_sk(asoc->base.sk); /* AF_INET and AF_INET6 share common port field. */ port = ntohs(addr->v4.sin_port); SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", " port: %d state:%d\n", asoc, addr, port, peer_state); /* Set the port if it has not been set yet. */ if (0 == asoc->peer.port) asoc->peer.port = port; /* Check to see if this is a duplicate. */ peer = sctp_assoc_lookup_paddr(asoc, addr); if (peer) { if (peer->state == SCTP_UNKNOWN) { if (peer_state == SCTP_ACTIVE) peer->state = SCTP_ACTIVE; if (peer_state == SCTP_UNCONFIRMED) peer->state = SCTP_UNCONFIRMED; } return peer; } peer = sctp_transport_new(addr, gfp); if (!peer) return NULL; sctp_transport_set_owner(peer, asoc); /* Initialize the peer's heartbeat interval based on the * association configured value. */ peer->hbinterval = asoc->hbinterval; /* Set the path max_retrans. */ peer->pathmaxrxt = asoc->pathmaxrxt; /* Initialize the peer's SACK delay timeout based on the * association configured value. */ peer->sackdelay = asoc->sackdelay; peer->sackfreq = asoc->sackfreq; /* Enable/disable heartbeat, SACK delay, and path MTU discovery * based on association setting. */ peer->param_flags = asoc->param_flags; /* Initialize the pmtu of the transport. */ if (peer->param_flags & SPP_PMTUD_ENABLE) sctp_transport_pmtu(peer); else if (asoc->pathmtu) peer->pathmtu = asoc->pathmtu; else peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT; /* If this is the first transport addr on this association, * initialize the association PMTU to the peer's PMTU. * If not and the current association PMTU is higher than the new * peer's PMTU, reset the association PMTU to the new peer's PMTU. */ if (asoc->pathmtu) asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu); else asoc->pathmtu = peer->pathmtu; SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to " "%d\n", asoc, asoc->pathmtu); peer->pmtu_pending = 0; asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu); /* The asoc->peer.port might not be meaningful yet, but * initialize the packet structure anyway. */ sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port, asoc->peer.port); /* 7.2.1 Slow-Start * * o The initial cwnd before DATA transmission or after a sufficiently * long idle period MUST be set to * min(4*MTU, max(2*MTU, 4380 bytes)) * * o The initial value of ssthresh MAY be arbitrarily high * (for example, implementations MAY use the size of the * receiver advertised window). */ peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); /* At this point, we may not have the receiver's advertised window, * so initialize ssthresh to the default value and it will be set * later when we process the INIT. */ peer->ssthresh = SCTP_DEFAULT_MAXWINDOW; peer->partial_bytes_acked = 0; peer->flight_size = 0; /* Set the transport's RTO.initial value */ peer->rto = asoc->rto_initial; /* Set the peer's active state. */ peer->state = peer_state; /* Attach the remote transport to our asoc. */ list_add_tail(&peer->transports, &asoc->peer.transport_addr_list); asoc->peer.transport_count++; /* If we do not yet have a primary path, set one. */ if (!asoc->peer.primary_path) { sctp_assoc_set_primary(asoc, peer); asoc->peer.retran_path = peer; } if (asoc->peer.active_path == asoc->peer.retran_path) { asoc->peer.retran_path = peer; } return peer; }
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, const union sctp_addr *addr, const gfp_t gfp, const int peer_state) { struct sctp_transport *peer; struct sctp_sock *sp; unsigned short port; sp = sctp_sk(asoc->base.sk); port = ntohs(addr->v4.sin_port); SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", " port: %d state:%d\n", asoc, addr, port, peer_state); if (0 == asoc->peer.port) asoc->peer.port = port; peer = sctp_assoc_lookup_paddr(asoc, addr); if (peer) { if (peer->state == SCTP_UNKNOWN) { if (peer_state == SCTP_ACTIVE) peer->state = SCTP_ACTIVE; if (peer_state == SCTP_UNCONFIRMED) peer->state = SCTP_UNCONFIRMED; } return peer; } peer = sctp_transport_new(addr, gfp); if (!peer) return NULL; sctp_transport_set_owner(peer, asoc); peer->hbinterval = asoc->hbinterval; peer->pathmaxrxt = asoc->pathmaxrxt; peer->sackdelay = asoc->sackdelay; peer->sackfreq = asoc->sackfreq; peer->param_flags = asoc->param_flags; if (peer->param_flags & SPP_PMTUD_ENABLE) sctp_transport_pmtu(peer); else if (asoc->pathmtu) peer->pathmtu = asoc->pathmtu; else peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT; if (asoc->pathmtu) asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu); else asoc->pathmtu = peer->pathmtu; SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to " "%d\n", asoc, asoc->pathmtu); peer->pmtu_pending = 0; asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu); sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port, asoc->peer.port); peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); peer->ssthresh = SCTP_DEFAULT_MAXWINDOW; peer->partial_bytes_acked = 0; peer->flight_size = 0; peer->rto = asoc->rto_initial; peer->state = peer_state; list_add_tail(&peer->transports, &asoc->peer.transport_addr_list); asoc->peer.transport_count++; if (!asoc->peer.primary_path) { sctp_assoc_set_primary(asoc, peer); asoc->peer.retran_path = peer; } if (asoc->peer.active_path == asoc->peer.retran_path) { asoc->peer.retran_path = peer; } return peer; }
917
1
static void _ISCIIReset(UConverter *cnv, UConverterResetChoice choice) { UConverterDataISCII *data = (UConverterDataISCII *)(cnv->extraInfo); if (choice <= UCNV_RESET_TO_UNICODE) { cnv->toUnicodeStatus = missingCharMarker; cnv->mode = 0; data->currentDeltaToUnicode = data->defDeltaToUnicode; data->currentMaskToUnicode = data->defMaskToUnicode; data->contextCharToUnicode = NO_CHAR_MARKER; data->prevToUnicodeStatus = 0x0000; } if (choice != UCNV_RESET_TO_UNICODE) { cnv->fromUChar32 = 0x0000; data->contextCharFromUnicode = 0x00; data->currentMaskFromUnicode = data->defMaskToUnicode; data->currentDeltaFromUnicode = data->defDeltaToUnicode; data->isFirstBuffer = TRUE; data->resetToDefaultToUnicode = FALSE; } }
static void _ISCIIReset(UConverter *cnv, UConverterResetChoice choice) { UConverterDataISCII *data = (UConverterDataISCII *)(cnv->extraInfo); if (choice <= UCNV_RESET_TO_UNICODE) { cnv->toUnicodeStatus = missingCharMarker; cnv->mode = 0; data->currentDeltaToUnicode = data->defDeltaToUnicode; data->currentMaskToUnicode = data->defMaskToUnicode; data->contextCharToUnicode = NO_CHAR_MARKER; data->prevToUnicodeStatus = 0x0000; } if (choice != UCNV_RESET_TO_UNICODE) { cnv->fromUChar32 = 0x0000; data->contextCharFromUnicode = 0x00; data->currentMaskFromUnicode = data->defMaskToUnicode; data->currentDeltaFromUnicode = data->defDeltaToUnicode; data->isFirstBuffer = TRUE; data->resetToDefaultToUnicode = FALSE; } }
918
0
static inline char *parse_ip_address_ex(const char *str, size_t str_len, int *portno, int get_err, zend_string **err) { char *colon; char *host = NULL; #ifdef HAVE_IPV6 if (*(str) == '[' && str_len > 1) { /* IPV6 notation to specify raw address with port (i.e. [fe80::1]:80) */ char *p = memchr(str + 1, ']', str_len - 2), *e = NULL; if (!p || *(p + 1) != ':') { if (get_err) { *err = strpprintf(0, "Failed to parse IPv6 address \"%s\"", str); } return NULL; } *portno = strtol(p + 2, &e, 10); if (e && *e) { if (get_err) { *err = strpprintf(0, "Failed to parse address \"%s\"", str); } return NULL; } return estrndup(str + 1, p - str - 1); } #endif if (str_len) { colon = memchr(str, ':', str_len - 1); } else { colon = NULL; } if (colon) { char *e = NULL; *portno = strtol(colon + 1, &e, 10); if (!e || !*e) { return estrndup(str, colon - str); } } if (get_err) { *err = strpprintf(0, "Failed to parse address \"%s\"", str); } return NULL; }
static inline char *parse_ip_address_ex(const char *str, size_t str_len, int *portno, int get_err, zend_string **err) { char *colon; char *host = NULL; #ifdef HAVE_IPV6 if (*(str) == '[' && str_len > 1) { char *p = memchr(str + 1, ']', str_len - 2), *e = NULL; if (!p || *(p + 1) != ':') { if (get_err) { *err = strpprintf(0, "Failed to parse IPv6 address \"%s\"", str); } return NULL; } *portno = strtol(p + 2, &e, 10); if (e && *e) { if (get_err) { *err = strpprintf(0, "Failed to parse address \"%s\"", str); } return NULL; } return estrndup(str + 1, p - str - 1); } #endif if (str_len) { colon = memchr(str, ':', str_len - 1); } else { colon = NULL; } if (colon) { char *e = NULL; *portno = strtol(colon + 1, &e, 10); if (!e || !*e) { return estrndup(str, colon - str); } } if (get_err) { *err = strpprintf(0, "Failed to parse address \"%s\"", str); } return NULL; }
919
1
int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, const union sctp_addr *peer_addr, sctp_init_chunk_t *peer_init, gfp_t gfp) { union sctp_params param; struct sctp_transport *transport; struct list_head *pos, *temp; char *cookie; /* We must include the address that the INIT packet came from. * This is the only address that matters for an INIT packet. * When processing a COOKIE ECHO, we retrieve the from address * of the INIT from the cookie. */ /* This implementation defaults to making the first transport * added as the primary transport. The source address seems to * be a better choice than any of the embedded addresses. */ if (peer_addr) { if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) goto nomem; } /* Process the initialization parameters. */ sctp_walk_params(param, peer_init, init_hdr.params) { if (!sctp_process_param(asoc, param, peer_addr, gfp)) goto clean_up; } /* AUTH: After processing the parameters, make sure that we * have all the required info to potentially do authentications. */ if (asoc->peer.auth_capable && (!asoc->peer.peer_random || !asoc->peer.peer_hmacs)) asoc->peer.auth_capable = 0; /* In a non-backward compatible mode, if the peer claims * support for ADD-IP but not AUTH, the ADD-IP spec states * that we MUST ABORT the association. Section 6. The section * also gives us an option to silently ignore the packet, which * is what we'll do here. */ if (!sctp_addip_noauth && (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) { asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP | SCTP_PARAM_DEL_IP | SCTP_PARAM_SET_PRIMARY); asoc->peer.asconf_capable = 0; goto clean_up; } /* Walk list of transports, removing transports in the UNKNOWN state. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (transport->state == SCTP_UNKNOWN) { sctp_assoc_rm_peer(asoc, transport); } } /* The fixed INIT headers are always in network byte * order. */ asoc->peer.i.init_tag = ntohl(peer_init->init_hdr.init_tag); asoc->peer.i.a_rwnd = ntohl(peer_init->init_hdr.a_rwnd); asoc->peer.i.num_outbound_streams = ntohs(peer_init->init_hdr.num_outbound_streams); asoc->peer.i.num_inbound_streams = ntohs(peer_init->init_hdr.num_inbound_streams); asoc->peer.i.initial_tsn = ntohl(peer_init->init_hdr.initial_tsn); /* Apply the upper bounds for output streams based on peer's * number of inbound streams. */ if (asoc->c.sinit_num_ostreams > ntohs(peer_init->init_hdr.num_inbound_streams)) { asoc->c.sinit_num_ostreams = ntohs(peer_init->init_hdr.num_inbound_streams); } if (asoc->c.sinit_max_instreams > ntohs(peer_init->init_hdr.num_outbound_streams)) { asoc->c.sinit_max_instreams = ntohs(peer_init->init_hdr.num_outbound_streams); } /* Copy Initiation tag from INIT to VT_peer in cookie. */ asoc->c.peer_vtag = asoc->peer.i.init_tag; /* Peer Rwnd : Current calculated value of the peer's rwnd. */ asoc->peer.rwnd = asoc->peer.i.a_rwnd; /* Copy cookie in case we need to resend COOKIE-ECHO. */ cookie = asoc->peer.cookie; if (cookie) { asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp); if (!asoc->peer.cookie) goto clean_up; } /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily * high (for example, implementations MAY use the size of the receiver * advertised window). */ list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { transport->ssthresh = asoc->peer.i.a_rwnd; } /* Set up the TSN tracking pieces. 
*/ sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, asoc->peer.i.initial_tsn); /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number * * The stream sequence number in all the streams shall start * from 0 when the association is established. Also, when the * stream sequence number reaches the value 65535 the next * stream sequence number shall be set to 0. */ /* Allocate storage for the negotiated streams if it is not a temporary * association. */ if (!asoc->temp) { int error; asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams, asoc->c.sinit_num_ostreams, gfp); if (!asoc->ssnmap) goto clean_up; error = sctp_assoc_set_id(asoc, gfp); if (error) goto clean_up; } /* ADDIP Section 4.1 ASCONF Chunk Procedures * * When an endpoint has an ASCONF signaled change to be sent to the * remote endpoint it should do the following: * ... * A2) A serial number should be assigned to the Chunk. The serial * number should be a monotonically increasing number. All serial * numbers are defined to be initialized at the start of the * association to the same value as the Initial TSN. */ asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1; return 1; clean_up: /* Release the transport structures. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); list_del_init(pos); sctp_transport_free(transport); } asoc->peer.transport_count = 0; nomem: return 0; }
int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, const union sctp_addr *peer_addr, sctp_init_chunk_t *peer_init, gfp_t gfp) { union sctp_params param; struct sctp_transport *transport; struct list_head *pos, *temp; char *cookie; if (peer_addr) { if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) goto nomem; } sctp_walk_params(param, peer_init, init_hdr.params) { if (!sctp_process_param(asoc, param, peer_addr, gfp)) goto clean_up; } if (asoc->peer.auth_capable && (!asoc->peer.peer_random || !asoc->peer.peer_hmacs)) asoc->peer.auth_capable = 0; if (!sctp_addip_noauth && (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) { asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP | SCTP_PARAM_DEL_IP | SCTP_PARAM_SET_PRIMARY); asoc->peer.asconf_capable = 0; goto clean_up; } list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (transport->state == SCTP_UNKNOWN) { sctp_assoc_rm_peer(asoc, transport); } } asoc->peer.i.init_tag = ntohl(peer_init->init_hdr.init_tag); asoc->peer.i.a_rwnd = ntohl(peer_init->init_hdr.a_rwnd); asoc->peer.i.num_outbound_streams = ntohs(peer_init->init_hdr.num_outbound_streams); asoc->peer.i.num_inbound_streams = ntohs(peer_init->init_hdr.num_inbound_streams); asoc->peer.i.initial_tsn = ntohl(peer_init->init_hdr.initial_tsn); if (asoc->c.sinit_num_ostreams > ntohs(peer_init->init_hdr.num_inbound_streams)) { asoc->c.sinit_num_ostreams = ntohs(peer_init->init_hdr.num_inbound_streams); } if (asoc->c.sinit_max_instreams > ntohs(peer_init->init_hdr.num_outbound_streams)) { asoc->c.sinit_max_instreams = ntohs(peer_init->init_hdr.num_outbound_streams); } asoc->c.peer_vtag = asoc->peer.i.init_tag; asoc->peer.rwnd = asoc->peer.i.a_rwnd; cookie = asoc->peer.cookie; if (cookie) { asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp); if (!asoc->peer.cookie) goto clean_up; } list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { transport->ssthresh = asoc->peer.i.a_rwnd; } sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, asoc->peer.i.initial_tsn); if (!asoc->temp) { int error; asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams, asoc->c.sinit_num_ostreams, gfp); if (!asoc->ssnmap) goto clean_up; error = sctp_assoc_set_id(asoc, gfp); if (error) goto clean_up; } asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1; return 1; clean_up: list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); list_del_init(pos); sctp_transport_free(transport); } asoc->peer.transport_count = 0; nomem: return 0; }
920
0
static void cchip_write(void *opaque, hwaddr addr, uint64_t v32, unsigned size) { TyphoonState *s = opaque; uint64_t val, oldval, newval; if (addr & 4) { val = v32 << 32 | s->latch_tmp; addr ^= 4; } else { s->latch_tmp = v32; return; } switch (addr) { case 0x0000: /* CSC: Cchip System Configuration Register. */ /* All sorts of data here; nothing relevant RW. */ break; case 0x0040: /* MTR: Memory Timing Register. */ /* All sorts of stuff related to real DRAM. */ break; case 0x0080: /* MISC: Miscellaneous Register. */ newval = oldval = s->cchip.misc; newval &= ~(val & 0x10000ff0); /* W1C fields */ if (val & 0x100000) { newval &= ~0xff0000ull; /* ACL clears ABT and ABW */ } else { newval |= val & 0x00f00000; /* ABT field is W1S */ if ((newval & 0xf0000) == 0) { newval |= val & 0xf0000; /* ABW field is W1S iff zero */ } } newval |= (val & 0xf000) >> 4; /* IPREQ field sets IPINTR. */ newval &= ~0xf0000000000ull; /* WO and RW fields */ newval |= val & 0xf0000000000ull; s->cchip.misc = newval; /* Pass on changes to IPI and ITI state. */ if ((newval ^ oldval) & 0xff0) { int i; for (i = 0; i < 4; ++i) { AlphaCPU *cpu = s->cchip.cpu[i]; if (cpu != NULL) { CPUState *cs = CPU(cpu); /* IPI can be either cleared or set by the write. */ if (newval & (1 << (i + 8))) { cpu_interrupt(cs, CPU_INTERRUPT_SMP); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_SMP); } /* ITI can only be cleared by the write. */ if ((newval & (1 << (i + 4))) == 0) { cpu_reset_interrupt(cs, CPU_INTERRUPT_TIMER); } } } } break; case 0x00c0: /* MPD: Memory Presence Detect Register. */ break; case 0x0100: /* AAR0 */ case 0x0140: /* AAR1 */ case 0x0180: /* AAR2 */ case 0x01c0: /* AAR3 */ /* AAR: Array Address Register. */ /* All sorts of information about DRAM. */ break; case 0x0200: /* DIM0 */ /* DIM: Device Interrupt Mask Register, CPU0. */ s->cchip.dim[0] = val; cpu_irq_change(s->cchip.cpu[0], val & s->cchip.drir); break; case 0x0240: /* DIM1 */ /* DIM: Device Interrupt Mask Register, CPU1. */ s->cchip.dim[0] = val; cpu_irq_change(s->cchip.cpu[1], val & s->cchip.drir); break; case 0x0280: /* DIR0 (RO) */ case 0x02c0: /* DIR1 (RO) */ case 0x0300: /* DRIR (RO) */ break; case 0x0340: /* PRBEN: Probe Enable Register. */ break; case 0x0380: /* IIC0 */ s->cchip.iic[0] = val & 0xffffff; break; case 0x03c0: /* IIC1 */ s->cchip.iic[1] = val & 0xffffff; break; case 0x0400: /* MPR0 */ case 0x0440: /* MPR1 */ case 0x0480: /* MPR2 */ case 0x04c0: /* MPR3 */ /* MPR: Memory Programming Register. */ break; case 0x0580: /* TTR: TIGbus Timing Register. */ /* All sorts of stuff related to interrupt delivery timings. */ break; case 0x05c0: /* TDR: TIGbus Device Timing Register. */ break; case 0x0600: /* DIM2: Device Interrupt Mask Register, CPU2. */ s->cchip.dim[2] = val; cpu_irq_change(s->cchip.cpu[2], val & s->cchip.drir); break; case 0x0640: /* DIM3: Device Interrupt Mask Register, CPU3. */ s->cchip.dim[3] = val; cpu_irq_change(s->cchip.cpu[3], val & s->cchip.drir); break; case 0x0680: /* DIR2 (RO) */ case 0x06c0: /* DIR3 (RO) */ break; case 0x0700: /* IIC2 */ s->cchip.iic[2] = val & 0xffffff; break; case 0x0740: /* IIC3 */ s->cchip.iic[3] = val & 0xffffff; break; case 0x0780: /* PWR: Power Management Control. */ break; case 0x0c00: /* CMONCTLA */ case 0x0c40: /* CMONCTLB */ case 0x0c80: /* CMONCNT01 */ case 0x0cc0: /* CMONCNT23 */ break; default: cpu_unassigned_access(current_cpu, addr, true, false, 0, size); return; } }
static void cchip_write(void *opaque, hwaddr addr, uint64_t v32, unsigned size) { TyphoonState *s = opaque; uint64_t val, oldval, newval; if (addr & 4) { val = v32 << 32 | s->latch_tmp; addr ^= 4; } else { s->latch_tmp = v32; return; } switch (addr) { case 0x0000: break; case 0x0040: break; case 0x0080: newval = oldval = s->cchip.misc; newval &= ~(val & 0x10000ff0); if (val & 0x100000) { newval &= ~0xff0000ull; } else { newval |= val & 0x00f00000; if ((newval & 0xf0000) == 0) { newval |= val & 0xf0000; } } newval |= (val & 0xf000) >> 4; newval &= ~0xf0000000000ull; newval |= val & 0xf0000000000ull; s->cchip.misc = newval; if ((newval ^ oldval) & 0xff0) { int i; for (i = 0; i < 4; ++i) { AlphaCPU *cpu = s->cchip.cpu[i]; if (cpu != NULL) { CPUState *cs = CPU(cpu); if (newval & (1 << (i + 8))) { cpu_interrupt(cs, CPU_INTERRUPT_SMP); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_SMP); } if ((newval & (1 << (i + 4))) == 0) { cpu_reset_interrupt(cs, CPU_INTERRUPT_TIMER); } } } } break; case 0x00c0: break; case 0x0100: case 0x0140: case 0x0180: case 0x01c0: break; case 0x0200: s->cchip.dim[0] = val; cpu_irq_change(s->cchip.cpu[0], val & s->cchip.drir); break; case 0x0240: s->cchip.dim[0] = val; cpu_irq_change(s->cchip.cpu[1], val & s->cchip.drir); break; case 0x0280: case 0x02c0: case 0x0300: break; case 0x0340: break; case 0x0380: s->cchip.iic[0] = val & 0xffffff; break; case 0x03c0: s->cchip.iic[1] = val & 0xffffff; break; case 0x0400: case 0x0440: case 0x0480: case 0x04c0: break; case 0x0580: break; case 0x05c0: break; case 0x0600: s->cchip.dim[2] = val; cpu_irq_change(s->cchip.cpu[2], val & s->cchip.drir); break; case 0x0640: s->cchip.dim[3] = val; cpu_irq_change(s->cchip.cpu[3], val & s->cchip.drir); break; case 0x0680: case 0x06c0: break; case 0x0700: s->cchip.iic[2] = val & 0xffffff; break; case 0x0740: s->cchip.iic[3] = val & 0xffffff; break; case 0x0780: break; case 0x0c00: case 0x0c40: case 0x0c80: case 0x0cc0: break; default: cpu_unassigned_access(current_cpu, addr, true, false, 0, size); return; } }
921
1
gss_init_sec_context (minor_status, claimant_cred_handle, context_handle, target_name, req_mech_type, req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec) OM_uint32 * minor_status; gss_cred_id_t claimant_cred_handle; gss_ctx_id_t * context_handle; gss_name_t target_name; gss_OID req_mech_type; OM_uint32 req_flags; OM_uint32 time_req; gss_channel_bindings_t input_chan_bindings; gss_buffer_t input_token; gss_OID * actual_mech_type; gss_buffer_t output_token; OM_uint32 * ret_flags; OM_uint32 * time_rec; { OM_uint32 status, temp_minor_status; gss_union_name_t union_name; gss_union_cred_t union_cred; gss_name_t internal_name; gss_union_ctx_id_t union_ctx_id; gss_OID selected_mech; gss_mechanism mech; gss_cred_id_t input_cred_handle; status = val_init_sec_ctx_args(minor_status, claimant_cred_handle, context_handle, target_name, req_mech_type, req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec); if (status != GSS_S_COMPLETE) return (status); status = gssint_select_mech_type(minor_status, req_mech_type, &selected_mech); if (status != GSS_S_COMPLETE) return (status); union_name = (gss_union_name_t)target_name; /* * obtain the gss mechanism information for the requested * mechanism. If mech_type is NULL, set it to the resultant * mechanism */ mech = gssint_get_mechanism(selected_mech); if (mech == NULL) return (GSS_S_BAD_MECH); if (mech->gss_init_sec_context == NULL) return (GSS_S_UNAVAILABLE); /* * If target_name is mechanism_specific, then it must match the * mech_type that we're about to use. Otherwise, do an import on * the external_name form of the target name. */ if (union_name->mech_type && g_OID_equal(union_name->mech_type, selected_mech)) { internal_name = union_name->mech_name; } else { if ((status = gssint_import_internal_name(minor_status, selected_mech, union_name, &internal_name)) != GSS_S_COMPLETE) return (status); } /* * if context_handle is GSS_C_NO_CONTEXT, allocate a union context * descriptor to hold the mech type information as well as the * underlying mechanism context handle. Otherwise, cast the * value of *context_handle to the union context variable. */ if(*context_handle == GSS_C_NO_CONTEXT) { status = GSS_S_FAILURE; union_ctx_id = (gss_union_ctx_id_t) malloc(sizeof(gss_union_ctx_id_desc)); if (union_ctx_id == NULL) goto end; if (generic_gss_copy_oid(&temp_minor_status, selected_mech, &union_ctx_id->mech_type) != GSS_S_COMPLETE) { free(union_ctx_id); goto end; } /* copy the supplied context handle */ union_ctx_id->internal_ctx_id = GSS_C_NO_CONTEXT; } else union_ctx_id = (gss_union_ctx_id_t)*context_handle; /* * get the appropriate cred handle from the union cred struct. * defaults to GSS_C_NO_CREDENTIAL if there is no cred, which will * use the default credential. 
*/ union_cred = (gss_union_cred_t) claimant_cred_handle; input_cred_handle = gssint_get_mechanism_cred(union_cred, selected_mech); /* * now call the appropriate underlying mechanism routine */ status = mech->gss_init_sec_context( minor_status, input_cred_handle, &union_ctx_id->internal_ctx_id, internal_name, gssint_get_public_oid(selected_mech), req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec); if (status != GSS_S_COMPLETE && status != GSS_S_CONTINUE_NEEDED) { /* * The spec says the preferred method is to delete all context info on * the first call to init, and on all subsequent calls make the caller * responsible for calling gss_delete_sec_context. However, if the * mechanism decided to delete the internal context, we should also * delete the union context. */ map_error(minor_status, mech); if (union_ctx_id->internal_ctx_id == GSS_C_NO_CONTEXT) *context_handle = GSS_C_NO_CONTEXT; if (*context_handle == GSS_C_NO_CONTEXT) { free(union_ctx_id->mech_type->elements); free(union_ctx_id->mech_type); free(union_ctx_id); } } else if (*context_handle == GSS_C_NO_CONTEXT) { union_ctx_id->loopback = union_ctx_id; *context_handle = (gss_ctx_id_t)union_ctx_id; } end: if (union_name->mech_name == NULL || union_name->mech_name != internal_name) { (void) gssint_release_internal_name(&temp_minor_status, selected_mech, &internal_name); } return(status); }
gss_init_sec_context (minor_status, claimant_cred_handle, context_handle, target_name, req_mech_type, req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec) OM_uint32 * minor_status; gss_cred_id_t claimant_cred_handle; gss_ctx_id_t * context_handle; gss_name_t target_name; gss_OID req_mech_type; OM_uint32 req_flags; OM_uint32 time_req; gss_channel_bindings_t input_chan_bindings; gss_buffer_t input_token; gss_OID * actual_mech_type; gss_buffer_t output_token; OM_uint32 * ret_flags; OM_uint32 * time_rec; { OM_uint32 status, temp_minor_status; gss_union_name_t union_name; gss_union_cred_t union_cred; gss_name_t internal_name; gss_union_ctx_id_t union_ctx_id; gss_OID selected_mech; gss_mechanism mech; gss_cred_id_t input_cred_handle; status = val_init_sec_ctx_args(minor_status, claimant_cred_handle, context_handle, target_name, req_mech_type, req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec); if (status != GSS_S_COMPLETE) return (status); status = gssint_select_mech_type(minor_status, req_mech_type, &selected_mech); if (status != GSS_S_COMPLETE) return (status); union_name = (gss_union_name_t)target_name; mech = gssint_get_mechanism(selected_mech); if (mech == NULL) return (GSS_S_BAD_MECH); if (mech->gss_init_sec_context == NULL) return (GSS_S_UNAVAILABLE); if (union_name->mech_type && g_OID_equal(union_name->mech_type, selected_mech)) { internal_name = union_name->mech_name; } else { if ((status = gssint_import_internal_name(minor_status, selected_mech, union_name, &internal_name)) != GSS_S_COMPLETE) return (status); } if(*context_handle == GSS_C_NO_CONTEXT) { status = GSS_S_FAILURE; union_ctx_id = (gss_union_ctx_id_t) malloc(sizeof(gss_union_ctx_id_desc)); if (union_ctx_id == NULL) goto end; if (generic_gss_copy_oid(&temp_minor_status, selected_mech, &union_ctx_id->mech_type) != GSS_S_COMPLETE) { free(union_ctx_id); goto end; } union_ctx_id->internal_ctx_id = GSS_C_NO_CONTEXT; } else union_ctx_id = (gss_union_ctx_id_t)*context_handle; union_cred = (gss_union_cred_t) claimant_cred_handle; input_cred_handle = gssint_get_mechanism_cred(union_cred, selected_mech); status = mech->gss_init_sec_context( minor_status, input_cred_handle, &union_ctx_id->internal_ctx_id, internal_name, gssint_get_public_oid(selected_mech), req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec); if (status != GSS_S_COMPLETE && status != GSS_S_CONTINUE_NEEDED) { map_error(minor_status, mech); if (union_ctx_id->internal_ctx_id == GSS_C_NO_CONTEXT) *context_handle = GSS_C_NO_CONTEXT; if (*context_handle == GSS_C_NO_CONTEXT) { free(union_ctx_id->mech_type->elements); free(union_ctx_id->mech_type); free(union_ctx_id); } } else if (*context_handle == GSS_C_NO_CONTEXT) { union_ctx_id->loopback = union_ctx_id; *context_handle = (gss_ctx_id_t)union_ctx_id; } end: if (union_name->mech_name == NULL || union_name->mech_name != internal_name) { (void) gssint_release_internal_name(&temp_minor_status, selected_mech, &internal_name); } return(status); }
922
0
static void visitor_output_setup(TestOutputVisitorData *data, const void *unused) { data->qov = qmp_output_visitor_new(); g_assert(data->qov != NULL); data->ov = qmp_output_get_visitor(data->qov); g_assert(data->ov != NULL); }
static void visitor_output_setup(TestOutputVisitorData *data, const void *unused) { data->qov = qmp_output_visitor_new(); g_assert(data->qov != NULL); data->ov = qmp_output_get_visitor(data->qov); g_assert(data->ov != NULL); }
924
1
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { int ret; if (unlikely(!out->f_op || !out->f_op->splice_write)) return -EINVAL; if (unlikely(!(out->f_mode & FMODE_WRITE))) return -EBADF; ret = rw_verify_area(WRITE, out, ppos, len); if (unlikely(ret < 0)) return ret; return out->f_op->splice_write(pipe, out, ppos, len, flags); }
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { int ret; if (unlikely(!out->f_op || !out->f_op->splice_write)) return -EINVAL; if (unlikely(!(out->f_mode & FMODE_WRITE))) return -EBADF; ret = rw_verify_area(WRITE, out, ppos, len); if (unlikely(ret < 0)) return ret; return out->f_op->splice_write(pipe, out, ppos, len, flags); }
925
0
gss_init_sec_context (minor_status, claimant_cred_handle, context_handle, target_name, req_mech_type, req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec) OM_uint32 * minor_status; gss_cred_id_t claimant_cred_handle; gss_ctx_id_t * context_handle; gss_name_t target_name; gss_OID req_mech_type; OM_uint32 req_flags; OM_uint32 time_req; gss_channel_bindings_t input_chan_bindings; gss_buffer_t input_token; gss_OID * actual_mech_type; gss_buffer_t output_token; OM_uint32 * ret_flags; OM_uint32 * time_rec; { OM_uint32 status, temp_minor_status; gss_union_name_t union_name; gss_union_cred_t union_cred; gss_name_t internal_name; gss_union_ctx_id_t union_ctx_id; gss_OID selected_mech; gss_mechanism mech; gss_cred_id_t input_cred_handle; status = val_init_sec_ctx_args(minor_status, claimant_cred_handle, context_handle, target_name, req_mech_type, req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec); if (status != GSS_S_COMPLETE) return (status); status = gssint_select_mech_type(minor_status, req_mech_type, &selected_mech); if (status != GSS_S_COMPLETE) return (status); union_name = (gss_union_name_t)target_name; /* * obtain the gss mechanism information for the requested * mechanism. If mech_type is NULL, set it to the resultant * mechanism */ mech = gssint_get_mechanism(selected_mech); if (mech == NULL) return (GSS_S_BAD_MECH); if (mech->gss_init_sec_context == NULL) return (GSS_S_UNAVAILABLE); /* * If target_name is mechanism_specific, then it must match the * mech_type that we're about to use. Otherwise, do an import on * the external_name form of the target name. */ if (union_name->mech_type && g_OID_equal(union_name->mech_type, selected_mech)) { internal_name = union_name->mech_name; } else { if ((status = gssint_import_internal_name(minor_status, selected_mech, union_name, &internal_name)) != GSS_S_COMPLETE) return (status); } /* * if context_handle is GSS_C_NO_CONTEXT, allocate a union context * descriptor to hold the mech type information as well as the * underlying mechanism context handle. Otherwise, cast the * value of *context_handle to the union context variable. */ if(*context_handle == GSS_C_NO_CONTEXT) { status = GSS_S_FAILURE; union_ctx_id = (gss_union_ctx_id_t) malloc(sizeof(gss_union_ctx_id_desc)); if (union_ctx_id == NULL) goto end; if (generic_gss_copy_oid(&temp_minor_status, selected_mech, &union_ctx_id->mech_type) != GSS_S_COMPLETE) { free(union_ctx_id); goto end; } /* copy the supplied context handle */ union_ctx_id->internal_ctx_id = GSS_C_NO_CONTEXT; } else { union_ctx_id = (gss_union_ctx_id_t)*context_handle; if (union_ctx_id->internal_ctx_id == GSS_C_NO_CONTEXT) { status = GSS_S_NO_CONTEXT; goto end; } } /* * get the appropriate cred handle from the union cred struct. * defaults to GSS_C_NO_CREDENTIAL if there is no cred, which will * use the default credential. 
*/ union_cred = (gss_union_cred_t) claimant_cred_handle; input_cred_handle = gssint_get_mechanism_cred(union_cred, selected_mech); /* * now call the appropriate underlying mechanism routine */ status = mech->gss_init_sec_context( minor_status, input_cred_handle, &union_ctx_id->internal_ctx_id, internal_name, gssint_get_public_oid(selected_mech), req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec); if (status != GSS_S_COMPLETE && status != GSS_S_CONTINUE_NEEDED) { /* * RFC 2744 5.19 requires that we not create a context on a failed * first call to init, and recommends that on a failed subsequent call * we make the caller responsible for calling gss_delete_sec_context. * Even if the mech deleted its context, keep the union context around * for the caller to delete. */ map_error(minor_status, mech); if (*context_handle == GSS_C_NO_CONTEXT) { free(union_ctx_id->mech_type->elements); free(union_ctx_id->mech_type); free(union_ctx_id); } } else if (*context_handle == GSS_C_NO_CONTEXT) { union_ctx_id->loopback = union_ctx_id; *context_handle = (gss_ctx_id_t)union_ctx_id; } end: if (union_name->mech_name == NULL || union_name->mech_name != internal_name) { (void) gssint_release_internal_name(&temp_minor_status, selected_mech, &internal_name); } return(status); }
gss_init_sec_context (minor_status, claimant_cred_handle, context_handle, target_name, req_mech_type, req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec) OM_uint32 * minor_status; gss_cred_id_t claimant_cred_handle; gss_ctx_id_t * context_handle; gss_name_t target_name; gss_OID req_mech_type; OM_uint32 req_flags; OM_uint32 time_req; gss_channel_bindings_t input_chan_bindings; gss_buffer_t input_token; gss_OID * actual_mech_type; gss_buffer_t output_token; OM_uint32 * ret_flags; OM_uint32 * time_rec; { OM_uint32 status, temp_minor_status; gss_union_name_t union_name; gss_union_cred_t union_cred; gss_name_t internal_name; gss_union_ctx_id_t union_ctx_id; gss_OID selected_mech; gss_mechanism mech; gss_cred_id_t input_cred_handle; status = val_init_sec_ctx_args(minor_status, claimant_cred_handle, context_handle, target_name, req_mech_type, req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec); if (status != GSS_S_COMPLETE) return (status); status = gssint_select_mech_type(minor_status, req_mech_type, &selected_mech); if (status != GSS_S_COMPLETE) return (status); union_name = (gss_union_name_t)target_name; mech = gssint_get_mechanism(selected_mech); if (mech == NULL) return (GSS_S_BAD_MECH); if (mech->gss_init_sec_context == NULL) return (GSS_S_UNAVAILABLE); if (union_name->mech_type && g_OID_equal(union_name->mech_type, selected_mech)) { internal_name = union_name->mech_name; } else { if ((status = gssint_import_internal_name(minor_status, selected_mech, union_name, &internal_name)) != GSS_S_COMPLETE) return (status); } if(*context_handle == GSS_C_NO_CONTEXT) { status = GSS_S_FAILURE; union_ctx_id = (gss_union_ctx_id_t) malloc(sizeof(gss_union_ctx_id_desc)); if (union_ctx_id == NULL) goto end; if (generic_gss_copy_oid(&temp_minor_status, selected_mech, &union_ctx_id->mech_type) != GSS_S_COMPLETE) { free(union_ctx_id); goto end; } union_ctx_id->internal_ctx_id = GSS_C_NO_CONTEXT; } else { union_ctx_id = (gss_union_ctx_id_t)*context_handle; if (union_ctx_id->internal_ctx_id == GSS_C_NO_CONTEXT) { status = GSS_S_NO_CONTEXT; goto end; } } union_cred = (gss_union_cred_t) claimant_cred_handle; input_cred_handle = gssint_get_mechanism_cred(union_cred, selected_mech); status = mech->gss_init_sec_context( minor_status, input_cred_handle, &union_ctx_id->internal_ctx_id, internal_name, gssint_get_public_oid(selected_mech), req_flags, time_req, input_chan_bindings, input_token, actual_mech_type, output_token, ret_flags, time_rec); if (status != GSS_S_COMPLETE && status != GSS_S_CONTINUE_NEEDED) { map_error(minor_status, mech); if (*context_handle == GSS_C_NO_CONTEXT) { free(union_ctx_id->mech_type->elements); free(union_ctx_id->mech_type); free(union_ctx_id); } } else if (*context_handle == GSS_C_NO_CONTEXT) { union_ctx_id->loopback = union_ctx_id; *context_handle = (gss_ctx_id_t)union_ctx_id; } end: if (union_name->mech_name == NULL || union_name->mech_name != internal_name) { (void) gssint_release_internal_name(&temp_minor_status, selected_mech, &internal_name); } return(status); }
926
0
static void *Type_Curve_Dup(struct _cms_typehandler_struct *self, const void *Ptr, cmsUInt32Number n) { return (void *) cmsDupToneCurve((cmsToneCurve *) Ptr); cmsUNUSED_PARAMETER(n); cmsUNUSED_PARAMETER(self); }
static void *Type_Curve_Dup(struct _cms_typehandler_struct *self, const void *Ptr, cmsUInt32Number n) { return (void *) cmsDupToneCurve((cmsToneCurve *) Ptr); cmsUNUSED_PARAMETER(n); cmsUNUSED_PARAMETER(self); }
927
0
static void group_exponents(AC3EncodeContext *s) { int blk, ch, i; int group_size, nb_groups, bit_count; uint8_t *p; int delta0, delta1, delta2; int exp0, exp1; bit_count = 0; for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { AC3Block *block = &s->blocks[blk]; for (ch = 0; ch < s->channels; ch++) { if (s->exp_strategy[ch][blk] == EXP_REUSE) continue; group_size = s->exp_strategy[ch][blk] + (s->exp_strategy[ch][blk] == EXP_D45); nb_groups = exponent_group_tab[s->exp_strategy[ch][blk]-1][s->nb_coefs[ch]]; bit_count += 4 + (nb_groups * 7); p = block->exp[ch]; /* DC exponent */ exp1 = *p++; block->grouped_exp[ch][0] = exp1; /* remaining exponents are delta encoded */ for (i = 1; i <= nb_groups; i++) { /* merge three delta in one code */ exp0 = exp1; exp1 = p[0]; p += group_size; delta0 = exp1 - exp0 + 2; exp0 = exp1; exp1 = p[0]; p += group_size; delta1 = exp1 - exp0 + 2; exp0 = exp1; exp1 = p[0]; p += group_size; delta2 = exp1 - exp0 + 2; block->grouped_exp[ch][i] = ((delta0 * 5 + delta1) * 5) + delta2; } } } s->exponent_bits = bit_count; }
static void group_exponents(AC3EncodeContext *s) { int blk, ch, i; int group_size, nb_groups, bit_count; uint8_t *p; int delta0, delta1, delta2; int exp0, exp1; bit_count = 0; for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { AC3Block *block = &s->blocks[blk]; for (ch = 0; ch < s->channels; ch++) { if (s->exp_strategy[ch][blk] == EXP_REUSE) continue; group_size = s->exp_strategy[ch][blk] + (s->exp_strategy[ch][blk] == EXP_D45); nb_groups = exponent_group_tab[s->exp_strategy[ch][blk]-1][s->nb_coefs[ch]]; bit_count += 4 + (nb_groups * 7); p = block->exp[ch]; exp1 = *p++; block->grouped_exp[ch][0] = exp1; for (i = 1; i <= nb_groups; i++) { exp0 = exp1; exp1 = p[0]; p += group_size; delta0 = exp1 - exp0 + 2; exp0 = exp1; exp1 = p[0]; p += group_size; delta1 = exp1 - exp0 + 2; exp0 = exp1; exp1 = p[0]; p += group_size; delta2 = exp1 - exp0 + 2; block->grouped_exp[ch][i] = ((delta0 * 5 + delta1) * 5) + delta2; } } } s->exponent_bits = bit_count; }
929
1
static unsigned HuffmanTree_makeFromFrequencies(HuffmanTree* tree, const unsigned* frequencies, size_t mincodes, size_t numcodes, unsigned maxbitlen) { unsigned error = 0; while(!frequencies[numcodes - 1] && numcodes > mincodes) numcodes--; /*trim zeroes*/ tree->maxbitlen = maxbitlen; tree->numcodes = (unsigned)numcodes; /*number of symbols*/ tree->lengths = (unsigned*)realloc(tree->lengths, numcodes * sizeof(unsigned)); if(!tree->lengths) return 83; /*alloc fail*/ /*initialize all lengths to 0*/ memset(tree->lengths, 0, numcodes * sizeof(unsigned)); error = lodepng_huffman_code_lengths(tree->lengths, frequencies, numcodes, maxbitlen); if(!error) error = HuffmanTree_makeFromLengths2(tree); return error; }
static unsigned HuffmanTree_makeFromFrequencies(HuffmanTree* tree, const unsigned* frequencies, size_t mincodes, size_t numcodes, unsigned maxbitlen) { unsigned error = 0; while(!frequencies[numcodes - 1] && numcodes > mincodes) numcodes--; tree->maxbitlen = maxbitlen; tree->numcodes = (unsigned)numcodes; tree->lengths = (unsigned*)realloc(tree->lengths, numcodes * sizeof(unsigned)); if(!tree->lengths) return 83; memset(tree->lengths, 0, numcodes * sizeof(unsigned)); error = lodepng_huffman_code_lengths(tree->lengths, frequencies, numcodes, maxbitlen); if(!error) error = HuffmanTree_makeFromLengths2(tree); return error; }
930
1
int hfsplus_find_cat(struct super_block *sb, u32 cnid, struct hfs_find_data *fd) { hfsplus_cat_entry tmp; int err; u16 type; hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL); err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry)); if (err) return err; type = be16_to_cpu(tmp.type); if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) { printk(KERN_ERR "hfs: found bad thread record in catalog\n"); return -EIO; } hfsplus_cat_build_key_uni(fd->search_key, be32_to_cpu(tmp.thread.parentID), &tmp.thread.nodeName); return hfs_brec_find(fd); }
int hfsplus_find_cat(struct super_block *sb, u32 cnid, struct hfs_find_data *fd) { hfsplus_cat_entry tmp; int err; u16 type; hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL); err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry)); if (err) return err; type = be16_to_cpu(tmp.type); if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) { printk(KERN_ERR "hfs: found bad thread record in catalog\n"); return -EIO; } hfsplus_cat_build_key_uni(fd->search_key, be32_to_cpu(tmp.thread.parentID), &tmp.thread.nodeName); return hfs_brec_find(fd); }
931
0
void sd_write_data(SDState *sd, uint8_t value) { int i; if (!sd->bdrv || !bdrv_is_inserted(sd->bdrv) || !sd->enable) return; if (sd->state != sd_receivingdata_state) { fprintf(stderr, "sd_write_data: not in Receiving-Data state\n"); return; } if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION)) return; switch (sd->current_cmd) { case 24: /* CMD24: WRITE_SINGLE_BLOCK */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { /* TODO: Check CRC before committing */ sd->state = sd_programming_state; BLK_WRITE_BLOCK(sd->data_start, sd->data_offset); sd->blk_written ++; sd->csd[14] |= 0x40; /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; } break; case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */ if (sd->data_offset == 0) { /* Start of the block - let's check the address is valid */ if (sd->data_start + sd->blk_len > sd->size) { sd->card_status |= ADDRESS_ERROR; break; } if (sd_wp_addr(sd, sd->data_start)) { sd->card_status |= WP_VIOLATION; break; } } sd->data[sd->data_offset++] = value; if (sd->data_offset >= sd->blk_len) { /* TODO: Check CRC before committing */ sd->state = sd_programming_state; BLK_WRITE_BLOCK(sd->data_start, sd->data_offset); sd->blk_written++; sd->data_start += sd->blk_len; sd->data_offset = 0; sd->csd[14] |= 0x40; /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_receivingdata_state; } break; case 26: /* CMD26: PROGRAM_CID */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sizeof(sd->cid)) { /* TODO: Check CRC before committing */ sd->state = sd_programming_state; for (i = 0; i < sizeof(sd->cid); i ++) if ((sd->cid[i] | 0x00) != sd->data[i]) sd->card_status |= CID_CSD_OVERWRITE; if (!(sd->card_status & CID_CSD_OVERWRITE)) for (i = 0; i < sizeof(sd->cid); i ++) { sd->cid[i] |= 0x00; sd->cid[i] &= sd->data[i]; } /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; } break; case 27: /* CMD27: PROGRAM_CSD */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sizeof(sd->csd)) { /* TODO: Check CRC before committing */ sd->state = sd_programming_state; for (i = 0; i < sizeof(sd->csd); i ++) if ((sd->csd[i] | sd_csd_rw_mask[i]) != (sd->data[i] | sd_csd_rw_mask[i])) sd->card_status |= CID_CSD_OVERWRITE; /* Copy flag (OTP) & Permanent write protect */ if (sd->csd[14] & ~sd->data[14] & 0x60) sd->card_status |= CID_CSD_OVERWRITE; if (!(sd->card_status & CID_CSD_OVERWRITE)) for (i = 0; i < sizeof(sd->csd); i ++) { sd->csd[i] |= sd_csd_rw_mask[i]; sd->csd[i] &= sd->data[i]; } /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; } break; case 42: /* CMD42: LOCK_UNLOCK */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { /* TODO: Check CRC before committing */ sd->state = sd_programming_state; sd_lock_command(sd); /* Bzzzzzzztt .... Operation complete. */ sd->state = sd_transfer_state; } break; case 56: /* CMD56: GEN_CMD */ sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { APP_WRITE_BLOCK(sd->data_start, sd->data_offset); sd->state = sd_transfer_state; } break; default: fprintf(stderr, "sd_write_data: unknown command\n"); break; } }
void sd_write_data(SDState *sd, uint8_t value) { int i; if (!sd->bdrv || !bdrv_is_inserted(sd->bdrv) || !sd->enable) return; if (sd->state != sd_receivingdata_state) { fprintf(stderr, "sd_write_data: not in Receiving-Data state\n"); return; } if (sd->card_status & (ADDRESS_ERROR | WP_VIOLATION)) return; switch (sd->current_cmd) { case 24: sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { sd->state = sd_programming_state; BLK_WRITE_BLOCK(sd->data_start, sd->data_offset); sd->blk_written ++; sd->csd[14] |= 0x40; sd->state = sd_transfer_state; } break; case 25: if (sd->data_offset == 0) { if (sd->data_start + sd->blk_len > sd->size) { sd->card_status |= ADDRESS_ERROR; break; } if (sd_wp_addr(sd, sd->data_start)) { sd->card_status |= WP_VIOLATION; break; } } sd->data[sd->data_offset++] = value; if (sd->data_offset >= sd->blk_len) { sd->state = sd_programming_state; BLK_WRITE_BLOCK(sd->data_start, sd->data_offset); sd->blk_written++; sd->data_start += sd->blk_len; sd->data_offset = 0; sd->csd[14] |= 0x40; sd->state = sd_receivingdata_state; } break; case 26: sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sizeof(sd->cid)) { sd->state = sd_programming_state; for (i = 0; i < sizeof(sd->cid); i ++) if ((sd->cid[i] | 0x00) != sd->data[i]) sd->card_status |= CID_CSD_OVERWRITE; if (!(sd->card_status & CID_CSD_OVERWRITE)) for (i = 0; i < sizeof(sd->cid); i ++) { sd->cid[i] |= 0x00; sd->cid[i] &= sd->data[i]; } sd->state = sd_transfer_state; } break; case 27: sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sizeof(sd->csd)) { sd->state = sd_programming_state; for (i = 0; i < sizeof(sd->csd); i ++) if ((sd->csd[i] | sd_csd_rw_mask[i]) != (sd->data[i] | sd_csd_rw_mask[i])) sd->card_status |= CID_CSD_OVERWRITE; if (sd->csd[14] & ~sd->data[14] & 0x60) sd->card_status |= CID_CSD_OVERWRITE; if (!(sd->card_status & CID_CSD_OVERWRITE)) for (i = 0; i < sizeof(sd->csd); i ++) { sd->csd[i] |= sd_csd_rw_mask[i]; sd->csd[i] &= sd->data[i]; } sd->state = sd_transfer_state; } break; case 42: sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { sd->state = sd_programming_state; sd_lock_command(sd); sd->state = sd_transfer_state; } break; case 56: sd->data[sd->data_offset ++] = value; if (sd->data_offset >= sd->blk_len) { APP_WRITE_BLOCK(sd->data_start, sd->data_offset); sd->state = sd_transfer_state; } break; default: fprintf(stderr, "sd_write_data: unknown command\n"); break; } }
933
1
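The PROGRAM_CID and PROGRAM_CSD branches in the pair above share one idiom: the incoming block is compared byte-wise against the current register through a writable-bits mask, and CID_CSD_OVERWRITE is raised if any read-only bit would change. A minimal sketch of that check, with hypothetical names (the mask convention mirrors sd_csd_rw_mask, where a set bit marks a host-writable position):

#include <stddef.h>
#include <stdint.h>

/* Return nonzero if 'data' would modify any read-only bit of 'reg'.
 * 'rw_mask' has a 1 wherever the host is allowed to write. */
static int would_overwrite_readonly(const uint8_t *reg, const uint8_t *data,
                                    const uint8_t *rw_mask, size_t len)
{
    size_t i;
    for (i = 0; i < len; i++) {
        /* Forcing the writable bits high on both sides leaves only
         * the read-only bits to differ. */
        if ((reg[i] | rw_mask[i]) != (data[i] | rw_mask[i]))
            return 1;
    }
    return 0;
}

OR-ing the mask into both operands cancels every writable position, so any remaining difference must sit in read-only territory.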
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max) { struct page *page; struct address_space *mapping; __be32 *pptr, *curr, *end; u32 mask, start, len, n; __be32 val; int i; len = *max; if (!len) return size; dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex); mapping = HFSPLUS_SB(sb).alloc_file->i_mapping; page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); pptr = kmap(page); curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; i = offset % 32; offset &= ~(PAGE_CACHE_BITS - 1); if ((size ^ offset) / PAGE_CACHE_BITS) end = pptr + PAGE_CACHE_BITS / 32; else end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32; /* scan the first partial u32 for zero bits */ val = *curr; if (~val) { n = be32_to_cpu(val); mask = (1U << 31) >> i; for (; i < 32; mask >>= 1, i++) { if (!(n & mask)) goto found; } } curr++; /* scan complete u32s for the first zero bit */ while (1) { while (curr < end) { val = *curr; if (~val) { n = be32_to_cpu(val); mask = 1 << 31; for (i = 0; i < 32; mask >>= 1, i++) { if (!(n & mask)) goto found; } } curr++; } kunmap(page); offset += PAGE_CACHE_BITS; if (offset >= size) break; page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); curr = pptr = kmap(page); if ((size ^ offset) / PAGE_CACHE_BITS) end = pptr + PAGE_CACHE_BITS / 32; else end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32; } dprint(DBG_BITMAP, "bitmap full\n"); start = size; goto out; found: start = offset + (curr - pptr) * 32 + i; if (start >= size) { dprint(DBG_BITMAP, "bitmap full\n"); goto out; } /* do any partial u32 at the start */ len = min(size - start, len); while (1) { n |= mask; if (++i >= 32) break; mask >>= 1; if (!--len || n & mask) goto done; } if (!--len) goto done; *curr++ = cpu_to_be32(n); /* do full u32s */ while (1) { while (curr < end) { n = be32_to_cpu(*curr); if (len < 32) goto last; if (n) { len = 32; goto last; } *curr++ = cpu_to_be32(0xffffffff); len -= 32; } set_page_dirty(page); kunmap(page); offset += PAGE_CACHE_BITS; page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); pptr = kmap(page); curr = pptr; end = pptr + PAGE_CACHE_BITS / 32; } last: /* do any partial u32 at end */ mask = 1U << 31; for (i = 0; i < len; i++) { if (n & mask) break; n |= mask; mask >>= 1; } done: *curr = cpu_to_be32(n); set_page_dirty(page); kunmap(page); *max = offset + (curr - pptr) * 32 + i - start; HFSPLUS_SB(sb).free_blocks -= *max; sb->s_dirt = 1; dprint(DBG_BITMAP, "-> %u,%u\n", start, *max); out: mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex); return start; }
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max) { struct page *page; struct address_space *mapping; __be32 *pptr, *curr, *end; u32 mask, start, len, n; __be32 val; int i; len = *max; if (!len) return size; dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex); mapping = HFSPLUS_SB(sb).alloc_file->i_mapping; page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); pptr = kmap(page); curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; i = offset % 32; offset &= ~(PAGE_CACHE_BITS - 1); if ((size ^ offset) / PAGE_CACHE_BITS) end = pptr + PAGE_CACHE_BITS / 32; else end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32; val = *curr; if (~val) { n = be32_to_cpu(val); mask = (1U << 31) >> i; for (; i < 32; mask >>= 1, i++) { if (!(n & mask)) goto found; } } curr++; while (1) { while (curr < end) { val = *curr; if (~val) { n = be32_to_cpu(val); mask = 1 << 31; for (i = 0; i < 32; mask >>= 1, i++) { if (!(n & mask)) goto found; } } curr++; } kunmap(page); offset += PAGE_CACHE_BITS; if (offset >= size) break; page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); curr = pptr = kmap(page); if ((size ^ offset) / PAGE_CACHE_BITS) end = pptr + PAGE_CACHE_BITS / 32; else end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32; } dprint(DBG_BITMAP, "bitmap full\n"); start = size; goto out; found: start = offset + (curr - pptr) * 32 + i; if (start >= size) { dprint(DBG_BITMAP, "bitmap full\n"); goto out; } len = min(size - start, len); while (1) { n |= mask; if (++i >= 32) break; mask >>= 1; if (!--len || n & mask) goto done; } if (!--len) goto done; *curr++ = cpu_to_be32(n); while (1) { while (curr < end) { n = be32_to_cpu(*curr); if (len < 32) goto last; if (n) { len = 32; goto last; } *curr++ = cpu_to_be32(0xffffffff); len -= 32; } set_page_dirty(page); kunmap(page); offset += PAGE_CACHE_BITS; page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); pptr = kmap(page); curr = pptr; end = pptr + PAGE_CACHE_BITS / 32; } last: mask = 1U << 31; for (i = 0; i < len; i++) { if (n & mask) break; n |= mask; mask >>= 1; } done: *curr = cpu_to_be32(n); set_page_dirty(page); kunmap(page); *max = offset + (curr - pptr) * 32 + i - start; HFSPLUS_SB(sb).free_blocks -= *max; sb->s_dirt = 1; dprint(DBG_BITMAP, "-> %u,%u\n", start, *max); out: mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex); return start; }
934
1
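hfsplus_block_allocate walks the allocation bitmap as big-endian 32-bit words, probing from the most significant bit downward; note in passing that the full-word loop above builds its probe as 1 << 31, which overflows a signed int, while the partial-word path uses the well-defined 1U << 31. The word scan in isolation, as a sketch (names are illustrative, not the hfsplus API):

#include <stdint.h>

/* Find the first clear (free) bit in a 32-bit word, scanning from
 * bit 31 (the big-endian leading bit) down to bit 0.
 * Returns 0..31, or -1 if the word is fully allocated. */
static int first_zero_bit_be(uint32_t n)
{
    uint32_t mask = 1u << 31;
    int i;
    for (i = 0; i < 32; i++, mask >>= 1) {
        if (!(n & mask))
            return i;
    }
    return -1;
}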
static UINT printer_process_irp_write(PRINTER_DEVICE* printer_dev, IRP* irp) { UINT32 Length; UINT64 Offset; rdpPrintJob* printjob = NULL; UINT error = CHANNEL_RC_OK; Stream_Read_UINT32(irp->input, Length); Stream_Read_UINT64(irp->input, Offset); Stream_Seek(irp->input, 20); /* Padding */ if (printer_dev->printer) printjob = printer_dev->printer->FindPrintJob(printer_dev->printer, irp->FileId); if (!printjob) { irp->IoStatus = STATUS_UNSUCCESSFUL; Length = 0; } else { error = printjob->Write(printjob, Stream_Pointer(irp->input), Length); } if (error) { WLog_ERR(TAG, "printjob->Write failed with error %" PRIu32 "!", error); return error; } Stream_Write_UINT32(irp->output, Length); Stream_Write_UINT8(irp->output, 0); /* Padding */ return irp->Complete(irp); }
static UINT printer_process_irp_write(PRINTER_DEVICE* printer_dev, IRP* irp) { UINT32 Length; UINT64 Offset; rdpPrintJob* printjob = NULL; UINT error = CHANNEL_RC_OK; Stream_Read_UINT32(irp->input, Length); Stream_Read_UINT64(irp->input, Offset); Stream_Seek(irp->input, 20); if (printer_dev->printer) printjob = printer_dev->printer->FindPrintJob(printer_dev->printer, irp->FileId); if (!printjob) { irp->IoStatus = STATUS_UNSUCCESSFUL; Length = 0; } else { error = printjob->Write(printjob, Stream_Pointer(irp->input), Length); } if (error) { WLog_ERR(TAG, "printjob->Write failed with error %" PRIu32 "!", error); return error; } Stream_Write_UINT32(irp->output, Length); Stream_Write_UINT8(irp->output, 0); return irp->Complete(irp); }
935
0
gss_pseudo_random (OM_uint32 *minor_status, gss_ctx_id_t context_handle, int prf_key, const gss_buffer_t prf_in, ssize_t desired_output_len, gss_buffer_t prf_out) { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; if (minor_status == NULL) return GSS_S_CALL_INACCESSIBLE_WRITE; if (context_handle == GSS_C_NO_CONTEXT) return GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT; if (prf_in == GSS_C_NO_BUFFER) return GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT; if (prf_out == GSS_C_NO_BUFFER) return GSS_S_CALL_INACCESSIBLE_WRITE | GSS_S_NO_CONTEXT; prf_out->length = 0; prf_out->value = NULL; /* * select the appropriate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return GSS_S_NO_CONTEXT; mech = gssint_get_mechanism (ctx->mech_type); if (mech != NULL) { if (mech->gss_pseudo_random != NULL) { status = mech->gss_pseudo_random(minor_status, ctx->internal_ctx_id, prf_key, prf_in, desired_output_len, prf_out); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return status; } return GSS_S_BAD_MECH; }
gss_pseudo_random (OM_uint32 *minor_status, gss_ctx_id_t context_handle, int prf_key, const gss_buffer_t prf_in, ssize_t desired_output_len, gss_buffer_t prf_out) { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; if (minor_status == NULL) return GSS_S_CALL_INACCESSIBLE_WRITE; if (context_handle == GSS_C_NO_CONTEXT) return GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT; if (prf_in == GSS_C_NO_BUFFER) return GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT; if (prf_out == GSS_C_NO_BUFFER) return GSS_S_CALL_INACCESSIBLE_WRITE | GSS_S_NO_CONTEXT; prf_out->length = 0; prf_out->value = NULL; ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return GSS_S_NO_CONTEXT; mech = gssint_get_mechanism (ctx->mech_type); if (mech != NULL) { if (mech->gss_pseudo_random != NULL) { status = mech->gss_pseudo_random(minor_status, ctx->internal_ctx_id, prf_key, prf_in, desired_output_len, prf_out); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return status; } return GSS_S_BAD_MECH; }
936
1
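gss_pseudo_random is the first of several GSS-API entry points in this batch, and they all follow the same mechglue shape: validate arguments, unwrap the union context, look up the mechanism's dispatch table, and forward through a function pointer, mapping a missing hook to GSS_S_UNAVAILABLE and an unknown mechanism to GSS_S_BAD_MECH. Reduced to a sketch with made-up types (nothing here is the real libgssapi layout):

#include <stddef.h>

typedef unsigned int om_status;
#define S_COMPLETE    0u
#define S_UNAVAILABLE 1u
#define S_BAD_MECH    2u
#define S_NO_CONTEXT  3u

struct mech_ops { om_status (*op)(void *internal_ctx); };
struct union_ctx { void *internal_ctx; struct mech_ops *mech; };

static om_status dispatch_op(struct union_ctx *ctx)
{
    if (ctx == NULL || ctx->internal_ctx == NULL)
        return S_NO_CONTEXT;                 /* no established context */
    if (ctx->mech == NULL)
        return S_BAD_MECH;                   /* unknown mechanism */
    if (ctx->mech->op == NULL)
        return S_UNAVAILABLE;                /* mechanism lacks this op */
    return ctx->mech->op(ctx->internal_ctx); /* per-mechanism implementation */
}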
int hfs_cat_find_brec(struct super_block *sb, u32 cnid, struct hfs_find_data *fd) { hfs_cat_rec rec; int res, len, type; hfs_cat_build_key(sb, fd->search_key, cnid, NULL); res = hfs_brec_read(fd, &rec, sizeof(rec)); if (res) return res; type = rec.type; if (type != HFS_CDR_THD && type != HFS_CDR_FTH) { printk(KERN_ERR "hfs: found bad thread record in catalog\n"); return -EIO; } fd->search_key->cat.ParID = rec.thread.ParID; len = fd->search_key->cat.CName.len = rec.thread.CName.len; memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len); return hfs_brec_find(fd); }
int hfs_cat_find_brec(struct super_block *sb, u32 cnid, struct hfs_find_data *fd) { hfs_cat_rec rec; int res, len, type; hfs_cat_build_key(sb, fd->search_key, cnid, NULL); res = hfs_brec_read(fd, &rec, sizeof(rec)); if (res) return res; type = rec.type; if (type != HFS_CDR_THD && type != HFS_CDR_FTH) { printk(KERN_ERR "hfs: found bad thread record in catalog\n"); return -EIO; } fd->search_key->cat.ParID = rec.thread.ParID; len = fd->search_key->cat.CName.len = rec.thread.CName.len; memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len); return hfs_brec_find(fd); }
938
0
static void wma_window(WMACodecContext *s, float *out) { float *in = s->output; int block_len, bsize, n; if (s->block_len_bits <= s->prev_block_len_bits) { block_len = s->block_len; bsize = s->frame_len_bits - s->block_len_bits; s->fdsp.vector_fmul_add(out, in, s->windows[bsize], out, block_len); } else { block_len = 1 << s->prev_block_len_bits; n = (s->block_len - block_len) / 2; bsize = s->frame_len_bits - s->prev_block_len_bits; s->fdsp.vector_fmul_add(out + n, in + n, s->windows[bsize], out + n, block_len); memcpy(out + n + block_len, in + n + block_len, n * sizeof(float)); } out += s->block_len; in += s->block_len; if (s->block_len_bits <= s->next_block_len_bits) { block_len = s->block_len; bsize = s->frame_len_bits - s->block_len_bits; s->fdsp.vector_fmul_reverse(out, in, s->windows[bsize], block_len); } else { block_len = 1 << s->next_block_len_bits; n = (s->block_len - block_len) / 2; bsize = s->frame_len_bits - s->next_block_len_bits; memcpy(out, in, n * sizeof(float)); s->fdsp.vector_fmul_reverse(out + n, in + n, s->windows[bsize], block_len); memset(out + n + block_len, 0, n * sizeof(float)); } }
static void wma_window(WMACodecContext *s, float *out) { float *in = s->output; int block_len, bsize, n; if (s->block_len_bits <= s->prev_block_len_bits) { block_len = s->block_len; bsize = s->frame_len_bits - s->block_len_bits; s->fdsp.vector_fmul_add(out, in, s->windows[bsize], out, block_len); } else { block_len = 1 << s->prev_block_len_bits; n = (s->block_len - block_len) / 2; bsize = s->frame_len_bits - s->prev_block_len_bits; s->fdsp.vector_fmul_add(out + n, in + n, s->windows[bsize], out + n, block_len); memcpy(out + n + block_len, in + n + block_len, n * sizeof(float)); } out += s->block_len; in += s->block_len; if (s->block_len_bits <= s->next_block_len_bits) { block_len = s->block_len; bsize = s->frame_len_bits - s->block_len_bits; s->fdsp.vector_fmul_reverse(out, in, s->windows[bsize], block_len); } else { block_len = 1 << s->next_block_len_bits; n = (s->block_len - block_len) / 2; bsize = s->frame_len_bits - s->next_block_len_bits; memcpy(out, in, n * sizeof(float)); s->fdsp.vector_fmul_reverse(out + n, in + n, s->windows[bsize], block_len); memset(out + n + block_len, 0, n * sizeof(float)); } }
939
0
gss_process_context_token (minor_status, context_handle, token_buffer) OM_uint32 * minor_status; gss_ctx_id_t context_handle; gss_buffer_t token_buffer; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; if (minor_status == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); *minor_status = 0; if (context_handle == GSS_C_NO_CONTEXT) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); if (token_buffer == GSS_C_NO_BUFFER) return (GSS_S_CALL_INACCESSIBLE_READ); if (GSS_EMPTY_BUFFER(token_buffer)) return (GSS_S_CALL_INACCESSIBLE_READ); /* * select the appropriate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_process_context_token) { status = mech->gss_process_context_token( minor_status, ctx->internal_ctx_id, token_buffer); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
gss_process_context_token (minor_status, context_handle, token_buffer) OM_uint32 * minor_status; gss_ctx_id_t context_handle; gss_buffer_t token_buffer; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; if (minor_status == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); *minor_status = 0; if (context_handle == GSS_C_NO_CONTEXT) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); if (token_buffer == GSS_C_NO_BUFFER) return (GSS_S_CALL_INACCESSIBLE_READ); if (GSS_EMPTY_BUFFER(token_buffer)) return (GSS_S_CALL_INACCESSIBLE_READ); ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_process_context_token) { status = mech->gss_process_context_token( minor_status, ctx->internal_ctx_id, token_buffer); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
940
1
void __scm_destroy(struct scm_cookie *scm) { struct scm_fp_list *fpl = scm->fp; int i; if (fpl) { scm->fp = NULL; for (i=fpl->count-1; i>=0; i--) fput(fpl->fp[i]); kfree(fpl); } }
void __scm_destroy(struct scm_cookie *scm) { struct scm_fp_list *fpl = scm->fp; int i; if (fpl) { scm->fp = NULL; for (i=fpl->count-1; i>=0; i--) fput(fpl->fp[i]); kfree(fpl); } }
942
0
gss_wrap( OM_uint32 *minor_status, gss_ctx_id_t context_handle, int conf_req_flag, gss_qop_t qop_req, gss_buffer_t input_message_buffer, int *conf_state, gss_buffer_t output_message_buffer) { /* EXPORT DELETE START */ OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_args(minor_status, context_handle, conf_req_flag, qop_req, input_message_buffer, conf_state, output_message_buffer); if (status != GSS_S_COMPLETE) return (status); /* * select the appropriate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_wrap) { status = mech->gss_wrap(minor_status, ctx->internal_ctx_id, conf_req_flag, qop_req, input_message_buffer, conf_state, output_message_buffer); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else if (mech->gss_wrap_aead || (mech->gss_wrap_iov && mech->gss_wrap_iov_length)) { status = gssint_wrap_aead(mech, minor_status, ctx, conf_req_flag, (gss_qop_t)qop_req, GSS_C_NO_BUFFER, input_message_buffer, conf_state, output_message_buffer); } else status = GSS_S_UNAVAILABLE; return(status); } /* EXPORT DELETE END */ return (GSS_S_BAD_MECH); }
gss_wrap( OM_uint32 *minor_status, gss_ctx_id_t context_handle, int conf_req_flag, gss_qop_t qop_req, gss_buffer_t input_message_buffer, int *conf_state, gss_buffer_t output_message_buffer) { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_args(minor_status, context_handle, conf_req_flag, qop_req, input_message_buffer, conf_state, output_message_buffer); if (status != GSS_S_COMPLETE) return (status); ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_wrap) { status = mech->gss_wrap(minor_status, ctx->internal_ctx_id, conf_req_flag, qop_req, input_message_buffer, conf_state, output_message_buffer); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else if (mech->gss_wrap_aead || (mech->gss_wrap_iov && mech->gss_wrap_iov_length)) { status = gssint_wrap_aead(mech, minor_status, ctx, conf_req_flag, (gss_qop_t)qop_req, GSS_C_NO_BUFFER, input_message_buffer, conf_state, output_message_buffer); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
943
0
static void handle_mousemotion(SDL_Event *ev) { int max_x, max_y; struct sdl2_console *scon = get_scon_from_window(ev->key.windowID); if (qemu_input_is_absolute() || absolute_enabled) { int scr_w, scr_h; SDL_GetWindowSize(scon->real_window, &scr_w, &scr_h); max_x = scr_w - 1; max_y = scr_h - 1; if (gui_grab && (ev->motion.x == 0 || ev->motion.y == 0 || ev->motion.x == max_x || ev->motion.y == max_y)) { sdl_grab_end(scon); } if (!gui_grab && (ev->motion.x > 0 && ev->motion.x < max_x && ev->motion.y > 0 && ev->motion.y < max_y)) { sdl_grab_start(scon); } } if (gui_grab || qemu_input_is_absolute() || absolute_enabled) { sdl_send_mouse_event(scon, ev->motion.xrel, ev->motion.yrel, ev->motion.x, ev->motion.y, ev->motion.state); } }
static void handle_mousemotion(SDL_Event *ev) { int max_x, max_y; struct sdl2_console *scon = get_scon_from_window(ev->key.windowID); if (qemu_input_is_absolute() || absolute_enabled) { int scr_w, scr_h; SDL_GetWindowSize(scon->real_window, &scr_w, &scr_h); max_x = scr_w - 1; max_y = scr_h - 1; if (gui_grab && (ev->motion.x == 0 || ev->motion.y == 0 || ev->motion.x == max_x || ev->motion.y == max_y)) { sdl_grab_end(scon); } if (!gui_grab && (ev->motion.x > 0 && ev->motion.x < max_x && ev->motion.y > 0 && ev->motion.y < max_y)) { sdl_grab_start(scon); } } if (gui_grab || qemu_input_is_absolute() || absolute_enabled) { sdl_send_mouse_event(scon, ev->motion.xrel, ev->motion.yrel, ev->motion.x, ev->motion.y, ev->motion.state); } }
946
0
static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque, uint64_t flags) { Error *local_err = NULL, **errp = &local_err; QEMUFileRDMA *rfile = opaque; RDMAContext *rdma = rfile->rdma; RDMAControlHeader head = { .len = 0, .repeat = 1 }; int ret = 0; CHECK_ERROR_STATE(); qemu_fflush(f); ret = qemu_rdma_drain_cq(f, rdma); if (ret < 0) { goto err; } if (flags == RAM_CONTROL_SETUP) { RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT }; RDMALocalBlocks *local = &rdma->local_ram_blocks; int reg_result_idx, i, j, nb_remote_blocks; head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST; DPRINTF("Sending registration setup for ram blocks...\n"); /* * Make sure that we parallelize the pinning on both sides. * For very large guests, doing this serially takes a really * long time, so we have to 'interleave' the pinning locally * with the control messages by performing the pinning on this * side before we receive the control response from the other * side that the pinning has completed. */ ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp, &reg_result_idx, rdma->pin_all ? qemu_rdma_reg_whole_ram_blocks : NULL); if (ret < 0) { ERROR(errp, "receiving remote info!"); return ret; } qemu_rdma_move_header(rdma, reg_result_idx, &resp); memcpy(rdma->block, rdma->wr_data[reg_result_idx].control_curr, resp.len); nb_remote_blocks = resp.len / sizeof(RDMARemoteBlock); /* * The protocol uses two different sets of rkeys (mutually exclusive): * 1. One key to represent the virtual address of the entire ram block. * (dynamic chunk registration disabled - pin everything with one rkey.) * 2. One to represent individual chunks within a ram block. * (dynamic chunk registration enabled - pin individual chunks.) * * Once the capability is successfully negotiated, the destination transmits * the keys to use (or sends them later) including the virtual addresses * and then propagates the remote ram block descriptions to its local copy. */ if (local->nb_blocks != nb_remote_blocks) { ERROR(errp, "ram blocks mismatch #1! " "Your QEMU command line parameters are probably " "not identical on both the source and destination."); return -EINVAL; } for (i = 0; i < nb_remote_blocks; i++) { network_to_remote_block(&rdma->block[i]); /* search local ram blocks */ for (j = 0; j < local->nb_blocks; j++) { if (rdma->block[i].offset != local->block[j].offset) { continue; } if (rdma->block[i].length != local->block[j].length) { ERROR(errp, "ram blocks mismatch #2! " "Your QEMU command line parameters are probably " "not identical on both the source and destination."); return -EINVAL; } local->block[j].remote_host_addr = rdma->block[i].remote_host_addr; local->block[j].remote_rkey = rdma->block[i].remote_rkey; break; } if (j >= local->nb_blocks) { ERROR(errp, "ram blocks mismatch #3! " "Your QEMU command line parameters are probably " "not identical on both the source and destination."); return -EINVAL; } } } DDDPRINTF("Sending registration finish %" PRIu64 "...\n", flags); head.type = RDMA_CONTROL_REGISTER_FINISHED; ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL); if (ret < 0) { goto err; } return 0; err: rdma->error_state = ret; return ret; }
static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque, uint64_t flags) { Error *local_err = NULL, **errp = &local_err; QEMUFileRDMA *rfile = opaque; RDMAContext *rdma = rfile->rdma; RDMAControlHeader head = { .len = 0, .repeat = 1 }; int ret = 0; CHECK_ERROR_STATE(); qemu_fflush(f); ret = qemu_rdma_drain_cq(f, rdma); if (ret < 0) { goto err; } if (flags == RAM_CONTROL_SETUP) { RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT }; RDMALocalBlocks *local = &rdma->local_ram_blocks; int reg_result_idx, i, j, nb_remote_blocks; head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST; DPRINTF("Sending registration setup for ram blocks...\n"); ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp, &reg_result_idx, rdma->pin_all ? qemu_rdma_reg_whole_ram_blocks : NULL); if (ret < 0) { ERROR(errp, "receiving remote info!"); return ret; } qemu_rdma_move_header(rdma, reg_result_idx, &resp); memcpy(rdma->block, rdma->wr_data[reg_result_idx].control_curr, resp.len); nb_remote_blocks = resp.len / sizeof(RDMARemoteBlock); if (local->nb_blocks != nb_remote_blocks) { ERROR(errp, "ram blocks mismatch #1! " "Your QEMU command line parameters are probably " "not identical on both the source and destination."); return -EINVAL; } for (i = 0; i < nb_remote_blocks; i++) { network_to_remote_block(&rdma->block[i]); for (j = 0; j < local->nb_blocks; j++) { if (rdma->block[i].offset != local->block[j].offset) { continue; } if (rdma->block[i].length != local->block[j].length) { ERROR(errp, "ram blocks mismatch #2! " "Your QEMU command line parameters are probably " "not identical on both the source and destination."); return -EINVAL; } local->block[j].remote_host_addr = rdma->block[i].remote_host_addr; local->block[j].remote_rkey = rdma->block[i].remote_rkey; break; } if (j >= local->nb_blocks) { ERROR(errp, "ram blocks mismatch #3! " "Your QEMU command line parameters are probably " "not identical on both the source and destination."); return -EINVAL; } } } DDDPRINTF("Sending registration finish %" PRIu64 "...\n", flags); head.type = RDMA_CONTROL_REGISTER_FINISHED; ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL); if (ret < 0) { goto err; } return 0; err: rdma->error_state = ret; return ret; }
949
1
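The RAM-block reconciliation inside qemu_rdma_registration_stop is a join on (offset, length): each remote block must find a local twin at the same offset, and a length mismatch aborts migration with -EINVAL. The matching loop boiled down to a sketch (illustrative types, not QEMU's):

#include <stddef.h>

struct block { unsigned long offset; unsigned long length; };

/* Return the index of the local block matching 'rb' by offset and
 * length, or -1 when no offset matches or the lengths disagree. */
static int match_remote_block(const struct block *rb,
                              const struct block *local, size_t n)
{
    size_t j;
    for (j = 0; j < n; j++) {
        if (rb->offset != local[j].offset)
            continue;                 /* different block entirely */
        if (rb->length != local[j].length)
            return -1;                /* same block, divergent size */
        return (int)j;                /* exact twin found */
    }
    return -1;                        /* no local counterpart */
}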
struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) { struct scm_fp_list *new_fpl; int i; if (!fpl) return NULL; new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL); if (new_fpl) { for (i=fpl->count-1; i>=0; i--) get_file(fpl->fp[i]); memcpy(new_fpl, fpl, sizeof(*fpl)); } return new_fpl; }
struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) { struct scm_fp_list *new_fpl; int i; if (!fpl) return NULL; new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL); if (new_fpl) { for (i=fpl->count-1; i>=0; i--) get_file(fpl->fp[i]); memcpy(new_fpl, fpl, sizeof(*fpl)); } return new_fpl; }
951
0
gss_get_mic (minor_status, context_handle, qop_req, message_buffer, msg_token) OM_uint32 * minor_status; gss_ctx_id_t context_handle; gss_qop_t qop_req; gss_buffer_t message_buffer; gss_buffer_t msg_token; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_get_mic_args(minor_status, context_handle, qop_req, message_buffer, msg_token); if (status != GSS_S_COMPLETE) return (status); /* * select the appropriate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_get_mic) { status = mech->gss_get_mic( minor_status, ctx->internal_ctx_id, qop_req, message_buffer, msg_token); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
gss_get_mic (minor_status, context_handle, qop_req, message_buffer, msg_token) OM_uint32 * minor_status; gss_ctx_id_t context_handle; gss_qop_t qop_req; gss_buffer_t message_buffer; gss_buffer_t msg_token; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_get_mic_args(minor_status, context_handle, qop_req, message_buffer, msg_token); if (status != GSS_S_COMPLETE) return (status); ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_get_mic) { status = mech->gss_get_mic( minor_status, ctx->internal_ctx_id, qop_req, message_buffer, msg_token); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
952
1
void nego_process_negotiation_request(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, nego->RequestedProtocols); WLog_DBG(TAG, "RDP_NEG_REQ: RequestedProtocol: 0x%08" PRIX32 "", nego->RequestedProtocols); nego->state = NEGO_STATE_FINAL; }
void nego_process_negotiation_request(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, nego->RequestedProtocols); WLog_DBG(TAG, "RDP_NEG_REQ: RequestedProtocol: 0x%08" PRIX32 "", nego->RequestedProtocols); nego->state = NEGO_STATE_FINAL; }
953
0
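nego_process_negotiation_request pulls seven bytes (a flags byte, a 16-bit length, a 32-bit protocol mask) straight from the stream without checking how many bytes are actually buffered. A hedged sketch of the guarded variant, using the WinPR-style Stream_GetRemainingLength check; the wrapper name and its place in the call flow are assumptions for illustration, not FreeRDP's actual fix:

/* Sketch only: verify seven bytes are present before parsing the
 * RDP_NEG_REQ body. Assumes WinPR stream helpers are in scope. */
static BOOL nego_read_request_body(rdpNego* nego, wStream* s)
{
    BYTE flags;
    UINT16 length;

    if (Stream_GetRemainingLength(s) < 7) /* 1 + 2 + 4 bytes */
        return FALSE;

    Stream_Read_UINT8(s, flags);
    Stream_Read_UINT16(s, length);
    Stream_Read_UINT32(s, nego->RequestedProtocols);
    return TRUE;
}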
gss_unwrap (minor_status, context_handle, input_message_buffer, output_message_buffer, conf_state, qop_state) OM_uint32 * minor_status; gss_ctx_id_t context_handle; gss_buffer_t input_message_buffer; gss_buffer_t output_message_buffer; int * conf_state; gss_qop_t * qop_state; { /* EXPORT DELETE START */ OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; if (minor_status != NULL) *minor_status = 0; if (output_message_buffer != GSS_C_NO_BUFFER) { output_message_buffer->length = 0; output_message_buffer->value = NULL; } if (minor_status == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); if (context_handle == GSS_C_NO_CONTEXT) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); if (input_message_buffer == GSS_C_NO_BUFFER || GSS_EMPTY_BUFFER(input_message_buffer)) return (GSS_S_CALL_INACCESSIBLE_READ); if (output_message_buffer == GSS_C_NO_BUFFER) return (GSS_S_CALL_INACCESSIBLE_WRITE); /* * select the appropriate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_unwrap) { status = mech->gss_unwrap(minor_status, ctx->internal_ctx_id, input_message_buffer, output_message_buffer, conf_state, qop_state); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else if (mech->gss_unwrap_aead || mech->gss_unwrap_iov) { status = gssint_unwrap_aead(mech, minor_status, ctx, input_message_buffer, GSS_C_NO_BUFFER, output_message_buffer, conf_state, (gss_qop_t *)qop_state); } else status = GSS_S_UNAVAILABLE; return(status); } /* EXPORT DELETE END */ return (GSS_S_BAD_MECH); }
gss_unwrap (minor_status, context_handle, input_message_buffer, output_message_buffer, conf_state, qop_state) OM_uint32 * minor_status; gss_ctx_id_t context_handle; gss_buffer_t input_message_buffer; gss_buffer_t output_message_buffer; int * conf_state; gss_qop_t * qop_state; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; if (minor_status != NULL) *minor_status = 0; if (output_message_buffer != GSS_C_NO_BUFFER) { output_message_buffer->length = 0; output_message_buffer->value = NULL; } if (minor_status == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); if (context_handle == GSS_C_NO_CONTEXT) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); if (input_message_buffer == GSS_C_NO_BUFFER || GSS_EMPTY_BUFFER(input_message_buffer)) return (GSS_S_CALL_INACCESSIBLE_READ); if (output_message_buffer == GSS_C_NO_BUFFER) return (GSS_S_CALL_INACCESSIBLE_WRITE); ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_unwrap) { status = mech->gss_unwrap(minor_status, ctx->internal_ctx_id, input_message_buffer, output_message_buffer, conf_state, qop_state); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else if (mech->gss_unwrap_aead || mech->gss_unwrap_iov) { status = gssint_unwrap_aead(mech, minor_status, ctx, input_message_buffer, GSS_C_NO_BUFFER, output_message_buffer, conf_state, (gss_qop_t *)qop_state); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
954
0
void qtest_clock_warp(int64_t dest) { int64_t clock = qemu_get_clock_ns(vm_clock); assert(qtest_enabled()); while (clock < dest) { int64_t deadline = qemu_clock_deadline(vm_clock); int64_t warp = MIN(dest - clock, deadline); qemu_icount_bias += warp; qemu_run_timers(vm_clock); clock = qemu_get_clock_ns(vm_clock); } qemu_notify_event(); }
void qtest_clock_warp(int64_t dest) { int64_t clock = qemu_get_clock_ns(vm_clock); assert(qtest_enabled()); while (clock < dest) { int64_t deadline = qemu_clock_deadline(vm_clock); int64_t warp = MIN(dest - clock, deadline); qemu_icount_bias += warp; qemu_run_timers(vm_clock); clock = qemu_get_clock_ns(vm_clock); } qemu_notify_event(); }
955
1
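qtest_clock_warp advances the virtual clock in deadline-sized steps so every timer between now and the target fires at its own expiry instead of being jumped over in one leap. The loop shape, abstracted away from QEMU's clock API (all names hypothetical):

#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Advance 'clock' to 'dest', never stepping past the next timer
 * deadline, so each pending timer runs at its own expiry time. */
static void warp_clock(int64_t *clock, int64_t dest,
                       int64_t (*next_deadline)(void),
                       void (*run_expired_timers)(void))
{
    while (*clock < dest) {
        int64_t step = MIN(dest - *clock, next_deadline());
        *clock += step;          /* bias the clock forward */
        run_expired_timers();    /* fire anything now due */
    }
}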
static void inc_inflight_move_tail(struct unix_sock *u) { atomic_long_inc(&u->inflight); /* * If this is still a candidate, move it to the end of the * list, so that it's checked even if it was already passed * over */ if (u->gc_candidate) list_move_tail(&u->link, &gc_candidates); }
static void inc_inflight_move_tail(struct unix_sock *u) { atomic_long_inc(&u->inflight); if (u->gc_candidate) list_move_tail(&u->link, &gc_candidates); }
956
0
static void xen_platform_ioport_writeb(void *opaque, uint32_t addr, uint32_t val) { PCIXenPlatformState *s = opaque; addr &= 0xff; val &= 0xff; switch (addr) { case 0: /* Platform flags */ platform_fixed_ioport_writeb(opaque, XEN_PLATFORM_IOPORT, val); break; case 8: log_writeb(s, val); break; default: break; } }
static void xen_platform_ioport_writeb(void *opaque, uint32_t addr, uint32_t val) { PCIXenPlatformState *s = opaque; addr &= 0xff; val &= 0xff; switch (addr) { case 0: platform_fixed_ioport_writeb(opaque, XEN_PLATFORM_IOPORT, val); break; case 8: log_writeb(s, val); break; default: break; } }
957
0
gss_unwrap_aead (minor_status, context_handle, input_message_buffer, input_assoc_buffer, output_payload_buffer, conf_state, qop_state) OM_uint32 * minor_status; gss_ctx_id_t context_handle; gss_buffer_t input_message_buffer; gss_buffer_t input_assoc_buffer; gss_buffer_t output_payload_buffer; int *conf_state; gss_qop_t *qop_state; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_unwrap_aead_args(minor_status, context_handle, input_message_buffer, input_assoc_buffer, output_payload_buffer, conf_state, qop_state); if (status != GSS_S_COMPLETE) return (status); /* * select the appropriate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (!mech) return (GSS_S_BAD_MECH); return gssint_unwrap_aead(mech, minor_status, ctx, input_message_buffer, input_assoc_buffer, output_payload_buffer, conf_state, qop_state); }
gss_unwrap_aead (minor_status, context_handle, input_message_buffer, input_assoc_buffer, output_payload_buffer, conf_state, qop_state) OM_uint32 * minor_status; gss_ctx_id_t context_handle; gss_buffer_t input_message_buffer; gss_buffer_t input_assoc_buffer; gss_buffer_t output_payload_buffer; int *conf_state; gss_qop_t *qop_state; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_unwrap_aead_args(minor_status, context_handle, input_message_buffer, input_assoc_buffer, output_payload_buffer, conf_state, qop_state); if (status != GSS_S_COMPLETE) return (status); ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (!mech) return (GSS_S_BAD_MECH); return gssint_unwrap_aead(mech, minor_status, ctx, input_message_buffer, input_assoc_buffer, output_payload_buffer, conf_state, qop_state); }
958
1
static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { int i; for (i=scm->fp->count-1; i>=0; i--) unix_inflight(scm->fp->fp[i]); UNIXCB(skb).fp = scm->fp; skb->destructor = unix_destruct_fds; scm->fp = NULL; }
static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { int i; for (i=scm->fp->count-1; i>=0; i--) unix_inflight(scm->fp->fp[i]); UNIXCB(skb).fp = scm->fp; skb->destructor = unix_destruct_fds; scm->fp = NULL; }
959
0
static void Type_Signature_Free(struct _cms_typehandler_struct *self, void *Ptr) { _cmsFree(self->ContextID, Ptr); }
static void Type_Signature_Free(struct _cms_typehandler_struct *self, void *Ptr) { _cmsFree(self->ContextID, Ptr); }
960
0
static uint32_t nabm_readb (void *opaque, uint32_t addr) { PCIAC97LinkState *d = opaque; AC97LinkState *s = &d->ac97; AC97BusMasterRegs *r = NULL; uint32_t index = addr - s->base[1]; uint32_t val = ~0U; switch (index) { case CAS: dolog ("CAS %d\n", s->cas); val = s->cas; s->cas = 1; break; case PI_CIV: case PO_CIV: case MC_CIV: r = &s->bm_regs[GET_BM (index)]; val = r->civ; dolog ("CIV[%d] -> %#x\n", GET_BM (index), val); break; case PI_LVI: case PO_LVI: case MC_LVI: r = &s->bm_regs[GET_BM (index)]; val = r->lvi; dolog ("LVI[%d] -> %#x\n", GET_BM (index), val); break; case PI_PIV: case PO_PIV: case MC_PIV: r = &s->bm_regs[GET_BM (index)]; val = r->piv; dolog ("PIV[%d] -> %#x\n", GET_BM (index), val); break; case PI_CR: case PO_CR: case MC_CR: r = &s->bm_regs[GET_BM (index)]; val = r->cr; dolog ("CR[%d] -> %#x\n", GET_BM (index), val); break; case PI_SR: case PO_SR: case MC_SR: r = &s->bm_regs[GET_BM (index)]; val = r->sr & 0xff; dolog ("SRb[%d] -> %#x\n", GET_BM (index), val); break; default: dolog ("U nabm readb %#x -> %#x\n", addr, val); break; } return val; }
static uint32_t nabm_readb (void *opaque, uint32_t addr) { PCIAC97LinkState *d = opaque; AC97LinkState *s = &d->ac97; AC97BusMasterRegs *r = NULL; uint32_t index = addr - s->base[1]; uint32_t val = ~0U; switch (index) { case CAS: dolog ("CAS %d\n", s->cas); val = s->cas; s->cas = 1; break; case PI_CIV: case PO_CIV: case MC_CIV: r = &s->bm_regs[GET_BM (index)]; val = r->civ; dolog ("CIV[%d] -> %#x\n", GET_BM (index), val); break; case PI_LVI: case PO_LVI: case MC_LVI: r = &s->bm_regs[GET_BM (index)]; val = r->lvi; dolog ("LVI[%d] -> %#x\n", GET_BM (index), val); break; case PI_PIV: case PO_PIV: case MC_PIV: r = &s->bm_regs[GET_BM (index)]; val = r->piv; dolog ("PIV[%d] -> %#x\n", GET_BM (index), val); break; case PI_CR: case PO_CR: case MC_CR: r = &s->bm_regs[GET_BM (index)]; val = r->cr; dolog ("CR[%d] -> %#x\n", GET_BM (index), val); break; case PI_SR: case PO_SR: case MC_SR: r = &s->bm_regs[GET_BM (index)]; val = r->sr & 0xff; dolog ("SRb[%d] -> %#x\n", GET_BM (index), val); break; default: dolog ("U nabm readb %#x -> %#x\n", addr, val); break; } return val; }
961
0
gss_unwrap_iov (minor_status, context_handle, conf_state, qop_state, iov, iov_count) OM_uint32 * minor_status; gss_ctx_id_t context_handle; int * conf_state; gss_qop_t *qop_state; gss_iov_buffer_desc * iov; int iov_count; { /* EXPORT DELETE START */ OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_unwrap_iov_args(minor_status, context_handle, conf_state, qop_state, iov, iov_count); if (status != GSS_S_COMPLETE) return (status); /* * select the appropriate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_unwrap_iov) { status = mech->gss_unwrap_iov( minor_status, ctx->internal_ctx_id, conf_state, qop_state, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } /* EXPORT DELETE END */ return (GSS_S_BAD_MECH); }
gss_unwrap_iov (minor_status, context_handle, conf_state, qop_state, iov, iov_count) OM_uint32 * minor_status; gss_ctx_id_t context_handle; int * conf_state; gss_qop_t *qop_state; gss_iov_buffer_desc * iov; int iov_count; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_unwrap_iov_args(minor_status, context_handle, conf_state, qop_state, iov, iov_count); if (status != GSS_S_COMPLETE) return (status); ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_unwrap_iov) { status = mech->gss_unwrap_iov( minor_status, ctx->internal_ctx_id, conf_state, qop_state, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
962
0
static int ac3_encode_frame(AVCodecContext *avctx, unsigned char *frame, int buf_size, void *data) { AC3EncodeContext *s = avctx->priv_data; const int16_t *samples = data; int16_t planar_samples[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE+AC3_FRAME_SIZE]; int32_t mdct_coef[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; uint8_t exp[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; uint8_t exp_strategy[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS]; uint8_t encoded_exp[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; uint8_t num_exp_groups[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS]; uint8_t grouped_exp[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_EXP_GROUPS]; uint8_t bap[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; int8_t exp_shift[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS]; uint16_t qmant[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; int frame_bits; if (s->bit_alloc.sr_code == 1) adjust_frame_size(s); deinterleave_input_samples(s, samples, planar_samples); apply_mdct(s, planar_samples, exp_shift, mdct_coef); frame_bits = process_exponents(s, mdct_coef, exp_shift, exp, exp_strategy, encoded_exp, num_exp_groups, grouped_exp); compute_bit_allocation(s, bap, encoded_exp, exp_strategy, frame_bits); quantize_mantissas(s, mdct_coef, exp_shift, encoded_exp, bap, qmant); output_frame(s, frame, exp_strategy, num_exp_groups, grouped_exp, bap, qmant); return s->frame_size; }
static int ac3_encode_frame(AVCodecContext *avctx, unsigned char *frame, int buf_size, void *data) { AC3EncodeContext *s = avctx->priv_data; const int16_t *samples = data; int16_t planar_samples[AC3_MAX_CHANNELS][AC3_BLOCK_SIZE+AC3_FRAME_SIZE]; int32_t mdct_coef[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; uint8_t exp[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; uint8_t exp_strategy[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS]; uint8_t encoded_exp[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; uint8_t num_exp_groups[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS]; uint8_t grouped_exp[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_EXP_GROUPS]; uint8_t bap[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; int8_t exp_shift[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS]; uint16_t qmant[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS][AC3_MAX_COEFS]; int frame_bits; if (s->bit_alloc.sr_code == 1) adjust_frame_size(s); deinterleave_input_samples(s, samples, planar_samples); apply_mdct(s, planar_samples, exp_shift, mdct_coef); frame_bits = process_exponents(s, mdct_coef, exp_shift, exp, exp_strategy, encoded_exp, num_exp_groups, grouped_exp); compute_bit_allocation(s, bap, encoded_exp, exp_strategy, frame_bits); quantize_mantissas(s, mdct_coef, exp_shift, encoded_exp, bap, qmant); output_frame(s, frame, exp_strategy, num_exp_groups, grouped_exp, bap, qmant); return s->frame_size; }
963
1
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { struct sk_buff *skb; struct sk_buff *next; spin_lock(&x->sk_receive_queue.lock); receive_queue_for_each_skb(x, next, skb) { /* * Do we have file descriptors? */ if (UNIXCB(skb).fp) { bool hit = false; /* * Process the descriptors of this socket */ int nfd = UNIXCB(skb).fp->count; struct file **fp = UNIXCB(skb).fp->fp; while (nfd--) { /* * Get the socket this fd refers to, * if it refers to one at all */ struct sock *sk = unix_get_socket(*fp++); if (sk) { hit = true; func(unix_sk(sk)); } } if (hit && hitlist != NULL) { __skb_unlink(skb, &x->sk_receive_queue); __skb_queue_tail(hitlist, skb); } } } spin_unlock(&x->sk_receive_queue.lock); }
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { struct sk_buff *skb; struct sk_buff *next; spin_lock(&x->sk_receive_queue.lock); receive_queue_for_each_skb(x, next, skb) { if (UNIXCB(skb).fp) { bool hit = false; int nfd = UNIXCB(skb).fp->count; struct file **fp = UNIXCB(skb).fp->fp; while (nfd--) { struct sock *sk = unix_get_socket(*fp++); if (sk) { hit = true; func(unix_sk(sk)); } } if (hit && hitlist != NULL) { __skb_unlink(skb, &x->sk_receive_queue); __skb_queue_tail(hitlist, skb); } } } spin_unlock(&x->sk_receive_queue.lock); }
964
0
static int SpoolssStartPagePrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { offset = dissect_doserror(tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; }
static int SpoolssStartPagePrinter_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { offset = dissect_doserror(tvb, offset, pinfo, tree, di, drep, hf_rc, NULL); return offset; }
966
0
void cpu_physical_memory_write_rom(target_phys_addr_t addr, const uint8_t *buf, int len) { AddressSpaceDispatch *d = address_space_memory.dispatch; int l; uint8_t *ptr; target_phys_addr_t page; MemoryRegionSection *section; while (len > 0) { page = addr & TARGET_PAGE_MASK; l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; section = phys_page_find(d, page >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { /* do nothing */ } else { unsigned long addr1; addr1 = memory_region_get_ram_addr(section->mr) + memory_region_section_addr(section, addr); /* ROM/RAM case */ ptr = qemu_get_ram_ptr(addr1); memcpy(ptr, buf, l); invalidate_and_set_dirty(addr1, l); qemu_put_ram_ptr(ptr); } len -= l; buf += l; addr += l; } }
void cpu_physical_memory_write_rom(target_phys_addr_t addr, const uint8_t *buf, int len) { AddressSpaceDispatch *d = address_space_memory.dispatch; int l; uint8_t *ptr; target_phys_addr_t page; MemoryRegionSection *section; while (len > 0) { page = addr & TARGET_PAGE_MASK; l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; section = phys_page_find(d, page >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { } else { unsigned long addr1; addr1 = memory_region_get_ram_addr(section->mr) + memory_region_section_addr(section, addr); ptr = qemu_get_ram_ptr(addr1); memcpy(ptr, buf, l); invalidate_and_set_dirty(addr1, l); qemu_put_ram_ptr(ptr); } len -= l; buf += l; addr += l; } }
968
1
void unix_gc(void) { static bool gc_in_progress = false; struct unix_sock *u; struct unix_sock *next; struct sk_buff_head hitlist; struct list_head cursor; spin_lock(&unix_gc_lock); /* Avoid a recursive GC. */ if (gc_in_progress) goto out; gc_in_progress = true; /* * First, select candidates for garbage collection. Only * in-flight sockets are considered, and from those only ones * which don't have any external reference. * * Holding unix_gc_lock will protect these candidates from * being detached, and hence from gaining an external * reference. This also means, that since there are no * possible receivers, the receive queues of these sockets are * static during the GC, even though the dequeue is done * before the detach without atomicity guarantees. */ list_for_each_entry_safe(u, next, &gc_inflight_list, link) { long total_refs; long inflight_refs; total_refs = file_count(u->sk.sk_socket->file); inflight_refs = atomic_long_read(&u->inflight); BUG_ON(inflight_refs < 1); BUG_ON(total_refs < inflight_refs); if (total_refs == inflight_refs) { list_move_tail(&u->link, &gc_candidates); u->gc_candidate = 1; } } /* * Now remove all internal in-flight reference to children of * the candidates. */ list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, dec_inflight, NULL); /* * Restore the references for children of all candidates, * which have remaining references. Do this recursively, so * only those remain, which form cyclic references. * * Use a "cursor" link, to make the list traversal safe, even * though elements might be moved about. */ list_add(&cursor, &gc_candidates); while (cursor.next != &gc_candidates) { u = list_entry(cursor.next, struct unix_sock, link); /* Move cursor to after the current position. */ list_move(&cursor, &u->link); if (atomic_long_read(&u->inflight) > 0) { list_move_tail(&u->link, &gc_inflight_list); u->gc_candidate = 0; scan_children(&u->sk, inc_inflight_move_tail, NULL); } } list_del(&cursor); /* * Now gc_candidates contains only garbage. Restore original * inflight counters for these as well, and remove the skbuffs * which are creating the cycle(s). */ skb_queue_head_init(&hitlist); list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, inc_inflight, &hitlist); spin_unlock(&unix_gc_lock); /* Here we are. Hitlist is filled. Die. */ __skb_queue_purge(&hitlist); spin_lock(&unix_gc_lock); /* All candidates should have been detached by now. */ BUG_ON(!list_empty(&gc_candidates)); gc_in_progress = false; out: spin_unlock(&unix_gc_lock); }
void unix_gc(void) { static bool gc_in_progress = false; struct unix_sock *u; struct unix_sock *next; struct sk_buff_head hitlist; struct list_head cursor; spin_lock(&unix_gc_lock); if (gc_in_progress) goto out; gc_in_progress = true; list_for_each_entry_safe(u, next, &gc_inflight_list, link) { long total_refs; long inflight_refs; total_refs = file_count(u->sk.sk_socket->file); inflight_refs = atomic_long_read(&u->inflight); BUG_ON(inflight_refs < 1); BUG_ON(total_refs < inflight_refs); if (total_refs == inflight_refs) { list_move_tail(&u->link, &gc_candidates); u->gc_candidate = 1; } } list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, dec_inflight, NULL); list_add(&cursor, &gc_candidates); while (cursor.next != &gc_candidates) { u = list_entry(cursor.next, struct unix_sock, link); list_move(&cursor, &u->link); if (atomic_long_read(&u->inflight) > 0) { list_move_tail(&u->link, &gc_inflight_list); u->gc_candidate = 0; scan_children(&u->sk, inc_inflight_move_tail, NULL); } } list_del(&cursor); skb_queue_head_init(&hitlist); list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, inc_inflight, &hitlist); spin_unlock(&unix_gc_lock); __skb_queue_purge(&hitlist); spin_lock(&unix_gc_lock); BUG_ON(!list_empty(&gc_candidates)); gc_in_progress = false; out: spin_unlock(&unix_gc_lock); }
969
0
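unix_gc hinges on a single invariant: a socket whose file reference count equals its in-flight count is reachable only through SCM_RIGHTS messages still sitting in receive queues, so only a reference cycle can be keeping it alive. The candidate test in isolation (sketch, hypothetical types):

#include <stdbool.h>

/* A unix socket is a garbage-collection candidate when every reference
 * to its file comes from in-flight SCM_RIGHTS messages: no user-space
 * fd can reach it any more, so only a cycle can keep it alive. */
static bool is_gc_candidate(long total_refs, long inflight_refs)
{
    /* Caller guarantees total_refs >= inflight_refs >= 1. */
    return total_refs == inflight_refs;
}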
static UChar GetUniFromLMBCSUni(char const **ppLMBCSin) { uint8_t HighCh = *(*ppLMBCSin)++; uint8_t LowCh = *(*ppLMBCSin)++; if (HighCh == ULMBCS_UNICOMPATZERO) { HighCh = LowCh; LowCh = 0; } return (UChar)((HighCh << 8) | LowCh); }
static UChar GetUniFromLMBCSUni(char const **ppLMBCSin) { uint8_t HighCh = *(*ppLMBCSin)++; uint8_t LowCh = *(*ppLMBCSin)++; if (HighCh == ULMBCS_UNICOMPATZERO) { HighCh = LowCh; LowCh = 0; } return (UChar)((HighCh << 8) | LowCh); }
970
1
av_cold int ff_lpc_init(LPCContext *s, int blocksize, int max_order, enum FFLPCType lpc_type) { s->blocksize = blocksize; s->max_order = max_order; s->lpc_type = lpc_type; if (lpc_type == FF_LPC_TYPE_LEVINSON) { s->windowed_samples = av_mallocz((blocksize + max_order + 2) * sizeof(*s->windowed_samples)); if (!s->windowed_samples) return AVERROR(ENOMEM); } else { s->windowed_samples = NULL; } s->lpc_apply_welch_window = lpc_apply_welch_window_c; s->lpc_compute_autocorr = lpc_compute_autocorr_c; if (HAVE_MMX) ff_lpc_init_x86(s); return 0; }
av_cold int ff_lpc_init(LPCContext *s, int blocksize, int max_order, enum FFLPCType lpc_type) { s->blocksize = blocksize; s->max_order = max_order; s->lpc_type = lpc_type; if (lpc_type == FF_LPC_TYPE_LEVINSON) { s->windowed_samples = av_mallocz((blocksize + max_order + 2) * sizeof(*s->windowed_samples)); if (!s->windowed_samples) return AVERROR(ENOMEM); } else { s->windowed_samples = NULL; } s->lpc_apply_welch_window = lpc_apply_welch_window_c; s->lpc_compute_autocorr = lpc_compute_autocorr_c; if (HAVE_MMX) ff_lpc_init_x86(s); return 0; }
972
1
static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr=msg->msg_name; struct sock *other = NULL; int namelen = 0; /* fake init to silence GCC */ int err; unsigned hash; struct sk_buff *skb; long timeo; struct scm_cookie tmp_scm; if (NULL == siocb->scm) siocb->scm = &tmp_scm; err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out; if (msg->msg_namelen) { err = unix_mkname(sunaddr, msg->msg_namelen, &hash); if (err < 0) goto out; namelen = err; } else { sunaddr = NULL; err = -ENOTCONN; other = unix_peer_get(sk); if (!other) goto out; } if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && (err = unix_autobind(sock)) != 0) goto out; err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err); if (skb==NULL) goto out; memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); if (siocb->scm->fp) unix_attach_fds(siocb->scm, skb); unix_get_secdata(siocb->scm, skb); skb_reset_transport_header(skb); err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); if (err) goto out_free; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); restart: if (!other) { err = -ECONNRESET; if (sunaddr == NULL) goto out_free; other = unix_find_other(net, sunaddr, namelen, sk->sk_type, hash, &err); if (other==NULL) goto out_free; } unix_state_lock(other); err = -EPERM; if (!unix_may_send(sk, other)) goto out_unlock; if (sock_flag(other, SOCK_DEAD)) { /* * Check with 1003.1g - what should happen on a * datagram error? */ unix_state_unlock(other); sock_put(other); err = 0; unix_state_lock(sk); if (unix_peer(sk) == other) { unix_peer(sk)=NULL; unix_state_unlock(sk); unix_dgram_disconnected(sk, other); sock_put(other); err = -ECONNREFUSED; } else { unix_state_unlock(sk); } other = NULL; if (err) goto out_free; goto restart; } err = -EPIPE; if (other->sk_shutdown & RCV_SHUTDOWN) goto out_unlock; if (sk->sk_type != SOCK_SEQPACKET) { err = security_unix_may_send(sk->sk_socket, other->sk_socket); if (err) goto out_unlock; } if (unix_peer(other) != sk && unix_recvq_full(other)) { if (!timeo) { err = -EAGAIN; goto out_unlock; } timeo = unix_wait_for_peer(other, timeo); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out_free; goto restart; } skb_queue_tail(&other->sk_receive_queue, skb); unix_state_unlock(other); other->sk_data_ready(other, len); sock_put(other); scm_destroy(siocb->scm); return len; out_unlock: unix_state_unlock(other); out_free: kfree_skb(skb); out: if (other) sock_put(other); scm_destroy(siocb->scm); return err; }
static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr=msg->msg_name; struct sock *other = NULL; int namelen = 0; int err; unsigned hash; struct sk_buff *skb; long timeo; struct scm_cookie tmp_scm; if (NULL == siocb->scm) siocb->scm = &tmp_scm; err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out; if (msg->msg_namelen) { err = unix_mkname(sunaddr, msg->msg_namelen, &hash); if (err < 0) goto out; namelen = err; } else { sunaddr = NULL; err = -ENOTCONN; other = unix_peer_get(sk); if (!other) goto out; } if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && (err = unix_autobind(sock)) != 0) goto out; err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err); if (skb==NULL) goto out; memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); if (siocb->scm->fp) unix_attach_fds(siocb->scm, skb); unix_get_secdata(siocb->scm, skb); skb_reset_transport_header(skb); err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); if (err) goto out_free; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); restart: if (!other) { err = -ECONNRESET; if (sunaddr == NULL) goto out_free; other = unix_find_other(net, sunaddr, namelen, sk->sk_type, hash, &err); if (other==NULL) goto out_free; } unix_state_lock(other); err = -EPERM; if (!unix_may_send(sk, other)) goto out_unlock; if (sock_flag(other, SOCK_DEAD)) { unix_state_unlock(other); sock_put(other); err = 0; unix_state_lock(sk); if (unix_peer(sk) == other) { unix_peer(sk)=NULL; unix_state_unlock(sk); unix_dgram_disconnected(sk, other); sock_put(other); err = -ECONNREFUSED; } else { unix_state_unlock(sk); } other = NULL; if (err) goto out_free; goto restart; } err = -EPIPE; if (other->sk_shutdown & RCV_SHUTDOWN) goto out_unlock; if (sk->sk_type != SOCK_SEQPACKET) { err = security_unix_may_send(sk->sk_socket, other->sk_socket); if (err) goto out_unlock; } if (unix_peer(other) != sk && unix_recvq_full(other)) { if (!timeo) { err = -EAGAIN; goto out_unlock; } timeo = unix_wait_for_peer(other, timeo); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out_free; goto restart; } skb_queue_tail(&other->sk_receive_queue, skb); unix_state_unlock(other); other->sk_data_ready(other, len); sock_put(other); scm_destroy(siocb->scm); return len; out_unlock: unix_state_unlock(other); out_free: kfree_skb(skb); out: if (other) sock_put(other); scm_destroy(siocb->scm); return err; }
973
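Note: the datagram send path above centers on a retry loop for dead peers: sever a cached peer that died and report ECONNREFUSED, or redo the by-name lookup and try again. Below is a minimal, self-contained sketch of just that pattern; the struct and helper names (ep, lookup_again, dgram_send) are invented for illustration and are not kernel APIs.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct ep { int dead; struct ep *peer; };

static struct ep *lookup_again(void) { return NULL; }  /* pretend name lookup */

static int dgram_send(struct ep *self, struct ep *other, int by_name)
{
    while (other && other->dead) {
        if (self->peer == other) {      /* our cached peer died: sever it */
            self->peer = NULL;
            return -ECONNREFUSED;
        }
        if (!by_name)
            return -ECONNRESET;
        other = lookup_again();         /* restart, as the goto restart does above */
    }
    return other ? 0 : -ECONNRESET;     /* 0 stands in for "delivered" */
}

int main(void)
{
    struct ep dead_peer = { 1, NULL };
    struct ep self = { 0, &dead_peer };
    printf("%d\n", dgram_send(&self, &dead_peer, 0));  /* -ECONNREFUSED */
    return 0;
}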
1
char *get_arg(char *line, my_bool get_next_arg) { char *ptr, *start; my_bool quoted = 0, valid_arg = 0; char qtype = 0; ptr = line; if (get_next_arg) { for (; *ptr; ptr++) ; if (*(ptr + 1)) ptr++; } else { while (my_isspace(charset_info, *ptr)) ptr++; if (*ptr == '\\') ptr += 2; else while (*ptr && !my_isspace(charset_info, *ptr)) ptr++; } if (!*ptr) return NullS; while (my_isspace(charset_info, *ptr)) ptr++; if (*ptr == '\'' || *ptr == '\"' || *ptr == '`') { qtype = *ptr; quoted = 1; ptr++; } for (start = ptr; *ptr; ptr++) { if (*ptr == '\\' && ptr[1]) { strmov_overlapp(ptr, ptr + 1); } else if ((!quoted && *ptr == ' ') || (quoted && *ptr == qtype)) { *ptr = 0; break; } } valid_arg = ptr != start; return valid_arg ? start : NullS; }
char *get_arg(char *line, my_bool get_next_arg) { char *ptr, *start; my_bool quoted = 0, valid_arg = 0; char qtype = 0; ptr = line; if (get_next_arg) { for (; *ptr; ptr++) ; if (*(ptr + 1)) ptr++; } else { while (my_isspace(charset_info, *ptr)) ptr++; if (*ptr == '\\') ptr += 2; else while (*ptr && !my_isspace(charset_info, *ptr)) ptr++; } if (!*ptr) return NullS; while (my_isspace(charset_info, *ptr)) ptr++; if (*ptr == '\'' || *ptr == '\"' || *ptr == '`') { qtype = *ptr; quoted = 1; ptr++; } for (start = ptr; *ptr; ptr++) { if (*ptr == '\\' && ptr[1]) { strmov_overlapp(ptr, ptr + 1); } else if ((!quoted && *ptr == ' ') || (quoted && *ptr == qtype)) { *ptr = 0; break; } } valid_arg = ptr != start; return valid_arg ? start : NullS; }
975
0
gss_wrap_aead (minor_status, context_handle, conf_req_flag, qop_req, input_assoc_buffer, input_payload_buffer, conf_state, output_message_buffer) OM_uint32 * minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; gss_buffer_t input_assoc_buffer; gss_buffer_t input_payload_buffer; int * conf_state; gss_buffer_t output_message_buffer; { OM_uint32 status; gss_mechanism mech; gss_union_ctx_id_t ctx; status = val_wrap_aead_args(minor_status, context_handle, conf_req_flag, qop_req, input_assoc_buffer, input_payload_buffer, conf_state, output_message_buffer); if (status != GSS_S_COMPLETE) return (status); /* Select the appropriate underlying mechanism routine and call it. */ ctx = (gss_union_ctx_id_t)context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (!mech) return (GSS_S_BAD_MECH); return gssint_wrap_aead(mech, minor_status, ctx, conf_req_flag, qop_req, input_assoc_buffer, input_payload_buffer, conf_state, output_message_buffer); }
gss_wrap_aead (minor_status, context_handle, conf_req_flag, qop_req, input_assoc_buffer, input_payload_buffer, conf_state, output_message_buffer) OM_uint32 * minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; gss_buffer_t input_assoc_buffer; gss_buffer_t input_payload_buffer; int * conf_state; gss_buffer_t output_message_buffer; { OM_uint32 status; gss_mechanism mech; gss_union_ctx_id_t ctx; status = val_wrap_aead_args(minor_status, context_handle, conf_req_flag, qop_req, input_assoc_buffer, input_payload_buffer, conf_state, output_message_buffer); if (status != GSS_S_COMPLETE) return (status); ctx = (gss_union_ctx_id_t)context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (!mech) return (GSS_S_BAD_MECH); return gssint_wrap_aead(mech, minor_status, ctx, conf_req_flag, qop_req, input_assoc_buffer, input_payload_buffer, conf_state, output_message_buffer); }
976
1
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct sock *other = NULL; struct sockaddr_un *sunaddr=msg->msg_name; int err,size; struct sk_buff *skb; int sent=0; struct scm_cookie tmp_scm; if (NULL == siocb->scm) siocb->scm = &tmp_scm; err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out_err; if (msg->msg_namelen) { err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; goto out_err; } else { sunaddr = NULL; err = -ENOTCONN; other = unix_peer(sk); if (!other) goto out_err; } if (sk->sk_shutdown & SEND_SHUTDOWN) goto pipe_err; while(sent < len) { /* Optimisation for the fact that under 0.01% of X messages typically need breaking up. */ size = len-sent; /* Keep two messages in the pipe so it schedules better */ if (size > ((sk->sk_sndbuf >> 1) - 64)) size = (sk->sk_sndbuf >> 1) - 64; if (size > SKB_MAX_ALLOC) size = SKB_MAX_ALLOC; /* Grab a buffer */ skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err); if (skb==NULL) goto out_err; /* If you pass two values to the sock_alloc_send_skb it tries to grab the large buffer with GFP_NOFS (which can fail easily), and if it fails grab the fallback size buffer which is under a page and will succeed. [Alan] */ size = min_t(int, size, skb_tailroom(skb)); memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); if (siocb->scm->fp) unix_attach_fds(siocb->scm, skb); if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) { kfree_skb(skb); goto out_err; } unix_state_lock(other); if (sock_flag(other, SOCK_DEAD) || (other->sk_shutdown & RCV_SHUTDOWN)) goto pipe_err_free; skb_queue_tail(&other->sk_receive_queue, skb); unix_state_unlock(other); other->sk_data_ready(other, size); sent+=size; } scm_destroy(siocb->scm); siocb->scm = NULL; return sent; pipe_err_free: unix_state_unlock(other); kfree_skb(skb); pipe_err: if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL)) send_sig(SIGPIPE,current,0); err = -EPIPE; out_err: scm_destroy(siocb->scm); siocb->scm = NULL; return sent ? : err; }
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct sock *other = NULL; struct sockaddr_un *sunaddr=msg->msg_name; int err,size; struct sk_buff *skb; int sent=0; struct scm_cookie tmp_scm; if (NULL == siocb->scm) siocb->scm = &tmp_scm; err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out_err; if (msg->msg_namelen) { err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; goto out_err; } else { sunaddr = NULL; err = -ENOTCONN; other = unix_peer(sk); if (!other) goto out_err; } if (sk->sk_shutdown & SEND_SHUTDOWN) goto pipe_err; while(sent < len) { size = len-sent; if (size > ((sk->sk_sndbuf >> 1) - 64)) size = (sk->sk_sndbuf >> 1) - 64; if (size > SKB_MAX_ALLOC) size = SKB_MAX_ALLOC; skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err); if (skb==NULL) goto out_err; size = min_t(int, size, skb_tailroom(skb)); memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); if (siocb->scm->fp) unix_attach_fds(siocb->scm, skb); if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) { kfree_skb(skb); goto out_err; } unix_state_lock(other); if (sock_flag(other, SOCK_DEAD) || (other->sk_shutdown & RCV_SHUTDOWN)) goto pipe_err_free; skb_queue_tail(&other->sk_receive_queue, skb); unix_state_unlock(other); other->sk_data_ready(other, size); sent+=size; } scm_destroy(siocb->scm); siocb->scm = NULL; return sent; pipe_err_free: unix_state_unlock(other); kfree_skb(skb); pipe_err: if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL)) send_sig(SIGPIPE,current,0); err = -EPIPE; out_err: scm_destroy(siocb->scm); siocb->scm = NULL; return sent ? : err; }
978
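Note: the stream send path above splits a large write into segments, capping each one so that two segments can sit in the peer's queue at once. A hedged sketch of just that size computation follows; TOY_SKB_MAX_ALLOC is a made-up cap standing in for the kernel's SKB_MAX_ALLOC.

#include <stdio.h>

#define TOY_SKB_MAX_ALLOC 4000  /* made-up stand-in for SKB_MAX_ALLOC */

static int chunk_size(int remaining, int sndbuf)
{
    int size = remaining;
    if (size > ((sndbuf >> 1) - 64))
        size = (sndbuf >> 1) - 64;      /* keep two messages in the pipe */
    if (size > TOY_SKB_MAX_ALLOC)
        size = TOY_SKB_MAX_ALLOC;       /* respect the allocator's limit */
    return size;
}

int main(void)
{
    printf("%d\n", chunk_size(100000, 16384));  /* prints 4000 */
    return 0;
}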
0
gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, gss_qop_t qop_req, gss_iov_buffer_desc *iov, int iov_count) { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL, iov, iov_count); if (status != GSS_S_COMPLETE) return status; /* Select the appropriate underlying mechanism routine and call it. */ ctx = (gss_union_ctx_id_t)context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return GSS_S_NO_CONTEXT; mech = gssint_get_mechanism(ctx->mech_type); if (mech == NULL) return GSS_S_BAD_MECH; if (mech->gss_get_mic_iov == NULL) return GSS_S_UNAVAILABLE; status = mech->gss_get_mic_iov(minor_status, ctx->internal_ctx_id, qop_req, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); return status; }
gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, gss_qop_t qop_req, gss_iov_buffer_desc *iov, int iov_count) { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL, iov, iov_count); if (status != GSS_S_COMPLETE) return status; ctx = (gss_union_ctx_id_t)context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return GSS_S_NO_CONTEXT; mech = gssint_get_mechanism(ctx->mech_type); if (mech == NULL) return GSS_S_BAD_MECH; if (mech->gss_get_mic_iov == NULL) return GSS_S_UNAVAILABLE; status = mech->gss_get_mic_iov(minor_status, ctx->internal_ctx_id, qop_req, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); return status; }
980
0
void YM3812UpdateOne(FM_OPL *OPL, INT16 *buffer, int length) { int i; int data; OPLSAMPLE *buf = buffer; UINT32 amsCnt = OPL->amsCnt; UINT32 vibCnt = OPL->vibCnt; UINT8 rythm = OPL->rythm&0x20; OPL_CH *CH,*R_CH; if( (void *)OPL != cur_chip ){ cur_chip = (void *)OPL; /* channel pointers */ S_CH = OPL->P_CH; E_CH = &S_CH[9]; /* rythm slot */ SLOT7_1 = &S_CH[7].SLOT[SLOT1]; SLOT7_2 = &S_CH[7].SLOT[SLOT2]; SLOT8_1 = &S_CH[8].SLOT[SLOT1]; SLOT8_2 = &S_CH[8].SLOT[SLOT2]; /* LFO state */ amsIncr = OPL->amsIncr; vibIncr = OPL->vibIncr; ams_table = OPL->ams_table; vib_table = OPL->vib_table; } R_CH = rythm ? &S_CH[6] : E_CH; for( i=0; i < length ; i++ ) { /* channel A channel B channel C */ /* LFO */ ams = ams_table[(amsCnt+=amsIncr)>>AMS_SHIFT]; vib = vib_table[(vibCnt+=vibIncr)>>VIB_SHIFT]; outd[0] = 0; /* FM part */ for(CH=S_CH ; CH < R_CH ; CH++) OPL_CALC_CH(CH); /* Rythm part */ if(rythm) OPL_CALC_RH(S_CH); /* limit check */ data = Limit( outd[0] , OPL_MAXOUT, OPL_MINOUT ); /* store to sound buffer */ buf[i] = data >> OPL_OUTSB; } OPL->amsCnt = amsCnt; OPL->vibCnt = vibCnt; #ifdef OPL_OUTPUT_LOG if(opl_dbg_fp) { for(opl_dbg_chip=0;opl_dbg_chip<opl_dbg_maxchip;opl_dbg_chip++) if( opl_dbg_opl[opl_dbg_chip] == OPL) break; fprintf(opl_dbg_fp,"%c%c%c",0x20+opl_dbg_chip,length&0xff,length/256); } #endif }
void YM3812UpdateOne(FM_OPL *OPL, INT16 *buffer, int length) { int i; int data; OPLSAMPLE *buf = buffer; UINT32 amsCnt = OPL->amsCnt; UINT32 vibCnt = OPL->vibCnt; UINT8 rythm = OPL->rythm&0x20; OPL_CH *CH,*R_CH; if( (void *)OPL != cur_chip ){ cur_chip = (void *)OPL; S_CH = OPL->P_CH; E_CH = &S_CH[9]; SLOT7_1 = &S_CH[7].SLOT[SLOT1]; SLOT7_2 = &S_CH[7].SLOT[SLOT2]; SLOT8_1 = &S_CH[8].SLOT[SLOT1]; SLOT8_2 = &S_CH[8].SLOT[SLOT2]; amsIncr = OPL->amsIncr; vibIncr = OPL->vibIncr; ams_table = OPL->ams_table; vib_table = OPL->vib_table; } R_CH = rythm ? &S_CH[6] : E_CH; for( i=0; i < length ; i++ ) { ams = ams_table[(amsCnt+=amsIncr)>>AMS_SHIFT]; vib = vib_table[(vibCnt+=vibIncr)>>VIB_SHIFT]; outd[0] = 0; for(CH=S_CH ; CH < R_CH ; CH++) OPL_CALC_CH(CH); if(rythm) OPL_CALC_RH(S_CH); data = Limit( outd[0] , OPL_MAXOUT, OPL_MINOUT ); buf[i] = data >> OPL_OUTSB; } OPL->amsCnt = amsCnt; OPL->vibCnt = vibCnt; #ifdef OPL_OUTPUT_LOG if(opl_dbg_fp) { for(opl_dbg_chip=0;opl_dbg_chip<opl_dbg_maxchip;opl_dbg_chip++) if( opl_dbg_opl[opl_dbg_chip] == OPL) break; fprintf(opl_dbg_fp,"%c%c%c",0x20+opl_dbg_chip,length&0xff,length/256); } #endif }
981
0
static int claimintf(struct usb_dev_state *ps, unsigned int ifnum) { struct usb_device *dev = ps->dev; struct usb_interface *intf; int err; if (ifnum >= 8 * sizeof(ps->ifclaimed)) return -EINVAL; if (test_bit(ifnum, &ps->ifclaimed)) return 0; if (ps->privileges_dropped && !test_bit(ifnum, &ps->interface_allowed_mask)) return -EACCES; intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; else err = usb_driver_claim_interface(&usbfs_driver, intf, ps); if (err == 0) set_bit(ifnum, &ps->ifclaimed); return err; }
static int claimintf(struct usb_dev_state *ps, unsigned int ifnum) { struct usb_device *dev = ps->dev; struct usb_interface *intf; int err; if (ifnum >= 8 * sizeof(ps->ifclaimed)) return -EINVAL; if (test_bit(ifnum, &ps->ifclaimed)) return 0; if (ps->privileges_dropped && !test_bit(ifnum, &ps->interface_allowed_mask)) return -EACCES; intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; else err = usb_driver_claim_interface(&usbfs_driver, intf, ps); if (err == 0) set_bit(ifnum, &ps->ifclaimed); return err; }
982
1
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk), *newu, *otheru; struct sock *newsk = NULL; struct sock *other = NULL; struct sk_buff *skb = NULL; unsigned hash; int st; int err; long timeo; err = unix_mkname(sunaddr, addr_len, &hash); if (err < 0) goto out; addr_len = err; if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && (err = unix_autobind(sock)) != 0) goto out; timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); /* First of all allocate resources. If we will make it after state is locked, we will have to recheck all again in any case. */ err = -ENOMEM; /* create new sock for complete connection */ newsk = unix_create1(NULL); if (newsk == NULL) goto out; /* Allocate skb for sending to listening sock */ skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL); if (skb == NULL) goto out; restart: /* Find listening sock. */ other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err); if (!other) goto out; /* Latch state of peer */ unix_state_lock(other); /* Apparently VFS overslept socket death. Retry. */ if (sock_flag(other, SOCK_DEAD)) { unix_state_unlock(other); sock_put(other); goto restart; } err = -ECONNREFUSED; if (other->sk_state != TCP_LISTEN) goto out_unlock; if (skb_queue_len(&other->sk_receive_queue) > other->sk_max_ack_backlog) { err = -EAGAIN; if (!timeo) goto out_unlock; timeo = unix_wait_for_peer(other, timeo); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out; sock_put(other); goto restart; } /* Latch our state. It is tricky place. We need to grab write lock and cannot drop lock on peer. It is dangerous because deadlock is possible. Connect to self case and simultaneous attempt to connect are eliminated by checking socket state. other is TCP_LISTEN, if sk is TCP_LISTEN we check this before attempt to grab lock. Well, and we have to recheck the state after socket locked. */ st = sk->sk_state; switch (st) { case TCP_CLOSE: /* This is ok... continue with connect */ break; case TCP_ESTABLISHED: /* Socket is already connected */ err = -EISCONN; goto out_unlock; default: err = -EINVAL; goto out_unlock; } unix_state_lock_nested(sk); if (sk->sk_state != st) { unix_state_unlock(sk); unix_state_unlock(other); sock_put(other); goto restart; } err = security_unix_stream_connect(sock, other->sk_socket, newsk); if (err) { unix_state_unlock(sk); goto out_unlock; } /* The way is open! Fastly set all the necessary fields... */ sock_hold(sk); unix_peer(newsk) = sk; newsk->sk_state = TCP_ESTABLISHED; newsk->sk_type = sk->sk_type; newsk->sk_peercred.pid = current->tgid; newsk->sk_peercred.uid = current->euid; newsk->sk_peercred.gid = current->egid; newu = unix_sk(newsk); newsk->sk_sleep = &newu->peer_wait; otheru = unix_sk(other); /* copy address information from listening to new sock */ if (otheru->addr) { atomic_inc(&otheru->addr->refcnt); newu->addr = otheru->addr; } if (otheru->dentry) { newu->dentry = dget(otheru->dentry); newu->mnt = mntget(otheru->mnt); } /* Set credentials */ sk->sk_peercred = other->sk_peercred; sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; sock_hold(newsk); smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */ unix_peer(sk) = newsk; unix_state_unlock(sk); /* take ten and send info to listening sock */ spin_lock(&other->sk_receive_queue.lock); __skb_queue_tail(&other->sk_receive_queue, skb); /* Undo artificially decreased inflight after embrion is installed to listening socket. */ atomic_inc(&newu->inflight); spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); other->sk_data_ready(other, 0); sock_put(other); return 0; out_unlock: if (other) unix_state_unlock(other); out: if (skb) kfree_skb(skb); if (newsk) unix_release_sock(newsk, 0); if (other) sock_put(other); return err; }
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr; struct sock *sk = sock->sk; struct unix_sock *u = unix_sk(sk), *newu, *otheru; struct sock *newsk = NULL; struct sock *other = NULL; struct sk_buff *skb = NULL; unsigned hash; int st; int err; long timeo; err = unix_mkname(sunaddr, addr_len, &hash); if (err < 0) goto out; addr_len = err; if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && (err = unix_autobind(sock)) != 0) goto out; timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); err = -ENOMEM; newsk = unix_create1(NULL); if (newsk == NULL) goto out; skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL); if (skb == NULL) goto out; restart: other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err); if (!other) goto out; unix_state_lock(other); if (sock_flag(other, SOCK_DEAD)) { unix_state_unlock(other); sock_put(other); goto restart; } err = -ECONNREFUSED; if (other->sk_state != TCP_LISTEN) goto out_unlock; if (skb_queue_len(&other->sk_receive_queue) > other->sk_max_ack_backlog) { err = -EAGAIN; if (!timeo) goto out_unlock; timeo = unix_wait_for_peer(other, timeo); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out; sock_put(other); goto restart; } st = sk->sk_state; switch (st) { case TCP_CLOSE: break; case TCP_ESTABLISHED: err = -EISCONN; goto out_unlock; default: err = -EINVAL; goto out_unlock; } unix_state_lock_nested(sk); if (sk->sk_state != st) { unix_state_unlock(sk); unix_state_unlock(other); sock_put(other); goto restart; } err = security_unix_stream_connect(sock, other->sk_socket, newsk); if (err) { unix_state_unlock(sk); goto out_unlock; } sock_hold(sk); unix_peer(newsk) = sk; newsk->sk_state = TCP_ESTABLISHED; newsk->sk_type = sk->sk_type; newsk->sk_peercred.pid = current->tgid; newsk->sk_peercred.uid = current->euid; newsk->sk_peercred.gid = current->egid; newu = unix_sk(newsk); newsk->sk_sleep = &newu->peer_wait; otheru = unix_sk(other); if (otheru->addr) { atomic_inc(&otheru->addr->refcnt); newu->addr = otheru->addr; } if (otheru->dentry) { newu->dentry = dget(otheru->dentry); newu->mnt = mntget(otheru->mnt); } sk->sk_peercred = other->sk_peercred; sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; sock_hold(newsk); smp_mb__after_atomic_inc(); unix_peer(sk) = newsk; unix_state_unlock(sk); spin_lock(&other->sk_receive_queue.lock); __skb_queue_tail(&other->sk_receive_queue, skb); atomic_inc(&newu->inflight); spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); other->sk_data_ready(other, 0); sock_put(other); return 0; out_unlock: if (other) unix_state_unlock(other); out: if (skb) kfree_skb(skb); if (newsk) unix_release_sock(newsk, 0); if (other) sock_put(other); return err; }
983
1
int nego_recv(rdpTransport* transport, wStream* s, void* extra) { BYTE li; BYTE type; UINT16 length; rdpNego* nego = (rdpNego*)extra; if (!tpkt_read_header(s, &length)) return -1; if (!tpdu_read_connection_confirm(s, &li, length)) return -1; if (li > 6) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ switch (type) { case TYPE_RDP_NEG_RSP: nego_process_negotiation_response(nego, s); WLog_DBG(TAG, "selected_protocol: %" PRIu32 "", nego->SelectedProtocol); /* enhanced security selected ? */ if (nego->SelectedProtocol) { if ((nego->SelectedProtocol == PROTOCOL_HYBRID) && (!nego->EnabledProtocols[PROTOCOL_HYBRID])) { nego->state = NEGO_STATE_FAIL; } if ((nego->SelectedProtocol == PROTOCOL_SSL) && (!nego->EnabledProtocols[PROTOCOL_SSL])) { nego->state = NEGO_STATE_FAIL; } } else if (!nego->EnabledProtocols[PROTOCOL_RDP]) { nego->state = NEGO_STATE_FAIL; } break; case TYPE_RDP_NEG_FAILURE: nego_process_negotiation_failure(nego, s); break; } } else if (li == 6) { WLog_DBG(TAG, "no rdpNegData"); if (!nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_FAIL; else nego->state = NEGO_STATE_FINAL; } else { WLog_ERR(TAG, "invalid negotiation response"); nego->state = NEGO_STATE_FAIL; } if (!tpkt_ensure_stream_consumed(s, length)) return -1; return 0; }
int nego_recv(rdpTransport* transport, wStream* s, void* extra) { BYTE li; BYTE type; UINT16 length; rdpNego* nego = (rdpNego*)extra; if (!tpkt_read_header(s, &length)) return -1; if (!tpdu_read_connection_confirm(s, &li, length)) return -1; if (li > 6) { Stream_Read_UINT8(s, type); switch (type) { case TYPE_RDP_NEG_RSP: nego_process_negotiation_response(nego, s); WLog_DBG(TAG, "selected_protocol: %" PRIu32 "", nego->SelectedProtocol); if (nego->SelectedProtocol) { if ((nego->SelectedProtocol == PROTOCOL_HYBRID) && (!nego->EnabledProtocols[PROTOCOL_HYBRID])) { nego->state = NEGO_STATE_FAIL; } if ((nego->SelectedProtocol == PROTOCOL_SSL) && (!nego->EnabledProtocols[PROTOCOL_SSL])) { nego->state = NEGO_STATE_FAIL; } } else if (!nego->EnabledProtocols[PROTOCOL_RDP]) { nego->state = NEGO_STATE_FAIL; } break; case TYPE_RDP_NEG_FAILURE: nego_process_negotiation_failure(nego, s); break; } } else if (li == 6) { WLog_DBG(TAG, "no rdpNegData"); if (!nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_FAIL; else nego->state = NEGO_STATE_FINAL; } else { WLog_ERR(TAG, "invalid negotiation response"); nego->state = NEGO_STATE_FAIL; } if (!tpkt_ensure_stream_consumed(s, length)) return -1; return 0; }
984
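Note: nego_recv above finishes by verifying that parsing consumed the stream exactly up to the declared TPKT length, rejecting both over- and under-reads. A toy version of that check is sketched below; the stream type and helper name are illustrative, not FreeRDP's API.

#include <stddef.h>
#include <stdio.h>

struct stream { const unsigned char *base, *ptr; };

/* Returns 1 iff the cursor advanced exactly as far as the declared length. */
static int ensure_consumed(const struct stream *s, size_t declared_len)
{
    return (size_t)(s->ptr - s->base) == declared_len;
}

int main(void)
{
    unsigned char buf[8] = { 0 };
    struct stream s = { buf, buf + 8 };
    printf("%d %d\n", ensure_consumed(&s, 8), ensure_consumed(&s, 12));  /* 1 0 */
    return 0;
}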
0
gss_get_mic_iov_length(OM_uint32 *minor_status, gss_ctx_id_t context_handle, gss_qop_t qop_req, gss_iov_buffer_desc *iov, int iov_count) { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL, iov, iov_count); if (status != GSS_S_COMPLETE) return status; /* Select the appropriate underlying mechanism routine and call it. */ ctx = (gss_union_ctx_id_t)context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return GSS_S_NO_CONTEXT; mech = gssint_get_mechanism(ctx->mech_type); if (mech == NULL) return GSS_S_BAD_MECH; if (mech->gss_get_mic_iov_length == NULL) return GSS_S_UNAVAILABLE; status = mech->gss_get_mic_iov_length(minor_status, ctx->internal_ctx_id, qop_req, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); return status; }
gss_get_mic_iov_length(OM_uint32 *minor_status, gss_ctx_id_t context_handle, gss_qop_t qop_req, gss_iov_buffer_desc *iov, int iov_count) { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL, iov, iov_count); if (status != GSS_S_COMPLETE) return status; ctx = (gss_union_ctx_id_t)context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return GSS_S_NO_CONTEXT; mech = gssint_get_mechanism(ctx->mech_type); if (mech == NULL) return GSS_S_BAD_MECH; if (mech->gss_get_mic_iov_length == NULL) return GSS_S_UNAVAILABLE; status = mech->gss_get_mic_iov_length(minor_status, ctx->internal_ctx_id, qop_req, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); return status; }
985
1
int dtls1_get_record(SSL *s) { int ssl_major, ssl_minor; int i, n; SSL3_RECORD *rr; unsigned char *p = NULL; unsigned short version; DTLS1_BITMAP *bitmap; unsigned int is_next_epoch; rr = RECORD_LAYER_get_rrec(&s->rlayer); again: if (dtls1_process_buffered_records(s) < 0) return -1; if (dtls1_get_processed_record(s)) return 1; if ((RECORD_LAYER_get_rstate(&s->rlayer) != SSL_ST_READ_BODY) || (RECORD_LAYER_get_packet_length(&s->rlayer) < DTLS1_RT_HEADER_LENGTH)) { n = ssl3_read_n(s, DTLS1_RT_HEADER_LENGTH, SSL3_BUFFER_get_len(&s->rlayer.rbuf), 0, 1); if (n <= 0) return (n); if (RECORD_LAYER_get_packet_length(&s->rlayer) != DTLS1_RT_HEADER_LENGTH) { RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_BODY); p = RECORD_LAYER_get_packet(&s->rlayer); if (s->msg_callback) s->msg_callback(0, 0, SSL3_RT_HEADER, p, DTLS1_RT_HEADER_LENGTH, s, s->msg_callback_arg); rr->type = *(p++); ssl_major = *(p++); ssl_minor = *(p++); version = (ssl_major << 8) | ssl_minor; n2s(p, rr->epoch); memcpy(&(RECORD_LAYER_get_read_sequence(&s->rlayer)[2]), p, 6); p += 6; n2s(p, rr->length); if (!s->first_packet) { if (version != s->version) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } } if ((version & 0xff00) != (s->version & 0xff00)) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } } if (rr->length > RECORD_LAYER_get_packet_length(&s->rlayer) - DTLS1_RT_HEADER_LENGTH) { i = rr->length; n = ssl3_read_n(s, i, i, 1, 1); if (n != i) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } } RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_HEADER); bitmap = dtls1_get_bitmap(s, rr, &is_next_epoch); if (bitmap == NULL) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } #ifndef OPENSSL_NO_SCTP if (!BIO_dgram_is_sctp(SSL_get_rbio(s))) { #endif if (!dtls1_record_replay_check(s, bitmap)) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } #ifndef OPENSSL_NO_SCTP } #endif if (rr->length == 0) goto again; if (is_next_epoch) { if ((SSL_in_init(s) || ossl_statem_get_in_handshake(s))) { if (dtls1_buffer_record(s, &(DTLS_RECORD_LAYER_get_unprocessed_rcds(&s->rlayer)), rr->seq_num) < 0) return -1; dtls1_record_bitmap_update(s, bitmap); } rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } if (!dtls1_process_record(s)) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } dtls1_record_bitmap_update(s, bitmap); return (1); }
int dtls1_get_record(SSL *s) { int ssl_major, ssl_minor; int i, n; SSL3_RECORD *rr; unsigned char *p = NULL; unsigned short version; DTLS1_BITMAP *bitmap; unsigned int is_next_epoch; rr = RECORD_LAYER_get_rrec(&s->rlayer); again: if (dtls1_process_buffered_records(s) < 0) return -1; if (dtls1_get_processed_record(s)) return 1; if ((RECORD_LAYER_get_rstate(&s->rlayer) != SSL_ST_READ_BODY) || (RECORD_LAYER_get_packet_length(&s->rlayer) < DTLS1_RT_HEADER_LENGTH)) { n = ssl3_read_n(s, DTLS1_RT_HEADER_LENGTH, SSL3_BUFFER_get_len(&s->rlayer.rbuf), 0, 1); if (n <= 0) return (n); if (RECORD_LAYER_get_packet_length(&s->rlayer) != DTLS1_RT_HEADER_LENGTH) { RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_BODY); p = RECORD_LAYER_get_packet(&s->rlayer); if (s->msg_callback) s->msg_callback(0, 0, SSL3_RT_HEADER, p, DTLS1_RT_HEADER_LENGTH, s, s->msg_callback_arg); rr->type = *(p++); ssl_major = *(p++); ssl_minor = *(p++); version = (ssl_major << 8) | ssl_minor; n2s(p, rr->epoch); memcpy(&(RECORD_LAYER_get_read_sequence(&s->rlayer)[2]), p, 6); p += 6; n2s(p, rr->length); if (!s->first_packet) { if (version != s->version) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } } if ((version & 0xff00) != (s->version & 0xff00)) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } } if (rr->length > RECORD_LAYER_get_packet_length(&s->rlayer) - DTLS1_RT_HEADER_LENGTH) { i = rr->length; n = ssl3_read_n(s, i, i, 1, 1); if (n != i) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } } RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_HEADER); bitmap = dtls1_get_bitmap(s, rr, &is_next_epoch); if (bitmap == NULL) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } #ifndef OPENSSL_NO_SCTP if (!BIO_dgram_is_sctp(SSL_get_rbio(s))) { #endif if (!dtls1_record_replay_check(s, bitmap)) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } #ifndef OPENSSL_NO_SCTP } #endif if (rr->length == 0) goto again; if (is_next_epoch) { if ((SSL_in_init(s) || ossl_statem_get_in_handshake(s))) { if (dtls1_buffer_record(s, &(DTLS_RECORD_LAYER_get_unprocessed_rcds(&s->rlayer)), rr->seq_num) < 0) return -1; dtls1_record_bitmap_update(s, bitmap); } rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } if (!dtls1_process_record(s)) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } dtls1_record_bitmap_update(s, bitmap); return (1); }
986
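Note: the DTLS path above reads a 13-byte record header (1-byte type, 2-byte version, 2-byte epoch, 6-byte sequence number, 2-byte length) and drops oversize records. The standalone sketch below mirrors that layout and check; TOY_MAX_RECORD is a stand-in for SSL3_RT_MAX_ENCRYPTED_LENGTH.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_MAX_RECORD 16384  /* stand-in for SSL3_RT_MAX_ENCRYPTED_LENGTH */

struct rec { uint8_t type; uint16_t version, epoch, length; uint8_t seq[6]; };

/* Parses the 13-byte header; returns 1 if the declared length is acceptable. */
static int parse_header(const uint8_t *p, struct rec *r)
{
    r->type    = p[0];
    r->version = (uint16_t)((p[1] << 8) | p[2]);
    r->epoch   = (uint16_t)((p[3] << 8) | p[4]);
    memcpy(r->seq, p + 5, 6);
    r->length  = (uint16_t)((p[11] << 8) | p[12]);
    return r->length <= TOY_MAX_RECORD;  /* oversize records are dropped */
}

int main(void)
{
    uint8_t hdr[13] = { 23, 0xfe, 0xfd, 0, 1, 0, 0, 0, 0, 0, 1, 0x40, 0x00 };
    struct rec r;
    printf("%d %u\n", parse_header(hdr, &r), (unsigned)r.length);  /* 1 16384 */
    return 0;
}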
0
static void omap_tipb_bridge_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct omap_tipb_bridge_s *s = (struct omap_tipb_bridge_s *) opaque; if (size < 2) { return omap_badwidth_write16(opaque, addr, value); } switch (addr) { case 0x00: /* TIPB_CNTL */ s->control = value & 0xffff; break; case 0x04: /* TIPB_BUS_ALLOC */ s->alloc = value & 0x003f; break; case 0x08: /* MPU_TIPB_CNTL */ s->buffer = value & 0x0003; break; case 0x0c: /* ENHANCED_TIPB_CNTL */ s->width_intr = !(value & 2); s->enh_control = value & 0x000f; break; case 0x10: /* ADDRESS_DBG */ case 0x14: /* DATA_DEBUG_LOW */ case 0x18: /* DATA_DEBUG_HIGH */ case 0x1c: /* DEBUG_CNTR_SIG */ OMAP_RO_REG(addr); break; default: OMAP_BAD_REG(addr); } }
static void omap_tipb_bridge_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct omap_tipb_bridge_s *s = (struct omap_tipb_bridge_s *) opaque; if (size < 2) { return omap_badwidth_write16(opaque, addr, value); } switch (addr) { case 0x00: s->control = value & 0xffff; break; case 0x04: s->alloc = value & 0x003f; break; case 0x08: s->buffer = value & 0x0003; break; case 0x0c: s->width_intr = !(value & 2); s->enh_control = value & 0x000f; break; case 0x10: case 0x14: case 0x18: case 0x1c: OMAP_RO_REG(addr); break; default: OMAP_BAD_REG(addr); } }
987
1
static inline int empty_stack(void) { return gc_current == GC_HEAD; }
static inline int empty_stack(void) { return gc_current == GC_HEAD; }
988
0
int dissect_h245_T38FaxProfile(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) { offset = dissect_per_sequence(tvb, offset, actx, tree, hf_index, ett_h245_T38FaxProfile, T38FaxProfile_sequence); return offset; }
int dissect_h245_T38FaxProfile(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) { offset = dissect_per_sequence(tvb, offset, actx, tree, hf_index, ett_h245_T38FaxProfile, T38FaxProfile_sequence); return offset; }
989
0
gss_wrap_iov (minor_status, context_handle, conf_req_flag, qop_req, conf_state, iov, iov_count) OM_uint32 * minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; int * conf_state; gss_iov_buffer_desc * iov; int iov_count; { /* EXPORT DELETE START */ OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_iov_args(minor_status, context_handle, conf_req_flag, qop_req, conf_state, iov, iov_count); if (status != GSS_S_COMPLETE) return (status); /* Select the appropriate underlying mechanism routine and call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_wrap_iov) { status = mech->gss_wrap_iov( minor_status, ctx->internal_ctx_id, conf_req_flag, qop_req, conf_state, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } /* EXPORT DELETE END */ return (GSS_S_BAD_MECH); }
gss_wrap_iov (minor_status, context_handle, conf_req_flag, qop_req, conf_state, iov, iov_count) OM_uint32 * minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; int * conf_state; gss_iov_buffer_desc * iov; int iov_count; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_iov_args(minor_status, context_handle, conf_req_flag, qop_req, conf_state, iov, iov_count); if (status != GSS_S_COMPLETE) return (status); ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_wrap_iov) { status = mech->gss_wrap_iov( minor_status, ctx->internal_ctx_id, conf_req_flag, qop_req, conf_state, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
990
1
static void maybe_unmark_and_push(struct sock *x) { struct unix_sock *u = unix_sk(x); if (u->gc_tree != GC_ORPHAN) return; sock_hold(x); u->gc_tree = gc_current; gc_current = x; }
static void maybe_unmark_and_push(struct sock *x) { struct unix_sock *u = unix_sk(x); if (u->gc_tree != GC_ORPHAN) return; sock_hold(x); u->gc_tree = gc_current; gc_current = x; }
992
1
static BOOL ntlm_av_pair_add_copy(NTLM_AV_PAIR* pAvPairList, size_t cbAvPairList, NTLM_AV_PAIR* pAvPair, size_t cbAvPair) { if (!ntlm_av_pair_check(pAvPair, cbAvPair)) return FALSE; return ntlm_av_pair_add(pAvPairList, cbAvPairList, ntlm_av_pair_get_id(pAvPair), ntlm_av_pair_get_value_pointer(pAvPair), ntlm_av_pair_get_len(pAvPair)); }
static BOOL ntlm_av_pair_add_copy(NTLM_AV_PAIR* pAvPairList, size_t cbAvPairList, NTLM_AV_PAIR* pAvPair, size_t cbAvPair) { if (!ntlm_av_pair_check(pAvPair, cbAvPair)) return FALSE; return ntlm_av_pair_add(pAvPairList, cbAvPairList, ntlm_av_pair_get_id(pAvPair), ntlm_av_pair_get_value_pointer(pAvPair), ntlm_av_pair_get_len(pAvPair)); }
993
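Note: ntlm_av_pair_add_copy above leans on ntlm_av_pair_check to validate a pair against the bytes actually available before copying it. The sketch below shows that bounds check over the MS-NLMP pair layout (2-byte AvId, 2-byte AvLen, value bytes); the struct and helper are invented, and the test bytes assume a little-endian host.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct av_pair { uint16_t id; uint16_t len; /* value bytes follow */ };

/* Returns 1 iff the buffer holds the fixed header plus the declared value. */
static int av_pair_ok(const void *p, size_t remaining)
{
    const struct av_pair *av = p;  /* sketch: ignores alignment pedantry */
    if (remaining < sizeof(*av))
        return 0;                  /* no room even for the header */
    return remaining >= sizeof(*av) + av->len;
}

int main(void)
{
    uint8_t buf[8] = { 1, 0, 4, 0, 'a', 'b', 'c', 'd' };  /* id=1, len=4 (LE) */
    printf("%d %d\n", av_pair_ok(buf, 8), av_pair_ok(buf, 6));  /* 1 0 */
    return 0;
}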
0
TEST_F(TemplateURLTest, ReplaceSearchTerms) { struct TestData { const std::string url; const std::string expected_result; } test_data[] = { { "http://foo/{language}{searchTerms}{inputEncoding}", "http://foo/{language}XUTF-8" }, { "http://foo/{language}{inputEncoding}{searchTerms}", "http://foo/{language}UTF-8X" }, { "http://foo/{searchTerms}{language}{inputEncoding}", "http://foo/X{language}UTF-8" }, { "http://foo/{searchTerms}{inputEncoding}{language}", "http://foo/XUTF-8{language}" }, { "http://foo/{inputEncoding}{searchTerms}{language}", "http://foo/UTF-8X{language}" }, { "http://foo/{inputEncoding}{language}{searchTerms}", "http://foo/UTF-8{language}X" }, { "http://foo/{language}a{searchTerms}a{inputEncoding}a", "http://foo/{language}aXaUTF-8a" }, { "http://foo/{language}a{inputEncoding}a{searchTerms}a", "http://foo/{language}aUTF-8aXa" }, { "http://foo/{searchTerms}a{language}a{inputEncoding}a", "http://foo/Xa{language}aUTF-8a" }, { "http://foo/{searchTerms}a{inputEncoding}a{language}a", "http://foo/XaUTF-8a{language}a" }, { "http://foo/{inputEncoding}a{searchTerms}a{language}a", "http://foo/UTF-8aXa{language}a" }, { "http://foo/{inputEncoding}a{language}a{searchTerms}a", "http://foo/UTF-8a{language}aXa" }, }; TemplateURLData data; data.input_encodings.push_back("UTF-8"); for (size_t i = 0; i < arraysize(test_data); ++i) { data.SetURL(test_data[i].url); TemplateURL url(data); EXPECT_TRUE(url.url_ref().IsValid(search_terms_data_)); ASSERT_TRUE(url.url_ref().SupportsReplacement(search_terms_data_)); std::string expected_result = test_data[i].expected_result; base::ReplaceSubstringsAfterOffset(&expected_result, 0, "{language}", search_terms_data_.GetApplicationLocale()); GURL result(url.url_ref().ReplaceSearchTerms(TemplateURLRef::SearchTermsArgs(ASCIIToUTF16("X")), search_terms_data_)); ASSERT_TRUE(result.is_valid()); EXPECT_EQ(expected_result, result.spec()); } }
TEST_F(TemplateURLTest, ReplaceSearchTerms) { struct TestData { const std::string url; const std::string expected_result; } test_data[] = { { "http://foo/{language}{searchTerms}{inputEncoding}", "http://foo/{language}XUTF-8" }, { "http://foo/{language}{inputEncoding}{searchTerms}", "http://foo/{language}UTF-8X" }, { "http://foo/{searchTerms}{language}{inputEncoding}", "http://foo/X{language}UTF-8" }, { "http://foo/{searchTerms}{inputEncoding}{language}", "http://foo/XUTF-8{language}" }, { "http://foo/{inputEncoding}{searchTerms}{language}", "http://foo/UTF-8X{language}" }, { "http://foo/{inputEncoding}{language}{searchTerms}", "http://foo/UTF-8{language}X" }, { "http://foo/{language}a{searchTerms}a{inputEncoding}a", "http://foo/{language}aXaUTF-8a" }, { "http://foo/{language}a{inputEncoding}a{searchTerms}a", "http://foo/{language}aUTF-8aXa" }, { "http://foo/{searchTerms}a{language}a{inputEncoding}a", "http://foo/Xa{language}aUTF-8a" }, { "http://foo/{searchTerms}a{inputEncoding}a{language}a", "http://foo/XaUTF-8a{language}a" }, { "http://foo/{inputEncoding}a{searchTerms}a{language}a", "http://foo/UTF-8aXa{language}a" }, { "http://foo/{inputEncoding}a{language}a{searchTerms}a", "http://foo/UTF-8a{language}aXa" }, }; TemplateURLData data; data.input_encodings.push_back("UTF-8"); for (size_t i = 0; i < arraysize(test_data); ++i) { data.SetURL(test_data[i].url); TemplateURL url(data); EXPECT_TRUE(url.url_ref().IsValid(search_terms_data_)); ASSERT_TRUE(url.url_ref().SupportsReplacement(search_terms_data_)); std::string expected_result = test_data[i].expected_result; base::ReplaceSubstringsAfterOffset(&expected_result, 0, "{language}", search_terms_data_.GetApplicationLocale()); GURL result(url.url_ref().ReplaceSearchTerms(TemplateURLRef::SearchTermsArgs(ASCIIToUTF16("X")), search_terms_data_)); ASSERT_TRUE(result.is_valid()); EXPECT_EQ(expected_result, result.spec()); } }
994
1
void unix_gc(void) { static DEFINE_MUTEX(unix_gc_sem); int i; struct sock *s; struct sk_buff_head hitlist; struct sk_buff *skb; /* Avoid a recursive GC. */ if (!mutex_trylock(&unix_gc_sem)) return; spin_lock(&unix_table_lock); forall_unix_sockets(i, s) { unix_sk(s)->gc_tree = GC_ORPHAN; } /* Everything is now marked */ /* Invariant to be maintained: - everything unmarked is either: -- (a) on the stack, or -- (b) has all of its children unmarked - everything on the stack is always unmarked - nothing is ever pushed onto the stack twice, because: -- nothing previously unmarked is ever pushed on the stack */ /* Push root set */ forall_unix_sockets(i, s) { int open_count = 0; /* If all instances of the descriptor are not in flight we are in use. Special case: when socket s is embrion, it may be hashed but still not in queue of listening socket. In this case (see unix_create1()) we set artificial negative inflight counter to close race window. It is trick of course and dirty one. */ if (s->sk_socket && s->sk_socket->file) open_count = file_count(s->sk_socket->file); if (open_count > atomic_read(&unix_sk(s)->inflight)) maybe_unmark_and_push(s); } /* Mark phase */ while (!empty_stack()) { struct sock *x = pop_stack(); struct sock *sk; spin_lock(&x->sk_receive_queue.lock); skb = skb_peek(&x->sk_receive_queue); /* Loop through all but first born */ while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) { /* Do we have file descriptors ? */ if(UNIXCB(skb).fp) { /* Process the descriptors of this socket */ int nfd=UNIXCB(skb).fp->count; struct file **fp = UNIXCB(skb).fp->fp; while(nfd--) { /* Get the socket the fd matches if it indeed does so */ if((sk=unix_get_socket(*fp++))!=NULL) { maybe_unmark_and_push(sk); } } } /* We have to scan not-yet-accepted ones too */ if (x->sk_state == TCP_LISTEN) maybe_unmark_and_push(skb->sk); skb=skb->next; } spin_unlock(&x->sk_receive_queue.lock); sock_put(x); } skb_queue_head_init(&hitlist); forall_unix_sockets(i, s) { struct unix_sock *u = unix_sk(s); if (u->gc_tree == GC_ORPHAN) { struct sk_buff *nextsk; spin_lock(&s->sk_receive_queue.lock); skb = skb_peek(&s->sk_receive_queue); while (skb && skb != (struct sk_buff *)&s->sk_receive_queue) { nextsk = skb->next; /* Do we have file descriptors ? */ if (UNIXCB(skb).fp) { __skb_unlink(skb, &s->sk_receive_queue); __skb_queue_tail(&hitlist, skb); } skb = nextsk; } spin_unlock(&s->sk_receive_queue.lock); } u->gc_tree = GC_ORPHAN; } spin_unlock(&unix_table_lock); /* Here we are. Hitlist is filled. Die. */ __skb_queue_purge(&hitlist); mutex_unlock(&unix_gc_sem); }
void unix_gc(void) { static DEFINE_MUTEX(unix_gc_sem); int i; struct sock *s; struct sk_buff_head hitlist; struct sk_buff *skb; if (!mutex_trylock(&unix_gc_sem)) return; spin_lock(&unix_table_lock); forall_unix_sockets(i, s) { unix_sk(s)->gc_tree = GC_ORPHAN; } forall_unix_sockets(i, s) { int open_count = 0; if (s->sk_socket && s->sk_socket->file) open_count = file_count(s->sk_socket->file); if (open_count > atomic_read(&unix_sk(s)->inflight)) maybe_unmark_and_push(s); } while (!empty_stack()) { struct sock *x = pop_stack(); struct sock *sk; spin_lock(&x->sk_receive_queue.lock); skb = skb_peek(&x->sk_receive_queue); while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) { if(UNIXCB(skb).fp) { int nfd=UNIXCB(skb).fp->count; struct file **fp = UNIXCB(skb).fp->fp; while(nfd--) { if((sk=unix_get_socket(*fp++))!=NULL) { maybe_unmark_and_push(sk); } } } if (x->sk_state == TCP_LISTEN) maybe_unmark_and_push(skb->sk); skb=skb->next; } spin_unlock(&x->sk_receive_queue.lock); sock_put(x); } skb_queue_head_init(&hitlist); forall_unix_sockets(i, s) { struct unix_sock *u = unix_sk(s); if (u->gc_tree == GC_ORPHAN) { struct sk_buff *nextsk; spin_lock(&s->sk_receive_queue.lock); skb = skb_peek(&s->sk_receive_queue); while (skb && skb != (struct sk_buff *)&s->sk_receive_queue) { nextsk = skb->next; if (UNIXCB(skb).fp) { __skb_unlink(skb, &s->sk_receive_queue); __skb_queue_tail(&hitlist, skb); } skb = nextsk; } spin_unlock(&s->sk_receive_queue.lock); } u->gc_tree = GC_ORPHAN; } spin_unlock(&unix_table_lock); __skb_queue_purge(&hitlist); mutex_unlock(&unix_gc_sem); }
995
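Note: unix_gc above runs a mark phase over an explicit stack (maybe_unmark_and_push and pop_stack) instead of recursing, and nothing is pushed twice because only unmarked nodes are pushed and pushing marks them. A self-contained toy of that traversal:

#include <stdio.h>

#define NNODES 4

struct node { int marked; int nchild; struct node *child[NNODES]; };

static struct node *stack[NNODES];
static int top;

/* Only unmarked nodes are pushed, and pushing marks them: no duplicates. */
static void maybe_push(struct node *n)
{
    if (n && !n->marked) {
        n->marked = 1;
        stack[top++] = n;
    }
}

int main(void)
{
    struct node n[3] = { 0 };
    n[0].nchild = 2; n[0].child[0] = &n[1]; n[0].child[1] = &n[2];
    n[2].nchild = 1; n[2].child[0] = &n[1];
    maybe_push(&n[0]);                       /* push the root set */
    while (top > 0) {                        /* mark phase */
        struct node *x = stack[--top];
        for (int i = 0; i < x->nchild; i++)
            maybe_push(x->child[i]);
    }
    printf("%d %d %d\n", n[0].marked, n[1].marked, n[2].marked);  /* 1 1 1 */
    return 0;
}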
0
int64 float64_to_int64_round_to_zero( float64 a STATUS_PARAM ) { flag aSign; int16 aExp, shiftCount; bits64 aSig; int64 z; aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); if ( aExp ) aSig |= LIT64( 0x0010000000000000 ); shiftCount = aExp - 0x433; if ( 0 <= shiftCount ) { if ( 0x43E <= aExp ) { if ( a != LIT64( 0xC3E0000000000000 ) ) { float_raise( float_flag_invalid STATUS_VAR); if ( ! aSign || ( ( aExp == 0x7FF ) && ( aSig != LIT64( 0x0010000000000000 ) ) ) ) { return LIT64( 0x7FFFFFFFFFFFFFFF ); } } return (sbits64) LIT64( 0x8000000000000000 ); } z = aSig<<shiftCount; } else { if ( aExp < 0x3FE ) { if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact; return 0; } z = aSig>>( - shiftCount ); if ( (bits64) ( aSig<<( shiftCount & 63 ) ) ) { STATUS(float_exception_flags) |= float_flag_inexact; } } if ( aSign ) z = - z; return z; }
int64 float64_to_int64_round_to_zero( float64 a STATUS_PARAM ) { flag aSign; int16 aExp, shiftCount; bits64 aSig; int64 z; aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); aSign = extractFloat64Sign( a ); if ( aExp ) aSig |= LIT64( 0x0010000000000000 ); shiftCount = aExp - 0x433; if ( 0 <= shiftCount ) { if ( 0x43E <= aExp ) { if ( a != LIT64( 0xC3E0000000000000 ) ) { float_raise( float_flag_invalid STATUS_VAR); if ( ! aSign || ( ( aExp == 0x7FF ) && ( aSig != LIT64( 0x0010000000000000 ) ) ) ) { return LIT64( 0x7FFFFFFFFFFFFFFF ); } } return (sbits64) LIT64( 0x8000000000000000 ); } z = aSig<<shiftCount; } else { if ( aExp < 0x3FE ) { if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact; return 0; } z = aSig>>( - shiftCount ); if ( (bits64) ( aSig<<( shiftCount & 63 ) ) ) { STATUS(float_exception_flags) |= float_flag_inexact; } } if ( aSign ) z = - z; return z; }
997
1
void unix_inflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if(s) { atomic_inc(&unix_sk(s)->inflight); atomic_inc(&unix_tot_inflight); } }
void unix_inflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if(s) { atomic_inc(&unix_sk(s)->inflight); atomic_inc(&unix_tot_inflight); } }
998
1
X509_NAME_oneline_ex(X509_NAME * a, char *buf, unsigned int *size, unsigned long flag) { BIO *out = NULL; out = BIO_new(BIO_s_mem ()); if (X509_NAME_print_ex(out, a, 0, flag) > 0) { if (buf != NULL && (*size) > (unsigned int) BIO_number_written(out)) { memset(buf, 0, *size); BIO_read(out, buf, (int) BIO_number_written(out)); } else { *size = BIO_number_written(out); } } BIO_free(out); return (buf); }
X509_NAME_oneline_ex(X509_NAME * a, char *buf, unsigned int *size, unsigned long flag) { BIO *out = NULL; out = BIO_new(BIO_s_mem ()); if (X509_NAME_print_ex(out, a, 0, flag) > 0) { if (buf != NULL && (*size) > (unsigned int) BIO_number_written(out)) { memset(buf, 0, *size); BIO_read(out, buf, (int) BIO_number_written(out)); } else { *size = BIO_number_written(out); } } BIO_free(out); return (buf); }
1000
0
TEST_F(ExtensionServiceSyncTest, DontSelfNotify) { base::FilePath source_install_dir = data_dir().AppendASCII("good").AppendASCII("Extensions"); base::FilePath pref_path = source_install_dir.DirName().Append(chrome::kPreferencesFilename); InitializeInstalledExtensionService(pref_path, source_install_dir); ProfileSyncServiceFactory::GetForProfile(profile())->SetFirstSetupComplete(); extension_sync_service(); service()->Init(); ASSERT_TRUE(service()->is_ready()); ASSERT_EQ(3u, loaded_.size()); ASSERT_TRUE(service()->IsExtensionEnabled(good0)); syncer::FakeSyncChangeProcessor* processor = new syncer::FakeSyncChangeProcessor; extension_sync_service()->MergeDataAndStartSyncing(syncer::EXTENSIONS, syncer::SyncDataList(), base::WrapUnique(processor), base::MakeUnique<syncer::SyncErrorFactoryMock>()); processor->changes().clear(); { const Extension* extension = service()->GetExtensionById(good0, true); ASSERT_TRUE(extension); ExtensionSyncData data(*extension, false, extensions::disable_reason::DISABLE_USER_ACTION, false, false, ExtensionSyncData::BOOLEAN_UNSET, false); SyncChangeList list(1, data.GetSyncChange(SyncChange::ACTION_UPDATE)); extension_sync_service()->ProcessSyncChanges(FROM_HERE, list); EXPECT_TRUE(processor->changes().empty()); } { const Extension* extension = service()->GetExtensionById(good0, true); ASSERT_TRUE(extension); ExtensionSyncData data(*extension, false, extensions::disable_reason::DISABLE_NONE, true, false, ExtensionSyncData::BOOLEAN_UNSET, false); SyncChangeList list(1, data.GetSyncChange(SyncChange::ACTION_UPDATE)); extension_sync_service()->ProcessSyncChanges(FROM_HERE, list); EXPECT_TRUE(processor->changes().empty()); } { const Extension* extension = service()->GetExtensionById(good0, true); ASSERT_TRUE(extension); ExtensionSyncData data(*extension, false, extensions::disable_reason::DISABLE_USER_ACTION | extensions::disable_reason::DISABLE_PERMISSIONS_INCREASE, false, false, ExtensionSyncData::BOOLEAN_UNSET, false); SyncChangeList list(1, data.GetSyncChange(SyncChange::ACTION_UPDATE)); extension_sync_service()->ProcessSyncChanges(FROM_HERE, list); EXPECT_TRUE(processor->changes().empty()); } { const Extension* extension = service()->GetExtensionById(good0, true); ASSERT_TRUE(extension); ExtensionSyncData data(*extension, false, extensions::disable_reason::DISABLE_USER_ACTION | extensions::disable_reason::DISABLE_PERMISSIONS_INCREASE, false, false, ExtensionSyncData::BOOLEAN_UNSET, false); SyncChangeList list(1, data.GetSyncChange(SyncChange::ACTION_DELETE)); extension_sync_service()->ProcessSyncChanges(FROM_HERE, list); EXPECT_TRUE(processor->changes().empty()); } }
TEST_F(ExtensionServiceSyncTest, DontSelfNotify) { base::FilePath source_install_dir = data_dir().AppendASCII("good").AppendASCII("Extensions"); base::FilePath pref_path = source_install_dir.DirName().Append(chrome::kPreferencesFilename); InitializeInstalledExtensionService(pref_path, source_install_dir); ProfileSyncServiceFactory::GetForProfile(profile())->SetFirstSetupComplete(); extension_sync_service(); service()->Init(); ASSERT_TRUE(service()->is_ready()); ASSERT_EQ(3u, loaded_.size()); ASSERT_TRUE(service()->IsExtensionEnabled(good0)); syncer::FakeSyncChangeProcessor* processor = new syncer::FakeSyncChangeProcessor; extension_sync_service()->MergeDataAndStartSyncing(syncer::EXTENSIONS, syncer::SyncDataList(), base::WrapUnique(processor), base::MakeUnique<syncer::SyncErrorFactoryMock>()); processor->changes().clear(); { const Extension* extension = service()->GetExtensionById(good0, true); ASSERT_TRUE(extension); ExtensionSyncData data(*extension, false, extensions::disable_reason::DISABLE_USER_ACTION, false, false, ExtensionSyncData::BOOLEAN_UNSET, false); SyncChangeList list(1, data.GetSyncChange(SyncChange::ACTION_UPDATE)); extension_sync_service()->ProcessSyncChanges(FROM_HERE, list); EXPECT_TRUE(processor->changes().empty()); } { const Extension* extension = service()->GetExtensionById(good0, true); ASSERT_TRUE(extension); ExtensionSyncData data(*extension, false, extensions::disable_reason::DISABLE_NONE, true, false, ExtensionSyncData::BOOLEAN_UNSET, false); SyncChangeList list(1, data.GetSyncChange(SyncChange::ACTION_UPDATE)); extension_sync_service()->ProcessSyncChanges(FROM_HERE, list); EXPECT_TRUE(processor->changes().empty()); } { const Extension* extension = service()->GetExtensionById(good0, true); ASSERT_TRUE(extension); ExtensionSyncData data(*extension, false, extensions::disable_reason::DISABLE_USER_ACTION | extensions::disable_reason::DISABLE_PERMISSIONS_INCREASE, false, false, ExtensionSyncData::BOOLEAN_UNSET, false); SyncChangeList list(1, data.GetSyncChange(SyncChange::ACTION_UPDATE)); extension_sync_service()->ProcessSyncChanges(FROM_HERE, list); EXPECT_TRUE(processor->changes().empty()); } { const Extension* extension = service()->GetExtensionById(good0, true); ASSERT_TRUE(extension); ExtensionSyncData data(*extension, false, extensions::disable_reason::DISABLE_USER_ACTION | extensions::disable_reason::DISABLE_PERMISSIONS_INCREASE, false, false, ExtensionSyncData::BOOLEAN_UNSET, false); SyncChangeList list(1, data.GetSyncChange(SyncChange::ACTION_DELETE)); extension_sync_service()->ProcessSyncChanges(FROM_HERE, list); EXPECT_TRUE(processor->changes().empty()); } }
1001
1
static BOOL ntlm_av_pair_check(NTLM_AV_PAIR* pAvPair, size_t cbAvPair) { if (!pAvPair || cbAvPair < sizeof(NTLM_AV_PAIR)) return FALSE; return cbAvPair >= ntlm_av_pair_get_next_offset(pAvPair); }
static BOOL ntlm_av_pair_check(NTLM_AV_PAIR* pAvPair, size_t cbAvPair) { if (!pAvPair || cbAvPair < sizeof(NTLM_AV_PAIR)) return FALSE; return cbAvPair >= ntlm_av_pair_get_next_offset(pAvPair); }
1002
0
void e1000e_core_pre_save(E1000ECore *core) { int i; NetClientState *nc = qemu_get_queue(core->owner_nic); if (nc->link_down && e1000e_have_autoneg(core)) { core->phy[0][PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE; e1000e_update_flowctl_status(core); } for (i = 0; i < ARRAY_SIZE(core->tx); i++) { if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) { core->tx[i].skip_cp = true; } } }
void e1000e_core_pre_save(E1000ECore *core) { int i; NetClientState *nc = qemu_get_queue(core->owner_nic); if (nc->link_down && e1000e_have_autoneg(core)) { core->phy[0][PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE; e1000e_update_flowctl_status(core); } for (i = 0; i < ARRAY_SIZE(core->tx); i++) { if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) { core->tx[i].skip_cp = true; } } }
1004
0
get_matching_data(krb5_context context, pkinit_plg_crypto_context plg_cryptoctx, pkinit_req_crypto_context req_cryptoctx, X509 *cert, pkinit_cert_matching_data **md_out) { krb5_error_code ret = ENOMEM; pkinit_cert_matching_data *md = NULL; krb5_principal *pkinit_sans = NULL, *upn_sans = NULL; size_t i, j; *md_out = NULL; md = calloc(1, sizeof(*md)); if (md == NULL) goto cleanup; ret = rfc2253_name(X509_get_subject_name(cert), &md->subject_dn); if (ret) goto cleanup; ret = rfc2253_name(X509_get_issuer_name(cert), &md->issuer_dn); if (ret) goto cleanup; /* Get the SAN data. */ ret = crypto_retrieve_X509_sans(context, plg_cryptoctx, req_cryptoctx, cert, &pkinit_sans, &upn_sans, NULL); if (ret) goto cleanup; j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) j++; } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) j++; } if (j != 0) { md->sans = calloc((size_t)j+1, sizeof(*md->sans)); if (md->sans == NULL) { ret = ENOMEM; goto cleanup; } j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) md->sans[j++] = pkinit_sans[i]; free(pkinit_sans); } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) md->sans[j++] = upn_sans[i]; free(upn_sans); } md->sans[j] = NULL; } else md->sans = NULL; /* Get the KU and EKU data. */ ret = crypto_retrieve_X509_key_usage(context, plg_cryptoctx, req_cryptoctx, cert, &md->ku_bits, &md->eku_bits); if (ret) goto cleanup; *md_out = md; md = NULL; cleanup: crypto_cert_free_matching_data(context, md); return ret; }
get_matching_data(krb5_context context, pkinit_plg_crypto_context plg_cryptoctx, pkinit_req_crypto_context req_cryptoctx, X509 *cert, pkinit_cert_matching_data **md_out) { krb5_error_code ret = ENOMEM; pkinit_cert_matching_data *md = NULL; krb5_principal *pkinit_sans = NULL, *upn_sans = NULL; size_t i, j; *md_out = NULL; md = calloc(1, sizeof(*md)); if (md == NULL) goto cleanup; ret = rfc2253_name(X509_get_subject_name(cert), &md->subject_dn); if (ret) goto cleanup; ret = rfc2253_name(X509_get_issuer_name(cert), &md->issuer_dn); if (ret) goto cleanup; ret = crypto_retrieve_X509_sans(context, plg_cryptoctx, req_cryptoctx, cert, &pkinit_sans, &upn_sans, NULL); if (ret) goto cleanup; j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) j++; } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) j++; } if (j != 0) { md->sans = calloc((size_t)j+1, sizeof(*md->sans)); if (md->sans == NULL) { ret = ENOMEM; goto cleanup; } j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) md->sans[j++] = pkinit_sans[i]; free(pkinit_sans); } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) md->sans[j++] = upn_sans[i]; free(upn_sans); } md->sans[j] = NULL; } else md->sans = NULL; ret = crypto_retrieve_X509_key_usage(context, plg_cryptoctx, req_cryptoctx, cert, &md->ku_bits, &md->eku_bits); if (ret) goto cleanup; *md_out = md; md = NULL; cleanup: crypto_cert_free_matching_data(context, md); return ret; }
1,006
1
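The SAN-merging logic in this record (count two NULL-terminated arrays, then copy both into one freshly allocated, NULL-terminated array) is a reusable pattern. A minimal standalone sketch, with hypothetical char * arrays standing in for krb5_principal; this is an illustration, not the krb5 code itself:

#include <stdlib.h>

/* Merge two NULL-terminated arrays into one newly allocated,
 * NULL-terminated array; elements are copied by pointer.
 * Returns NULL when both inputs are empty or on allocation failure. */
static char **merge_null_terminated(char **a, char **b)
{
    size_t i, n = 0;
    char **out;

    for (i = 0; a != NULL && a[i] != NULL; i++)
        n++;
    for (i = 0; b != NULL && b[i] != NULL; i++)
        n++;
    if (n == 0)
        return NULL;
    out = calloc(n + 1, sizeof(*out));  /* +1 for the terminator */
    if (out == NULL)
        return NULL;
    n = 0;
    for (i = 0; a != NULL && a[i] != NULL; i++)
        out[n++] = a[i];
    for (i = 0; b != NULL && b[i] != NULL; i++)
        out[n++] = b[i];
    out[n] = NULL;
    return out;
}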
get_matching_data(krb5_context context, pkinit_plg_crypto_context plg_cryptoctx, pkinit_req_crypto_context req_cryptoctx, X509 *cert, pkinit_cert_matching_data **md_out) { krb5_error_code ret = ENOMEM; pkinit_cert_matching_data *md = NULL; krb5_principal *pkinit_sans = NULL, *upn_sans = NULL; size_t i, j; char buf[DN_BUF_LEN]; unsigned int bufsize = sizeof(buf); *md_out = NULL; md = calloc(1, sizeof(*md)); if (md == NULL) goto cleanup; /* Get the subject name (in rfc2253 format). */ X509_NAME_oneline_ex(X509_get_subject_name(cert), buf, &bufsize, XN_FLAG_SEP_COMMA_PLUS); md->subject_dn = strdup(buf); if (md->subject_dn == NULL) { ret = ENOMEM; goto cleanup; } /* Get the issuer name (in rfc2253 format). */ X509_NAME_oneline_ex(X509_get_issuer_name(cert), buf, &bufsize, XN_FLAG_SEP_COMMA_PLUS); md->issuer_dn = strdup(buf); if (md->issuer_dn == NULL) { ret = ENOMEM; goto cleanup; } /* Get the SAN data. */ ret = crypto_retrieve_X509_sans(context, plg_cryptoctx, req_cryptoctx, cert, &pkinit_sans, &upn_sans, NULL); if (ret) goto cleanup; j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) j++; } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) j++; } if (j != 0) { md->sans = calloc((size_t)j+1, sizeof(*md->sans)); if (md->sans == NULL) { ret = ENOMEM; goto cleanup; } j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) md->sans[j++] = pkinit_sans[i]; free(pkinit_sans); } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) md->sans[j++] = upn_sans[i]; free(upn_sans); } md->sans[j] = NULL; } else md->sans = NULL; /* Get the KU and EKU data. */ ret = crypto_retrieve_X509_key_usage(context, plg_cryptoctx, req_cryptoctx, cert, &md->ku_bits, &md->eku_bits); if (ret) goto cleanup; *md_out = md; md = NULL; cleanup: crypto_cert_free_matching_data(context, md); return ret; }
get_matching_data(krb5_context context, pkinit_plg_crypto_context plg_cryptoctx, pkinit_req_crypto_context req_cryptoctx, X509 *cert, pkinit_cert_matching_data **md_out) { krb5_error_code ret = ENOMEM; pkinit_cert_matching_data *md = NULL; krb5_principal *pkinit_sans = NULL, *upn_sans = NULL; size_t i, j; char buf[DN_BUF_LEN]; unsigned int bufsize = sizeof(buf); *md_out = NULL; md = calloc(1, sizeof(*md)); if (md == NULL) goto cleanup; X509_NAME_oneline_ex(X509_get_subject_name(cert), buf, &bufsize, XN_FLAG_SEP_COMMA_PLUS); md->subject_dn = strdup(buf); if (md->subject_dn == NULL) { ret = ENOMEM; goto cleanup; } X509_NAME_oneline_ex(X509_get_issuer_name(cert), buf, &bufsize, XN_FLAG_SEP_COMMA_PLUS); md->issuer_dn = strdup(buf); if (md->issuer_dn == NULL) { ret = ENOMEM; goto cleanup; } ret = crypto_retrieve_X509_sans(context, plg_cryptoctx, req_cryptoctx, cert, &pkinit_sans, &upn_sans, NULL); if (ret) goto cleanup; j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) j++; } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) j++; } if (j != 0) { md->sans = calloc((size_t)j+1, sizeof(*md->sans)); if (md->sans == NULL) { ret = ENOMEM; goto cleanup; } j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) md->sans[j++] = pkinit_sans[i]; free(pkinit_sans); } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) md->sans[j++] = upn_sans[i]; free(upn_sans); } md->sans[j] = NULL; } else md->sans = NULL; ret = crypto_retrieve_X509_key_usage(context, plg_cryptoctx, req_cryptoctx, cert, &md->ku_bits, &md->eku_bits); if (ret) goto cleanup; *md_out = md; md = NULL; cleanup: crypto_cert_free_matching_data(context, md); return ret; }
1,008
1
static struct sock * unix_create1(struct socket *sock) { struct sock *sk = NULL; struct unix_sock *u; if (atomic_read(&unix_nr_socks) >= 2*get_max_files()) goto out; sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1); if (!sk) goto out; atomic_inc(&unix_nr_socks); sock_init_data(sock,sk); lockdep_set_class(&sk->sk_receive_queue.lock, &af_unix_sk_receive_queue_lock_key); sk->sk_write_space = unix_write_space; sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen; sk->sk_destruct = unix_sock_destructor; u = unix_sk(sk); u->dentry = NULL; u->mnt = NULL; spin_lock_init(&u->lock); atomic_set(&u->inflight, sock ? 0 : -1); mutex_init(&u->readlock); /* single task reading lock */ init_waitqueue_head(&u->peer_wait); unix_insert_socket(unix_sockets_unbound, sk); out: return sk; }
static struct sock * unix_create1(struct socket *sock) { struct sock *sk = NULL; struct unix_sock *u; if (atomic_read(&unix_nr_socks) >= 2*get_max_files()) goto out; sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1); if (!sk) goto out; atomic_inc(&unix_nr_socks); sock_init_data(sock,sk); lockdep_set_class(&sk->sk_receive_queue.lock, &af_unix_sk_receive_queue_lock_key); sk->sk_write_space = unix_write_space; sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen; sk->sk_destruct = unix_sock_destructor; u = unix_sk(sk); u->dentry = NULL; u->mnt = NULL; spin_lock_init(&u->lock); atomic_set(&u->inflight, sock ? 0 : -1); mutex_init(&u->readlock); init_waitqueue_head(&u->peer_wait); unix_insert_socket(unix_sockets_unbound, sk); out: return sk; }
1,009
0
static int dissect_h225_TBCD_STRING_SIZE_1_4(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) { /* TBCD string constrained to 1..4 digits. */ offset = dissect_per_size_constrained_type(tvb, offset, actx, tree, hf_index, dissect_h225_TBCD_STRING, "TBCD_STRING", 1, 4, FALSE); return offset; }
static int dissect_h225_TBCD_STRING_SIZE_1_4(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) { offset = dissect_per_size_constrained_type(tvb, offset, actx, tree, hf_index, dissect_h225_TBCD_STRING, "TBCD_STRING", 1, 4, FALSE); return offset; }
1,010
0
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, int mi_row, int mi_col) { /* Pick a fixed partition size from the 64x64 block's per-pixel difference variance: low variance favors large blocks. */ unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb.plane[0].src, mi_row, mi_col, BLOCK_64X64); if (var < 8) return BLOCK_64X64; else if (var < 128) return BLOCK_32X32; else if (var < 2048) return BLOCK_16X16; else return BLOCK_8X8; }
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, int mi_row, int mi_col) { unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb.plane[0].src, mi_row, mi_col, BLOCK_64X64); if (var < 8) return BLOCK_64X64; else if (var < 128) return BLOCK_32X32; else if (var < 2048) return BLOCK_16X16; else return BLOCK_8X8; }
1,011
0
rfc2253_name(X509_NAME *name, char **str_out) { BIO *b = NULL; char *str; *str_out = NULL; b = BIO_new(BIO_s_mem()); if (b == NULL) return ENOMEM; if (X509_NAME_print_ex(b, name, 0, XN_FLAG_SEP_COMMA_PLUS) < 0) goto error; str = calloc(BIO_number_written(b) + 1, 1); if (str == NULL) goto error; BIO_read(b, str, BIO_number_written(b)); BIO_free(b); *str_out = str; return 0; error: BIO_free(b); return ENOMEM; }
rfc2253_name(X509_NAME *name, char **str_out) { BIO *b = NULL; char *str; *str_out = NULL; b = BIO_new(BIO_s_mem()); if (b == NULL) return ENOMEM; if (X509_NAME_print_ex(b, name, 0, XN_FLAG_SEP_COMMA_PLUS) < 0) goto error; str = calloc(BIO_number_written(b) + 1, 1); if (str == NULL) goto error; BIO_read(b, str, BIO_number_written(b)); BIO_free(b); *str_out = str; return 0; error: BIO_free(b); return ENOMEM; }
1,012
1
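A minimal caller for rfc2253_name as defined above, assuming the usual krb5/OpenSSL headers and an already-loaded X509 *cert; print_subject is a hypothetical name, not part of the original code:

#include <stdio.h>

/* Print a certificate's subject DN in RFC 2253 form. */
static int print_subject(X509 *cert)
{
    char *dn = NULL;
    krb5_error_code ret;

    ret = rfc2253_name(X509_get_subject_name(cert), &dn);
    if (ret)
        return ret;        /* ENOMEM on every failure path above */
    printf("subject: %s\n", dn);
    free(dn);              /* allocated with calloc by rfc2253_name */
    return 0;
}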
void unix_notinflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if(s) { atomic_dec(&unix_sk(s)->inflight); atomic_dec(&unix_tot_inflight); } }
void unix_notinflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if(s) { atomic_dec(&unix_sk(s)->inflight); atomic_dec(&unix_tot_inflight); } }
1,014
0
kadm5_create_principal_3(void *server_handle, kadm5_principal_ent_t entry, long mask, int n_ks_tuple, krb5_key_salt_tuple *ks_tuple, char *password) { krb5_db_entry *kdb; osa_princ_ent_rec adb; kadm5_policy_ent_rec polent; krb5_boolean have_polent = FALSE; krb5_timestamp now; krb5_tl_data *tl_data_tail; unsigned int ret; kadm5_server_handle_t handle = server_handle; krb5_keyblock *act_mkey; krb5_kvno act_kvno; int new_n_ks_tuple = 0; krb5_key_salt_tuple *new_ks_tuple = NULL; CHECK_HANDLE(server_handle); krb5_clear_error_message(handle->context); check_1_6_dummy(entry, mask, n_ks_tuple, ks_tuple, &password); /* * Argument sanity checking, and opening up the DB */ if (entry == NULL) return EINVAL; if(!(mask & KADM5_PRINCIPAL) || (mask & KADM5_MOD_NAME) || (mask & KADM5_MOD_TIME) || (mask & KADM5_LAST_PWD_CHANGE) || (mask & KADM5_MKVNO) || (mask & KADM5_AUX_ATTRIBUTES) || (mask & KADM5_LAST_SUCCESS) || (mask & KADM5_LAST_FAILED) || (mask & KADM5_FAIL_AUTH_COUNT)) return KADM5_BAD_MASK; if ((mask & KADM5_KEY_DATA) && entry->n_key_data != 0) return KADM5_BAD_MASK; if((mask & KADM5_POLICY) && entry->policy == NULL) return KADM5_BAD_MASK; if((mask & KADM5_POLICY) && (mask & KADM5_POLICY_CLR)) return KADM5_BAD_MASK; if((mask & ~ALL_PRINC_MASK)) return KADM5_BAD_MASK; if (mask & KADM5_TL_DATA) { for (tl_data_tail = entry->tl_data; tl_data_tail != NULL; tl_data_tail = tl_data_tail->tl_data_next) { if (tl_data_tail->tl_data_type < 256) return KADM5_BAD_TL_TYPE; } } /* * Check to see if the principal exists */ ret = kdb_get_entry(handle, entry->principal, &kdb, &adb); switch(ret) { case KADM5_UNK_PRINC: break; case 0: kdb_free_entry(handle, kdb, &adb); return KADM5_DUP; default: return ret; } kdb = calloc(1, sizeof(*kdb)); if (kdb == NULL) return ENOMEM; memset(&adb, 0, sizeof(osa_princ_ent_rec)); /* * If a policy was specified, load it. * If we can not find the one specified return an error */ if ((mask & KADM5_POLICY)) { ret = get_policy(handle, entry->policy, &polent, &have_polent); if (ret) goto cleanup; } if (password) { ret = passwd_check(handle, password, have_polent ? &polent : NULL, entry->principal); if (ret) goto cleanup; } /* * Start populating the various DB fields, using the * "defaults" for fields that were not specified by the * mask. */ if ((ret = krb5_timeofday(handle->context, &now))) goto cleanup; kdb->magic = KRB5_KDB_MAGIC_NUMBER; kdb->len = KRB5_KDB_V1_BASE_LENGTH; /* gag me with a chainsaw */ if ((mask & KADM5_ATTRIBUTES)) kdb->attributes = entry->attributes; else kdb->attributes = handle->params.flags; if ((mask & KADM5_MAX_LIFE)) kdb->max_life = entry->max_life; else kdb->max_life = handle->params.max_life; if (mask & KADM5_MAX_RLIFE) kdb->max_renewable_life = entry->max_renewable_life; else kdb->max_renewable_life = handle->params.max_rlife; if ((mask & KADM5_PRINC_EXPIRE_TIME)) kdb->expiration = entry->princ_expire_time; else kdb->expiration = handle->params.expiration; kdb->pw_expiration = 0; if (have_polent) { if(polent.pw_max_life) kdb->pw_expiration = ts_incr(now, polent.pw_max_life); else kdb->pw_expiration = 0; } if ((mask & KADM5_PW_EXPIRATION)) kdb->pw_expiration = entry->pw_expiration; kdb->last_success = 0; kdb->last_failed = 0; kdb->fail_auth_count = 0; /* this is kind of gross, but in order to free the tl data, I need to free the entire kdb entry, and that will try to free the principal. 
*/ ret = krb5_copy_principal(handle->context, entry->principal, &kdb->princ); if (ret) goto cleanup; if ((ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now))) goto cleanup; if (mask & KADM5_TL_DATA) { /* splice entry->tl_data onto the front of kdb->tl_data */ for (tl_data_tail = entry->tl_data; tl_data_tail; tl_data_tail = tl_data_tail->tl_data_next) { ret = krb5_dbe_update_tl_data(handle->context, kdb, tl_data_tail); if( ret ) goto cleanup; } } /* * We need to have setup the TL data, so we have strings, so we can * check enctype policy, which is why we check/initialize ks_tuple * this late. */ ret = apply_keysalt_policy(handle, entry->policy, n_ks_tuple, ks_tuple, &new_n_ks_tuple, &new_ks_tuple); if (ret) goto cleanup; /* initialize the keys */ ret = kdb_get_active_mkey(handle, &act_kvno, &act_mkey); if (ret) goto cleanup; if (mask & KADM5_KEY_DATA) { /* The client requested no keys for this principal. */ assert(entry->n_key_data == 0); } else if (password) { ret = krb5_dbe_cpw(handle->context, act_mkey, new_ks_tuple, new_n_ks_tuple, password, (mask & KADM5_KVNO)?entry->kvno:1, FALSE, kdb); } else { /* Null password means create with random key (new in 1.8). */ ret = krb5_dbe_crk(handle->context, &master_keyblock, new_ks_tuple, new_n_ks_tuple, FALSE, kdb); } if (ret) goto cleanup; /* Record the master key VNO used to encrypt this entry's keys */ ret = krb5_dbe_update_mkvno(handle->context, kdb, act_kvno); if (ret) goto cleanup; ret = k5_kadm5_hook_create(handle->context, handle->hook_handles, KADM5_HOOK_STAGE_PRECOMMIT, entry, mask, new_n_ks_tuple, new_ks_tuple, password); if (ret) goto cleanup; /* populate the admin-server-specific fields. In the OV server, this used to be in a separate database. Since there's already marshalling code for the admin fields, to keep things simple, I'm going to keep it, and make all the admin stuff occupy a single tl_data record, */ adb.admin_history_kvno = INITIAL_HIST_KVNO; if (mask & KADM5_POLICY) { adb.aux_attributes = KADM5_POLICY; /* this does *not* need to be strdup'ed, because adb is xdr */ /* encoded in osa_adb_create_princ, and not ever freed */ adb.policy = entry->policy; } /* In all cases key and the principal data is set, let the database provider know */ kdb->mask = mask | KADM5_KEY_DATA | KADM5_PRINCIPAL ; /* store the new db entry */ ret = kdb_put_entry(handle, kdb, &adb); (void) k5_kadm5_hook_create(handle->context, handle->hook_handles, KADM5_HOOK_STAGE_POSTCOMMIT, entry, mask, new_n_ks_tuple, new_ks_tuple, password); cleanup: free(new_ks_tuple); krb5_db_free_principal(handle->context, kdb); if (have_polent) (void) kadm5_free_policy_ent(handle->lhandle, &polent); return ret; }
kadm5_create_principal_3(void *server_handle, kadm5_principal_ent_t entry, long mask, int n_ks_tuple, krb5_key_salt_tuple *ks_tuple, char *password) { krb5_db_entry *kdb; osa_princ_ent_rec adb; kadm5_policy_ent_rec polent; krb5_boolean have_polent = FALSE; krb5_timestamp now; krb5_tl_data *tl_data_tail; unsigned int ret; kadm5_server_handle_t handle = server_handle; krb5_keyblock *act_mkey; krb5_kvno act_kvno; int new_n_ks_tuple = 0; krb5_key_salt_tuple *new_ks_tuple = NULL; CHECK_HANDLE(server_handle); krb5_clear_error_message(handle->context); check_1_6_dummy(entry, mask, n_ks_tuple, ks_tuple, &password); if (entry == NULL) return EINVAL; if(!(mask & KADM5_PRINCIPAL) || (mask & KADM5_MOD_NAME) || (mask & KADM5_MOD_TIME) || (mask & KADM5_LAST_PWD_CHANGE) || (mask & KADM5_MKVNO) || (mask & KADM5_AUX_ATTRIBUTES) || (mask & KADM5_LAST_SUCCESS) || (mask & KADM5_LAST_FAILED) || (mask & KADM5_FAIL_AUTH_COUNT)) return KADM5_BAD_MASK; if ((mask & KADM5_KEY_DATA) && entry->n_key_data != 0) return KADM5_BAD_MASK; if((mask & KADM5_POLICY) && entry->policy == NULL) return KADM5_BAD_MASK; if((mask & KADM5_POLICY) && (mask & KADM5_POLICY_CLR)) return KADM5_BAD_MASK; if((mask & ~ALL_PRINC_MASK)) return KADM5_BAD_MASK; if (mask & KADM5_TL_DATA) { for (tl_data_tail = entry->tl_data; tl_data_tail != NULL; tl_data_tail = tl_data_tail->tl_data_next) { if (tl_data_tail->tl_data_type < 256) return KADM5_BAD_TL_TYPE; } } ret = kdb_get_entry(handle, entry->principal, &kdb, &adb); switch(ret) { case KADM5_UNK_PRINC: break; case 0: kdb_free_entry(handle, kdb, &adb); return KADM5_DUP; default: return ret; } kdb = calloc(1, sizeof(*kdb)); if (kdb == NULL) return ENOMEM; memset(&adb, 0, sizeof(osa_princ_ent_rec)); if ((mask & KADM5_POLICY)) { ret = get_policy(handle, entry->policy, &polent, &have_polent); if (ret) goto cleanup; } if (password) { ret = passwd_check(handle, password, have_polent ? 
&polent : NULL, entry->principal); if (ret) goto cleanup; } if ((ret = krb5_timeofday(handle->context, &now))) goto cleanup; kdb->magic = KRB5_KDB_MAGIC_NUMBER; kdb->len = KRB5_KDB_V1_BASE_LENGTH; if ((mask & KADM5_ATTRIBUTES)) kdb->attributes = entry->attributes; else kdb->attributes = handle->params.flags; if ((mask & KADM5_MAX_LIFE)) kdb->max_life = entry->max_life; else kdb->max_life = handle->params.max_life; if (mask & KADM5_MAX_RLIFE) kdb->max_renewable_life = entry->max_renewable_life; else kdb->max_renewable_life = handle->params.max_rlife; if ((mask & KADM5_PRINC_EXPIRE_TIME)) kdb->expiration = entry->princ_expire_time; else kdb->expiration = handle->params.expiration; kdb->pw_expiration = 0; if (have_polent) { if(polent.pw_max_life) kdb->pw_expiration = ts_incr(now, polent.pw_max_life); else kdb->pw_expiration = 0; } if ((mask & KADM5_PW_EXPIRATION)) kdb->pw_expiration = entry->pw_expiration; kdb->last_success = 0; kdb->last_failed = 0; kdb->fail_auth_count = 0; ret = krb5_copy_principal(handle->context, entry->principal, &kdb->princ); if (ret) goto cleanup; if ((ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now))) goto cleanup; if (mask & KADM5_TL_DATA) { for (tl_data_tail = entry->tl_data; tl_data_tail; tl_data_tail = tl_data_tail->tl_data_next) { ret = krb5_dbe_update_tl_data(handle->context, kdb, tl_data_tail); if( ret ) goto cleanup; } } ret = apply_keysalt_policy(handle, entry->policy, n_ks_tuple, ks_tuple, &new_n_ks_tuple, &new_ks_tuple); if (ret) goto cleanup; ret = kdb_get_active_mkey(handle, &act_kvno, &act_mkey); if (ret) goto cleanup; if (mask & KADM5_KEY_DATA) { assert(entry->n_key_data == 0); } else if (password) { ret = krb5_dbe_cpw(handle->context, act_mkey, new_ks_tuple, new_n_ks_tuple, password, (mask & KADM5_KVNO)?entry->kvno:1, FALSE, kdb); } else { ret = krb5_dbe_crk(handle->context, &master_keyblock, new_ks_tuple, new_n_ks_tuple, FALSE, kdb); } if (ret) goto cleanup; ret = krb5_dbe_update_mkvno(handle->context, kdb, act_kvno); if (ret) goto cleanup; ret = k5_kadm5_hook_create(handle->context, handle->hook_handles, KADM5_HOOK_STAGE_PRECOMMIT, entry, mask, new_n_ks_tuple, new_ks_tuple, password); if (ret) goto cleanup; adb.admin_history_kvno = INITIAL_HIST_KVNO; if (mask & KADM5_POLICY) { adb.aux_attributes = KADM5_POLICY; adb.policy = entry->policy; } kdb->mask = mask | KADM5_KEY_DATA | KADM5_PRINCIPAL ; ret = kdb_put_entry(handle, kdb, &adb); (void) k5_kadm5_hook_create(handle->context, handle->hook_handles, KADM5_HOOK_STAGE_POSTCOMMIT, entry, mask, new_n_ks_tuple, new_ks_tuple, password); cleanup: free(new_ks_tuple); krb5_db_free_principal(handle->context, kdb); if (have_polent) (void) kadm5_free_policy_ent(handle->lhandle, &polent); return ret; }
1,015
1
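The long KADM5_BAD_MASK chain near the top of this record is a bitmask-validation idiom: required bits must be present, server-managed bits must be absent, unknown bits are rejected. A condensed, hypothetical sketch (mask_ok and the two helper macros are illustrative names; the KADM5_* constants are the ones used above, and the record's extra KEY_DATA/POLICY cross-checks are omitted):

#define REQUIRED_MASK  KADM5_PRINCIPAL
#define FORBIDDEN_MASK (KADM5_MOD_NAME | KADM5_MOD_TIME | \
                        KADM5_LAST_PWD_CHANGE | KADM5_MKVNO | \
                        KADM5_AUX_ATTRIBUTES | KADM5_LAST_SUCCESS | \
                        KADM5_LAST_FAILED | KADM5_FAIL_AUTH_COUNT)

static int mask_ok(long mask)
{
    if ((mask & REQUIRED_MASK) != REQUIRED_MASK)
        return 0;                  /* a required field is missing */
    if (mask & FORBIDDEN_MASK)
        return 0;                  /* caller set a server-managed field */
    if (mask & ~ALL_PRINC_MASK)
        return 0;                  /* unknown bits */
    return 1;
}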
static int uvc_parse_format(struct uvc_device *dev, struct uvc_streaming *streaming, struct uvc_format *format, __u32 **intervals, unsigned char *buffer, int buflen) { struct usb_interface *intf = streaming->intf; struct usb_host_interface *alts = intf->cur_altsetting; struct uvc_format_desc *fmtdesc; struct uvc_frame *frame; const unsigned char *start = buffer; unsigned int interval; unsigned int i, n; __u8 ftype; format->type = buffer[2]; format->index = buffer[3]; switch (buffer[2]) { case VS_FORMAT_UNCOMPRESSED: case VS_FORMAT_FRAME_BASED: if (buflen < 27) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } /* Find the format descriptor from its GUID. */ fmtdesc = uvc_format_by_guid(&buffer[5]); if (fmtdesc != NULL) { strncpy(format->name, fmtdesc->name, sizeof format->name); format->fcc = fmtdesc->fcc; } else { uvc_printk(KERN_INFO, "Unknown video format " UVC_GUID_FORMAT "\n", UVC_GUID_ARGS(&buffer[5])); snprintf(format->name, sizeof format->name, UVC_GUID_FORMAT, UVC_GUID_ARGS(&buffer[5])); format->fcc = 0; } format->bpp = buffer[21]; if (buffer[2] == VS_FORMAT_UNCOMPRESSED) { ftype = VS_FRAME_UNCOMPRESSED; } else { ftype = VS_FRAME_FRAME_BASED; if (buffer[27]) format->flags = UVC_FMT_FLAG_COMPRESSED; } break; case VS_FORMAT_MJPEG: if (buflen < 11) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } strncpy(format->name, "MJPEG", sizeof format->name); format->fcc = V4L2_PIX_FMT_MJPEG; format->flags = UVC_FMT_FLAG_COMPRESSED; format->bpp = 0; ftype = VS_FRAME_MJPEG; break; case VS_FORMAT_DV: if (buflen < 9) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } switch (buffer[8] & 0x7f) { case 0: strncpy(format->name, "SD-DV", sizeof format->name); break; case 1: strncpy(format->name, "SDL-DV", sizeof format->name); break; case 2: strncpy(format->name, "HD-DV", sizeof format->name); break; default: uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d: unknown DV format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[8]); return -EINVAL; } strncat(format->name, buffer[8] & (1 << 7) ? " 60Hz" : " 50Hz", sizeof format->name); format->fcc = V4L2_PIX_FMT_DV; format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM; format->bpp = 0; ftype = 0; /* Create a dummy frame descriptor. */ frame = &format->frame[0]; memset(&format->frame[0], 0, sizeof format->frame[0]); frame->bFrameIntervalType = 1; frame->dwDefaultFrameInterval = 1; frame->dwFrameInterval = *intervals; *(*intervals)++ = 1; format->nframes = 1; break; case VS_FORMAT_MPEG2TS: case VS_FORMAT_STREAM_BASED: /* Not supported yet. */ default: uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d unsupported format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[2]); return -EINVAL; } uvc_trace(UVC_TRACE_DESCR, "Found format %s.\n", format->name); buflen -= buffer[0]; buffer += buffer[0]; /* Parse the frame descriptors. Only uncompressed, MJPEG and frame * based formats have frame descriptors. */ while (buflen > 2 && buffer[2] == ftype) { frame = &format->frame[format->nframes]; if (ftype != VS_FRAME_FRAME_BASED) n = buflen > 25 ? buffer[25] : 0; else n = buflen > 21 ? buffer[21] : 0; n = n ? 
n : 3; if (buflen < 26 + 4*n) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FRAME error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } frame->bFrameIndex = buffer[3]; frame->bmCapabilities = buffer[4]; frame->wWidth = le16_to_cpup((__le16 *)&buffer[5]); frame->wHeight = le16_to_cpup((__le16 *)&buffer[7]); frame->dwMinBitRate = le32_to_cpup((__le32 *)&buffer[9]); frame->dwMaxBitRate = le32_to_cpup((__le32 *)&buffer[13]); if (ftype != VS_FRAME_FRAME_BASED) { frame->dwMaxVideoFrameBufferSize = le32_to_cpup((__le32 *)&buffer[17]); frame->dwDefaultFrameInterval = le32_to_cpup((__le32 *)&buffer[21]); frame->bFrameIntervalType = buffer[25]; } else { frame->dwMaxVideoFrameBufferSize = 0; frame->dwDefaultFrameInterval = le32_to_cpup((__le32 *)&buffer[17]); frame->bFrameIntervalType = buffer[21]; } frame->dwFrameInterval = *intervals; /* Several UVC chipsets screw up dwMaxVideoFrameBufferSize * completely. Observed behaviours range from setting the * value to 1.1x the actual frame size of hardwiring the * 16 low bits to 0. This results in a higher than necessary * memory usage as well as a wrong image size information. For * uncompressed formats this can be fixed by computing the * value from the frame size. */ if (!(format->flags & UVC_FMT_FLAG_COMPRESSED)) frame->dwMaxVideoFrameBufferSize = format->bpp * frame->wWidth * frame->wHeight / 8; /* Some bogus devices report dwMinFrameInterval equal to * dwMaxFrameInterval and have dwFrameIntervalStep set to * zero. Setting all null intervals to 1 fixes the problem and * some other divisions by zero which could happen. */ for (i = 0; i < n; ++i) { interval = le32_to_cpup((__le32 *)&buffer[26+4*i]); *(*intervals)++ = interval ? interval : 1; } /* Make sure that the default frame interval stays between * the boundaries. */ n -= frame->bFrameIntervalType ? 1 : 2; frame->dwDefaultFrameInterval = min(frame->dwFrameInterval[n], max(frame->dwFrameInterval[0], frame->dwDefaultFrameInterval)); uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n", frame->wWidth, frame->wHeight, 10000000/frame->dwDefaultFrameInterval, (100000000/frame->dwDefaultFrameInterval)%10); format->nframes++; buflen -= buffer[0]; buffer += buffer[0]; } if (buflen > 2 && buffer[2] == VS_STILL_IMAGE_FRAME) { buflen -= buffer[0]; buffer += buffer[0]; } if (buflen > 2 && buffer[2] == VS_COLORFORMAT) { if (buflen < 6) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d COLORFORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } format->colorspace = uvc_colorspace(buffer[3]); buflen -= buffer[0]; buffer += buffer[0]; } return buffer - start; }
static int uvc_parse_format(struct uvc_device *dev, struct uvc_streaming *streaming, struct uvc_format *format, __u32 **intervals, unsigned char *buffer, int buflen) { struct usb_interface *intf = streaming->intf; struct usb_host_interface *alts = intf->cur_altsetting; struct uvc_format_desc *fmtdesc; struct uvc_frame *frame; const unsigned char *start = buffer; unsigned int interval; unsigned int i, n; __u8 ftype; format->type = buffer[2]; format->index = buffer[3]; switch (buffer[2]) { case VS_FORMAT_UNCOMPRESSED: case VS_FORMAT_FRAME_BASED: if (buflen < 27) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } fmtdesc = uvc_format_by_guid(&buffer[5]); if (fmtdesc != NULL) { strncpy(format->name, fmtdesc->name, sizeof format->name); format->fcc = fmtdesc->fcc; } else { uvc_printk(KERN_INFO, "Unknown video format " UVC_GUID_FORMAT "\n", UVC_GUID_ARGS(&buffer[5])); snprintf(format->name, sizeof format->name, UVC_GUID_FORMAT, UVC_GUID_ARGS(&buffer[5])); format->fcc = 0; } format->bpp = buffer[21]; if (buffer[2] == VS_FORMAT_UNCOMPRESSED) { ftype = VS_FRAME_UNCOMPRESSED; } else { ftype = VS_FRAME_FRAME_BASED; if (buffer[27]) format->flags = UVC_FMT_FLAG_COMPRESSED; } break; case VS_FORMAT_MJPEG: if (buflen < 11) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } strncpy(format->name, "MJPEG", sizeof format->name); format->fcc = V4L2_PIX_FMT_MJPEG; format->flags = UVC_FMT_FLAG_COMPRESSED; format->bpp = 0; ftype = VS_FRAME_MJPEG; break; case VS_FORMAT_DV: if (buflen < 9) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } switch (buffer[8] & 0x7f) { case 0: strncpy(format->name, "SD-DV", sizeof format->name); break; case 1: strncpy(format->name, "SDL-DV", sizeof format->name); break; case 2: strncpy(format->name, "HD-DV", sizeof format->name); break; default: uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d: unknown DV format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[8]); return -EINVAL; } strncat(format->name, buffer[8] & (1 << 7) ? " 60Hz" : " 50Hz", sizeof format->name); format->fcc = V4L2_PIX_FMT_DV; format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM; format->bpp = 0; ftype = 0; frame = &format->frame[0]; memset(&format->frame[0], 0, sizeof format->frame[0]); frame->bFrameIntervalType = 1; frame->dwDefaultFrameInterval = 1; frame->dwFrameInterval = *intervals; *(*intervals)++ = 1; format->nframes = 1; break; case VS_FORMAT_MPEG2TS: case VS_FORMAT_STREAM_BASED: default: uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d unsupported format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[2]); return -EINVAL; } uvc_trace(UVC_TRACE_DESCR, "Found format %s.\n", format->name); buflen -= buffer[0]; buffer += buffer[0]; while (buflen > 2 && buffer[2] == ftype) { frame = &format->frame[format->nframes]; if (ftype != VS_FRAME_FRAME_BASED) n = buflen > 25 ? buffer[25] : 0; else n = buflen > 21 ? buffer[21] : 0; n = n ? 
n : 3; if (buflen < 26 + 4*n) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FRAME error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } frame->bFrameIndex = buffer[3]; frame->bmCapabilities = buffer[4]; frame->wWidth = le16_to_cpup((__le16 *)&buffer[5]); frame->wHeight = le16_to_cpup((__le16 *)&buffer[7]); frame->dwMinBitRate = le32_to_cpup((__le32 *)&buffer[9]); frame->dwMaxBitRate = le32_to_cpup((__le32 *)&buffer[13]); if (ftype != VS_FRAME_FRAME_BASED) { frame->dwMaxVideoFrameBufferSize = le32_to_cpup((__le32 *)&buffer[17]); frame->dwDefaultFrameInterval = le32_to_cpup((__le32 *)&buffer[21]); frame->bFrameIntervalType = buffer[25]; } else { frame->dwMaxVideoFrameBufferSize = 0; frame->dwDefaultFrameInterval = le32_to_cpup((__le32 *)&buffer[17]); frame->bFrameIntervalType = buffer[21]; } frame->dwFrameInterval = *intervals; if (!(format->flags & UVC_FMT_FLAG_COMPRESSED)) frame->dwMaxVideoFrameBufferSize = format->bpp * frame->wWidth * frame->wHeight / 8; for (i = 0; i < n; ++i) { interval = le32_to_cpup((__le32 *)&buffer[26+4*i]); *(*intervals)++ = interval ? interval : 1; } n -= frame->bFrameIntervalType ? 1 : 2; frame->dwDefaultFrameInterval = min(frame->dwFrameInterval[n], max(frame->dwFrameInterval[0], frame->dwDefaultFrameInterval)); uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n", frame->wWidth, frame->wHeight, 10000000/frame->dwDefaultFrameInterval, (100000000/frame->dwDefaultFrameInterval)%10); format->nframes++; buflen -= buffer[0]; buffer += buffer[0]; } if (buflen > 2 && buffer[2] == VS_STILL_IMAGE_FRAME) { buflen -= buffer[0]; buffer += buffer[0]; } if (buflen > 2 && buffer[2] == VS_COLORFORMAT) { if (buflen < 6) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d COLORFORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } format->colorspace = uvc_colorspace(buffer[3]); buflen -= buffer[0]; buffer += buffer[0]; } return buffer - start; }
1,017
1
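The default-interval adjustment near the end of the frame loop is the standard clamp(x, lo, hi) idiom spelled with min()/max(); without the kernel helpers it reduces to:

/* Clamp x into [lo, hi]; mirrors the min(hi, max(lo, x)) form above. */
static unsigned int clamp_u32(unsigned int x, unsigned int lo, unsigned int hi)
{
    if (x < lo)
        return lo;
    if (x > hi)
        return hi;
    return x;
}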
NTLM_AV_PAIR* ntlm_av_pair_get(NTLM_AV_PAIR* pAvPairList, size_t cbAvPairList, NTLM_AV_ID AvId, size_t* pcbAvPairListRemaining) { size_t cbAvPair = cbAvPairList; NTLM_AV_PAIR* pAvPair = pAvPairList; if (!ntlm_av_pair_check(pAvPair, cbAvPair)) pAvPair = NULL; while (pAvPair) { UINT16 id = ntlm_av_pair_get_id(pAvPair); if (id == AvId) break; if (id == MsvAvEOL) { pAvPair = NULL; break; } pAvPair = ntlm_av_pair_next(pAvPair, &cbAvPair); } if (!pAvPair) cbAvPair = 0; if (pcbAvPairListRemaining) *pcbAvPairListRemaining = cbAvPair; return pAvPair; }
NTLM_AV_PAIR* ntlm_av_pair_get(NTLM_AV_PAIR* pAvPairList, size_t cbAvPairList, NTLM_AV_ID AvId, size_t* pcbAvPairListRemaining) { size_t cbAvPair = cbAvPairList; NTLM_AV_PAIR* pAvPair = pAvPairList; if (!ntlm_av_pair_check(pAvPair, cbAvPair)) pAvPair = NULL; while (pAvPair) { UINT16 id = ntlm_av_pair_get_id(pAvPair); if (id == AvId) break; if (id == MsvAvEOL) { pAvPair = NULL; break; } pAvPair = ntlm_av_pair_next(pAvPair, &cbAvPair); } if (!pAvPair) cbAvPair = 0; if (pcbAvPairListRemaining) *pcbAvPairListRemaining = cbAvPair; return pAvPair; }
1,018
0
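ntlm_av_pair_get above walks a type-length-value list while threading the remaining byte count through every step, so a corrupt length field can never read past the buffer. A generic sketch of the same pattern over a raw byte buffer (hypothetical 2-byte id / 2-byte length layout, host byte order, no terminator entry, all for brevity):

#include <stdint.h>
#include <string.h>

/* Return the offset of the entry with the given id, or (size_t)-1.
 * Every length is validated against the bytes remaining before use. */
static size_t tlv_find(const uint8_t *buf, size_t len, uint16_t want)
{
    size_t off = 0;
    while (len - off >= 4) {            /* room for id + length header */
        uint16_t id, plen;
        memcpy(&id, buf + off, 2);
        memcpy(&plen, buf + off + 2, 2);
        if (plen > len - off - 4)
            return (size_t)-1;          /* declared length overruns buffer */
        if (id == want)
            return off;
        off += 4 + plen;
    }
    return (size_t)-1;
}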
/* Compute SADs for one source block against four candidate reference blocks. */
#define sadMxNx4D(m, n) \
void vp9_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
                             const uint8_t *const refs[], int ref_stride, \
                             unsigned int *sads) { \
    int i; \
    for (i = 0; i < 4; ++i) \
        sads[i] = vp9_sad##m##x##n##_c(src, src_stride, refs[i], ref_stride); \
}
sadMxN(64, 64) sadMxNxK(64, 64, 3) sadMxNxK(64, 64, 8) sadMxNx4D(64, 64) sadMxN(64, 32) sadMxNx4D(64, 32) sadMxN(32, 64) sadMxNx4D(32, 64) sadMxN(32, 32) sadMxNxK(32, 32, 3) sadMxNxK(32, 32, 8) sadMxNx4D(32, 32) sadMxN(32, 16) sadMxNx4D(32, 16) sadMxN(16, 32) sadMxNx4D(16, 32) sadMxN(16, 16) sadMxNxK(16, 16, 3) sadMxNxK(16, 16, 8) sadMxNx4D(16, 16)
#define sadMxNx4D(m, n) \
void vp9_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
                             const uint8_t *const refs[], int ref_stride, \
                             unsigned int *sads) { \
    int i; \
    for (i = 0; i < 4; ++i) \
        sads[i] = vp9_sad##m##x##n##_c(src, src_stride, refs[i], ref_stride); \
}
sadMxN(64, 64) sadMxNxK(64, 64, 3) sadMxNxK(64, 64, 8) sadMxNx4D(64, 64) sadMxN(64, 32) sadMxNx4D(64, 32) sadMxN(32, 64) sadMxNx4D(32, 64) sadMxN(32, 32) sadMxNxK(32, 32, 3) sadMxNxK(32, 32, 8) sadMxNx4D(32, 32) sadMxN(32, 16) sadMxNx4D(32, 16) sadMxN(16, 32) sadMxNx4D(16, 32) sadMxN(16, 16) sadMxNxK(16, 16, 3) sadMxNxK(16, 16, 8) sadMxNx4D(16, 16)
1,019
0
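Each invocation above stamps out one concrete function; sadMxNx4D(16, 16), for instance, expands to roughly the following (sadMxN and sadMxNxK, defined elsewhere in the same file, generate the single-reference and K-candidate variants):

/* Approximate expansion of sadMxNx4D(16, 16). */
void vp9_sad16x16x4d_c(const uint8_t *src, int src_stride,
                       const uint8_t *const refs[], int ref_stride,
                       unsigned int *sads)
{
    int i;
    for (i = 0; i < 4; ++i)
        sads[i] = vp9_sad16x16_c(src, src_stride, refs[i], ref_stride);
}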
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc) { void **lp, **p; p = (void **)l1_phys_map; #if TARGET_PHYS_ADDR_SPACE_BITS > 32 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS) #error unsupported TARGET_PHYS_ADDR_SPACE_BITS #endif lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)); p = *lp; if (!p) { /* allocate if not found */ if (!alloc) return NULL; p = qemu_vmalloc(sizeof(void *) * L1_SIZE); memset(p, 0, sizeof(void *) * L1_SIZE); *lp = p; } #endif lp = p + ((index >> L2_BITS) & (L1_SIZE - 1)); p = *lp; if (!p) { /* allocate if not found */ if (!alloc) return NULL; p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE); memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE); *lp = p; } return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1)); }
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc) { void **lp, **p; p = (void **)l1_phys_map; #if TARGET_PHYS_ADDR_SPACE_BITS > 32 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS) #error unsupported TARGET_PHYS_ADDR_SPACE_BITS #endif lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)); p = *lp; if (!p) { if (!alloc) return NULL; p = qemu_vmalloc(sizeof(void *) * L1_SIZE); memset(p, 0, sizeof(void *) * L1_SIZE); *lp = p; } #endif lp = p + ((index >> L2_BITS) & (L1_SIZE - 1)); p = *lp; if (!p) { if (!alloc) return NULL; p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE); memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE); *lp = p; } return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1)); }
1,020
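phys_page_find_alloc above implements a two-level radix tree over physical page indices: the upper bits select an L1 slot, the low L2_BITS select an entry in the leaf, and missing levels are allocated on demand. A compact sketch of the same lookup-or-allocate shape (hypothetical 10-bit levels and PageDesc type; calloc stands in for qemu_vmalloc + memset):

#include <stdlib.h>

#define L1_BITS 10
#define L2_BITS 10
#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

typedef struct { int dummy; } PageDesc;

static void *l1_table[L1_SIZE];

/* Return the descriptor for page index idx; allocate the missing
 * L2 leaf when alloc is nonzero, otherwise return NULL. */
static PageDesc *page_find_alloc(unsigned long idx, int alloc)
{
    void **lp = &l1_table[(idx >> L2_BITS) & (L1_SIZE - 1)];
    PageDesc *leaf = *lp;

    if (leaf == NULL) {
        if (!alloc)
            return NULL;
        leaf = calloc(L2_SIZE, sizeof(*leaf));
        if (leaf == NULL)
            return NULL;
        *lp = leaf;
    }
    return &leaf[idx & (L2_SIZE - 1)];
}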